repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cemarchi/biosphere | Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py | 1 | 4546 |
import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
"""
"""
def __init__(self,
intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
get_global_diff_values_action,
get_mirna_gene_target_action):
super().__init__(intermediateRepresentationGenerator)
self.__get_mirna_gene_target_action = get_mirna_gene_target_action
self.__get_global_diff_values_action = get_global_diff_values_action
def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
mirna_gene_targets = {mirna.lower(): g for mirna, g in
self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
for id_entrez in id_entrez_list]))
measure_matrix = dict([(g, []) for g in id_entrez_list])
key_func = lambda gene: gene[0]
for patient_id, exp_values in mirna_samples.items():
gene_values = [(id_entrez,
exp_value) for mirna_symbol, exp_value in exp_values.items()
for id_entrez in mirna_gene_targets[mirna_symbol]]
gene_values = sorted(gene_values, key=key_func)
for id_entrez, measures in groupby(gene_values, key_func):
measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
return gene_matrix, \
self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
def __get_mirna_gene_targets(self, mirnas):
gene_targets = {}
fe_target = self.__get_mirna_gene_target_action(mirnas)
gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
if t.microrna_symbol in gene_targets
else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
return gene_targets
def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
return from_sample_matrix.to_dict(orient="index")
def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
if diff.element_id in mirna_gene_targets]
genes_status = [(g, diff.status) for diff in diff_mirna
for g in mirna_gene_targets[diff.element_id] if g in genes]
key_func = lambda gene: gene[0]
genes_status = sorted(genes_status, key=key_func)
genes_status_dict = {}
for id_entrez, status in groupby(genes_status, key_func):
status = list(status)
status_counter = Counter(status)
status = [k for k, v in status_counter.most_common()]
len_status = len(status) - 1
genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
        return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()])
| bsd-3-clause | 261,306,271,858,045,380 | 49.522222 | 147 | 0.653322 | false | 3.625199 | false | false | false
aplanas/kmanga | kmanga/core/models.py | 1 | 21424 |
import os.path
from django.conf import settings
from django.db import connection
from django.db import models
from django.db.models import Count
from django.db.models import F
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
class TimeStampedModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Source(TimeStampedModel):
name = models.CharField(max_length=200)
spider = models.CharField(max_length=80)
url = models.URLField(unique=True)
has_footer = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
def __str__(self):
return self.name
class SourceLanguage(TimeStampedModel):
GERMAN = 'DE'
ENGLISH = 'EN'
SPANISH = 'ES'
FRENCH = 'FR'
ITALIAN = 'IT'
RUSSIAN = 'RU'
PORTUGUESE = 'PT'
LANGUAGE_CHOICES = (
(ENGLISH, 'English'),
(SPANISH, 'Spanish'),
(GERMAN, 'German'),
(FRENCH, 'French'),
(ITALIAN, 'Italian'),
(RUSSIAN, 'Russian'),
(PORTUGUESE, 'Portuguese'),
)
language = models.CharField(max_length=2, choices=LANGUAGE_CHOICES)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
def __str__(self):
return '%s (%s)' % (self.get_language_display(), self.language)
class ConsolidateGenre(TimeStampedModel):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Genre(TimeStampedModel):
name = models.CharField(max_length=200)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
# consolidategenre = models.ForeignKey(ConsolidateGenre,
# on_delete=models.CASCADE)
def __str__(self):
return self.name
class AdvRawQuerySet(models.query.RawQuerySet):
"""RawQuerySet subclass with advanced options."""
def __init__(self, raw_query, paged_query, count_query,
model=None, query=None, params=None,
translations=None, using=None, hints=None):
super(AdvRawQuerySet, self).__init__(raw_query, model=model,
query=query,
params=params,
translations=translations,
using=using, hints=hints)
self.raw_query = raw_query
self.paged_query = paged_query
self.count_query = count_query
def __getitem__(self, key):
if isinstance(key, slice):
start, stop = key.start, key.stop
else:
start, stop = key, key + 1
if self.params:
params = self.params + [stop-start, start]
else:
params = (stop-start, start)
return models.query.RawQuerySet(self.paged_query,
model=self.model,
params=params,
translations=self.translations,
using=self._db,
hints=self._hints)
def __len__(self):
cursor = connection.cursor()
cursor.execute(self.count_query, self.params)
return cursor.fetchone()[0]
class MangaQuerySet(models.QuerySet):
def latests(self):
"""Return the lastest mangas with new/updated issues."""
# The correct annotation expression is the next one, but due
# to an error in Django ORM, this empression uses a full GROUP
# BY with the data fields. This produce a slow query.
#
# return self.annotate(
# models.Max('issue__modified')
# ).order_by('-issue__modified__max')
#
# Alternative (without deferreds)
#
# extra_query = '''
# SELECT MAX(core_issue.modified)
# FROM core_issue
# WHERE core_issue.manga_id = core_manga.id
# '''
# Manga.objects.extra({
# 'issue__modified__max': extra_query
# }).order_by('-issue__modified__max')
raw_query = '''
SELECT core_manga.id,
MAX(core_issue.modified) AS issue__modified__max
FROM core_manga
LEFT OUTER JOIN core_issue
ON (core_manga.id = core_issue.manga_id)
GROUP BY core_manga.id
ORDER BY issue__modified__max DESC NULLS LAST,
core_manga.name ASC,
core_manga.url ASC;
'''
paged_query = '''
SELECT core_manga.id,
MAX(core_issue.modified) AS issue__modified__max
FROM core_manga
LEFT OUTER JOIN core_issue
ON (core_manga.id = core_issue.manga_id)
GROUP BY core_manga.id
ORDER BY issue__modified__max DESC NULLS LAST,
core_manga.name ASC,
core_manga.url ASC
LIMIT %s
OFFSET %s;
'''
count_query = '''
SELECT COUNT(*)
FROM core_manga;
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
using=self.db)
def _to_tsquery(self, q):
"""Convert a query to a PostgreSQL tsquery."""
# Remove special chars (except parens)
q = ''.join(c if c.isalnum() or c in '()' else ' ' for c in q)
        # Separate parentheses from words
for token in ('(', ')'):
q = q.replace(token, ' %s ' % token)
# Parse the query
op = {
'and': '&',
'or': '|',
'not': '-',
'(': '(',
')': ')',
}
# Join operators
j = '&|'
        # Operators that require an implicit '&' join before them
ops_j = '-('
tsquery = []
for token in q.split():
if token in op:
if tsquery and op[token] in ops_j and tsquery[-1] not in j:
tsquery.append(op['and'])
tsquery.append(op[token])
else:
if tsquery and tsquery[-1] not in (j + ops_j):
tsquery.append(op['and'])
tsquery.append('%s:*' % token)
# Add spaces between join operators
tsquery = [(t if t not in j else ' %s ' % t) for t in tsquery]
return ''.join(tsquery)
def is_valid(self, q):
"""Check is the query is a valid query."""
q = self._to_tsquery(q)
        # Separate parentheses from words
for token in ('(', ')'):
q = q.replace(token, ' %s ' % token)
s = []
for token in q.split():
if token == '(':
s.append(token)
elif token == ')':
try:
t = s.pop()
except IndexError:
return False
if t != '(':
return False
return not len(s)
def search(self, q):
q = self._to_tsquery(q)
raw_query = '''
SELECT core_manga.*
FROM (
SELECT id
FROM core_manga_fts_view,
to_tsquery(%s) AS q
WHERE document @@ q
ORDER BY ts_rank(document, q) DESC,
name ASC,
url ASC
) AS ids
INNER JOIN core_manga ON core_manga.id = ids.id;
'''
paged_query = '''
SELECT core_manga.*
FROM (
SELECT id
FROM core_manga_fts_view,
to_tsquery(%s) AS q
WHERE document @@ q
ORDER BY ts_rank(document, q) DESC,
name ASC,
url ASC
LIMIT %s
OFFSET %s
) AS ids
INNER JOIN core_manga ON core_manga.id = ids.id;
'''
count_query = '''
SELECT COUNT(*)
FROM core_manga_fts_view
WHERE document @@ to_tsquery(%s);
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
params=[q],
using=self.db)
def refresh(self):
cursor = connection.cursor()
cursor.execute('REFRESH MATERIALIZED VIEW core_manga_fts_view;')
def _cover_path(instance, filename):
return os.path.join(instance.source.spider, filename)
class Manga(TimeStampedModel):
LEFT_TO_RIGHT = 'LR'
RIGHT_TO_LEFT = 'RL'
READING_DIRECTION = (
(LEFT_TO_RIGHT, 'Left-to-right'),
(RIGHT_TO_LEFT, 'Right-to-left'),
)
ONGOING = 'O'
COMPLETED = 'C'
STATUS = (
(ONGOING, 'Ongoing'),
(COMPLETED, 'Completed'),
)
ASC = 'ASC'
DESC = 'DESC'
RANK_ORDER = (
(ASC, 'Ascending'),
(DESC, 'Descending'),
)
name = models.CharField(max_length=200, db_index=True)
# slug = models.SlugField(max_length=200)
# release = models.DateField()
author = models.CharField(max_length=200)
artist = models.CharField(max_length=200)
reading_direction = models.CharField(max_length=2,
choices=READING_DIRECTION,
default=RIGHT_TO_LEFT)
status = models.CharField(max_length=1,
choices=STATUS,
default=ONGOING)
genres = models.ManyToManyField(Genre)
rank = models.FloatField(null=True, blank=True)
rank_order = models.CharField(max_length=4,
choices=RANK_ORDER,
default=ASC)
description = models.TextField()
cover = models.ImageField(upload_to=_cover_path)
url = models.URLField(unique=True, db_index=True)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
objects = MangaQuerySet.as_manager()
def __str__(self):
return self.name
def subscribe(self, user, language=None, issues_per_day=4, paused=False):
"""Subscribe an User to the current manga."""
language = language if language else user.userprofile.language
obj, created = Subscription.all_objects.update_or_create(
manga=self,
user=user,
defaults={
'language': language,
'issues_per_day': issues_per_day,
'paused': paused,
'deleted': False,
})
return obj
def is_subscribed(self, user):
"""Check if an user is subscribed to this manga."""
return self.subscription(user).exists()
def subscription(self, user):
"""Return the users' subscription of this manga."""
return self.subscription_set.filter(user=user)
def languages(self):
"""Return the number of issues per language."""
return self.issue_set\
.values('language')\
.order_by('language')\
.annotate(Count('language'))
class AltName(TimeStampedModel):
name = models.CharField(max_length=200)
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Issue(TimeStampedModel):
name = models.CharField(max_length=200)
number = models.CharField(max_length=10)
order = models.IntegerField()
language = models.CharField(max_length=2,
choices=SourceLanguage.LANGUAGE_CHOICES)
release = models.DateField()
url = models.URLField(unique=True, max_length=255)
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
class Meta:
ordering = ('order', 'name')
def __str__(self):
return self.name
def is_sent(self, user):
"""Check if an user has received this issue."""
return self.result(user, status=Result.SENT).exists()
def create_result_if_needed(self, user, status, set_send_date=True):
"""Create `Result` if is new with a status."""
defaults = {'status': status}
if set_send_date:
defaults['send_date'] = timezone.now()
subscription = Subscription.objects.get(
manga=self.manga, user=user)
result, _ = Result.objects.update_or_create(
issue=self,
subscription=subscription,
defaults=defaults)
return result
def result(self, user, status=None):
"""Return the Result for an user for this issue."""
# XXX TODO - Avoid filtering by subscription__deleted using
# the Subscription manager.
query = self.result_set.filter(
subscription__user=user,
subscription__deleted=False)
if status:
query = query.filter(status=status)
return query
def retry_if_failed(self, user):
"""Increment the retry field of `Result` if status is FAIL."""
self.result(user, status=Result.FAILED).update(retry=F('retry') + 1)
class SubscriptionQuerySet(models.QuerySet):
def latests(self, user):
"""Return the latests subscriptions with changes in Result."""
# See the notes from `MangaQuerySet.latests()`
raw_query = '''
SELECT core_subscription.id,
MAX(core_result.modified) AS result__modified__max
FROM core_subscription
LEFT OUTER JOIN core_result
ON (core_subscription.id = core_result.subscription_id)
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s
GROUP BY core_subscription.id
ORDER BY result__modified__max DESC NULLS LAST,
core_subscription.id ASC;
'''
paged_query = '''
SELECT core_subscription.id,
MAX(core_result.modified) AS result__modified__max
FROM core_subscription
LEFT OUTER JOIN core_result
ON (core_subscription.id = core_result.subscription_id)
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s
GROUP BY core_subscription.id
ORDER BY result__modified__max DESC NULLS LAST,
core_subscription.id ASC
LIMIT %s
OFFSET %s;
'''
count_query = '''
SELECT COUNT(*)
FROM core_subscription
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s;
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
params=[user.id],
using=self.db)
class SubscriptionManager(models.Manager):
def get_queryset(self):
"""Exclude deleted subscriptions."""
return super(SubscriptionManager,
self).get_queryset().exclude(deleted=True)
class SubscriptionActiveManager(models.Manager):
def get_queryset(self):
"""Exclude paused and deleted subscriptions."""
return super(SubscriptionActiveManager,
self).get_queryset().exclude(
Q(paused=True) | Q(deleted=True))
class Subscription(TimeStampedModel):
# Number of retries before giving up in a FAILED result
RETRY = 3
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
language = models.CharField(max_length=2,
choices=SourceLanguage.LANGUAGE_CHOICES)
issues_per_day = models.IntegerField(default=4)
paused = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = SubscriptionManager.from_queryset(SubscriptionQuerySet)()
actives = SubscriptionActiveManager.from_queryset(SubscriptionQuerySet)()
all_objects = models.Manager()
class Meta:
unique_together = ('manga', 'user')
def __str__(self):
return '%s (%d per day)' % (self.manga, self.issues_per_day)
def issues(self):
"""Return the list of issues in the language of the Subscription."""
return self.manga.issue_set.filter(language=self.language)
def issues_to_send(self, retry=None):
"""Return the list of issues to send, ordered by number."""
if not retry:
retry = Subscription.RETRY
already_sent = Result.objects.processed_last_24hs(self.user,
subscription=self)
remains = max(0, self.issues_per_day-already_sent)
return self.manga.issue_set.filter(
language=self.language
).exclude(
pk__in=self.result_set.filter(
Q(status__in=(Result.PROCESSING, Result.SENT)) |
(Q(status=Result.FAILED) & Q(retry__gt=retry))
).values('issue__id')
).order_by('order')[:remains]
def issues_to_retry(self, retry=None):
"""Return the list of issues to retry, ordered by number."""
# This method doesn't take care about the limits of the user
if not retry:
retry = Subscription.RETRY
return self.manga.issue_set.filter(
language=self.language,
result__subscription=self,
result__status=Result.FAILED,
result__retry__lte=retry
).order_by('order')
def add_sent(self, issue):
"""Add or update a Result to a Subscription."""
# XXX TODO - add_sent is deprecated, use
# Issue.create_result_if_needed, or extend the features inside
# Subscription.
return Result.objects.update_or_create(
issue=issue,
subscription=self,
defaults={
'status': Result.SENT,
'send_date': timezone.now(),
})
def latest_issues(self):
"""Return the list of issues ordered by modified result."""
return self.issues().filter(
result__subscription=self
).annotate(
models.Max('result__modified')
).order_by('-result__modified')
class ResultQuerySet(models.QuerySet):
TIME_DELTA = 2
def latests(self, status=None):
query = self
if status:
query = query.filter(status=status)
return query.order_by('-modified')
def _processed_last_24hs(self, user, subscription=None):
"""Return the list of `Result` processed during the last 24 hours."""
today = timezone.now()
yesterday = today - timezone.timedelta(days=1)
# XXX TODO - Objects are created / modified always after time
# T. If the send process is slow, the error margin can be
# bigger than the one used here.
yesterday += timezone.timedelta(hours=ResultQuerySet.TIME_DELTA)
query = self.filter(
subscription__user=user,
send_date__range=[yesterday, today],
)
if subscription:
query = query.filter(subscription=subscription)
return query
def processed_last_24hs(self, user, subscription=None):
"""Return the number of `Result` processed during the last 24 hours."""
return self._processed_last_24hs(user, subscription).count()
def pending(self):
return self.latests(status=Result.PENDING)
def processing(self):
return self.latests(status=Result.PROCESSING)
def sent(self):
return self.latests(status=Result.SENT)
def failed(self):
return self.latests(status=Result.FAILED)
class Result(TimeStampedModel):
PENDING = 'PE'
PROCESSING = 'PR'
SENT = 'SE'
FAILED = 'FA'
STATUS_CHOICES = (
(PENDING, 'Pending'),
(PROCESSING, 'Processing'),
(SENT, 'Sent'),
(FAILED, 'Failed'),
)
issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
subscription = models.ForeignKey(Subscription, on_delete=models.CASCADE)
status = models.CharField(max_length=2, choices=STATUS_CHOICES,
default=PENDING)
missing_pages = models.IntegerField(default=0)
send_date = models.DateTimeField(null=True, blank=True)
retry = models.IntegerField(default=0)
objects = ResultQuerySet.as_manager()
class Meta:
unique_together = ('issue', 'subscription')
def __str__(self):
return '%s (%s)' % (self.issue, self.get_status_display())
def get_absolute_url(self):
return reverse('result-detail', kwargs={'pk': self.pk})
def set_status(self, status):
self.status = status
# If the result is marked as FAILED, unset the `send_date`.
# In this way, if the result is moved to PENDING is not
# counted as SENT. Also if is not moved, the user can have
# one more issue for this day.
if status == Result.FAILED:
self.send_date = None
self.save()
def is_pending(self):
return self.status == Result.PENDING
def is_processing(self):
return self.status == Result.PROCESSING
def is_sent(self):
return self.status == Result.SENT
def is_failed(self):
return self.status == Result.FAILED
| gpl-3.0 | -7,570,314,442,123,775,000 | 32.632653 | 80 | 0.56334 | false | 4.111303 | true | false | false |
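The query helpers in kmanga/core/models.py above (latests, search, _to_tsquery) are easiest to see from the calling side. The sketch below is illustrative only and is not part of the kmanga source: it assumes a configured Django project with these models in an app labelled core, a PostgreSQL database, and the core_manga_fts_view materialized view already created.

# Illustrative sketch only; "core.models" is an assumed import path.
from core.models import Manga

query = "naruto or bleach"            # rewritten by _to_tsquery to "naruto:* | bleach:*"
if Manga.objects.is_valid(query):
    first_page = list(Manga.objects.search(query)[0:20])    # paged raw full-text query
recently_updated = list(Manga.objects.latests()[0:10])      # mangas with the newest issues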
HingeChat/HingeChat | src/hingechat/qt/qChatWidget.py | 1 | 8829 |
import re
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QSplitter
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QWidget
from src.hingechat.qt import qtUtils
from src.hinge.utils import *
class QChatWidget(QWidget):
def __init__(self, chat_window, nick, parent=None):
QWidget.__init__(self, parent)
self.chat_window = chat_window
self.nick = nick
self.disabled = False
self.cleared = False
self.url_regex = re.compile(URL_REGEX)
self.chat_log = QTextBrowser()
self.chat_log.setOpenExternalLinks(True)
self.chat_input = QTextEdit()
self.chat_input.textChanged.connect(self.chatInputTextChanged)
self.send_button = QPushButton("Send")
self.send_button.clicked.connect(self.sendMessage)
# Set the min height for the chatlog and a matching fixed height for the send button
chat_input_font_metrics = QFontMetrics(self.chat_input.font())
self.chat_input.setMinimumHeight(chat_input_font_metrics.lineSpacing() * 3)
self.send_button.setFixedHeight(chat_input_font_metrics.lineSpacing() * 3)
hbox = QHBoxLayout()
hbox.addWidget(self.chat_input)
hbox.addWidget(self.send_button)
# Put the chatinput and send button in a wrapper widget so they may be added to the splitter
chat_input_wrapper = QWidget()
chat_input_wrapper.setLayout(hbox)
        chat_input_wrapper.setMinimumHeight(int(chat_input_font_metrics.lineSpacing() * 3.7))
# Put the chat log and chat input into a splitter so the user can resize them at will
splitter = QSplitter(Qt.Vertical)
splitter.addWidget(self.chat_log)
splitter.addWidget(chat_input_wrapper)
splitter.setSizes([int(parent.height()), 1])
hbox = QHBoxLayout()
hbox.addWidget(splitter)
self.setLayout(hbox)
self.typing_timer = QTimer()
self.typing_timer.setSingleShot(True)
self.typing_timer.timeout.connect(self.stoppedTyping)
def setRemoteNick(self, nick):
self.nick = nick
def chatInputTextChanged(self):
# Check if the text changed was the text box being cleared to avoid sending an invalid typing status
if self.cleared:
self.cleared = False
return
if str(self.chat_input.toPlainText())[-1:] == '\n':
self.sendMessage()
else:
# Start a timer to check for the user stopping typing
self.typing_timer.start(TYPING_TIMEOUT)
self.sendTypingStatus(TYPING_START)
def stoppedTyping(self):
self.typing_timer.stop()
if str(self.chat_input.toPlainText()) == '':
self.sendTypingStatus(TYPING_STOP_WITHOUT_TEXT)
else:
self.sendTypingStatus(TYPING_STOP_WITH_TEXT)
def sendMessage(self):
if self.disabled:
return
else:
pass
self.typing_timer.stop()
text = str(self.chat_input.toPlainText())[:-1]
# Don't send empty messages
if text == '':
return
# Convert URLs into clickable links
text = self.__linkify(text)
# Add the message to the message queue to be sent
self.chat_window.client.getSession(self.remote_id).sendChatMessage(text)
# Clear the chat input
        self.cleared = True
self.chat_input.clear()
self.appendMessage(text, MSG_SENDER)
def sendTypingStatus(self, status):
self.chat_window.client.getSession(self.remote_id).sendTypingMessage(status)
def showNowChattingMessage(self, nick):
self.nick = nick
self.remote_id = self.chat_window.client.getClientId(self.nick)
self.appendMessage("You are now securely chatting with " + self.nick + " :)",
MSG_SERVICE, show_timestamp_and_nick=False)
self.appendMessage("It's a good idea to verify the communcation is secure by selecting "
"\"authenticate buddy\" in the options menu.", MSG_SERVICE, show_timestamp_and_nick=False)
self.addNickButton = QPushButton('Add', self)
self.addNickButton.setGeometry(584, 8, 31, 23)
self.addNickButton.clicked.connect(self.addNickScreen)
self.addNickButton.show()
def addUser(self, user):
nick = str(user.text()).lower()
# Validate the given nick
        nickStatus = isValidNick(nick)
        if nickStatus == VALID_NICK:
            # TODO: Group chats
            pass
        elif nickStatus == INVALID_NICK_CONTENT:
            QMessageBox.warning(self, TITLE_INVALID_NICK, INVALID_NICK_CONTENT)
        elif nickStatus == INVALID_NICK_LENGTH:
            QMessageBox.warning(self, TITLE_INVALID_NICK, INVALID_NICK_LENGTH)
        elif nickStatus == INVALID_EMPTY_NICK:
            QMessageBox.warning(self, TITLE_EMPTY_NICK, EMPTY_NICK)
def addNickScreen(self):
self.chat_log.setEnabled(False)
self.chat_input.setEnabled(False)
self.send_button.setEnabled(False)
self.addNickButton.hide()
self.addUserText = QLabel("Enter a username to add a user to the group chat.", self)
self.addUserText.setGeometry(200, 20, 300, 100)
self.addUserText.show()
self.user = QLineEdit(self)
self.user.setGeometry(200, 120, 240, 20)
        self.user.returnPressed.connect(lambda: self.addUser(self.user))
self.user.show()
self.addUserButton = QPushButton('Add User', self)
self.addUserButton.setGeometry(250, 150, 150, 25)
self.addUserButton.clicked.connect(lambda: self.addUser(self.user))
self.addUserButton.show()
self.cancel = QPushButton('Cancel', self)
self.cancel.setGeometry(298, 210, 51, 23)
self.cancel.clicked.connect(lambda: self.chat_log.setEnabled(True))
self.cancel.clicked.connect(lambda: self.chat_input.setEnabled(True))
self.cancel.clicked.connect(lambda: self.send_button.setEnabled(True))
self.cancel.clicked.connect(self.addUserText.hide)
self.cancel.clicked.connect(self.user.hide)
self.cancel.clicked.connect(self.addUserButton.hide)
self.cancel.clicked.connect(self.addNickButton.show)
self.cancel.clicked.connect(self.cancel.hide)
self.cancel.show()
def appendMessage(self, message, source, show_timestamp_and_nick=True):
color = self.__getColor(source)
if show_timestamp_and_nick:
timestamp = '<font color="' + color + '">(' + getTimestamp() + ') <strong>' + \
(self.chat_window.client.nick if source == MSG_SENDER else self.nick) + \
':</strong></font> '
else:
timestamp = ''
# If the user has scrolled up (current value != maximum), do not move the scrollbar
# to the bottom after appending the message
shouldScroll = True
scrollbar = self.chat_log.verticalScrollBar()
        if scrollbar.value() != scrollbar.maximum() and source != MSG_SENDER:
shouldScroll = False
self.chat_log.append(timestamp + message)
# Move the vertical scrollbar to the bottom of the chat log
if shouldScroll:
scrollbar.setValue(scrollbar.maximum())
def __linkify(self, text):
matches = self.url_regex.findall(text)
for match in matches:
text = text.replace(match[0], '<a href="%s">%s</a>' % (match[0], match[0]))
return text
def __getColor(self, source):
if source == MSG_SENDER:
if qtUtils.is_light_theme:
return '#0000CC'
else:
return '#6666FF'
elif source == MSG_RECEIVER:
if qtUtils.is_light_theme:
return '#CC0000'
else:
return '#CC3333'
else:
if qtUtils.is_light_theme:
return '#000000'
else:
return '#FFFFFF'
def disable(self):
self.disabled = True
self.chat_input.setReadOnly(True)
def enable(self):
self.disabled = False
self.chat_input.setReadOnly(False)
| lgpl-3.0 | 7,082,114,969,052,042,000 | 36.220779 | 117 | 0.619209 | false | 3.998641 | false | false | false |
JiahuiZHONG/Internship_Thread | tests/scripts/thread-cert/Cert_5_6_08_ContextManagement.py | 1 | 4303 |
#!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_6_8_ContextManagement(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,4):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_context_reuse_delay(10)
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for node in self.nodes.itervalues():
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
time.sleep(3)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
time.sleep(3)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[ROUTER].add_prefix('2001::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(2)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
self.nodes[ROUTER].remove_prefix('2001::/64')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
self.nodes[ROUTER].add_prefix('2002::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
time.sleep(5)
self.nodes[ROUTER].add_prefix('2003::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -5,238,427,176,629,124,000 | 35.159664 | 78 | 0.647223 | false | 3.709483 | false | false | false |
scherroman/mugen | mugen/location_utility.py | 1 | 2143 |
from typing import List, Tuple
"""
Module for Location & Interval manipulation
"""
def intervals_from_locations(locations: List[float]) -> List[float]:
intervals = []
previous_location = None
for index, location in enumerate(locations):
if index == 0:
intervals.append(location)
else:
intervals.append(location - previous_location)
previous_location = location
return intervals
def locations_from_intervals(intervals: List[float]) -> List[float]:
locations = []
running_duration = 0
for index, interval in enumerate(intervals):
if index < len(intervals):
running_duration += interval
locations.append(running_duration)
return locations
def start_end_locations_from_locations(locations: List[float]) -> Tuple[List[float], List[float]]:
"""
Calculates the start and end times of each location
Ex) 5, 10, 15
start_times == 5, 10, 15
end_times == 10, 15, 15
Returns
-------
A tuple of start and end times
"""
start_locations = []
end_locations = []
for index, location in enumerate(locations):
start_time = location
if index == len(locations) - 1:
end_time = location
else:
end_time = locations[index + 1]
start_locations.append(start_time)
end_locations.append(end_time)
return start_locations, end_locations
def start_end_locations_from_intervals(intervals: List[float]) -> Tuple[List[float], List[float]]:
"""
Calculates the start and end times of each interval
Ex) 5, 10, 15
start_times == 0, 5, 10
end_times == 5, 10, 15
Returns
-------
A tuple of start and end times
"""
start_locations = []
end_locations = []
running_duration = 0
for index, duration in enumerate(intervals):
start_time = running_duration
end_time = start_time + duration
start_locations.append(start_time)
end_locations.append(end_time)
running_duration += duration
return start_locations, end_locations
| mit | 7,078,705,883,654,456,000 | 23.918605 | 98 | 0.615492 | false | 4.268924 | false | false | false |
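As a quick illustration of how the helpers above fit together, the following sketch (not part of the mugen source) round-trips a list of locations through intervals and back; the import path mirrors the path column of this row.

# Illustrative sketch; assumes the module is importable as mugen.location_utility.
from mugen.location_utility import (
    intervals_from_locations,
    locations_from_intervals,
    start_end_locations_from_intervals,
)

locations = [5.0, 10.0, 15.0]
intervals = intervals_from_locations(locations)          # [5.0, 5.0, 5.0]
assert locations_from_intervals(intervals) == locations
starts, ends = start_end_locations_from_intervals(intervals)
print(starts, ends)                                      # [0, 5.0, 10.0] [5.0, 10.0, 15.0]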
adbrebs/spynet | models/max_pool_3d.py | 1 | 3198 |
from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax
def max_pool_3d(input, ds, ignore_border=False):
"""
Takes as input a N-D tensor, where N >= 3. It downscales the input by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2]) (depth, height, width)
Arguments:
input (N-D theano tensor of input images): input images. Max pooling will be done over the 3 last dimensions.
ds (tuple of length 3): factor by which to downscale. (2,2,2) will halve the video in each dimension.
ignore_border (boolean): When True, (5,5,5) input with ds=(2,2,2)
will generate a (2,2,2) output. (3,3,3) otherwise.
"""
if input.ndim < 3:
raise NotImplementedError('max_pool_3d requires a dimension >= 3')
# extract nr dimensions
vid_dim = input.ndim
# max pool in two different steps, so we can use the 2d implementation of
# downsamplefactormax. First maxpool frames as usual.
# Then maxpool the depth dimension. Shift the depth dimension to the third
# position, so rows and cols are in the back
# extract dimensions
frame_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
frame_shape), 'int32')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of videos in rows and cols
op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
output = op(input_4D)
# restore to original shape
outshape = tensor.join(0, input.shape[:-2], output.shape[-2:])
out = tensor.reshape(output, outshape, ndim=input.ndim)
# now maxpool depth
# output (depth, rows, cols), reshape so that depth is in the back
shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
input_depth = out.dimshuffle(shufl)
# reset dimensions
vid_shape = input_depth.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input_depth.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,width,depth)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
vid_shape), 'int32')
input_4D_depth = tensor.reshape(input_depth, new_shape, ndim=4)
# downsample mini-batch of videos in depth
op = DownsampleFactorMax((1,ds[0]), ignore_border)
outdepth = op(input_4D_depth)
# output
# restore to original shape (xxx, rows, cols, depth)
outshape = tensor.join(0, input_depth.shape[:-2], outdepth.shape[-2:])
shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
    return tensor.reshape(outdepth, outshape, ndim=input.ndim).dimshuffle(shufl)
| bsd-2-clause | 608,790,142,119,456,300 | 42.821918 | 117 | 0.641026 | false | 3.468547 | false | false | false
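To make the two-stage pooling above concrete, here is an illustrative usage sketch. It is not part of the spynet source and assumes a working Theano installation (with the legacy DownsampleFactorMax op) plus that the module is importable as models.max_pool_3d.

# Illustrative only: 5D input (batch, channel, depth, rows, cols) pooled by ds=(2, 2, 2).
import numpy as np
import theano
import theano.tensor as T
from models.max_pool_3d import max_pool_3d

video = T.TensorType('float64', (False,) * 5)('video')
pooled = max_pool_3d(video, ds=(2, 2, 2), ignore_border=True)
pool_fn = theano.function([video], pooled)
out = pool_fn(np.random.rand(2, 1, 4, 6, 8))
print(out.shape)   # (2, 1, 2, 3, 4): rows/cols are pooled first, then depth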
mikedh/trimesh | trimesh/path/exchange/load.py | 1 | 2622 |
import os
from .dxf import _dxf_loaders
from .svg_io import svg_to_path
from ..path import Path
from . import misc
from ... import util
def load_path(file_obj, file_type=None, **kwargs):
"""
    Load a file into a Path object.
    Parameters
    -----------
    file_obj : One of the following:
         - Path, Path2D, or Path3D objects
         - open file object (dxf or svg)
         - file name (dxf or svg)
         - shapely.geometry.Polygon
         - shapely.geometry.MultiLineString
         - dict with kwargs for Path constructor
         - (n,2,(2|3)) float, line segments
    file_type : str
        Type of file is required if a file
        object is passed.
    Returns
    ---------
    path : Path, Path2D, or Path3D object
        Data as a native trimesh Path object
"""
if isinstance(file_obj, Path):
# we have been passed a Path file_object so
# do nothing and return the passed file_object
return file_obj
elif util.is_file(file_obj):
# for open file file_objects use loaders
kwargs.update(path_loaders[file_type](
file_obj, file_type=file_type))
elif util.is_string(file_obj):
        # strings passed are treated as file paths
with open(file_obj, 'rb') as file_file_obj:
# get the file type from the extension
file_type = os.path.splitext(file_obj)[-1][1:].lower()
# call the loader
kwargs.update(path_loaders[file_type](
file_file_obj, file_type=file_type))
elif util.is_instance_named(file_obj, 'Polygon'):
# convert from shapely polygons to Path2D
kwargs.update(misc.polygon_to_path(file_obj))
elif util.is_instance_named(file_obj, 'MultiLineString'):
# convert from shapely LineStrings to Path2D
kwargs.update(misc.linestrings_to_path(file_obj))
elif isinstance(file_obj, dict):
# load as kwargs
from ...exchange.load import load_kwargs
return load_kwargs(file_obj)
elif util.is_sequence(file_obj):
# load as lines in space
kwargs.update(misc.lines_to_path(file_obj))
else:
raise ValueError('Not a supported object type!')
from ...exchange.load import load_kwargs
return load_kwargs(kwargs)
def path_formats():
"""
Get a list of supported path formats.
Returns
------------
loaders : list of str
Extensions of loadable formats, ie:
['svg', 'dxf']
"""
return list(path_loaders.keys())
path_loaders = {'svg': svg_to_path}
path_loaders.update(_dxf_loaders)
| mit | 7,173,151,525,337,611,000 | 29.847059 | 66 | 0.615179 | false | 3.78355 | false | false | false |
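For context, the sketch below shows one way the loader above is reached through trimesh's public API. It is illustrative only and assumes trimesh and shapely are installed; trimesh.load_path is the top-level entry point that dispatches into this module.

# Illustrative only: load a shapely Polygon as a Path2D and list the supported path formats.
import trimesh
from trimesh.path.exchange.load import path_formats
from shapely.geometry import Polygon

square = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
path2d = trimesh.load_path(square)      # dispatches through load_path above
print(type(path2d).__name__)            # Path2D
print(path_formats())                   # e.g. ['svg', 'dxf']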
jakdot/pyactr | tutorials/u7_simplecompilation.py | 1 | 1254 |
"""
Testing a simple case of production compilation. Production compilation also allows for utility learning, which is shown in the model below.
"""
import warnings
import pyactr as actr
class Compilation1(object):
"""
Model testing compilation -- basic cases.
"""
def __init__(self, **kwargs):
actr.chunktype("state", "starting ending")
self.m = actr.ACTRModel(**kwargs)
self.m.goal.add(actr.makechunk(nameofchunk="start", typename="state", starting=1))
self.m.productionstring(name="one", string="""
=g>
isa state
starting =x
ending ~=x
==>
=g>
isa state
ending =x""", utility=2)
self.m.productionstring(name="two", string="""
=g>
isa state
starting =x
ending =x
==>
=g>
isa state
starting =x
ending 4""")
if __name__ == "__main__":
warnings.simplefilter("ignore")
mm = Compilation1(production_compilation=True, utility_learning=True)
model = mm.m
sim = model.simulation(realtime=True)
sim.run(0.5)
print(model.productions["one and two"])
| gpl-3.0 | 8,944,572,004,527,267,000 | 24.591837 | 133 | 0.53429 | false | 4.045161 | false | false | false |
coyotevz/nobix-app | nbs/models/misc.py | 1 | 3865 |
# -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.ext.declarative import declared_attr
from nbs.models import db
class TimestampMixin(object):
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
@staticmethod
def stamp_modified(mapper, connection, target):
if db.object_session(target).is_modified(target):
target.modified = datetime.now()
@classmethod
def __declare_last__(cls):
db.event.listen(cls, 'before_update', cls.stamp_modified)
class RefEntityMixin(object):
@declared_attr
def entity_id(cls):
return db.Column('entity_id', db.Integer, db.ForeignKey('entity.id'),
nullable=False)
@declared_attr
def entity(cls):
name = cls.__name__.lower()
return db.relationship('Entity',
backref=db.backref(name, lazy='joined'),
lazy='joined')
class Address(RefEntityMixin, db.Model):
"""Stores addresses information"""
__tablename__ = 'address'
id = db.Column(db.Integer, primary_key=True)
address_type = db.Column(db.Unicode)
street = db.Column(db.Unicode(128), nullable=False)
city = db.Column(db.Unicode(64))
province = db.Column(db.Unicode(32), nullable=False)
postal_code = db.Column(db.Unicode(32))
    def __str__(self):
retval = self.street
if self.city:
retval += ", {}".format(self.city)
retval += ", {}".format(self.province)
if self.postal_code:
retval += " ({})".format(self.postal_code)
return retval
def __repr__(self):
return "<Address '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class Phone(RefEntityMixin, db.Model):
"""Model to store phone information"""
__tablename__ = 'phone'
id = db.Column(db.Integer, primary_key=True)
phone_type = db.Column(db.Unicode)
prefix = db.Column(db.Unicode(8))
number = db.Column(db.Unicode, nullable=False)
extension = db.Column(db.Unicode(5))
def __str__(self):
retval = self.phone_type+': ' if self.phone_type else ''
if self.prefix:
retval += "({})".format(self.prefix)
retval += self.number
if self.extension:
retval += " ext: {}".format(self.extension)
return retval
def __repr__(self):
return "<Phone '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class Email(RefEntityMixin, db.Model):
"""Model to store email information"""
__tablename__ = 'email'
id = db.Column(db.Integer, primary_key=True)
email_type = db.Column(db.Unicode(50))
email = db.Column(db.Unicode(50), nullable=False)
def __str__(self):
retval = self.email_type + ': ' if self.email_type else ''
retval += self.email
return retval
def __repr__(self):
return "<Email '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class ExtraField(RefEntityMixin, db.Model):
"""Model to store information of additional data"""
__tablename__ = 'extra_field'
id = db.Column(db.Integer, primary_key=True)
field_name = db.Column(db.Unicode(50), nullable=False)
field_value = db.Column(db.Unicode(50), nullable=False)
def __str__(self):
return self.field_name + ': ' + self.field_value
def __repr__(self):
return "<ExtraField '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
| mit | 6,016,156,342,892,989,000 | 28.280303 | 77 | 0.574386 | false | 3.789216 | false | false | false |
pablodiguerero/asterisk.api | migrations/versions/4_add_physical_users_.py | 1 | 1968 |
"""empty message
Revision ID: a374e36d0888
Revises: 4a6559da7594
Create Date: 2017-05-21 22:53:53.490856
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import Session
from models import physical
from models import user
# revision identifiers, used by Alembic.
revision = '4_add_physical_users'
down_revision = '3_modify_user_fields'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('crm_physical',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fam', sa.String(length=255), nullable=True),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('otch', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('crm_users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('login', sa.String(length=255), nullable=False),
sa.Column('password', sa.LargeBinary(), nullable=False),
sa.Column('access_level', sa.Integer(), server_default='10', nullable=False),
sa.Column('is_active', sa.Boolean(), server_default='f', nullable=False),
sa.ForeignKeyConstraint(['id'], ['crm_physical.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('login')
)
op.add_column('crm_users', sa.Column('sip_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'crm_users', 'asterisk_sip_users', ['sip_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
bind = op.get_bind()
session = Session(bind)
    phys = physical.Physical("Administrator")
phys.user = user.User("admin", "admin")
phys.user.access_level = user.User.BOSS
session.add(phys)
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('crm_users')
op.drop_table('crm_physical')
# ### end Alembic commands ###
| mit | 1,240,968,507,135,020,300 | 31.583333 | 107 | 0.670588 | false | 3.347603 | false | false | false |
wesm/ibis | dev/merge-pr.py | 1 | 8184 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to
# Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
from __future__ import print_function
from requests.auth import HTTPBasicAuth
import requests
import os
import subprocess
import sys
import textwrap
from six.moves import input
import six
IBIS_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = 'ibis'
print("IBIS_HOME = " + IBIS_HOME)
# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")
GITHUB_BASE = "https://github.com/cloudera/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/cloudera/" + PROJECT_NAME
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(IBIS_HOME)
auth_required = False
if auth_required:
GITHUB_USERNAME = os.environ['GITHUB_USER']
import getpass
GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
% GITHUB_USERNAME)
def get_json_auth(url):
auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
req = requests.get(url, auth=auth)
return req.json()
get_json = get_json_auth
else:
def get_json_no_auth(url):
req = requests.get(url)
return req.json()
get_json = get_json_no_auth
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % cmd)
print('With output:')
print('--------------')
print(e.output)
print('--------------')
raise e
if isinstance(output, six.binary_type):
output = output.decode('utf-8')
return output
def continue_maybe(prompt):
result = input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num,
pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref,
target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = ("Error merging: %s\nWould you like to "
"manually fix-up this merge?" % e)
continue_maybe(msg)
msg = ("Okay, please fix any conflicts and 'git add' "
"conflicting files... Finished?")
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x),
reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = ("This patch had conflicts when merged, "
"resolved by\nCommitter: %s <%s>" %
(committer_name, committer_email))
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close
# the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:"
% (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit',
'--no-verify', # do not run commit hooks
'--author="%s"' % primary_author] +
merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name,
target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"),
[x['name'] for x in branches])
pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print("Pull request {0} has already been merged, assuming "
"you want to backport".format(pr_num))
merge_commit_desc = run_cmd([
'git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #{0}"
", you may need to update HEAD.".format(pr_num))
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print("Found: %s" % message)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = ("Pull request {0} is not mergeable in its current form.\n"
"Continue? (experts only!)".format(pr_num))
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
| apache-2.0 | -6,758,902,495,747,892,000 | 31.86747 | 78 | 0.601417 | false | 3.487005 | false | false | false |
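The merge script above is interactive: it prompts for a pull-request number and squash-merges it. The snippet below is an illustrative way to drive it from Python rather than a shell; the checkout path and remote names are assumptions, not part of the original script.

# Illustrative only; "/path/to/ibis" is a placeholder for a local checkout.
import os
import subprocess

env = dict(os.environ, PR_REMOTE_NAME="upstream", PUSH_REMOTE_NAME="upstream")
subprocess.run(["python", "dev/merge-pr.py"], cwd="/path/to/ibis", env=env, check=True)
# The script then prompts: "Which pull request would you like to merge? (e.g. 34)"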
vinoth3v/In | In/core/valuator.py | 1 | 7873 |
import re
from In.core.object_meta import ObjectMetaBase
class ValuatorContainer(dict):
def __missing__(self, key):
vcls = IN.register.get_class(key, 'Valuator')
obj = vcls()
self[key] = obj
return obj
class ValuatorEngine:
'''Valuator class that valuate values based on validation rules.
Instance available as IN.valuator
'''
# dict of all Valuator instances
valuators = ValuatorContainer()
def validate(self, value, rule): # rule is ['type', args] or [[], [], []]
'''
#TODO: allow per false error message
rule = [
'And', [
['Length', '>', 6, 'The value length should be greater than 6.'],
['Not', [['Num']],
['Or', [
['Email', 'Invalid email address.'],
['Domain'],
['Url', 'Invalid Url.'],
]],
]],
]
'''
if not rule: # empty list
return [True]
try:
firstitem = rule[0]
item_type = type(firstitem)
if item_type is str: # ['type', args]
args = rule[1:]
result = self.valuators[firstitem].validate(value, *args)
if not result[0]:
#return [False, args[-1]] # last item is error message
return result
elif item_type is list: # [[], [], []]
for subrule in rule:
result = self.validate(value, subrule) # recursive
if not result[0]:
return result
except Exception as e:
IN.logger.debug()
return [False, str(e)]
return [True]
def __getattr__(self, key):
self.key = self.valuators[key]
return self.key
class ValuatorMeta(ObjectMetaBase):
__class_type_base_name__ = 'ValuatorBase'
__class_type_name__ = 'Valuator'
class ValuatorBase(dict, metaclass = ValuatorMeta):
'''Base class of all IN ValuatorBase.
'''
__allowed_children__ = None
__default_child__ = None
ops = {
'=' : lambda l, al, ml: l == al,
'==' : lambda l, al, ml: l == al,
'!=' : lambda l, al, ml: l != al,
'>' : lambda l, al, ml: l > al,
'<' : lambda l, al, ml: l < al,
'>=' : lambda l, al, ml: l >= al,
'<=' : lambda l, al, ml: l <= al,
'<>' : lambda l, al, ml: al < l > ml,
'><' : lambda l, al, ml: al > l < ml,
}
def validate(self, value):
'''return value should be a list like [False, 'Error message.'] or [True]
'''
return [True]
@IN.register('Valuator', type = 'Valuator')
class Valuator(ValuatorBase):
'''Base class of all IN ValuatorBase.
'''
pass
class And(Valuator):
pass
class Or(Valuator):
pass
class Not(Valuator):
def validate(self, value, rule, message = ''):
'''not validator'''
result = IN.valuator.validate(value, rule[0])
not_result = not result[0]
return [not_result, message]
class Empty(Valuator):
def validate(self, value, message = ''):
# returning value itself makes it evaluates again
return [False, message] if value else [True]
class NotEmpty(Valuator):
def validate(self, value, message = ''):
# returning value itself makes it evaluates again
return [False, message] if not value else [True]
class Length(Valuator):
def validate(self, value, length = 0, op = '=', mlength = 0, message = ''):
try:
# does multiple ifs are good?
result = self.ops[op](len(value), length, mlength)
result = [result or False, message]
return result
except KeyError:
IN.logger.debug()
return [False, message] # always false
class Equal(Valuator):
def validate(self, value, tvalue, op = '=', mvalue = 0, message = ''):
try:
# does multiple ifs are good?
result = self.ops[op](value, tvalue, mvalue)
result = [result or False, message]
return result
except KeyError:
IN.logger.debug()
return [False, message] # always false
class Regx(Valuator):
'''Valuator rule class that using regex'''
re_compiled = {} # we dont want to compile again
def get_regx(self, regx):
try:
return self.re_compiled[regx]
except KeyError:
self.re_compiled[regx] = re.compile(regx)
return self.re_compiled[regx]
def validate(self, value, regx, message = ''):
result = self.get_regx(regx).match(value)
return [result, message]
class Domain(Regx):
regex_host = r'(?:(?:[a-zA-Z0-9][a-zA-Z0-9\-]*)?[a-zA-Z0-9])'
def validate(self, domain, message = ''):
false_message = [False, message]
dlen = len(domain)
if dlen < 4 or dlen > 255 or domain.endswith('.') or '.' not in domain:
return false_message
try:
domain = domain.encode('idna').decode('ascii')
except Exception:
return false_message
try:
domain.encode('ascii').decode('idna')
except Exception:
return false_message
reg = self.regex_host + r'(?:\.' + self.regex_host + r')*'
m = re.match(reg + "$", domain)
if not m:
return false_message
return [True]
class Email(Regx):
regex = re.compile(r'^[A-Za-z0-9\.\+_-]')
atext = r'a-zA-Z0-9_\.\-' # !#\$%&\'\*\+/=\?\^`\{\|\}~
atext_utf8 = atext + r"\u0080-\U0010FFFF"
regex_local = re.compile(''.join(('[', atext, ']+(?:\\.[', atext, ']+)*$')))
regex_local_utf8 = re.compile(''.join(('[', atext_utf8, ']+(?:\\.[', atext_utf8, ']+)*$')))
def validate(self, value, message = ''):
parts = value.split('@')
if len(parts) != 2:
return [False, message]
local = self.validate_local(parts[0])
if not local:
return [False, message]
# check domain part
domain_result = IN.valuator.validate(parts[1], ['Domain', message])
if not domain_result[0]:
return domain_result
return [True] # valid
def validate_local(self, local):
# check nabar name part
if not local or len(local) > 64 or '..' in local:
return False
m = re.match(self.regex_local, local) # ASCII
if m: # True
return True
else:
# unicode
m = re.match(self.regex_local_utf8, local)
if m:
return True
else:
return False
class Url(Regx):
def validate(self, value, message = ''):
		return [True]  # stub: not implemented yet, always passes
class Alpha(Valuator):
def validate(self, value, message = ''):
return [str.isalpha(value), message]
class AlphaNum(Valuator):
def validate(self, value, message = ''):
return [str.isalnum(value), message]
class Digit(Valuator):
def validate(self, value, message = ''):
return [str.isdigit(value), message]
class Decimal(Valuator):
def validate(self, value, message = ''):
return [str.isdecimal(value), message]
class Lower(Valuator):
def validate(self, value, message = ''):
return [str.islower(value), message]
class Upper(Valuator):
def validate(self, value, message = ''):
return [str.isupper(value), message]
class Numeric(Valuator):
def validate(self, value, message = ''):
return [str.isnumeric(value), message]
class Space(Valuator):
	'''Check whether the value consists only of whitespace characters.'''
def validate(self, value, message = ''):
return [str.isspace(value), message]
class Startswith(Valuator):
def validate(self, value, start, message = ''):
return [str(value).startswith(start), message]
class Endswith(Valuator):
def validate(self, value, start, message = ''):
return [str(value).endswith(start), message]
class In(Valuator):
def validate(self, value, itr, message = ''):
return [value in itr, message]
class INPath(Valuator):
'''Check whether this string is a valid IN route.'''
def validate(self, value, message = ''):
		return [True]  # stub: not implemented yet, always passes
class NabarRole(Valuator):
'''Check whether nabar has this role.'''
def validate(self, value, message = ''):
		return [True]  # stub: not implemented yet, always passes
class NabarAccess(Valuator):
'''Check whether nabar has this access permissions.'''
def validate(self, value):
		return [True]  # stub: not implemented yet, always passes
class Callback(Valuator):
	'''Call the callback to validate the value.'''
def validate(self, value, message = ''):
		return [True]  # stub: not implemented yet, always passes
#@IN.hook
#def __In_app_init__(app):
### set the valuator
#IN.valuator = ValuatorEngine()
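# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the application wires up the engine as in the commented hook above
# (IN.valuator = ValuatorEngine()), a list-of-rules rule can be checked like:
#
#   rule = [
#       ['Length', 6, '>', 0, 'The value length should be greater than 6.'],
#       ['Email', 'Invalid email address.'],
#   ]
#   result = IN.valuator.validate('nabar@example.com', rule)
#   if not result[0]:
#       print(result[1])  # first error message encountered
#
# Each sub-rule is dispatched to the valuator registered under its first
# element, and evaluation stops at the first failing rule.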
| apache-2.0 | 3,151,906,794,283,461,600 | 23.396774 | 92 | 0.61133 | false | 3.104495 | false | false | false |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/heatmapgl/legendgrouptitle/_font.py | 1 | 8487 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "heatmapgl.legendgrouptitle"
_path_str = "heatmapgl.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.heatmapgl.lege
ndgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
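# --- Illustrative usage sketch (not part of the generated module) ---
# Assuming plotly is installed, the legend group title font can be set by
# constructing this object directly and attaching it to a heatmapgl trace:
#
#   import plotly.graph_objects as go
#   font = go.heatmapgl.legendgrouptitle.Font(family="Arial", size=14, color="black")
#   trace = go.Heatmapgl(z=[[1, 2], [3, 4]],
#                        legendgrouptitle=dict(text="Group", font=font))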
| mit | 5,632,017,995,005,265,000 | 36.387665 | 82 | 0.559797 | false | 4.001414 | false | false | false |
danhooper/sandbox | pinball/attract.py | 1 | 2386 | from procgame import *
class Mode(game.Mode):
def __init__(self, game):
super(Mode, self).__init__(game, 1)
highscore_categories = []
cat = highscore.HighScoreCategory()
cat.game_data_key = "HighScores"
cat.titles = [
"Grand Champion",
"High Score 1",
"High Score 2",
"High Score 3",
"High Score 4"
]
highscore_categories.append(cat)
for category in highscore_categories:
category.load_from_game(game)
frame_proc = dmd.Animation().load('dmd/P-ROC.dmd').frames[0]
layer_proc = dmd.FrameLayer(opaque=True, frame=frame_proc)
layer_th = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("Town Hall")
layer_presents = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("Presents")
layer_name = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("TBD")
layer_high_scores = []
for frame in highscore.generate_highscore_frames(highscore_categories):
layer_high_scores.append(dmd.FrameLayer(opaque=True, frame=frame))
self.layer = dmd.ScriptedLayer(128, 32, [
{ "layer": None, "seconds": 10.0 },
{ "layer": layer_proc, "seconds": 3.0 },
{ "layer": layer_th, "seconds": 3.0 },
{ "layer": layer_presents, "seconds": 3.0 },
{ "layer": layer_name, "seconds": 3.0 },
{ "layer": layer_high_scores[0], "seconds": 3.0 },
{ "layer": layer_high_scores[1], "seconds": 3.0 },
{ "layer": layer_high_scores[2], "seconds": 3.0 },
{ "layer": layer_high_scores[3], "seconds": 3.0 },
{ "layer": layer_high_scores[4], "seconds": 3.0 },
])
def mode_stopped(self):
self.layer.script_index = 0
self.frame_start_time = None
self.is_new_script_item = True
def sw_enter_active(self, sw):
self.game.modes.add(self.game.service_mode)
return True
def sw_exit_active(self, sw):
return True
def sw_startButton_active(self, sw):
self.game.modes.remove(self)
self.game.modes.add(self.game.mode.base)
return True
| mit | -5,484,256,992,289,313,000 | 38.766667 | 79 | 0.536463 | false | 3.433094 | false | false | false |
userzimmermann/robotframework-python3 | src/robot/utils/text.py | 1 | 3235 | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .charwidth import get_char_width
from .misc import seq2str2
from .unic import unic
_MAX_ASSIGN_LENGTH = 200
_MAX_ERROR_LINES = 40
_MAX_ERROR_LINE_LENGTH = 78
_ERROR_CUT_EXPLN = ' [ Message content over the limit has been removed. ]'
def cut_long_message(msg):
lines = msg.splitlines()
lengths = _count_line_lengths(lines)
if sum(lengths) <= _MAX_ERROR_LINES:
return msg
start = _prune_excess_lines(lines, lengths)
end = _prune_excess_lines(lines, lengths, from_end=True)
return '\n'.join(start + [_ERROR_CUT_EXPLN] + end)
def _prune_excess_lines(lines, lengths, from_end=False):
if from_end:
lines.reverse()
lengths.reverse()
ret = []
total = 0
# Use // (explicit int div) for Python 3 compatibility:
limit = _MAX_ERROR_LINES//2
for line, length in zip(lines[:limit], lengths[:limit]):
if total + length >= limit:
ret.append(_cut_long_line(line, total, from_end))
break
total += length
ret.append(line)
if from_end:
ret.reverse()
return ret
def _cut_long_line(line, used, from_end):
# Use // (explicit int div) for Python 3 compatibility:
available_lines = _MAX_ERROR_LINES//2 - used
available_chars = available_lines * _MAX_ERROR_LINE_LENGTH - 3
if len(line) > available_chars:
if not from_end:
line = line[:available_chars] + '...'
else:
line = '...' + line[-available_chars:]
return line
def _count_line_lengths(lines):
return [ _count_virtual_line_length(line) for line in lines ]
def _count_virtual_line_length(line):
if not line:
return 1
lines, remainder = divmod(len(line), _MAX_ERROR_LINE_LENGTH)
return lines if not remainder else lines + 1
def format_assign_message(variable, value, cut_long=True):
value = unic(value) if variable.startswith('$') else seq2str2(value)
if cut_long and len(value) > _MAX_ASSIGN_LENGTH:
value = value[:_MAX_ASSIGN_LENGTH] + '...'
return '%s = %s' % (variable, value)
def get_console_length(text):
return sum(get_char_width(char) for char in text)
def pad_console_length(text, width):
if width < 5:
width = 5
diff = get_console_length(text) - width
if diff > 0:
text = _lose_width(text, diff+3) + '...'
return _pad_width(text, width)
def _pad_width(text, width):
more = width - get_console_length(text)
return text + ' ' * more
def _lose_width(text, diff):
lost = 0
while lost < diff:
lost += get_console_length(text[-1])
text = text[:-1]
return text
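# --- Illustrative usage sketch (not part of the original module) ---
#
#   format_assign_message('${name}', 'value')   # -> '${name} = value'
#   pad_console_length('message', 10)           # -> 'message   ' (padded to width 10)
#   cut_long_message(very_long_msg)             # trims middle lines over the limit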
| apache-2.0 | 7,472,979,966,690,661,000 | 30.715686 | 77 | 0.643586 | false | 3.456197 | false | false | false |
onyxfish/votersdaily_web | api/couchdb/log_views.py | 1 | 2166 | import couchdb
from couchdb.design import ViewDefinition
"""
This module defines a collection of functions which accept a CouchDB database
as an argument, are named with a 'make_views_*' convention, and return a list
of generated CouchDB ViewDefinitions.
The 'syncviews' management command dynamically executes each method to compile
a list of all CouchDB views.
"""
def make_views_all_documents(event_db):
"""
Generate a view that includes all documents.
"""
all_view_map_function = \
'''
function(doc) {
emit(doc.access_datetime, doc)
}
'''
return [ViewDefinition('api', 'all', all_view_map_function)]
def make_views_error_documents(event_db):
"""
    Generate a view that includes all documents with errors.
"""
error_view_map_function = \
'''
function(doc) {
if (doc.result != "success") {
emit(doc.access_datetime, doc)
}
}
'''
return [ViewDefinition('api', 'errors', error_view_map_function)]
def get_parser_list(event_db):
"""
Return a list of unique parser names in the database.
"""
parser_list_map_function = \
'''
function(doc) {
emit(doc.parser_name, null);
}
'''
parser_list_reduce_function = \
'''
function(keys, values) {
return null;
}
'''
return [
e.key for e in event_db.query(
parser_list_map_function,
parser_list_reduce_function,
group=True)]
def make_views_parser_lists(event_db):
"""
Return a list of views, one for each parser, using templated view
functions.
"""
parser_names = get_parser_list(event_db)
parser_view_map_function = \
'''
function(doc) {
if (doc.parser_name == "%(parser_name)s") {
emit(doc.parser_name, doc)
}
}
'''
return [
ViewDefinition('api', name,
parser_view_map_function % { 'parser_name': name })
for name in parser_names] | gpl-3.0 | 4,641,316,289,698,557,000 | 23.908046 | 78 | 0.548476 | false | 4.094518 | false | false | false |
pr-omethe-us/PyKED | pyked/chemked.py | 1 | 44185 | """
Main ChemKED module
"""
# Standard libraries
from os.path import exists
from collections import namedtuple
from warnings import warn
from copy import deepcopy
import xml.etree.ElementTree as etree
import xml.dom.minidom as minidom
from itertools import chain
import numpy as np
# Local imports
from .validation import schema, OurValidator, yaml, Q_
from .converters import datagroup_properties, ReSpecTh_to_ChemKED
VolumeHistory = namedtuple('VolumeHistory', ['time', 'volume'])
VolumeHistory.__doc__ = 'Time history of the volume in an RCM experiment. Deprecated, to be removed after PyKED 0.4' # noqa: E501
VolumeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
VolumeHistory.volume.__doc__ = '(`~numpy.ndarray`): the volume during the experiment'
TimeHistory = namedtuple('TimeHistory', ['time', 'quantity', 'type'])
TimeHistory.__doc__ = 'Time history of the quantity in an RCM experiment'
TimeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
TimeHistory.quantity.__doc__ = '(`~numpy.ndarray`): the quantity of interest during the experiment'
TimeHistory.type.__doc__ = """\
(`str`): the type of time history represented. Possible options are:
* volume
* temperature
* pressure
* piston position
* light emission
* OH emission
* absorption
"""
RCMData = namedtuple(
'RCMData',
['compressed_pressure', 'compressed_temperature', 'compression_time', 'stroke',
'clearance', 'compression_ratio']
)
RCMData.__doc__ = 'Data fields specific to rapid compression machine experiments'
RCMData.compressed_pressure.__doc__ = '(`~pint.Quantity`) The pressure at the end of compression'
RCMData.compressed_temperature.__doc__ = """\
(`~pint.Quantity`) The temperature at the end of compression"""
RCMData.compression_time.__doc__ = '(`~pint.Quantity`) The duration of the compression stroke'
RCMData.stroke.__doc__ = '(`~pint.Quantity`) The length of the stroke'
RCMData.clearance.__doc__ = """\
(`~pint.Quantity`) The clearance between piston face and end wall at the end of compression"""
RCMData.compression_ratio.__doc__ = '(`~pint.Quantity`) The volumetric compression ratio'
Reference = namedtuple('Reference',
['volume', 'journal', 'doi', 'authors', 'detail', 'year', 'pages'])
Reference.__doc__ = 'Information about the article or report where the data can be found'
Reference.volume.__doc__ = '(`str`) The journal volume'
Reference.journal.__doc__ = '(`str`) The name of the journal'
Reference.doi.__doc__ = '(`str`) The Digital Object Identifier of the article'
Reference.authors.__doc__ = '(`list`) The list of authors of the article'
Reference.detail.__doc__ = '(`str`) Detail about where the data can be found in the article'
Reference.year.__doc__ = '(`str`) The year the article was published'
Reference.pages.__doc__ = '(`str`) The pages in the journal where the article was published'
Apparatus = namedtuple('Apparatus', ['kind', 'institution', 'facility'])
Apparatus.__doc__ = 'Information about the experimental apparatus used to generate the data'
Apparatus.kind.__doc__ = '(`str`) The kind of experimental apparatus'
Apparatus.institution.__doc__ = '(`str`) The institution where the experiment is located'
Apparatus.facility.__doc__ = '(`str`) The particular experimental facility at the location'
Composition = namedtuple('Composition', 'species_name InChI SMILES atomic_composition amount')
Composition.__doc__ = 'Detail of the initial composition of the mixture for the experiment'
Composition.species_name.__doc__ = '(`str`) The name of the species'
Composition.InChI.__doc__ = '(`str`) The InChI identifier for the species'
Composition.SMILES.__doc__ = '(`str`) The SMILES identifier for the species'
Composition.atomic_composition.__doc__ = '(`dict`) The atomic composition of the species'
Composition.amount.__doc__ = '(`~pint.Quantity`) The amount of this species'
class ChemKED(object):
"""Main ChemKED class.
The ChemKED class stores information about the contents of a ChemKED database
file. It stores each datapoint associated with the database and provides access
    to the reference information, versions, and file author.
Arguments:
yaml_file (`str`, optional): The filename of the YAML database in ChemKED format.
        dict_input (`dict`, optional): A dictionary with the parsed output of a YAML file in ChemKED
format.
skip_validation (`bool`, optional): Whether validation of the ChemKED should be done. Must
be supplied as a keyword-argument.
Attributes:
datapoints (`list`): List of `DataPoint` objects storing each datapoint in the database.
reference (`~collections.namedtuple`): Attributes include ``volume``, ``journal``, ``doi``,
``authors``, ``detail``, ``year``, and ``pages`` describing the reference from which the
datapoints are derived.
apparatus (`~collections.namedtuple`): Attributes include ``kind`` of experimental
apparatus, and the ``institution`` and ``facility`` where the experimental apparatus is
located.
chemked_version (`str`): Version of the ChemKED database schema used in this file.
        experiment_type (`str`): Type of experimental data contained in this database.
file_author (`dict`): Information about the author of the ChemKED database file.
file_version (`str`): Version of the ChemKED database file.
_properties (`dict`): Original dictionary read from ChemKED database file, meant for
internal use.
"""
def __init__(self, yaml_file=None, dict_input=None, *, skip_validation=False):
if yaml_file is not None:
with open(yaml_file, 'r') as f:
self._properties = yaml.safe_load(f)
elif dict_input is not None:
self._properties = dict_input
else:
raise NameError("ChemKED needs either a YAML filename or dictionary as input.")
if not skip_validation:
self.validate_yaml(self._properties)
self.datapoints = []
for point in self._properties['datapoints']:
self.datapoints.append(DataPoint(point))
self.reference = Reference(
volume=self._properties['reference'].get('volume'),
journal=self._properties['reference'].get('journal'),
doi=self._properties['reference'].get('doi'),
authors=self._properties['reference'].get('authors'),
detail=self._properties['reference'].get('detail'),
year=self._properties['reference'].get('year'),
pages=self._properties['reference'].get('pages'),
)
self.apparatus = Apparatus(
kind=self._properties['apparatus'].get('kind'),
institution=self._properties['apparatus'].get('institution'),
facility=self._properties['apparatus'].get('facility'),
)
for prop in ['chemked-version', 'experiment-type', 'file-authors', 'file-version']:
setattr(self, prop.replace('-', '_'), self._properties[prop])
@classmethod
def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):
"""Construct a ChemKED instance directly from a ReSpecTh file.
Arguments:
filename_xml (`str`): Filename of the ReSpecTh-formatted XML file to be imported
file_author (`str`, optional): File author to be added to the list generated from the
XML file
file_author_orcid (`str`, optional): ORCID for the file author being added to the list
of file authors
Returns:
`ChemKED`: Instance of the `ChemKED` class containing the data in ``filename_xml``.
Examples:
>>> ck = ChemKED.from_respecth('respecth_file.xml')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber',
file_author_orcid='0000-0000-0000-0000')
"""
properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid,
validate=False)
return cls(dict_input=properties)
def validate_yaml(self, properties):
"""Validate the parsed YAML file for adherance to the ChemKED format.
Arguments:
properties (`dict`): Dictionary created from the parsed YAML file
Raises:
`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose
string contains the errors that are present.
"""
validator = OurValidator(schema)
if not validator.validate(properties):
for key, value in validator.errors.items():
if any(['unallowed value' in v for v in value]):
print(('{key} has an illegal value. Allowed values are {values} and are case '
'sensitive.').format(key=key, values=schema[key]['allowed']))
raise ValueError(validator.errors)
def get_dataframe(self, output_columns=None):
"""Get a Pandas DataFrame of the datapoints in this instance.
Arguments:
output_columns (`list`, optional): List of strings specifying the columns to include
in the output DataFrame. The default is `None`, which outputs all of the
columns. Options include (not case sensitive):
* ``Temperature``
* ``Pressure``
* ``Ignition Delay``
* ``Composition``
* ``Equivalence Ratio``
* ``Reference``
* ``Apparatus``
* ``Experiment Type``
* ``File Author``
* ``File Version``
* ``ChemKED Version``
In addition, specific fields from the ``Reference`` and ``Apparatus`` attributes can
be included by specifying the name after a colon. These options are:
* ``Reference:Volume``
* ``Reference:Journal``
* ``Reference:DOI``
* ``Reference:Authors``
* ``Reference:Detail``
* ``Reference:Year``
* ``Reference:Pages``
* ``Apparatus:Kind``
* ``Apparatus:Facility``
* ``Apparatus:Institution``
Only the first author is printed when ``Reference`` or ``Reference:Authors`` is
selected because the whole author list may be quite long.
Note:
If the Composition is selected as an output type, the composition specified in the
`DataPoint` is used. No attempt is made to convert to a consistent basis; mole fractions
will remain mole fractions, mass fractions will remain mass fractions, and mole percent
will remain mole percent. Therefore, it is possible to end up with more than one type of
composition specification in a given column. However, if the composition is included
in the resulting dataframe, the type of each composition will be specified by the "Kind"
field in each row.
Examples:
>>> df = ChemKED(yaml_file).get_dataframe()
>>> df = ChemKED(yaml_file).get_dataframe(['Temperature', 'Ignition Delay'])
Returns:
`~pandas.DataFrame`: Contains the information regarding each point in the ``datapoints``
attribute
"""
import pandas as pd
valid_labels = [a.replace('_', ' ') for a in self.__dict__
if not (a.startswith('__') or a.startswith('_'))
]
valid_labels.remove('datapoints')
valid_labels.extend(
['composition', 'ignition delay', 'temperature', 'pressure', 'equivalence ratio']
)
ref_index = valid_labels.index('reference')
valid_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
app_index = valid_labels.index('apparatus')
valid_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
species_list = list(set(chain(*[list(d.composition.keys()) for d in self.datapoints])))
if output_columns is None or len(output_columns) == 0:
col_labels = valid_labels
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
else:
output_columns = [a.lower() for a in output_columns]
col_labels = []
for col in output_columns:
if col in valid_labels or col in ['reference', 'apparatus']:
col_labels.append(col)
else:
raise ValueError('{} is not a valid output column choice'.format(col))
if 'composition' in col_labels:
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
if 'reference' in col_labels:
ref_index = col_labels.index('reference')
col_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
if 'apparatus' in col_labels:
app_index = col_labels.index('apparatus')
col_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
data = []
for d in self.datapoints:
row = []
d_species = list(d.composition.keys())
for col in col_labels:
if col in species_list:
if col in d_species:
row.append(d.composition[col].amount)
else:
row.append(Q_(0.0, 'dimensionless'))
elif 'reference' in col or 'apparatus' in col:
split_col = col.split(':')
if split_col[1] == 'authors':
row.append(getattr(getattr(self, split_col[0]), split_col[1])[0]['name'])
else:
row.append(getattr(getattr(self, split_col[0]), split_col[1]))
elif col in ['temperature', 'pressure', 'ignition delay', 'equivalence ratio']:
row.append(getattr(d, col.replace(' ', '_')))
elif col == 'file authors':
row.append(getattr(self, col.replace(' ', '_'))[0]['name'])
elif col == 'Composition:Kind':
row.append(d.composition_type)
else:
row.append(getattr(self, col.replace(' ', '_')))
data.append(row)
col_labels = [a.title() for a in col_labels]
columns = pd.Index(col_labels)
return pd.DataFrame(data=data, columns=columns)
def write_file(self, filename, *, overwrite=False):
"""Write new ChemKED YAML file based on object.
Arguments:
filename (`str`): Filename for target YAML file
overwrite (`bool`, optional): Whether to overwrite file with given name if present.
Must be supplied as a keyword-argument.
Raises:
`NameError`: If ``filename`` is already present, and ``overwrite`` is not ``True``.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.write_file(new_yaml_file)
"""
# Ensure file isn't already present
if exists(filename) and not overwrite:
raise OSError(filename + ' already present. Specify "overwrite=True" '
'to overwrite, or rename.'
)
with open(filename, 'w') as yaml_file:
yaml.dump(self._properties, yaml_file)
def convert_to_ReSpecTh(self, filename):
"""Convert ChemKED record to ReSpecTh XML file.
This converter uses common information in a ChemKED file to generate a
ReSpecTh XML file. Note that some information may be lost, as ChemKED stores
some additional attributes.
Arguments:
filename (`str`): Filename for output ReSpecTh XML file.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.convert_to_ReSpecTh(xml_file)
"""
root = etree.Element('experiment')
file_author = etree.SubElement(root, 'fileAuthor')
file_author.text = self.file_authors[0]['name']
# right now ChemKED just uses an integer file version
file_version = etree.SubElement(root, 'fileVersion')
major_version = etree.SubElement(file_version, 'major')
major_version.text = str(self.file_version)
minor_version = etree.SubElement(file_version, 'minor')
minor_version.text = '0'
respecth_version = etree.SubElement(root, 'ReSpecThVersion')
major_version = etree.SubElement(respecth_version, 'major')
major_version.text = '1'
minor_version = etree.SubElement(respecth_version, 'minor')
minor_version.text = '0'
# Only ignition delay currently supported
exp = etree.SubElement(root, 'experimentType')
if self.experiment_type == 'ignition delay':
exp.text = 'Ignition delay measurement'
else:
raise NotImplementedError('Only ignition delay type supported for conversion.')
reference = etree.SubElement(root, 'bibliographyLink')
citation = ''
for author in self.reference.authors:
citation += author['name'] + ', '
citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' +
str(self.reference.volume) + ':' + self.reference.pages + '. ' +
self.reference.detail
)
reference.set('preferredKey', citation)
reference.set('doi', self.reference.doi)
apparatus = etree.SubElement(root, 'apparatus')
kind = etree.SubElement(apparatus, 'kind')
kind.text = self.apparatus.kind
common_properties = etree.SubElement(root, 'commonProperties')
# ChemKED objects have no common properties once loaded. Check for properties
# among datapoints that tend to be common
common = []
composition = self.datapoints[0].composition
# Composition type *has* to be the same
composition_type = self.datapoints[0].composition_type
if not all(dp.composition_type == composition_type for dp in self.datapoints):
raise NotImplementedError('Error: ReSpecTh does not support varying composition '
'type among datapoints.'
)
if all([composition == dp.composition for dp in self.datapoints]):
# initial composition is common
common.append('composition')
prop = etree.SubElement(common_properties, 'property')
prop.set('name', 'initial composition')
for species_name, species in composition.items():
component = etree.SubElement(prop, 'component')
species_link = etree.SubElement(component, 'speciesLink')
species_link.set('preferredKey', species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
amount = etree.SubElement(component, 'amount')
amount.set('units', composition_type)
amount.text = str(species.amount.magnitude)
# If multiple datapoints present, then find any common properties. If only
# one datapoint, then composition should be the only "common" property.
if len(self.datapoints) > 1:
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
quantities = [getattr(dp, attribute, False) for dp in self.datapoints]
# All quantities must have the property in question and all the
# values must be equal
if all(quantities) and quantities.count(quantities[0]) == len(quantities):
common.append(prop_name)
prop = etree.SubElement(common_properties, 'property')
prop.set('description', '')
prop.set('name', prop_name)
prop.set('units', str(quantities[0].units))
value = etree.SubElement(prop, 'value')
value.text = str(quantities[0].magnitude)
# Ignition delay can't be common, unless only a single datapoint.
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg1')
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
property_idx = {}
labels = {'temperature': 'T', 'pressure': 'P',
'ignition delay': 'tau', 'pressure rise': 'dP/dt',
}
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
# This can't be hasattr because properties are set to the value None
# if no value is specified in the file, so the attribute always exists
prop_indices = [i for i, dp in enumerate(self.datapoints)
if getattr(dp, attribute) is not None
]
if prop_name in common or not prop_indices:
continue
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', prop_name)
units = str(getattr(self.datapoints[prop_indices[0]], attribute).units)
prop.set('units', units)
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': prop_name, 'units': units}
prop.set('id', idx)
prop.set('label', labels[prop_name])
# Need to handle datapoints with possibly different species in the initial composition
if 'composition' not in common:
for dp in self.datapoints:
for species in dp.composition.values():
# Only add new property for species not already considered
has_spec = any([species.species_name in d.values()
for d in property_idx.values()
])
if not has_spec:
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': species.species_name}
prop.set('id', idx)
prop.set('label', '[' + species.species_name + ']')
prop.set('name', 'composition')
prop.set('units', self.datapoints[0].composition_type)
species_link = etree.SubElement(prop, 'speciesLink')
species_link.set('preferredKey', species.species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
for dp in self.datapoints:
datapoint = etree.SubElement(datagroup, 'dataPoint')
for idx, val in property_idx.items():
# handle regular properties a bit differently than composition
if val['name'] in datagroup_properties:
value = etree.SubElement(datapoint, idx)
quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units'])
value.text = str(quantity.magnitude)
else:
# composition
for item in dp.composition.values():
if item.species_name == val['name']:
value = etree.SubElement(datapoint, idx)
value.text = str(item.amount.magnitude)
# See https://stackoverflow.com/a/16097112 for the None.__ne__
history_types = ['volume_history', 'temperature_history', 'pressure_history',
'piston_position_history', 'light_emission_history',
'OH_emission_history', 'absorption_history']
time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types]
time_histories = list(filter(None.__ne__, time_histories))
if len(self.datapoints) > 1 and len(time_histories) > 1:
raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints '
'with a time history.')
elif len(time_histories) > 0:
for dg_idx, hist in enumerate(time_histories):
if hist.type not in ['volume', 'temperature', 'pressure']:
warn('The time-history type {} is not supported by ReSpecTh for '
'ignition delay experiments'.format(hist.type))
continue
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg{}'.format(dg_idx))
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
# Time history has two properties: time and quantity.
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', 'time')
prop.set('units', str(hist.time.units))
time_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[time_idx] = {'name': 'time'}
prop.set('id', time_idx)
prop.set('label', 't')
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', hist.type)
prop.set('units', str(hist.quantity.units))
quant_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[quant_idx] = {'name': hist.type}
prop.set('id', quant_idx)
prop.set('label', 'V')
for time, quantity in zip(hist.time, hist.quantity):
datapoint = etree.SubElement(datagroup, 'dataPoint')
value = etree.SubElement(datapoint, time_idx)
value.text = str(time.magnitude)
value = etree.SubElement(datapoint, quant_idx)
value.text = str(quantity.magnitude)
ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints]
# All datapoints must have the same ignition target and type
if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types):
# In ReSpecTh files all datapoints must share ignition type
ignition = etree.SubElement(root, 'ignitionType')
if ign_types[0]['target'] in ['pressure', 'temperature']:
ignition.set('target', ign_types[0]['target'][0].upper())
else:
# options left are species
ignition.set('target', self.datapoints[0].ignition_type['target'])
if ign_types[0]['type'] == 'd/dt max extrapolated':
ignition.set('type', 'baseline max intercept from d/dt')
else:
ignition.set('type', self.datapoints[0].ignition_type['type'])
else:
raise NotImplementedError('Different ignition targets or types for multiple datapoints '
'are not supported in ReSpecTh.')
et = etree.ElementTree(root)
et.write(filename, encoding='utf-8', xml_declaration=True)
# now do a "pretty" rewrite
xml = minidom.parse(filename)
xml_string = xml.toprettyxml(indent=' ')
with open(filename, 'w') as f:
f.write(xml_string)
print('Converted to ' + filename)
class DataPoint(object):
"""Class for a single datapoint.
The `DataPoint` class stores the information associated with a single data point in the dataset
parsed from the `ChemKED` YAML input.
Arguments:
properties (`dict`): Dictionary adhering to the ChemKED format for ``datapoints``
Attributes:
composition (`list`): List of dictionaries representing the species and their quantities
ignition_delay (pint.Quantity): The ignition delay of the experiment
temperature (pint.Quantity): The temperature of the experiment
pressure (pint.Quantity): The pressure of the experiment
pressure_rise (pint.Quantity, optional): The amount of pressure rise during the induction
period of a shock tube experiment.
compression_time (pint.Quantity, optional): The compression time for an RCM experiment.
compressed_pressure (pint.Quantity, optional): The pressure at the end of compression for
an RCM experiment.
compressed_temperature (pint.Quantity, optional): The temperature at the end of compression
for an RCM experiment.
first_stage_ignition_delay (pint.Quantity, optional): The first stage ignition delay of the
experiment.
ignition_type (`dict`): Dictionary with the ignition target and type.
volume_history (`~collections.namedtuple`, optional): The volume history of the reactor
during an RCM experiment.
pressure_history (`~collections.namedtuple`, optional): The pressure history of the reactor
during an experiment.
temperature_history (`~collections.namedtuple`, optional): The temperature history of the
reactor during an experiment.
piston_position_history (`~collections.namedtuple`, optional): The piston position history
of the reactor during an RCM experiment.
light_emission_history (`~collections.namedtuple`, optional): The light emission history
of the reactor during an experiment.
OH_emission_history (`~collections.namedtuple`, optional): The OH emission history of the
reactor during an experiment.
absorption_history (`~collections.namedtuple`, optional): The absorption history of the
reactor during an experiment.
"""
value_unit_props = [
'ignition-delay', 'first-stage-ignition-delay', 'temperature', 'pressure',
'pressure-rise',
]
rcm_data_props = [
'compressed-pressure', 'compressed-temperature', 'compression-time', 'stroke', 'clearance',
'compression-ratio'
]
def __init__(self, properties):
for prop in self.value_unit_props:
if prop in properties:
quant = self.process_quantity(properties[prop])
setattr(self, prop.replace('-', '_'), quant)
else:
setattr(self, prop.replace('-', '_'), None)
if 'rcm-data' in properties:
orig_rcm_data = properties['rcm-data']
rcm_props = {}
for prop in self.rcm_data_props:
if prop in orig_rcm_data:
quant = self.process_quantity(orig_rcm_data[prop])
rcm_props[prop.replace('-', '_')] = quant
else:
rcm_props[prop.replace('-', '_')] = None
self.rcm_data = RCMData(**rcm_props)
else:
self.rcm_data = None
self.composition_type = properties['composition']['kind']
composition = {}
for species in properties['composition']['species']:
species_name = species['species-name']
amount = self.process_quantity(species['amount'])
InChI = species.get('InChI')
SMILES = species.get('SMILES')
atomic_composition = species.get('atomic-composition')
composition[species_name] = Composition(
species_name=species_name, InChI=InChI, SMILES=SMILES,
atomic_composition=atomic_composition, amount=amount)
setattr(self, 'composition', composition)
self.equivalence_ratio = properties.get('equivalence-ratio')
self.ignition_type = deepcopy(properties.get('ignition-type'))
if 'time-histories' in properties and 'volume-history' in properties:
raise TypeError('time-histories and volume-history are mutually exclusive')
if 'time-histories' in properties:
for hist in properties['time-histories']:
if hasattr(self, '{}_history'.format(hist['type'].replace(' ', '_'))):
raise ValueError('Each history type may only be specified once. {} was '
'specified multiple times'.format(hist['type']))
time_col = hist['time']['column']
time_units = hist['time']['units']
quant_col = hist['quantity']['column']
quant_units = hist['quantity']['units']
if isinstance(hist['values'], list):
values = np.array(hist['values'])
else:
# Load the values from a file
values = np.genfromtxt(hist['values']['filename'], delimiter=',')
time_history = TimeHistory(
time=Q_(values[:, time_col], time_units),
quantity=Q_(values[:, quant_col], quant_units),
type=hist['type'],
)
setattr(self, '{}_history'.format(hist['type'].replace(' ', '_')), time_history)
if 'volume-history' in properties:
warn('The volume-history field should be replaced by time-histories. '
'volume-history will be removed after PyKED 0.4',
DeprecationWarning)
time_col = properties['volume-history']['time']['column']
time_units = properties['volume-history']['time']['units']
volume_col = properties['volume-history']['volume']['column']
volume_units = properties['volume-history']['volume']['units']
values = np.array(properties['volume-history']['values'])
self.volume_history = VolumeHistory(
time=Q_(values[:, time_col], time_units),
volume=Q_(values[:, volume_col], volume_units),
)
history_types = ['volume', 'temperature', 'pressure', 'piston_position', 'light_emission',
'OH_emission', 'absorption']
for h in history_types:
if not hasattr(self, '{}_history'.format(h)):
setattr(self, '{}_history'.format(h), None)
def process_quantity(self, properties):
"""Process the uncertainty information from a given quantity and return it
"""
quant = Q_(properties[0])
if len(properties) > 1:
unc = properties[1]
uncertainty = unc.get('uncertainty', False)
upper_uncertainty = unc.get('upper-uncertainty', False)
lower_uncertainty = unc.get('lower-uncertainty', False)
uncertainty_type = unc.get('uncertainty-type')
if uncertainty_type == 'relative':
if uncertainty:
quant = quant.plus_minus(float(uncertainty), relative=True)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(float(upper_uncertainty), float(lower_uncertainty))
quant = quant.plus_minus(uncertainty, relative=True)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
elif uncertainty_type == 'absolute':
if uncertainty:
uncertainty = Q_(uncertainty)
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty))
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
else:
raise ValueError('uncertainty-type must be one of "absolute" or "relative"')
return quant
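    # Illustrative sketch (not part of the original module): the quantity format
    # accepted by process_quantity is a list whose first entry is a unit string
    # and whose optional second entry describes the uncertainty, e.g.
    #
    #   ['1000 K', {'uncertainty-type': 'absolute', 'uncertainty': '10 K'}]
    #   ['1.0 atm', {'uncertainty-type': 'relative', 'uncertainty': 0.05}]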
def get_cantera_composition_string(self, species_conversion=None):
"""Get the composition in a string format suitable for input to Cantera.
Returns a formatted string no matter the type of composition. As such, this method
is not recommended for end users; instead, prefer the `get_cantera_mole_fraction`
or `get_cantera_mass_fraction` methods.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type of the `DataPoint` is not one of
``'mass fraction'``, ``'mole fraction'``, or ``'mole percent'``
"""
if self.composition_type in ['mole fraction', 'mass fraction']:
factor = 1.0
elif self.composition_type == 'mole percent':
factor = 100.0
else:
raise ValueError('Unknown composition type: {}'.format(self.composition_type))
if species_conversion is None:
comps = ['{!s}:{:.4e}'.format(c.species_name,
c.amount.magnitude/factor) for c in self.composition.values()]
else:
comps = []
for c in self.composition.values():
amount = c.amount.magnitude/factor
idents = [getattr(c, s, False) for s in ['species_name', 'InChI', 'SMILES']]
present = [i in species_conversion for i in idents]
if not any(present):
comps.append('{!s}:{:.4e}'.format(c.species_name, amount))
else:
if len([i for i in present if i]) > 1:
raise ValueError('More than one conversion present for species {}'.format(
c.species_name))
ident = idents[present.index(True)]
species_replacement_name = species_conversion.pop(ident)
comps.append('{!s}:{:.4e}'.format(species_replacement_name, amount))
if len(species_conversion) > 0:
raise ValueError('Unknown species in conversion: {}'.format(species_conversion))
return ', '.join(comps)
def get_cantera_mole_fraction(self, species_conversion=None):
"""Get the mole fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mole fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mass fraction'``, the conversion cannot
be done because no molecular weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mole_fraction()
'H2:4.4400e-03, O2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
"""
if self.composition_type == 'mass fraction':
raise ValueError('Cannot get mole fractions from the given composition.\n'
'{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
def get_cantera_mass_fraction(self, species_conversion=None):
"""Get the mass fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mole fraction'`` or
``'mole percent'``, the conversion cannot be done because no molecular
weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mass_fraction()
'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
"""
if self.composition_type in ['mole fraction', 'mole percent']:
raise ValueError('Cannot get mass fractions from the given composition.\n'
'{}'.format(self.composition)
)
else:
return self.get_cantera_composition_string(species_conversion)
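# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a ChemKED-format YAML file named 'experiment.yaml' is available:
#
#   ds = ChemKED('experiment.yaml')
#   df = ds.get_dataframe(['Temperature', 'Pressure', 'Ignition Delay'])
#   point = ds.datapoints[0]
#   mole_fractions = point.get_cantera_mole_fraction()
#   ds.convert_to_ReSpecTh('experiment.xml')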
| bsd-3-clause | -6,256,631,093,448,223,000 | 48.701912 | 130 | 0.583388 | false | 4.368265 | false | false | false |
Anvil/maestro-ng | maestro/loader.py | 1 | 2758 | # Copyright (C) 2015 SignalFx, Inc. All rights reserved.
#
# Docker container orchestration utility.
import jinja2
import os
import sys
import yaml
from . import exceptions
class MaestroYamlConstructor(yaml.constructor.Constructor):
"""A PyYAML object constructor that errors on duplicate keys in YAML
mappings. Because for some reason PyYAML doesn't do that since 3.x."""
def construct_mapping(self, node, deep=False):
if not isinstance(node, yaml.nodes.MappingNode):
raise yaml.constructor.ConstructorError(
None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
keys = set()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if key in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping", node.start_mark,
"found duplicate key (%s)" % key, key_node.start_mark)
keys.add(key)
return yaml.constructor.Constructor.construct_mapping(self, node, deep)
class MaestroYamlLoader(yaml.reader.Reader, yaml.scanner.Scanner,
yaml.parser.Parser, yaml.composer.Composer,
MaestroYamlConstructor, yaml.resolver.Resolver):
"""A custom YAML Loader that uses the custom MaestroYamlConstructor."""
def __init__(self, stream):
yaml.reader.Reader.__init__(self, stream)
yaml.scanner.Scanner.__init__(self)
yaml.parser.Parser.__init__(self)
yaml.composer.Composer.__init__(self)
MaestroYamlConstructor.__init__(self)
yaml.resolver.Resolver.__init__(self)
def load(filename):
"""Load a config from the given file.
Args:
filename (string): Path to the YAML environment description
configuration file to load. Use '-' for stdin.
Returns:
A python data structure corresponding to the YAML configuration.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(filename)),
extensions=['jinja2.ext.with_'])
try:
if filename == '-':
template = env.from_string(sys.stdin.read())
else:
template = env.get_template(os.path.basename(filename))
except jinja2.exceptions.TemplateNotFound:
raise exceptions.MaestroException(
'Environment description file {} not found!'.format(filename))
except Exception as e:
raise exceptions.MaestroException(
'Error reading environment description file {}: {}!'
.format(filename, e))
return yaml.load(template.render(env=os.environ), Loader=MaestroYamlLoader)
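# Illustrative usage sketch (not part of the original module):
#
#   config = load('maestro.yaml')   # or load('-') to read from standard input
#
# Because MaestroYamlLoader rejects duplicate mapping keys, a document such as
# "a: 1\na: 2" raises a yaml.constructor.ConstructorError instead of silently
# keeping the last value.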
| apache-2.0 | 4,613,575,730,410,633,000 | 36.27027 | 79 | 0.638869 | false | 4.275969 | false | false | false |
bootstraponline/testdroid_device_finder | device_finder.py | 1 | 4900 | # -*- coding: utf-8 -*-
# from: https://github.com/bitbar/testdroid-samples/blob/03fc043ba98235b9ea46a0ab8646f3b20dd1960e/appium/sample-scripts/python/device_finder.py
import os, sys, requests, json, time, httplib
from optparse import OptionParser
from urlparse import urljoin
from datetime import datetime
class DeviceFinder:
# Cloud URL (not including API path)
url = None
# Oauth access token
access_token = None
# Oauth refresh token
refresh_token = None
# Unix timestamp (seconds) when token expires
token_expiration_time = None
""" Full constructor with username and password
"""
def __init__(self, username=None, password=None, url="https://cloud.testdroid.com", download_buffer_size=65536):
self.username = username
self.password = password
self.cloud_url = url
self.download_buffer_size = download_buffer_size
""" Get Oauth2 token
"""
def get_token(self):
if not self.access_token:
# TODO: refresh
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "password",
"username": self.username,
"password": self.password
}
res = requests.post(
url,
data = payload,
headers = { "Accept": "application/json" }
)
if res.status_code != 200:
print "FAILED: Authentication or connection failure. Check Testdroid Cloud URL and your credentials."
sys.exit(-1)
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
elif self.token_expiration_time < time.time():
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "refresh_token",
"refresh_token": self.refresh_token
}
res = requests.post(
url,
data = payload,
headers = { "Accept": "application/json" }
)
if res.status_code != 200:
print "FAILED: Unable to get a new access token using refresh token"
self.access_token = None
return self.get_token()
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
return self.access_token
""" Helper method for getting necessary headers to use for API calls, including authentication
"""
def _build_headers(self):
return { "Authorization": "Bearer %s" % self.get_token(), "Accept": "application/json" }
""" GET from API resource
"""
def get(self, path=None, payload={}, headers={}):
if path.find('v2/') >= 0:
cut_path = path.split('v2/')
path = cut_path[1]
url = "%s/api/v2/%s" % (self.cloud_url, path)
headers = dict(self._build_headers().items() + headers.items())
res = requests.get(url, params=payload, headers=headers)
if headers['Accept'] == 'application/json':
return res.json()
else:
return res.text
""" Returns list of devices
"""
def get_devices(self, limit=0):
return self.get("devices?limit=%s" % (limit))
""" Find available free Android device
"""
def available_free_android_device(self, limit=0):
print "Searching Available Free Android Device..."
for device in self.get_devices(limit)['data']:
if device['creditsPrice'] == 0 and device['locked'] == False and device['osType'] == "ANDROID" and device['softwareVersion']['apiLevel'] > 16:
print "Found device '%s'" % device['displayName']
print ""
return device['displayName']
print "No available device found"
print ""
return ""
""" Find available free iOS device
"""
def available_free_ios_device(self, limit=0):
print "Searching Available Free iOS Device..."
for device in self.get_devices(limit)['data']:
if device['creditsPrice'] == 0 and device['locked'] == False and device['osType'] == "IOS":
print "Found device '%s'" % device['displayName']
print ""
return device['displayName']
print "No available device found"
print ""
return ""
| apache-2.0 | -8,658,598,604,070,463,000 | 35.842105 | 154 | 0.546327 | false | 4.340124 | false | false | false |
redhat-cip/python-tripleo-wrapper | rdomhelper/ssh.py | 1 | 11391 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paramiko
from paramiko import ssh_exception
import io
import logging
import select
import time
LOG = logging.getLogger('__chainsaw__')
class SshClient(object):
"""SSH client based on Paramiko.
This class implements the following features:
    - run commands on a remote host
    - send files to a remote host
    - redirect the connection through another ssh server so that every
      command is executed on the redirected host
    - create remote files
"""
def __init__(self, hostname, user, key_filename=None,
via_ip=None):
""":param hostname: the host on which to connect
:type hostname: str
:param user: the user to use for the connection
:type user: str
:param key_filename: the private key path to use, by default it will
use the system host keys
:type key_filename: str
        :param via_ip: an intermediate host to connect through; the target
        host is then reached over a direct-tcpip channel on port 22
        :type via_ip: str
"""
        assert hostname, 'hostname must be defined.'
        assert user, 'user must be defined.'
self._hostname = hostname
self._user = user
self._key_filename = key_filename
self.load_private_key(key_filename)
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._via_ip = via_ip
self._transport = None
self._started = False
self.description = 'not started yet'
self._environment_filenames = []
def load_private_key(self, priv_key):
"""Register the SSH private key."""
with open(priv_key) as fd:
self._private_key = paramiko.RSAKey.from_private_key(fd)
    def _get_transport_via_ip(self):
        last_exception = None
        for i in range(60):
            try:
                channel = self._client.get_transport().open_channel(
                    'direct-tcpip',
                    (self._hostname, 22),
                    (self._via_ip, 0))
            except ssh_exception.ChannelException as exception:
                # Keep a reference to the exception: the name bound by 'as'
                # is cleared when the except block exits, so it could not be
                # re-raised after the loop otherwise.
                last_exception = exception
                LOG.debug('%s creating the direct-tcpip connection' % self.description)
                time.sleep(1)
            else:
                transport = paramiko.Transport(channel)
                transport.start_client()
                transport.auth_publickey(self._user, self._private_key)
                return transport
        raise last_exception
def _get_transport(self):
if self._via_ip:
transport = self._get_transport_via_ip()
else:
transport = self._client.get_transport()
transport.set_keepalive(10)
return transport
def start(self):
"""Start the ssh client and connect to the host.
        It will retry for up to 60 attempts (roughly a minute) until the ssh
        service is available. If it does not succeed in connecting, the
        function raises an SSHException.
"""
if self._via_ip:
connect_to = self._via_ip
self.description = '[%s@%s via %s]' % (self._user,
self._hostname,
self._via_ip)
else:
connect_to = self._hostname
self.description = '[%s@%s]' % (self._user,
self._hostname)
for i in range(60):
try:
self._client.connect(
connect_to,
username=self._user,
allow_agent=True,
key_filename=self._key_filename)
# NOTE(Gonéri): TypeError is in the list because of
# https://github.com/paramiko/paramiko/issues/615
self._transport = self._get_transport()
except (OSError,
TypeError,
ssh_exception.SSHException,
ssh_exception.NoValidConnectionsError) as e:
LOG.info('%s waiting for %s' % (self.description, connect_to))
LOG.debug("exception: '%s'" % str(e))
time.sleep(1)
else:
LOG.debug('%s connected' % self.description)
self._started = True
return
_error = ("unable to connect to ssh service on '%s'" % self._hostname)
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def _check_started(self):
if not self._started:
_error = "ssh client not started, please start the client"
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def stop(self):
"""Close the ssh connection."""
self._started = False
self._client.close()
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,),
error_callback=None, custom_log=None):
"""Run a command on the remote host.
        The command is run on the remote host; if the connection goes through
        an intermediate host (via_ip), the command is still executed on the
        target host. See __init__.
:param cmd: the command to run
:type cmd: str
        :param sudo: True if the command should be run with sudo; this
        parameter disables the use of environment files.
        :type sudo: bool
:param success_status: the list of the possible success status
:type success_status: list
:param error_callback: if provided, the callback to call in case of
a failure. it will be called with two args, the output of the command
and the returned error code.
:return: the tuple (output of the command, returned code)
:rtype: tuple
        :param custom_log: an optional string to record in the log instead of the command.
This is useful for example if you want to hide a password.
:type custom_log: str
"""
self._check_started()
cmd_output = io.StringIO()
channel = self._get_channel()
if sudo:
cmd = "sudo %s" % cmd
else:
for filename in self._environment_filenames:
cmd = '. %s; %s' % (filename, cmd)
if not custom_log:
custom_log = cmd
LOG.info("%s run '%s'" % (self.description, custom_log))
channel.exec_command(cmd)
while True:
if channel.exit_status_ready():
break
rl, _, _ = select.select([channel], [], [], 30)
if rl:
received = channel.recv(1024).decode('UTF-8', 'ignore').strip()
if received:
LOG.debug(received)
cmd_output.write(received)
cmd_output = cmd_output.getvalue()
exit_status = channel.exit_status
if ignore_error or channel.exit_status in success_status:
return cmd_output, channel.exit_status
elif error_callback:
return error_callback(cmd_output, exit_status)
else:
_error = ("%s command %s has failed with, rc='%s'" %
(self.description, custom_log, exit_status))
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def _get_channel(self):
"""Returns a channel according to if there is a redirection to do or
not.
"""
channel = self._transport.open_session()
channel.set_combine_stderr(True)
channel.get_pty()
return channel
def send_file(self, local_path, remote_path):
"""Send a file to the remote host.
:param local_path: the local path of the file
:type local_path: str
:param remote_path: the remote path of the file
:type remote_path: str
:return: the file attributes
:rtype: paramiko.sftp_attr.SFTPAttributes
"""
self._check_started()
sftp = paramiko.SFTPClient.from_transport(self._transport)
return sftp.put(local_path, remote_path)
def create_file(self, path, content, mode='w'):
"""Create a file with a content.
:param path: the path of the file.
:type path: str
:param content: the content of the file
:type content: str
:param mode: the mode of the file while opening it
:type mode: str
"""
self._check_started()
sftp = paramiko.SFTPClient.from_transport(self._transport)
with sftp.open(path, mode) as remote_file:
remote_file.write(content)
remote_file.flush()
def info(self):
return {'hostname': self._hostname,
'user': self._user,
'key_filename': self._key_filename}
def add_environment_file(self, filename):
self._environment_filenames.append(filename)
class PoolSshClient(object):
def __init__(self):
self._ssh_clients = {}
def build_ssh_client(self, hostname, user, key_filename=None,
via_ip=None):
_ssh_client = SshClient(hostname, user, key_filename,
via_ip)
_ssh_client.start()
self._ssh_clients[user] = _ssh_client
def add_ssh_client(self, user, ssh_client):
self._ssh_clients[user] = ssh_client
def del_ssh_client(self, user):
self._check_ssh_client(user)
del self._ssh_clients[user]
def get_client(self, user):
self._check_ssh_client(user)
return self._ssh_clients[user]
def _check_ssh_client(self, user):
if user not in self._ssh_clients.keys():
_error = "ssh client for user %s not existing" % user
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def run(self, user, cmd, sudo=False, ignore_error=False,
success_status=(0,), error_callback=None, custom_log=None):
self._check_ssh_client(user)
return self._ssh_clients[user].run(
cmd,
sudo=sudo,
ignore_error=ignore_error,
success_status=success_status,
error_callback=error_callback,
custom_log=custom_log)
def send_file(self, user, local_path, remote_path):
self._check_ssh_client(user)
return self._ssh_clients[user].send_file(local_path, remote_path)
def create_file(self, user, path, content, mode='w'):
self._check_ssh_client(user)
return self._ssh_clients[user].create_file(path, content, mode)
def stop_all(self):
for ssh_client in self._ssh_clients.values():
ssh_client.stop()
def add_environment_file(self, user, filename):
self._check_ssh_client(user)
self._ssh_clients[user].add_environment_file(filename)
| apache-2.0 | -2,446,650,625,284,174,300 | 35.623794 | 89 | 0.576207 | false | 4.251586 | false | false | false |
hermestrimegiste/patchtgtel | patchConnectionTogotelecom.py | 1 | 2031 | #-*- coding:utf-8 -*-
__author__ = 'hermes'
import socket
from os import system
from time import sleep
from datetime import datetime
global connectionName
connectionName = 'TOGOTELECOM' # Set the name of your network connection here
def is_connected():
# http://stackoverflow.com/questions/20913411/test-if-an-internet-connection-is-present-in-python
try:
#host = socket.gethostbyname("www.google.com")
#socket.create_connection(('173.194.67.94', 80), 25)
    # method 2: no explicit connection test
socket.gethostbyname("www.google.com")
return True
except:
try:
socket.create_connection(('173.194.67.94', 80), 15)
return True
except:
pass
pass
return False
def hardRestartNetwork():
system('nmcli nm enable false')
system('nmcli nm enable true')
sleep(5)
system("nmcli con up id '%s'"% connectionName)
def patchTogotelecom():
activeReseau = system('nmcli nm enable true')
deconnectionSoft = system('nmcli dev disconnect iface ttyUSB0')
sleep(5)
if (deconnectionSoft == 0 or deconnectionSoft == 1536):
activeTGTEL = system("nmcli con up id '%s'"% connectionName)
if activeTGTEL == 768:
      # if it failed: the 90-second timeout expired.
#system('modprobe --force-vermagic usb_wwan usbserial')
hardRestartNetwork()
else:
    # restart the network if the soft method did not work
    hardRestartNetwork()
  if is_connected():
    print(u'Connected at %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
  else:
    print(u'Attempt failed at %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
# sleep(5)
# start of script execution
#system('modprobe --force-vermagic usb_wwan usbserial')
hardRestartNetwork()
print(u'script start > %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
sleep(5)
while True:
if is_connected():
sleep(60)
else:
    print(u'Reconnection attempt at %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
patchTogotelecom()
| gpl-2.0 | -6,435,792,570,533,313,000 | 26.378378 | 100 | 0.659427 | false | 3.165625 | false | false | false |
nigelb/SerialGrabber | serial_grabber/cli.py | 1 | 2827 | #!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import signal
import time
from SerialGrabber_Storage import storage_cache
from serial_grabber.commander import MultiProcessParameterFactory
from serial_grabber.util import config_helper
from serial_grabber.watchdog import running, counter, Watchdog
from serial_grabber.processor import ProcessorManager
class status:
def __init__(self, logger):
self.logger = logger
def set_tooltip(self, tooltip):
self.logger.info(tooltip)
def register_handler(running, watchdog, reader, processor, command):
def signal_handler(signal, frame):
print 'You pressed Ctrl+C!'
running.running = False
if command:
command.stop()
watchdog.join()
if reader:
reader.close()
exit(0)
signal.signal(signal.SIGINT, signal_handler)
def start(logger, reader, processor, command):
try:
si = status(logger)
isRunning = running(True)
c = counter(si)
params = config_helper({
"counter": c,
"running": isRunning
})
if issubclass(command.__class__, MultiProcessParameterFactory):
command.populate_parameters(params)
if issubclass(reader.__class__, MultiProcessParameterFactory):
reader.populate_parameters(params)
if issubclass(processor.__class__, MultiProcessParameterFactory):
processor.populate_parameters(params)
watchdog = Watchdog(isRunning)
register_handler(isRunning, watchdog, reader, processor, command)
if reader:
watchdog.start_thread(reader, (isRunning, c, params), "Runner")
if processor:
watchdog.start_thread(ProcessorManager(processor), (isRunning, c, params), "Processor")
if command and reader:
watchdog.start_thread(command, (isRunning, c, params), "Commander")
while isRunning.running:
time.sleep(1)
finally:
storage_cache.close_cache()
| gpl-2.0 | -6,709,110,638,634,718,000 | 34.3375 | 99 | 0.686594 | false | 4.263952 | false | false | false |
Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/vultr.py | 1 | 6023 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr Driver
"""
import time
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import Provider, NodeState
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
class VultrResponse(JsonResponse):
def parse_error(self):
if self.status == httplib.OK:
body = self.parse_body()
return body
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
else:
raise LibcloudError(self.body)
class VultrConnection(ConnectionKey):
"""
Connection class for the Vultr driver.
"""
host = 'api.vultr.com'
responseCls = VultrResponse
def add_default_params(self, params):
"""
Add parameters that are necessary for every request
This method add ``api_key`` to
the request.
"""
params['api_key'] = self.key
return params
def encode_data(self, data):
return urlencode(data)
def get(self, url):
return self.request(url)
def post(self, url, data):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return self.request(url, data=data, headers=headers, method='POST')
class VultrNodeDriver(NodeDriver):
"""
VultrNode node driver.
"""
connectionCls = VultrConnection
type = Provider.VULTR
name = 'Vultr'
website = 'https://www.vultr.com'
NODE_STATE_MAP = {'pending': NodeState.PENDING,
'active': NodeState.RUNNING}
def list_nodes(self):
return self._list_resources('/v1/server/list', self._to_node)
def list_locations(self):
return self._list_resources('/v1/regions/list', self._to_location)
def list_sizes(self):
return self._list_resources('/v1/plans/list', self._to_size)
def list_images(self):
return self._list_resources('/v1/os/list', self._to_image)
def create_node(self, name, size, image, location):
params = {'DCID': location.id, 'VPSPLANID': size.id,
'OSID': image.id, 'label': name}
result = self.connection.post('/v1/server/create', params)
if result.status != httplib.OK:
return False
subid = result.object['SUBID']
retry_count = 3
created_node = None
for i in range(retry_count):
try:
nodes = self.list_nodes()
created_node = [n for n in nodes if n.id == subid][0]
except IndexError:
time.sleep(1)
pass
else:
break
return created_node
def reboot_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/reboot', params)
return res.status == httplib.OK
def destroy_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/destroy', params)
return res.status == httplib.OK
    def _list_resources(self, url, transform_func):
        data = self.connection.get(url).object
        sorted_keys = sorted(data)
        return [transform_func(data[key]) for key in sorted_keys]
def _to_node(self, data):
if 'status' in data:
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
if state == NodeState.RUNNING and \
data['power_status'] != 'running':
state = NodeState.STOPPED
else:
state = NodeState.UNKNOWN
if 'main_ip' in data and data['main_ip'] is not None:
public_ips = [data['main_ip']]
else:
public_ips = []
extra_keys = []
extra = {}
for key in extra_keys:
if key in data:
extra[key] = data[key]
node = Node(id=data['SUBID'], name=data['label'], state=state,
public_ips=public_ips, private_ips=None, extra=extra,
driver=self)
return node
def _to_location(self, data):
return NodeLocation(id=data['DCID'], name=data['name'],
country=data['country'], driver=self)
def _to_size(self, data):
extra = {'vcpu_count': int(data['vcpu_count'])}
ram = int(data['ram'])
disk = int(data['disk'])
bandwidth = float(data['bandwidth'])
price = float(data['price_per_month'])
return NodeSize(id=data['VPSPLANID'], name=data['name'],
ram=ram, disk=disk,
bandwidth=bandwidth, price=price,
extra=extra, driver=self)
def _to_image(self, data):
extra = {'arch': data['arch'], 'family': data['family']}
return NodeImage(id=data['OSID'], name=data['name'], extra=extra,
driver=self)
| apache-2.0 | -2,399,194,911,052,198,000 | 30.733696 | 78 | 0.584592 | false | 4.015333 | false | false | false |
YourCyborg/Sun-RPI | src/objects/admin.py | 1 | 5428 | #
# This sets up how models are displayed
# in the web admin interface.
#
from django import forms
from django.conf import settings
from django.contrib import admin
from src.objects.models import ObjAttribute, ObjectDB, ObjectNick, Alias
from src.utils.utils import mod_import
class ObjAttributeInline(admin.TabularInline):
model = ObjAttribute
fields = ('db_key', 'db_value')
extra = 0
class NickInline(admin.TabularInline):
model = ObjectNick
fields = ('db_nick', 'db_real', 'db_type')
extra = 0
class AliasInline(admin.TabularInline):
model = Alias
fields = ("db_key",)
extra = 0
class ObjectCreateForm(forms.ModelForm):
"This form details the look of the fields"
class Meta:
model = ObjectDB
db_key = forms.CharField(label="Name/Key",
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Main identifier, like 'apple', 'strong guy', 'Elizabeth' etc. If creating a Character, check so the name is unique among characters!",)
db_typeclass_path = forms.CharField(label="Typeclass",initial="Change to (for example) %s or %s." % (settings.BASE_OBJECT_TYPECLASS, settings.BASE_CHARACTER_TYPECLASS),
widget=forms.TextInput(attrs={'size':'78'}),
help_text="This defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass. If you are creating a Character you should use the typeclass defined by settings.BASE_CHARACTER_TYPECLASS or one derived from that.")
db_permissions = forms.CharField(label="Permissions",
initial=settings.PERMISSION_PLAYER_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="a comma-separated list of text strings checked by certain locks. They are mainly of use for Character objects. Character permissions overload permissions defined on a controlling Player. Most objects normally don't have any permissions defined.")
db_cmdset_storage = forms.CharField(label="CmdSet",
initial=settings.CMDSET_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Most non-character objects don't need a cmdset and can leave this field blank.")
class ObjectEditForm(ObjectCreateForm):
"Form used for editing. Extends the create one with more fields"
db_lock_storage = forms.CharField(label="Locks",
required=False,
widget=forms.Textarea(attrs={'cols':'100', 'rows':'2'}),
help_text="In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...")
class ObjectDBAdmin(admin.ModelAdmin):
list_display = ('id', 'db_key', 'db_location', 'db_player', 'db_typeclass_path')
list_display_links = ('id', 'db_key')
ordering = ['db_player', 'db_typeclass_path', 'id']
search_fields = ['^db_key', 'db_typeclass_path']
save_as = True
save_on_top = True
list_select_related = True
list_filter = ('db_permissions', 'db_location', 'db_typeclass_path')
# editing fields setup
form = ObjectEditForm
fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), ('db_permissions', 'db_lock_storage'),
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
#deactivated temporarily, they cause empty objects to be created in admin
inlines = [AliasInline]#, ObjAttributeInline]
# Custom modification to give two different forms wether adding or not.
add_form = ObjectCreateForm
add_fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), 'db_permissions',
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(ObjectDBAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(ObjectDBAdmin, self).get_form(request, obj, **defaults)
def save_model(self, request, obj, form, change):
if not change:
# adding a new object
obj = obj.typeclass
obj.basetype_setup()
obj.basetype_posthook_setup()
obj.at_object_creation()
obj.at_init()
admin.site.register(ObjectDB, ObjectDBAdmin)
| bsd-3-clause | 8,773,561,773,923,069,000 | 42.130081 | 306 | 0.573876 | false | 4.338929 | false | false | false |
hiatobr/midiacapoeira | modules/queries.py | 1 | 1403 | # -*- coding: utf-8 -*-
from gluon import current
def tagQuery(tags, ctbl, ttbl, query = 0, op = 'or', field =
'texto_id'):
'''
    Search the database for content tagged with the tags in <tags>.
    The operation is recursive, one tag at a time, joining the result of
    each lookup with the result for the previous tag. That join can be an
    intersection (op = 'and') or a union (op = 'or').
    This implementation aims for generality, so the function can be used
    to search for any kind of content, as long as the <field> variable is
    filled in correctly when calling it.
    <ctbl> = content table
    <ttbl> = tag table
'''
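    # Illustrative call (table and column names below are assumptions, not
    # values defined in this module): with a content table db.texto and a
    # tag table db.texto_tags holding (tag, texto_id) rows,
    #     tagQuery(['roda', 'ginga'], db.texto, db.texto_tags, op='and')
    # would return the db.texto rows tagged with both tags.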
db = current.db
try:
        # Pick one tag and look up the ids of the texts that carry it
tag_ref = db(ttbl.tag==tags.pop()).select(ttbl[field]).as_list()
tag_ref = map(list.pop, map(dict.values, tag_ref))
if query and op == 'or':
return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref) |
query)
elif query and op == 'and':
return tagQuery (tags, ctbl, ttbl,
ctbl.id.belongs(tag_ref) & query)
else:
return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref))
except IndexError:
return db(query).select(ctbl.ALL).as_list()
| gpl-3.0 | 3,060,283,172,362,273,000 | 34.435897 | 73 | 0.606368 | false | 3.098655 | false | false | false |
jsaponara/opentaxforms | opentaxforms/serve.py | 1 | 3082 | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
def createApi(app,**kw):
db = SQLAlchemy(app)
conn, engine, metadata, md = connect(appname, **kw)
Base = declarative_base()
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
mysession = scoped_session(Session)
apimanager = flask_restless.APIManager(app, session=mysession)
counts = {}
for tabl in md:
tablobj = md[tabl]
counts[tabl] = tablobj.count().execute().fetchone()[0]
attrs = dict(
__table__=tablobj,
# todo should flask_restless need __tablename__?
__tablename__=str(tabl),
)
attrs.update(dict(
orgn=dict(
form=db.relationship('Form'),
),
form=dict(
orgn=db.relationship('Orgn', back_populates='form'),
slot=db.relationship('Slot', back_populates='form'),
),
slot=dict(
form=db.relationship('Form'),
),
)[tabl])
tablcls = type(str(tabl).capitalize(), (Base, ), attrs)
colsToAdd = dict(
orgn=(),
form=(
'orgn', 'orgn.code',
),
slot=(
'form', 'form.code',
),
)[tabl]
colsToShow = [c.name for c in tablobj.columns]
colsToShow.extend(colsToAdd)
# print tabl,colsToShow
apimanager.create_api(
tablcls,
url_prefix='/api/v%s' % (apiVersion, ),
include_columns=colsToShow,
)
return counts
def parseCmdline():
'''Load command line arguments'''
parser = ArgumentParser(
description='Automates tax forms'
' and provides an API for new tax form interfaces'
)
parser.add_argument(
'-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
return parser.parse_args()
def createApp(**kw):
cmdline = kw.get('cmdline')
verbose = kw.get('verbose')
if 'cmdline' in kw:
del kw['cmdline']
if 'verbose' in kw:
del kw['verbose']
args = parseCmdline() if cmdline else Bag(dict(postgres=False))
app = Flask(appname)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # to suppress warning
counts = createApi(app,postgres=args.postgres, **kw)
if verbose:
print('serving {slot} slots in {form} forms from {orgn} orgns'.format(
**counts))
return app
def main(**kw):
app = createApp(dbpath='sqlite:///opentaxforms.sqlite3', **kw)
app.run()
if __name__ == "__main__":
main(cmdline=True, verbose=True)
| agpl-3.0 | 2,853,290,267,090,020,000 | 30.131313 | 79 | 0.576249 | false | 3.926115 | false | false | false |
bbglab/wok | wok/core/flow/reader.py | 1 | 6997 | ###############################################################################
#
# Copyright 2009-2011, Universitat Pompeu Fabra
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses
#
###############################################################################
import os.path
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5+
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
import sys
sys.stderr.write("Failed to import ElementTree from any known place\n")
raise
from wok.config.data import DataElement, Data
from wok.core.flow.model import *
def str_to_bool(s):
s2b = {
"0" : False, "1" : True,
"no" : False, "yes" : True,
"false" : False, "true" : True}
if s in s2b:
return s2b[s]
else:
return False
class FlowReader(object):
def __init__(self, source):
if isinstance(source, basestring):
self.path = os.path.abspath(source)
self.fp = open(source, "r")
else:
self.path = None
self.fp = source
self.__doc = None
def __read_doc(self):
if self.__doc is None:
self.__doc = etree.parse(self.fp)
return self.__doc
def read_meta(self):
doc = self.__read_doc()
root = doc.getroot()
if root.tag != "flow":
raise Exception("<flow> expected but <{}> found".format(xmle.tag))
name = root.attrib.get("name")
library = root.attrib.get("library")
version = root.attrib.get("version")
return (name, library, version)
def read(self):
doc = self.__read_doc()
root = doc.getroot()
flow = self._parse_flow(root)
if self.path:
flow.path = self.path
return flow
def _parse_base_desc(self, xmle, obj):
if "name" not in xmle.attrib:
raise Exception("'name' attribute not found in tag <{}>".format(xmle.tag))
obj.name = xmle.attrib["name"]
obj.title = xmle.findtext("title")
obj.desc = xmle.findtext("desc")
if "enabled" in xmle:
obj.enabled = str_to_bool(xmle.attr["enabled"])
def _parse_base_port(self, xmle, obj):
self._parse_base_desc(xmle, obj)
if "serializer" in xmle.attrib:
obj.serializer = xmle.attrib["serializer"]
if "wsize" in xmle.attrib:
try:
obj.wsize = int(xmle.attrib["wsize"])
except:
raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
if obj.wsize < 1:
raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
def _parse_base_module(self, xmle, obj):
self._parse_base_port(xmle, obj)
if "maxpar" in xmle.attrib:
try:
obj.maxpar = int(xmle.attrib["maxpar"])
except:
raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
if obj.maxpar < 1:
raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
conf_xml = xmle.find("conf")
if conf_xml is not None:
obj.conf = self._parse_conf(conf_xml)
res_xml = xmle.find("resources")
if res_xml is not None:
obj.resources = self._parse_conf(res_xml)
for x in xmle.findall("param"):
obj.params += [self._parse_param(x)]
for x in xmle.findall("in"):
obj.add_in_port(self._parse_port(x))
for x in xmle.findall("out"):
obj.add_out_port(self._parse_port(x))
def _parse_flow(self, xmle):
if xmle.tag != "flow":
raise Exception("<flow> expected but <{}> found".format(xmle.tag))
flow = Flow(name = None)
self._parse_base_module(xmle, flow)
if "library" in xmle.attrib:
flow.library = xmle.attrib["library"]
if "version" in xmle.attrib:
flow.version = xmle.attrib["version"]
for xmle in xmle.findall("module"):
module = self._parse_module(flow, xmle)
# TODO check that there is no other module with the same name
flow.add_module(module)
return flow
def _parse_module(self, flow, xmle):
mod = Module(name = None)
self._parse_base_module(xmle, mod)
if "depends" in xmle.attrib:
depends = [d.strip() for d in xmle.attrib["depends"].split(",")]
mod.depends = [d for d in depends if len(d) > 0]
exec_xml = xmle.find("exec")
if exec_xml is None:
run_xml = xmle.find("run")
if run_xml is None:
flow_ref_xml = xmle.find("flow")
if flow_ref_xml is None:
raise Exception("Missing either <exec>, <run> or <flow> in module {}".format(mod.name))
else:
mod.flow_ref = self._parse_flow_ref(flow, mod, flow_ref_xml)
else:
mod.execution = self._parse_run(mod, run_xml)
else:
mod.execution = self._parse_exec(exec_xml)
return mod
def _parse_param(self, xmle):
raise Exception("Unimplemented")
def _parse_port(self, xmle):
if xmle.tag == "in":
mode = PORT_MODE_IN
elif xmle.tag == "out":
mode = PORT_MODE_OUT
port = Port(name = None, mode = mode)
self._parse_base_port(xmle, port)
if "link" in xmle.attrib:
link = [x.strip() for x in xmle.attrib["link"].split(",")]
port.link = [l for l in link if len(l) > 0]
return port
def _parse_conf(self, xmle):
return Data.from_xmle(xmle)
def _parse_exec(self, xmle):
execution = Exec()
if "launcher" in xmle.attrib:
execution.mode = xmle.attrib["launcher"].lower()
if execution.mode == "python":
execution.mode = "native"
execution.conf = Data.from_xmle(xmle)
return execution
def _parse_run(self, mod, xmle):
if xmle.text is None or len(xmle.text) == 0:
raise Exception("Missing script name for <run> in module {}".format(mod.name))
execution = Exec()
execution.mode = "native"
execution.conf = DataElement()
execution.conf["script_path"] = xmle.text
return execution
def _parse_flow_ref(self, flow, mod, xmle):
if xmle.text is None or len(xmle.text) == 0:
raise Exception("Missing flow name for <flow> in module {}".format(mod.name))
flow_ref = FlowRef()
pos = xmle.text.rfind(".")
if pos == -1 and flow.library is not None:
flow_ref.canonical_name = "{}.{}".format(flow.library, xmle.text)
else:
flow_ref.canonical_name = xmle.text
if "version" in xmle.attrib:
flow_ref.version = xmle.attrib["version"]
return flow_ref
def close(self):
self.fp.close()
| gpl-3.0 | 6,454,725,803,999,862,000 | 26.225681 | 102 | 0.641561 | false | 3.003004 | false | false | false |
arruda/rmr | rmr/apps/accounts/migrations/0001_initial.py | 1 | 4385 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('accounts_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('quota', self.gf('django.db.models.fields.DecimalField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
))
db.send_create_signal('accounts', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('accounts_userprofile')
models = {
'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quota': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | mit | 4,301,774,263,361,745,000 | 61.657143 | 182 | 0.560547 | false | 3.76072 | false | false | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/samples/backup_restore_operations_async.py | 1 | 3785 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.keyvault.keys.aio import KeyClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-keys and azure-identity libraries (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic backup and restore operations on a vault(key) resource for Azure Key Vault
#
# 1. Create a key (create_key)
#
# 2. Backup a key (backup_key)
#
# 3. Delete a key (delete_key)
#
# 4. Purge a key (purge_deleted_key)
#
# 5. Restore a key (restore_key_backup)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a key client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = KeyClient(vault_url=VAULT_URL, credential=credential)
try:
# Let's create a Key of type RSA.
# if the key already exists in the Key Vault, then a new version of the key is created.
print("\n.. Create Key")
key = await client.create_key("keyName", "RSA")
print("Key with name '{0}' created with key type '{1}'".format(key.name, key.key_type))
# Backups are good to have, if in case keys gets deleted accidentally.
# For long term storage, it is ideal to write the backup to a file.
print("\n.. Create a backup for an existing Key")
key_backup = await client.backup_key(key.name)
print("Backup created for key with name '{0}'.".format(key.name))
# The rsa key is no longer in use, so you delete it.
deleted_key = await client.delete_key(key.name)
print("Deleted key with name '{0}'".format(deleted_key.name))
# Purge the deleted key.
# The purge will take some time, so wait before restoring the backup to avoid a conflict.
print("\n.. Purge the key")
await client.purge_deleted_key(key.name)
await asyncio.sleep(60)
print("Purged key with name '{0}'".format(deleted_key.name))
# In the future, if the key is required again, we can use the backup value to restore it in the Key Vault.
print("\n.. Restore the key using the backed up key bytes")
key = await client.restore_key_backup(key_backup)
print("Restored key with name '{0}'".format(key.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
await credential.close()
await client.close()
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| mit | -6,305,333,877,761,251,000 | 43.011628 | 123 | 0.60317 | false | 3.988409 | false | false | false |
masschallenge/django-accelerator | accelerator/tests/contexts/judge_feedback_context.py | 1 | 13749 | from accelerator_abstract.models import (
FORM_ELEM_FEEDBACK_TO_MC,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_OVERALL_RECOMMENDATION,
)
from accelerator.models import (
ACTIVE_PROGRAM_STATUS,
ASSIGNED_PANEL_ASSIGNMENT_STATUS,
COMPLETE_PANEL_ASSIGNMENT_STATUS,
FEEDBACK_DISPLAY_DISABLED as DISABLED,
FEEDBACK_DISPLAY_ENABLED as ENABLED,
IN_PERSON_JUDGING_ROUND_TYPE,
ONLINE_JUDGING_ROUND_TYPE,
JUDGING_FEEDBACK_STATUS_INCOMPLETE as INCOMPLETE,
PREVIEW_PANEL_STATUS,
SUBMITTED_APP_STATUS,
UserRole,
)
from accelerator.tests.factories import (
ApplicationAnswerFactory,
ApplicationFactory,
ApplicationPanelAssignmentFactory,
ExpertFactory,
JudgeApplicationFeedbackFactory,
JudgeFeedbackComponentFactory,
JudgePanelAssignmentFactory,
JudgeRoundCommitmentFactory,
JudgingFormElementFactory,
PanelFactory,
ProgramCycleFactory,
ProgramRoleFactory,
ProgramRoleGrantFactory,
ScenarioFactory,
StartupCycleInterestFactory,
StartupProgramInterestFactory,
)
from accelerator.tests.contexts.context_utils import get_user_role_by_name
from .judging_round_context import JudgingRoundContext
ELEMENT_NAMES = [
FORM_ELEM_OVERALL_RECOMMENDATION,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_FEEDBACK_TO_MC,
]
_round_type = {True: ONLINE_JUDGING_ROUND_TYPE,
False: IN_PERSON_JUDGING_ROUND_TYPE}
class JudgeFeedbackContext:
def __init__(self,
application=None,
num_components=1,
complete=True,
panel_status=PREVIEW_PANEL_STATUS,
display_feedback=False,
merge_feedback_with=None,
cycle_based_round=False,
online_round=True,
is_active=True,
judge_capacity=10,
program_status=ACTIVE_PROGRAM_STATUS):
self.judging_capacity = 0
if application:
self.application = application
self.cycle = application.cycle
else:
self.cycle = ProgramCycleFactory()
self.application = ApplicationFactory(
application_status=SUBMITTED_APP_STATUS,
application_type=self.cycle.default_application_type,
cycle=self.cycle)
self.application_type = self.application.application_type
self.applications = [self.application]
self.startup = self.application.startup
self.industry = self.startup.primary_industry
feedback_display = ENABLED if display_feedback else DISABLED
jr_kwargs = {
'program__cycle': self.cycle,
'round_type': _round_type[online_round],
'feedback_display': feedback_display,
'cycle_based_round': cycle_based_round,
'application_type': self.application_type,
'is_active': False,
'program__program_status': program_status,
}
if merge_feedback_with:
jr_kwargs['feedback_merge_with'] = merge_feedback_with
self.judging_round = JudgingRoundContext(**jr_kwargs).judging_round
self.program = self.judging_round.program
self.panel = PanelFactory(status=panel_status,
panel_time__judging_round=self.judging_round)
self.scenario = ScenarioFactory(judging_round=self.judging_round)
user_role = get_user_role_by_name(UserRole.JUDGE)
self.judge_role = ProgramRoleFactory(program=self.program,
user_role=user_role)
self.judges = []
self.judge = self.add_judge(complete=complete,
capacity=judge_capacity)
self.feedback = JudgeApplicationFeedbackFactory(
judge=self.judge,
application=self.application,
panel=self.panel,
form_type=self.judging_round.judging_form)
self.judging_form = self.feedback.form_type
self.application_assignment = ApplicationPanelAssignmentFactory(
application=self.application,
panel=self.panel,
scenario=self.scenario)
cycle_interest = StartupCycleInterestFactory(cycle=self.program.cycle,
startup=self.startup)
StartupProgramInterestFactory(program=self.program,
startup=self.startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
self.components = []
self.elements = []
self.application_questions = []
self.application_answers = []
for element_name in ELEMENT_NAMES:
self.add_component(element_name=element_name)
if complete:
self.feedback.save()
for _ in range(num_components):
self.add_component()
else:
for _ in range(num_components):
self.add_element()
self.judging_round.is_active = is_active
self.judging_round.save()
def add_application_answer(self, question=None, answer_text=None):
question = question or self.application_questions[0]
kwargs = {"application_question": question,
"application": self.application}
if answer_text:
kwargs["answer_text"] = answer_text
app_answer = ApplicationAnswerFactory(**kwargs)
self.application_answers.append(app_answer)
return app_answer
def add_component(self, element_name=None,
feedback_element=None,
add_answer=True,
answer_text=None):
factory_params = {
"judge_feedback": self.feedback, }
if feedback_element is None:
app_type_key = "__".join(["feedback_element",
"application_question",
"application_type"])
factory_params.update(
{
"feedback_element__form_type": self.judging_form,
"feedback_element__element_type": "feedback",
"feedback_element__mandatory": True,
"feedback_element__sharing": "share-with-startup",
app_type_key: self.application_type}
)
if element_name:
factory_params['feedback_element__element_name'] = element_name
else:
factory_params.update({"feedback_element": feedback_element})
if answer_text:
factory_params["answer_text"] = answer_text
component = JudgeFeedbackComponentFactory(
**factory_params)
self.components.append(component)
question = component.feedback_element.application_question
self.application_questions.append(question)
if add_answer:
app_answer = ApplicationAnswerFactory(
application_question=question,
application=self.application)
self.application_answers.append(app_answer)
if feedback_element is None:
self.elements.append(component.feedback_element)
self.feedback.save()
return component
def add_element(self,
feedback_type="",
element_type="feedback",
choice_layout="",
mandatory=True,
text_minimum=0,
text_minimum_units="",
answer_text=None,
text_limit=0,
text_limit_units=""):
element = JudgingFormElementFactory(
form_type=self.judging_form,
mandatory=mandatory,
element_type=element_type,
feedback_type=feedback_type,
choice_layout=choice_layout,
sharing="share-with-startup",
application_question__application_type=self.application_type,
text_minimum=text_minimum,
text_minimum_units=text_minimum_units,
text_limit=text_limit,
text_limit_units=text_limit_units,
)
application_question = element.application_question
self.application_questions.append(application_question)
answer_kwargs = {"application_question": application_question,
"application": self.application}
if answer_text:
answer_kwargs["answer_text"] = answer_text
application_answer = ApplicationAnswerFactory(**answer_kwargs)
self.application_answers.append(application_answer)
self.elements.append(element)
self.feedback.save()
return element
def add_extra_scenario(self):
return ScenarioFactory(judging_round=self.judging_round)
def add_panel(self):
return PanelFactory(
panel_time__judging_round=self.judging_round,
panel_type__judging_round=self.judging_round,
location__judging_round=self.judging_round)
def add_assignment(self,
judge=None,
panel=None,
scenario=None):
scenario = scenario or self.scenario
judge = judge or self.judge
panel = panel or self.panel
return JudgePanelAssignmentFactory(
judge=judge,
panel=panel,
scenario=scenario)
def add_feedback(self,
application=None,
judge=None,
panel=None,
feedback_status=INCOMPLETE):
judge = judge or self.judge
application = application or self.application
panel = panel or self.panel
if not panel.applicationpanelassignment_set.filter(
application=application).exists():
ApplicationPanelAssignmentFactory(
application=application,
panel=panel,
scenario=self.scenario)
return JudgeApplicationFeedbackFactory(
feedback_status=feedback_status,
judge=judge,
application=application,
panel=panel,
form_type=self.judging_round.judging_form)
def add_application(self,
application=None,
field=None,
option=None,
program=None):
program = program or self.program
if application is None:
fields = {
"application_status": SUBMITTED_APP_STATUS,
"application_type": self.application_type,
}
if field:
fields[field] = option
application = ApplicationFactory(**fields)
self.applications.append(application)
startup = application.startup
cycle_interest = StartupCycleInterestFactory(cycle=program.cycle,
startup=startup)
StartupProgramInterestFactory(program=program,
startup=startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
return application
def add_applications(self, count, field=None, options=[], programs=[]):
result = []
option_count = len(options)
option = None
program_count = len(programs)
program = None
for i in range(count):
if option_count > 0:
option = options[i % option_count]
if program_count > 0:
program = programs[i % program_count]
result.append(self.add_application(field=field,
option=option,
program=program))
return result
def add_judge(self,
assigned=True,
complete=True,
judge=None,
panel=None,
capacity=10):
if judge is None:
judge = ExpertFactory(
profile__primary_industry=self.industry,
profile__home_program_family=self.program.program_family)
ProgramRoleGrantFactory(person=judge, program_role=self.judge_role)
self.judging_round.confirmed_judge_label.users.add(judge)
JudgeRoundCommitmentFactory(judging_round=self.judging_round,
judge=judge,
capacity=10,
commitment_state=True)
self.judging_capacity += capacity
if assigned:
if complete:
status = COMPLETE_PANEL_ASSIGNMENT_STATUS
else:
status = ASSIGNED_PANEL_ASSIGNMENT_STATUS
JudgePanelAssignmentFactory(
judge=judge,
assignment_status=status,
panel=panel or self.panel,
scenario=self.scenario)
self.judges.append(judge)
return judge
@classmethod
def create_batch(cls, qty, *args, **kwargs):
if 'merge_feedback' in kwargs:
merge_feedback = kwargs.pop('merge_feedback')
else:
merge_feedback = False
contexts = [cls(*args, **kwargs)]
if merge_feedback:
kwargs['merge_feedback_with'] = contexts[0].judging_round
for _ in range(1, qty):
contexts.append(cls(*args, **kwargs))
return contexts
| mit | 4,584,632,868,787,122,000 | 38.852174 | 79 | 0.56695 | false | 4.558687 | false | false | false |
superbatlc/dtailweb | phonegroups/migrations/0001_initial.py | 1 | 1870 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('systems', '0001_initial'),
('calls', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Phonegroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name=b'Nome')),
('code', models.CharField(max_length=10, verbose_name=b'Codice')),
('parent', models.ForeignKey(related_name='child_phonegroup_set', blank=True, to='phonegroups.Phonegroup', help_text=b'The father of this group', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PhonegroupCall',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('call', models.ForeignKey(to='calls.Call')),
('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PhonegroupExtension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('extension', models.CharField(max_length=4)),
('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
('system', models.ForeignKey(to='systems.System')),
],
options={
},
bases=(models.Model,),
),
]
| gpl-2.0 | -5,535,782,326,201,843,000 | 36.4 | 174 | 0.536364 | false | 4.452381 | false | false | false |
hiraditya/fool | tensorflow/scaling-up-ml-using-cmle.py | 1 | 6449 | '''
In this lab, you will learn how to:
Package up TensorFlow model
Run training locally
Run training on cloud
Deploy model to cloud
Invoke model to carry out predictions
'''
'''
Scaling up ML using Cloud ML Engine
In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud MLE. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates how to package up a TensorFlow model to run it within Cloud ML.
Later in the course, we will look at ways to make a more effective machine learning model.
Environment variables for project and bucket
Note that:
Your project id is the unique string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: Project ID: cloud-training-demos
Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket.
Change the cell below to reflect your Project ID and bucket name.
'''
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7' # Tensorflow version
%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print response['serviceAccount']")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
'''
Packaging up the code
Take your code and put it into a standard Python package structure. model.py and task.py contain the TensorFlow code from earlier (explore the directory structure).
'''
!find taxifare
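'''
The listing should show a standard trainer package; an illustrative layout
(assumed here rather than captured output) is:
  taxifare/
    trainer/
      __init__.py
      model.py
      task.py
'''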
!cat taxifare/trainer/model.py
'''
Find absolute paths to your data
Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you
'''
%bash
echo $PWD
rm -rf $PWD/taxi_trained
head -1 $PWD/taxi-train.csv
head -1 $PWD/taxi-valid.csv
'''
Running the Python module from the command-line
'''
%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 --job-dir=./tmp
%bash
ls $PWD/taxi_trained/export/exporter/
%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}
%bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ml-engine local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=./test.json
'''
Running locally using gcloud
'''
%bash
rm -rf taxifare.tar.gz taxi_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/taxi_trained
'''
When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13.
'''
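'''
The RMSE quoted above is simply the square root of the reported average_loss
(MSE). The cell below is an illustrative sanity check added for clarity; the
value 187 is the example number mentioned above, not a reproducible result.
'''
import math
print(math.sqrt(187))  # ~13.7, i.e. an RMSE of roughly 13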
from google.datalab.ml import TensorBoard
TensorBoard().start('./taxi_trained')
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print 'Stopped TensorBoard with pid {}'.format(pid)
'''
If the above step (to stop TensorBoard) appears stalled, just move on to the next step. You don't need to wait for it to return.
'''
!ls $PWD/taxi_trained
'''
Submit training job using gcloud
First copy the training data to the cloud. Then, launch a training job.
After you submit the job, go to the cloud console (http://console.cloud.google.com) and select Machine Learning | Jobs to monitor progress.
Note: Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.
https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction
'''
%bash
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-train*" \
--eval_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-valid*" \
--output_dir=$OUTDIR \
--train_steps=10000
'''
Job [lab3a_180607_192245] submitted successfully.
Your job is still active. You may view the status of your job with the command (on google cloud console)
$ gcloud ml-engine jobs describe lab3a_180607_192245
or continue streaming the logs with the command
$ gcloud ml-engine jobs stream-logs lab3a_180607_192245
Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.
'''
| mit | 2,726,381,697,759,011,000 | 36.71345 | 401 | 0.728485 | false | 3.317387 | false | false | false |
wangyixiaohuihui/spark2-annotation | python/pyspark/mllib/stat/_statistics.py | 1 | 13703 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = str
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult
__all__ = ['MultivariateStatisticalSummary', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
"""
Trait for multivariate statistical summary of a data matrix.
"""
def mean(self):
return self.call("mean").toArray()
def variance(self):
return self.call("variance").toArray()
def count(self):
return int(self.call("count"))
def numNonzeros(self):
return self.call("numNonzeros").toArray()
def max(self):
return self.call("max").toArray()
def min(self):
return self.call("min").toArray()
def normL1(self):
return self.call("normL1").toArray()
def normL2(self):
return self.call("normL2").toArray()
class Statistics(object):
@staticmethod
def colStats(rdd):
"""
Computes column-wise summary statistics for the input RDD[Vector].
:param rdd: an RDD[Vector] for which column-wise summary statistics
are to be computed.
:return: :class:`MultivariateStatisticalSummary` object containing
column-wise summary statistics.
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
... Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8])])
>>> cStats = Statistics.colStats(rdd)
>>> cStats.mean()
array([ 4., 4., 0., 3.])
>>> cStats.variance()
array([ 4., 13., 0., 25.])
>>> cStats.count()
3
>>> cStats.numNonzeros()
array([ 3., 2., 0., 3.])
>>> cStats.max()
array([ 6., 7., 0., 8.])
>>> cStats.min()
array([ 2., 0., 0., -2.])
"""
cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
return MultivariateStatisticalSummary(cStats)
@staticmethod
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method)
@staticmethod
@ignore_unicode_prefix
def chiSqTest(observed, expected=None):
"""
If `observed` is Vector, conduct Pearson's chi-squared goodness
of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
having an expected frequency of `1 / len(observed)`.
If `observed` is matrix, conduct Pearson's independence test on the
input contingency matrix, which cannot contain negative entries or
columns or rows that sum up to 0.
If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
test for every feature against the label across the input RDD.
For each feature, the (feature, label) pairs are converted into a
contingency matrix for which the chi-squared statistic is computed.
All label and feature values must be categorical.
.. note:: `observed` cannot contain negative values
:param observed: it could be a vector containing the observed categorical
counts/relative frequencies, or the contingency matrix
(containing either counts or relative frequencies),
or an RDD of LabeledPoint containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
:param expected: Vector containing the expected categorical counts/relative
frequencies. `expected` is rescaled if the `expected` sum
differs from the `observed` sum.
:return: ChiSquaredTest object containing the test statistic, degrees
of freedom, p-value, the method used, and the null hypothesis.
>>> from pyspark.mllib.linalg import Vectors, Matrices
>>> observed = Vectors.dense([4, 6, 5])
>>> pearson = Statistics.chiSqTest(observed)
>>> print(pearson.statistic)
0.4
>>> pearson.degreesOfFreedom
2
>>> print(round(pearson.pValue, 4))
0.8187
>>> pearson.method
u'pearson'
>>> pearson.nullHypothesis
u'observed follows the same distribution as expected.'
>>> observed = Vectors.dense([21, 38, 43, 80])
>>> expected = Vectors.dense([3, 5, 7, 20])
>>> pearson = Statistics.chiSqTest(observed, expected)
>>> print(round(pearson.pValue, 4))
0.0027
>>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
>>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
>>> print(round(chi.statistic, 4))
21.9958
>>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
... LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
... LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
... LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
>>> rdd = sc.parallelize(data, 4)
>>> chi = Statistics.chiSqTest(rdd)
>>> print(chi[0].statistic)
0.75
>>> print(chi[1].statistic)
1.5
"""
if isinstance(observed, RDD):
if not isinstance(observed.first(), LabeledPoint):
raise ValueError("observed should be an RDD of LabeledPoint")
jmodels = callMLlibFunc("chiSqTest", observed)
return [ChiSqTestResult(m) for m in jmodels]
if isinstance(observed, Matrix):
jmodel = callMLlibFunc("chiSqTest", observed)
else:
if expected and len(expected) != len(observed):
raise ValueError("`expected` should have same length with `observed`")
jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
return ChiSqTestResult(jmodel)
@staticmethod
@ignore_unicode_prefix
def kolmogorovSmirnovTest(data, distName="norm", *params):
"""
Performs the Kolmogorov-Smirnov (KS) test for data sampled from
a continuous distribution. It tests the null hypothesis that
the data is generated from a particular distribution.
The given data is sorted and the Empirical Cumulative
Distribution Function (ECDF) is calculated
which for a given point is the number of points having a CDF
value lesser than it divided by the total number of points.
Since the data is sorted, this is a step function
that rises by (1 / length of data) for every ordered point.
The KS statistic gives us the maximum distance between the
ECDF and the CDF. Intuitively if this statistic is large, the
        probability that the null hypothesis is true becomes small.
For specific details of the implementation, please have a look
at the Scala documentation.
:param data: RDD, samples from the data
:param distName: string, currently only "norm" is supported.
(Normal distribution) to calculate the
theoretical distribution of the data.
:param params: additional values which need to be provided for
a certain distribution.
If not provided, the default values are used.
:return: KolmogorovSmirnovTestResult object containing the test
statistic, degrees of freedom, p-value,
the method used, and the null hypothesis.
>>> kstest = Statistics.kolmogorovSmirnovTest
>>> data = sc.parallelize([-1.0, 0.0, 1.0])
>>> ksmodel = kstest(data, "norm")
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
>>> ksmodel.nullHypothesis
u'Sample follows theoretical distribution'
>>> data = sc.parallelize([2.0, 3.0, 4.0])
>>> ksmodel = kstest(data, "norm", 3.0, 1.0)
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD, got %s." % type(data))
if not isinstance(distName, basestring):
raise TypeError("distName should be a string, got %s." % type(distName))
params = [float(param) for param in params]
return KolmogorovSmirnovTestResult(
callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.stat.statistics tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 1,669,562,013,303,584,300 | 40.821875 | 95 | 0.583157 | false | 4.06376 | true | false | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/lead_form_desired_intent.py | 1 | 1209 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'LeadFormDesiredIntentEnum',
},
)
class LeadFormDesiredIntentEnum(proto.Message):
r"""Describes the desired level of intent of generated leads. """
class LeadFormDesiredIntent(proto.Enum):
r"""Enum describing the desired level of intent of generated
leads.
"""
UNSPECIFIED = 0
UNKNOWN = 1
LOW_INTENT = 2
HIGH_INTENT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -8,953,890,120,698,834,000 | 29.225 | 74 | 0.687345 | false | 3.88746 | false | false | false |
praekelt/jmbo-twitter | jmbo_twitter/models.py | 1 | 5204 | import datetime, twitter
from urllib2 import URLError
import logging
from django.db import models
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from jmbo.models import ModelBase
logger = logging.getLogger('django')
class Status(ModelBase):
"""Purely a wrapper that allows us to use jmbo-foundry's listings for
tweets."""
def __init__(self, status):
# Copy attributes over
attrs = ('contributors', 'coordinates', 'created_at', \
'created_at_in_seconds', 'favorited', 'geo', 'hashtags', 'id', \
'in_reply_to_screen_name', 'in_reply_to_status_id', \
'in_reply_to_user_id', 'location', 'now', 'place', \
'relative_created_at', 'retweet_count', 'retweeted', \
'retweeted_status', 'source', 'text', 'truncated', 'urls', 'user', \
'user_mentions', 'created_at_datetime')
for attr in attrs:
setattr(self, attr, getattr(status, attr))
@property
def as_leaf_class(self):
return self
def save(self):
        raise NotImplementedError
class StatusMixin(object):
def get_statuses(self, api):
        raise NotImplementedError
def fetch(self, force=False):
klass_name = self.__class__.__name__
cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
cached = cache.get(cache_key, None)
if (cached is not None) and not force:
return cached
# Get and check settings
di = getattr(settings, 'JMBO_TWITTER', {})
ck = di.get('consumer_key')
cs = di.get('consumer_secret')
atk = di.get('access_token_key')
ats = di.get('access_token_secret')
if not all([ck, cs, atk, ats]):
logger.error(
'jmbo_twitter.models.%s.fetch - incomplete settings' \
% klass_name
)
return []
# Query twitter taking care to handle network errors
api = twitter.Api(
consumer_key=ck, consumer_secret=cs, access_token_key=atk,
access_token_secret=ats, requests_timeout=10
)
try:
statuses = self.get_statuses(api)
except (URLError, ValueError, twitter.TwitterError):
statuses = []
except Exception, e:
# All manner of things can go wrong with integration
logger.error(
'jmbo_twitter.models.%s.fetch - %s' % (klass_name, e.message)
)
statuses = []
for status in statuses:
status.created_at_datetime = datetime.datetime.fromtimestamp(
status.created_at_in_seconds
)
if statuses:
# Only set if there are statuses. Twitter may randomly throttle us
# and destroy our cache without this check. Cache for a long time
            # in case Twitter goes down.
cache.set(cache_key, statuses, 86400)
# Legacy return
return statuses
@property
def fetched(self):
klass_name = self.__class__.__name__
cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
return cache.get(cache_key, [])
@property
def tweets(self):
class MyList(list):
"""Slightly emulate QuerySet API so jmbo-foundry listings work"""
@property
def exists(self):
return len(self) > 0
result = []
for status in self.fetched:
result.append(Status(status))
return MyList(result)
class Feed(ModelBase, StatusMixin):
"""A feed represents a twitter user account"""
name = models.CharField(
max_length=255,
unique=True,
help_text="A twitter account name, eg. johnsmith"
)
profile_image_url = models.CharField(
null=True, editable=False, max_length=255
)
twitter_id = models.CharField(max_length=255, default='', editable=False)
def get_statuses(self, api):
# Fall back to slug for historical reasons
statuses = api.GetUserTimeline(
screen_name=self.name or self.slug, include_rts=True
)
return statuses
def fetch(self, force=False):
statuses = super(Feed, self).fetch(force=force)
if statuses:
# This is a convenient place to set the feed image url
status = statuses[0]
changed = False
if status.user.profile_image_url != self.profile_image_url:
self.profile_image_url = status.user.profile_image_url
changed = True
if status.user.name != self.title:
self.title = status.user.name
changed = True
if changed:
self.save()
return statuses
class Search(ModelBase, StatusMixin):
"""A search represents a twitter keyword search"""
criteria = models.CharField(
max_length=255,
unique=True,
help_text="Search string or a hashtag"
)
class Meta:
verbose_name_plural = _("Searches")
def get_statuses(self, api):
return api.GetSearch(self.criteria)
| bsd-3-clause | 148,431,581,819,713,100 | 30.349398 | 80 | 0.580515 | false | 4.071987 | false | false | false |
ronas/PythonGNF | Fabulao/PedidosCapa.py | 1 | 3001 | # -*- coding: latin -*-
import sys
#from PyQt5 import QtGui, QtCore, QtWidgets #, QTableWidget, QTableWidgetItem
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QLineEdit, QLabel
from PyQt5.QtCore import QSize, Qt
import pymysql
config = {
'host': 'localhost',
'port': 3306,
'database': 'LojaDB',
'user': 'root',
'password' : 'fbl1978'
}
class ClasseAPP(QWidget):
def __init__(self):
super(ClasseAPP, self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('Pedidos')
self.resize(850, 400)
self.move(300, 200)
self.tabela = QTableWidget(3,5,self)
self.tabela.setGeometry(20,20,760,300)
self.tabela.setHorizontalHeaderLabels(('Numero Pedido','Data','Codigo Cliente','Telefone','Cond Pagamento'))
self.dbBuscarPedidos()
self.lblNumeroPedido = QLabel('Numero Pedido',self)
self.lblNumeroPedido.setGeometry(20,330,130,25)
self.lblData = QLabel('Data',self)
        self.lblData.setGeometry(100, 360, 50, 25)
#self.lblCodigoCliente = QLabel('Codigo Cliente',self)
#self.lblCodigoCliente.setGeometry()
#self.lblTelefone = QLabel('Telefone',self)
#self.lblTelefone.setGeometry()
#self.lblCondPagamento = QLabel('Cond Pagamento',self)
#self.lblCondPagamento.setGeometry()
self.txtNumeroPedido = QLineEdit(self)
self.txtNumeroPedido.setGeometry(130,330,130,25)
self.txtData = QLineEdit(self)
self.txtData.setGeometry(130,360,50,25)
#self.txtCodigoCliente = QLineEdit(self)
#self.txtCOdigoCliente.setGeometry()
#self.txtTelefone = QLineEdit(self)
#self.txtTelefone.setGeometry()
#self.txtCondPagamento = QLineEdit(self)
#self.txtCondPagamento.setGeometry()
self.tabela.resizeColumnsToContents()
self.show()
def dbBuscarPedidos(self):
db = pymysql.connect(**config)
cursor = db.cursor()
comando = ('select * from LojaDB.Pedidos ')
cursor.execute(comando )
self.tabela.setRowCount(0)
registros = cursor.fetchall()
for registro in registros:
numerolinhas = self.tabela.rowCount()
self.tabela.insertRow(numerolinhas)
self.tabela.setItem(numerolinhas, 0, QTableWidgetItem( str(registro[0]) ))
self.tabela.setItem(numerolinhas, 1, QTableWidgetItem( str(registro[1]) ))
self.tabela.setItem(numerolinhas, 2, QTableWidgetItem( registro[2] ))
self.tabela.setItem(numerolinhas, 3, QTableWidgetItem( str(registro[3]) ))
self.tabela.setItem(numerolinhas, 4, QTableWidgetItem( registro[4] ))
cursor.close()
db.close()
def main():
app = QApplication(sys.argv)
MeuApp = ClasseAPP()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | gpl-3.0 | -1,464,602,394,802,016,300 | 29.632653 | 116 | 0.625125 | false | 3.286966 | false | false | false |
caronc/newsreap | newsreap/Logging.py | 1 | 6395 | # -*- coding: utf-8 -*-
#
# Common Logging Parameters and Defaults
#
# Copyright (C) 2015-2017 Chris Caron <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# The first part of the file defines all of the namespacing
# used by this application
import sys
import logging
import logging.handlers
# We intentionally import this module so it preconfigures it's logging
# From there we can choose to manipulate it later without worrying about
# it's configuration over-riding ours; This creates a lint warning
# that we're importing a module we're not using; but this is intended.
# do not comment out or remove this entry
import sqlalchemy
# The default logger identifier used for general logging
NEWSREAP_LOGGER = 'newsreap'
# The default logger which displays backend engine and
# NNTP Server Interaction
NEWSREAP_ENGINE = '%s.engine' % NEWSREAP_LOGGER
# Codec Manipulation such as yEnc, uuencoded, etc
NEWSREAP_CODEC = '%s.codec' % NEWSREAP_LOGGER
# Users should utilize this for their hook logging
NEWSREAP_HOOKS = '%s.hooks' % NEWSREAP_LOGGER
# Command Line Interface Logger
NEWSREAP_CLI = '%s.cli' % NEWSREAP_LOGGER
# For a common reference point, we include the static logging
# Resource at the time for this information was:
# - http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#dbengine-logging
#
# namespaces used by SQLAlchemy
SQLALCHEMY_LOGGER = 'sqlalchemy'
# Defines the logger for the SQLAlchemy Engine
SQLALCHEMY_ENGINE = '%s.engine' % SQLALCHEMY_LOGGER
# Controls SQLAlchemy's connection pool logging.
SQLALCHEMY_POOL = '%s.pool' % SQLALCHEMY_LOGGER
# Controls SQLAlchemy's various Object Relational Mapping (ORM) logging.
SQLALCHEMY_ORM = '%s.orm' % SQLALCHEMY_LOGGER
# The number of bytes reached before automatically rotating the log file
# if this option was specified
# 5000000 bytes == 5 Megabytes
LOG_ROTATE_FILESIZE_BYTES = 5000000
def add_handler(logger, sendto=True, backupCount=5):
"""
    Add handler to the identified logger
sendto == None then logging is disabled
sendto == True then logging is put to stdout
sendto == False then logging is put to stderr
sendto == <string> then logging is routed to the filename specified
    if sendto is a <string>, then backupCount defines the number of rotated
    log files to keep around. Set this to 0 or None if you don't want the
    Python logger to rotate out old files at all. By default logs are rotated
    once they reach 5MB.
"""
if sendto is True:
# redirect to stdout
handler = logging.StreamHandler(sys.stdout)
elif sendto is False:
# redirect to stderr
handler = logging.StreamHandler(sys.stderr)
elif sendto is None:
# redirect to null
try:
handler = logging.NullHandler()
except AttributeError:
# Python <= v2.6
class NullHandler(logging.Handler):
def emit(self, record):
pass
handler = NullHandler()
# Set data to NOTSET just to eliminate the
# extra checks done internally
if logger.level != logging.NOTSET:
logger.setLevel(logging.NOTSET)
elif isinstance(sendto, basestring):
if backupCount is None:
handler = logging.FileHandler(filename=sendto)
elif isinstance(backupCount, int):
            handler = logging.handlers.RotatingFileHandler(
filename=sendto,
maxBytes=LOG_ROTATE_FILESIZE_BYTES,
backupCount=backupCount,
)
else:
# We failed to add a handler
return False
# Setup Log Format
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'))
# Add Handler
logger.addHandler(handler)
return True
def init(verbose=2, sendto=True, backupCount=5):
"""
    Sets up some simple default handling to make it
    easier for those wrapping this library.
    You do not need to call this function if you
    don't want to; ideally one might want to set up
things their own way.
"""
# Add our handlers at the parent level
add_handler(
logging.getLogger(SQLALCHEMY_LOGGER),
sendto=True,
backupCount=backupCount,
)
add_handler(
logging.getLogger(NEWSREAP_LOGGER),
sendto=True,
backupCount=backupCount,
)
if verbose:
set_verbosity(verbose=verbose)
def set_verbosity(verbose):
"""
A simple function one can use to set the verbosity of
the app.
"""
# Default
logging.getLogger(SQLALCHEMY_LOGGER).setLevel(logging.ERROR)
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_LOGGER).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_CLI).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.ERROR)
# Handle Verbosity
if verbose > 0:
logging.getLogger(NEWSREAP_CLI).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.INFO)
if verbose > 1:
logging.getLogger(NEWSREAP_CLI).setLevel(logging.DEBUG)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.DEBUG)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.DEBUG)
if verbose > 2:
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.INFO)
if verbose > 3:
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.DEBUG)
if verbose > 4:
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.DEBUG)
# set initial level to WARN.
rootlogger = logging.getLogger(NEWSREAP_LOGGER)
if rootlogger.level == logging.NOTSET:
set_verbosity(-1)
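# Example wiring for a script embedding this module (a sketch only; the import
# path below is an assumption based on the package name):
#
#   from newsreap.Logging import init, set_verbosity
#   init(verbose=2, sendto='/var/log/newsreap.log', backupCount=5)
#   set_verbosity(3)  # later, also enable codec and SQLAlchemy INFO logging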
| gpl-3.0 | 3,607,045,367,080,225,300 | 31.794872 | 76 | 0.696638 | false | 3.89227 | false | false | false |
Mirantis/octane | octane/commands/sync_images.py | 1 | 2791 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers.sync_glance_images import sync_glance_images
from octane.util import db
from octane.util import env as env_util
from octane.util import ssh
def prepare(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
controller = env_util.get_one_controller(seed_env)
with tempfile.NamedTemporaryFile() as temp:
db.mysqldump_from_env(orig_env, ['keystone'], temp.name)
db.mysqldump_restore_to_env(seed_env, temp.name)
ssh.call(['keystone-manage', 'db_sync'],
node=controller, parse_levels=True)
for controller in env_util.get_controllers(seed_env):
ssh.call(['service', 'memcached', 'restart'], node=controller)
class SyncImagesCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
parser.add_argument(
'swift_ep', type=str,
help="Endpoint's name where swift-proxy service is listening on")
return parser
def take_action(self, parsed_args):
sync_glance_images(parsed_args.orig_id, parsed_args.seed_id,
parsed_args.swift_ep)
class SyncImagesPrepareCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesPrepareCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
return parser
def take_action(self, parsed_args):
prepare(parsed_args.orig_id, parsed_args.seed_id)
| apache-2.0 | -4,300,452,264,042,446,300 | 36.716216 | 77 | 0.677893 | false | 3.860304 | false | false | false |
Wolnosciowiec/file-repository | client/bahub/bahubapp/handler/__init__.py | 1 | 4733 | from ..entity.definition import BackupDefinition
from ..service.client import FileRepositoryClient
from ..service.pipefactory import PipeFactory
from ..exceptions import ReadWriteException
from ..result import CommandExecutionResult
from logging import Logger
import string
import random
import subprocess
from shutil import copyfileobj
class BackupHandler:
""" Manages the process of backup and restore, interacts with different sources of backup data using adapters """
_client = None # type: FileRepositoryClient
_pipe_factory = None # type: PipeFactory
_logger = None # type: Logger
_definition = None
def __init__(self,
_client: FileRepositoryClient,
_pipe_factory: PipeFactory,
_logger: Logger,
_definition: BackupDefinition):
self._client = _client
self._pipe_factory = _pipe_factory
self._logger = _logger
self._definition = _definition
def perform_backup(self):
self._validate()
self._validate_running_command()
response = self._read()
if response.return_code != 0 and response.return_code is not None:
raise ReadWriteException('Backup source read error, use --debug and retry to investigate')
upload_response = self._client.send(response.stdout, self._get_definition())
response.process.wait(15)
response.stdout.close()
return upload_response
def perform_restore(self, version: str):
response = self._write(
self._read_from_storage(version)
)
response.process.wait()
self._logger.info('Waiting for process to finish')
if response.return_code is not None and response.return_code > 0:
raise ReadWriteException('Cannot write files to disk while restoring from backup. Errors: '
+ str(response.stderr.read().decode('utf-8')))
self._logger.info('No errors found, sending success information')
return '{"status": "OK"}'
def close(self):
self._logger.info('Finishing the process')
self._close()
def _get_definition(self) -> BackupDefinition:
return self._definition
def _execute_command(self, command: str, stdin=None) -> CommandExecutionResult:
"""
Executes a command on local machine, returning stdout as a stream, and streaming in the stdin (optionally)
"""
self._logger.debug('shell(' + command + ')')
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin else None,
executable='/bin/bash',
shell=True)
if stdin:
self._logger.info('Copying stdin to process')
try:
copyfileobj(stdin, process.stdin)
except BrokenPipeError:
raise ReadWriteException(
'Cannot write to process, broken pipe occurred, probably a tar process died. '
+ str(process.stdin.read()) + str(process.stderr.read())
)
process.stdin.close()
return CommandExecutionResult(process.stdout, process.stderr, process.returncode, process)
def _validate_running_command(self):
""" Validate if the command really exports the data, does not end up with an error """
response = self._read()
response.stdout.read(1024)
response.process.kill()
response.process.wait(15)
if response.process.returncode > 0:
raise ReadWriteException(
'The process exited with incorrect code, try to verify the command in with --debug switch'
)
def _validate(self):
raise Exception('_validate() not implemented for handler')
def _read(self) -> CommandExecutionResult:
""" TAR output or file stream buffered from ANY source for example """
raise Exception('_read() not implemented for handler')
def _write(self, stream) -> CommandExecutionResult:
""" A file stream or tar output be written into the storage. May be OpenSSL encoded, depends on definition """
raise Exception('_write() not implemented for handler')
def _read_from_storage(self, version: str):
return self._client.fetch(version, self._get_definition())
def _close(self):
pass
@staticmethod
def generate_id(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
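# A minimal illustrative adapter built on the hooks above. This is a sketch
# only -- the class name and the tar commands are assumptions, not part of
# bahub itself:
#
# class DirectoryBackupHandler(BackupHandler):
#     """ Streams a directory as a tar archive """
#
#     def _validate(self):
#         pass  # e.g. check that the configured path exists
#
#     def _read(self) -> CommandExecutionResult:
#         # tar the directory to stdout so perform_backup() can stream it
#         return self._execute_command('tar -czf - ./')
#
#     def _write(self, stream) -> CommandExecutionResult:
#         # unpack the incoming stream back onto disk during restore
#         return self._execute_command('tar -xzf - -C ./', stdin=stream)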
| lgpl-3.0 | -7,351,242,393,543,664,000 | 35.129771 | 118 | 0.616734 | false | 4.761569 | false | false | false |
aerokappa/SantaClaus | handCodedOptimum_v4.py | 1 | 2216 | import numpy as np
import pandas as pd
from processInput import processInput
def handCodedOptimum_v4 ( ):
fileName = 'gifts.csv'
giftList, giftListSummary = processInput( fileName )
packedBags = []
for i in np.arange(1000):
print i
currentBag = []
if (i< 333):
itemCount = np.array([0 ,3 ,0 ,0 ,0 ,0 ,0 ,3 ,0])
elif ((i>=333) & (i<458)):
itemCount = np.array([8, 0, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=458) & (i<583)):
itemCount = np.array([0, 0, 0, 0, 0, 0, 8, 0, 0])
elif ((i>=583) & (i<916)):
itemCount = np.array([0, 0, 0, 3, 0, 2, 0, 0, 0])
elif ((i>=916) & (i<924)):
itemCount = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 25])
elif ((i>=924) & (i<928)):
itemCount = np.array([ 0, 23, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=928) & (i<938)):
itemCount = np.array([ 0, 0, 0, 0, 0, 19, 0, 0, 0])
elif ((i>=938) & (i<939)):
itemCount = np.array([ 0, 0, 0, 0, 0, 11, 0, 1, 0])
elif ((i>=939) & (i<940)):
itemCount = np.array([0, 9, 0, 1, 0, 0, 0, 0, 0])
else:
itemCount = np.array([0, 0, 1, 0, 0, 5, 0, 0, 0])
for i in np.arange(len(itemCount)):
if (itemCount[i] <= giftListSummary['nGiftsNotPacked'][i]):
for j in np.arange(itemCount[i]):
giftName = giftListSummary['GiftType'][i]
currGiftID = giftListSummary['nGiftsPacked'][i]
currentBag.append(giftName+'_'+str(currGiftID))
giftListSummary['nGiftsPacked'][i] += 1
giftListSummary['nGiftsNotPacked'][i] -= 1
packedBags.append(currentBag)
    # Write to File 'submission_5.csv'
subFile = open('submission_5.csv','w')
subFile.write('Gifts\n')
for currentBag in packedBags:
subFile.write(currentBag[0])
for currentItem in currentBag[1:]:
subFile.write(' ')
subFile.write(currentItem)
subFile.write('\n')
subFile.close()
return packedBags | mit | -6,132,476,382,293,829,000 | 35.344262 | 73 | 0.476083 | false | 3.014966 | false | false | false |
Jbonnett/Mutagen-flo | mutagen/ogg.py | 1 | 17770 | # Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 3975 2007-01-13 21:51:17Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes, WrappedFileobj
class error(IOError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
    The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
version -- stream structure version (currently always 0)
position -- absolute stream position (default -1)
serial -- logical stream serial number (default 0)
sequence -- page sequence number within logical stream (default 0)
offset -- offset this page was read from (default None)
complete -- if the last packet on this page is complete (default True)
packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0L
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags, self.position,
self.serial, self.sequence, crc, segments) = struct.unpack(
"<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != "OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, "OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in map(ord, lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = map(fileobj.read, lacings)
if map(len, self.packets) != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
__hash__ = object.__hash__
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append("\xff" * quot + chr(rem))
lacing_data = "".join(lacing_data)
if not self.complete and lacing_data.endswith("\x00"):
lacing_data = lacing_data[:-1]
data.append(chr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = "".join(data)
# Python's CRC is swapped relative to Ogg's needs.
crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
# Although we're using to_int_be, this actually makes the CRC
# a proper le integer, since Python's CRC is byteswapped.
crc = cdata.to_int_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
def __size(self):
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
size = property(__size, doc="Total frame size.")
def __set_flag(self, bit, val):
mask = 1 << bit
if val: self.__type_flags |= mask
else: self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
def renumber(klass, fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
fileobj must point to the start of a valid Ogg page; any
    pages occurring after it that are part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
    be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try: page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
renumber = classmethod(renumber)
def to_packets(klass, pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append("")
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else: sequence += 1
if page.continued: packets[-1] += page.packets[0]
else: packets.append(page.packets[0])
packets.extend(page.packets[1:])
return packets
to_packets = classmethod(to_packets)
def from_packets(klass, packets, sequence=0,
default_size=4096, wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
        Pages are numbered starting at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append("")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1L
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = ""
if page.packets:
pages.append(page)
return pages
from_packets = classmethod(from_packets)
def replace(klass, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1L
new_data = "".join(map(klass.write, new_pages))
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
new_data_end = old_pages[0].offset + delta
# Go through the old pages and delete them. Since we shifted
# the data down the file, we need to adjust their offsets. We
# also need to go backwards, so we don't adjust the deltas of
# the other pages.
old_pages.reverse()
for old_page in old_pages:
adj_offset = old_page.offset + delta
delete_bytes(fileobj, old_page.size, adj_offset)
        # Finally, if there's any discrepancy in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
klass.renumber(fileobj, serial, sequence)
replace = classmethod(replace)
def find_last(klass, fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
it must read the whole the stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try: fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try: index = data.rindex("OggS")
except ValueError:
raise error("unable to find final Ogg header")
stringobj = StringIO(data[index:])
best_page = None
try:
page = OggPage(stringobj)
except error:
pass
else:
if page.serial == serial:
if page.last: return page
else: best_page = page
else: best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page
find_last = classmethod(find_last)
class OggFileType(FileType):
"""An generic Ogg file."""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
def load(self, filename):
"""Load file information from a filename."""
self.filename = filename
fileobj = WrappedFileobj(filename, "rb")
try:
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
if self.info.length:
# The streaminfo gave us real length information,
# don't waste time scanning the Ogg.
return
last_page = OggPage.find_last(fileobj, self.info.serial)
samples = last_page.position
try:
denom = self.info.sample_rate
except AttributeError:
denom = self.info.fps
self.info.length = samples / float(denom)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def delete(self, filename=None):
"""Remove tags from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
self.tags.clear()
fileobj = WrappedFileobj(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def save(self, filename=None):
"""Save a tag to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
fileobj = WrappedFileobj(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
| gpl-2.0 | -2,453,146,239,836,660,000 | 34.54 | 79 | 0.570568 | false | 4.24105 | false | false | false |
arcticfoxnv/slackminion | slackminion/plugin/base.py | 1 | 4847 | from six import string_types
from builtins import object
import logging
import threading
from slackminion.slack import SlackChannel, SlackIM, SlackUser, SlackRoom
class BasePlugin(object):
def __init__(self, bot, **kwargs):
self.log = logging.getLogger(type(self).__name__)
self._bot = bot
self._dont_save = False # By default, we want to save a plugin's state during save_state()
self._state_handler = False # State storage backends should set this to true
self._timer_callbacks = {}
self.config = {}
if 'config' in kwargs:
self.config = kwargs['config']
def on_load(self):
"""
Executes when a plugin is loaded.
Override this if your plugin needs to do initialization when loading.
Do not use this to restore runtime changes to variables -- they will be overwritten later on by
PluginManager.load_state()
"""
return True
def on_unload(self):
"""
Executes when a plugin is unloaded.
Override this if your plugin needs to do cleanup when unloading.
"""
return True
def on_connect(self):
"""
Executes immediately after connecting to slack.
Will not fire on reconnects.
"""
return True
def send_message(self, channel, text, thread=None, reply_broadcast=False):
"""
Used to send a message to the specified channel.
* channel - can be a channel or user
* text - message to send
* thread - thread to reply in
* reply_broadcast - whether or not to also send the message to the channel
"""
self.log.debug('Sending message to channel {} of type {}'.format(channel, type(channel)))
if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):
self._bot.send_im(channel, text)
elif isinstance(channel, SlackRoom):
self._bot.send_message(channel, text, thread, reply_broadcast)
elif isinstance(channel, string_types):
if channel[0] == '@':
self._bot.send_im(channel[1:], text)
elif channel[0] == '#':
self._bot.send_message(channel[1:], text, thread, reply_broadcast)
else:
self._bot.send_message(channel, text, thread, reply_broadcast)
else:
self._bot.send_message(channel, text, thread, reply_broadcast)
def start_timer(self, duration, func, *args):
"""
Schedules a function to be called after some period of time.
* duration - time in seconds to wait before firing
* func - function to be called
* args - arguments to pass to the function
"""
self.log.info("Scheduling call to %s in %ds: %s", func.__name__, duration, args)
if self._bot.runnable:
t = threading.Timer(duration, self._timer_callback, (func, args))
self._timer_callbacks[func] = t
self._bot.timers.append(t)
t.start()
self.log.info("Scheduled call to %s in %ds", func.__name__, duration)
else:
self.log.warning("Not scheduling call to %s in %ds because we're shutting down.", func.__name__, duration)
def stop_timer(self, func):
"""
Stops a timer if it hasn't fired yet
* func - the function passed in start_timer
"""
self.log.debug('Stopping timer {}'.format(func.__name__))
if func in self._timer_callbacks:
t = self._timer_callbacks[func]
self._bot.timers.remove(t)
t.cancel()
del self._timer_callbacks[func]
def _timer_callback(self, func, args):
self.log.debug('Executing timer function {}'.format(func.__name__))
try:
func(*args)
except Exception:
self.log.exception("Caught exception executing timer function: {}".format(func.__name__))
def get_user(self, username):
"""
Utility function to query slack for a particular user
:param username: The username of the user to lookup
:return: SlackUser object or None
"""
if hasattr(self._bot, 'user_manager'):
user = self._bot.user_manager.get_by_username(username)
if user:
return user
user = SlackUser.get_user(self._bot.sc, username)
self._bot.user_manager.set(user)
return user
return SlackUser.get_user(self._bot.sc, username)
def get_channel(self, channel):
"""
Utility function to query slack for a particular channel
:param channel: The channel name or id of the channel to lookup
:return: SlackChannel object or None
"""
return SlackChannel.get_channel(self._bot.sc, channel)
| mit | -3,488,926,449,185,535,000 | 36 | 118 | 0.595007 | false | 4.27425 | false | false | false |
linxdcn/iS3 | IS3Py/is3.py | 2 | 7512 | # Copyright (C) 2015 iS3 Software Foundation
# Author: Xiaojun Li
# Contact: [email protected]
import sys
import clr
import System
# Load System.Windows.Media in PresentationCore.dll
sys.path.append('C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v4.5')
prcore = clr.LoadAssemblyFromFile('PresentationCore.dll')
clr.AddReference(prcore)
# Import classes in System
from System import Func,Action
from System.Windows.Media import Colors
from System.Collections.ObjectModel import ObservableCollection
from System.Threading.Tasks import Task
# Load IS3 namespaces
iS3Core = clr.LoadAssemblyFromFile('IS3.Core.dll')
clr.AddReference(iS3Core)
# Import classes in IS3
from IS3.Core import (Globals, Runtime, ErrorReport, ErrorReportTarget,
DGObject, DGObjects,
ProjectDefinition, Project,
EngineeringMap, EngineeringMapType, DrawShapeType,
IView, LayerDef, Domain, DomainType, ToolTreeItem)
from IS3.Core.Geometry import *
from IS3.Core.Graphics import *
def output(text):
print(text)
# Redirect ErrorReport to python cosole
ErrorReport.target = ErrorReportTarget.DelegateConsole
ErrorReport.consoleDelegate = output
# In Windows, UI thread vars and functions are restricted to other threads.
# So, be cautious with Python calls to functions that live in the UI thread.
# Classes in the main UI thread include: mainframe, view, layer, ...
# Therefore, calls to functions in mainframe, view, layer, etc. are restricted.
mainframe = Globals.mainframe # Global var: mainframe
prj = mainframe.prj # Global var: prj
dispatcher = mainframe.Dispatcher # Global var: dispatcher -> UI thread manager
graphicsEngine = Runtime.graphicEngine # Global var: graphics Engine
geometryEngine = Runtime.geometryEngine # Global var: geometry Engine
class MainframeWrapper():
"Define thread safe calls to mainframe methods"
@staticmethod
def addView(emap, canClose = True):
"A thread safe call to -> mainframe.addView(emap, canclose)"
if (Globals.isThreadUnsafe()):
func = Func[EngineeringMap, bool, Task[IView]](mainframe.addView)
view = dispatcher.Invoke(func, emap, canClose)
else:
view = mainframe.addView(emap, canClose)
viewWrapper = ViewWrapper(view.Result)
return viewWrapper
@staticmethod
def loadDomainPanels():
"A thread safe call to -> mainframe.loadDomainPanels()"
if (Globals.isThreadUnsafe()):
dispatcher.Invoke(mainframe.loadDomainPanels)
else:
mainframe.loadDomainPanels()
class ViewWrapper():
"Define thread safe calls to IS3View methods"
def __init__(self, view):
self.view = view
def addLayer(self, layer):
"A thread safe call to -> IS3View.addLayer"
if (Globals.isThreadUnsafe()):
func = Action[IGraphicsLayer](self.view.addLayer)
dispatcher.Invoke(func, layer)
else:
self.view.addLayer(layer)
def addLocalTiledLayer(self, file, id):
"A thread safe call to -> IS3View.addLocalTiledLayer"
if (Globals.isThreadUnsafe()):
func = Action[str, str](self.view.addLocalTiledLayer)
dispatcher.Invoke(func, file, id)
else:
self.view.addLocalTiledLayer(file, id)
def addGdbLayer(self, layerDef, gdbFile, start = 0, maxFeatures = 0):
"A thread safe call to -> IS3View.addGdbLayer"
if (Globals.isThreadUnsafe()):
func = Func[LayerDef, str, int, int, Task[IGraphicsLayer]](self.view.addGdbLayer)
layer = dispatcher.Invoke(func, layerDef, gdbFile, start, maxFeatures)
else:
layer = self.view.addGdbLayer(layerDef, gdbFile, start, maxFeatures)
layerWrapper = GraphicsLayerWrapper(layer.Result)
return layerWrapper
def addShpLayer(self, layerDef, shpFile, start = 0, maxFeatures = 0):
"A thread safe call to -> IS3View.addShpLayer"
if (Globals.isThreadUnsafe()):
func = Func[LayerDef, str, int, int, Task[IGraphicsLayer]](self.view.addShpLayer)
layer = dispatcher.Invoke(func, layerDef, shpFile, start, maxFeatures)
else:
            layer = self.view.addShpLayer(layerDef, shpFile, start, maxFeatures)
layerWrapper = GraphicsLayerWrapper(layer.Result)
return layerWrapper
def selectByRect(self):
"A thread safe call to -> IS3View.selectByRect"
if (Globals.isThreadUnsafe()):
dispatcher.Invoke(self.view.selectByRect)
else:
self.view.selectByRect()
class GraphicsLayerWrapper():
"Define thread safe calls to IS3GraphicsLayer methods"
def __init__(self, glayer):
self.layer = glayer
def setRenderer(self, renderer):
"A thread safe call to -> IS3GraphicsLayer.setRenderer"
if (Globals.isThreadUnsafe()):
func = Action[IRenderer](self.layer.setRenderer)
dispatcher.Invoke(func, renderer)
else:
self.layer.setRenderer(renderer)
def addGraphic(self, graphic):
"A thread safe call to -> IS3GraphicsLayer.addGraphic"
if (Globals.isThreadUnsafe()):
func = Action[IGraphic](self.layer.addGraphic)
dispatcher.Invoke(func, graphic)
else:
self.layer.addGraphic(graphic)
def newGraphicsLayer(id, displayName):
layer = graphicsEngine.newGraphicsLayer(id, displayName)
layerWrapper = GraphicsLayerWrapper(layer)
return layerWrapper
def addView3d(id, file):
map3d = EngineeringMap()
map3d.MapID = id
map3d.MapType = EngineeringMapType.Map3D
map3d.LocalMapFileName = file
view3d = MainframeWrapper.addView(map3d, True)
return view3d
def addGdbLayer(viewWrapper, layerDef, gdbFile = None, start = 0, maxFeatures = 0):
prj = Globals.project
layerWrapper = viewWrapper.addGdbLayer(layerDef, gdbFile, start, maxFeatures)
if (layerWrapper.layer == None):
print('addGdbFileELayer failed: ' + layerDef.Name)
return None
else:
print('addGdbFileELayer succeeded: ' + layerDef.Name)
objs = prj.findObjects(layerDef.Name)
if (objs == None):
print('Layer ' + layerDef.Name + ' has no corresponding objects in the project.')
else:
count = layerWrapper.layer.syncObjects(objs)
print('Sync with ' + str(count) + ' objects for layer ' + layerDef.Name)
return layerWrapper
def addGdbLayerLazy(view, name, type, gdbFile = None, start = 0, maxFeatures = 0):
layerDef = LayerDef()
layerDef.Name = name
layerDef.GeometryType = type
layerWrapper = addGdbLayer(view, layerDef, gdbFile, start, maxFeatures)
return layerWrapper
def addShpLayer(viewWrapper, layerDef, shpfile, start = 0, maxFeatures = 0):
prj = Globals.project
layerWrapper = viewWrapper.addShpLayer(layerDef, shpfile, start, maxFeatures)
if (layerWrapper.layer == None):
print('addShpFileELayer failed: ' + layerDef.Name)
return None
else:
print('addShpFileELayer succeeded: ' + layerDef.Name)
objs = prj.findObjects(layerDef.Name)
if (objs == None):
print('Layer ' + layerDef.Name + ' has no corresponding objects in the project.')
else:
count = layerWrapper.layer.syncObjects(objs)
print('Sync with ' + str(count) + ' objects for layer ' + layerDef.Name)
return layerWrapper
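# Illustrative use from the iS3 Python console (a sketch only; the map id,
# file paths, layer name and geometry type value below are hypothetical and
# not taken from a real project):
#   view3d = addView3d('SiteMap3D', 'C:\\iS3\\Data\\site3d.map')
#   ldef = LayerDef()
#   ldef.Name = 'Boreholes'
#   ldef.GeometryType = 'Point'      # whatever value IS3.Core expects here
#   blayer = addShpLayer(view3d, ldef, 'C:\\iS3\\Data\\boreholes.shp')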
| lgpl-3.0 | -7,984,074,763,647,838,000 | 37.523077 | 107 | 0.676384 | false | 3.729891 | false | false | false |
abendig/django-mailchimp | mailchimp/models.py | 1 | 9678 | from django.db import models
import json as simplejson
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from mailchimp.utils import get_connection
class QueueManager(models.Manager):
def queue(self, campaign_type, contents, list_id, template_id, subject,
from_email, from_name, to_email, folder_id=None, tracking_opens=True,
tracking_html_clicks=True, tracking_text_clicks=False, title=None,
authenticate=False, google_analytics=None, auto_footer=False,
auto_tweet=False, segment_options=False, segment_options_all=True,
segment_options_conditions=[], type_opts={}, obj=None, extra_info=[]):
"""
Queue a campaign
"""
kwargs = locals().copy()
kwargs['segment_options_conditions'] = simplejson.dumps(segment_options_conditions)
kwargs['type_opts'] = simplejson.dumps(type_opts)
kwargs['contents'] = simplejson.dumps(contents)
kwargs['extra_info'] = simplejson.dumps(extra_info)
for thing in ('template_id', 'list_id'):
thingy = kwargs[thing]
if hasattr(thingy, 'id'):
kwargs[thing] = thingy.id
del kwargs['self']
del kwargs['obj']
if obj:
kwargs['object_id'] = obj.pk
kwargs['content_type'] = ContentType.objects.get_for_model(obj)
return self.create(**kwargs)
def dequeue(self, limit=None):
if limit:
qs = self.filter(locked=False)[:limit]
else:
qs = self.filter(locked=False)
for obj in qs:
yield obj.send()
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class Queue(models.Model):
"""
A FIFO queue for async sending of campaigns
"""
campaign_type = models.CharField(max_length=50)
contents = models.TextField()
list_id = models.CharField(max_length=50)
template_id = models.PositiveIntegerField()
subject = models.CharField(max_length=255)
from_email = models.EmailField()
from_name = models.CharField(max_length=255)
to_email = models.EmailField()
folder_id = models.CharField(max_length=50, null=True, blank=True)
tracking_opens = models.BooleanField(default=True)
tracking_html_clicks = models.BooleanField(default=True)
tracking_text_clicks = models.BooleanField(default=False)
title = models.CharField(max_length=255, null=True, blank=True)
authenticate = models.BooleanField(default=False)
google_analytics = models.CharField(max_length=100, blank=True, null=True)
auto_footer = models.BooleanField(default=False)
generate_text = models.BooleanField(default=False)
auto_tweet = models.BooleanField(default=False)
segment_options = models.BooleanField(default=False)
segment_options_all = models.BooleanField(default=False)
segment_options_conditions = models.TextField()
type_opts = models.TextField()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
locked = models.BooleanField(default=False)
objects = QueueManager()
def send(self):
"""
send (schedule) this queued object
"""
# check lock
if self.locked:
return False
# aquire lock
self.locked = True
self.save()
# get connection and send the mails
c = get_connection()
tpl = c.get_template_by_id(self.template_id)
content_data = dict([(str(k), v) for k,v in simplejson.loads(self.contents).items()])
built_template = tpl.build(**content_data)
tracking = {'opens': self.tracking_opens,
'html_clicks': self.tracking_html_clicks,
'text_clicks': self.tracking_text_clicks}
if self.google_analytics:
analytics = {'google': self.google_analytics}
else:
analytics = {}
segment_opts = {'match': 'all' if self.segment_options_all else 'any',
'conditions': simplejson.loads(self.segment_options_conditions)}
type_opts = simplejson.loads(self.type_opts)
title = self.title or self.subject
camp = c.create_campaign(self.campaign_type, c.get_list_by_id(self.list_id),
built_template, self.subject, self.from_email, self.from_name,
self.to_email, self.folder_id, tracking, title, self.authenticate,
analytics, self.auto_footer, self.generate_text, self.auto_tweet,
segment_opts, type_opts)
if camp.send_now_async():
self.delete()
kwargs = {}
if self.content_type and self.object_id:
kwargs['content_type'] = self.content_type
kwargs['object_id'] = self.object_id
if self.extra_info:
kwargs['extra_info'] = simplejson.loads(self.extra_info)
return Campaign.objects.create(camp.id, segment_opts, **kwargs)
# release lock if failed
self.locked = False
self.save()
return False
def get_dequeue_url(self):
return reverse('mailchimp_dequeue', kwargs={'id': self.id})
def get_cancel_url(self):
return reverse('mailchimp_cancel', kwargs={'id': self.id})
def get_list(self):
return get_connection().lists[self.list_id]
@property
def object(self):
"""
The object might have vanished until now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def can_dequeue(self, user):
if user.is_superuser:
return True
if not user.is_staff:
return False
if callable(getattr(self.object, 'mailchimp_can_dequeue', None)):
return self.object.mailchimp_can_dequeue(user)
return user.has_perm('mailchimp.can_send') and user.has_perm('mailchimp.can_dequeue')
class CampaignManager(models.Manager):
def create(self, campaign_id, segment_opts, content_type=None, object_id=None,
extra_info=[]):
con = get_connection()
camp = con.get_campaign_by_id(campaign_id)
extra_info = simplejson.dumps(extra_info)
obj = self.model(content=camp.content, campaign_id=campaign_id,
name=camp.title, content_type=content_type, object_id=object_id,
extra_info=extra_info)
obj.save()
segment_opts = dict([(str(k), v) for k,v in segment_opts.items()])
for email in camp.list.filter_members(segment_opts):
Reciever.objects.create(campaign=obj, email=email)
return obj
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class DeletedCampaign(object):
subject = u'<deleted from mailchimp>'
class Campaign(models.Model):
sent_date = models.DateTimeField(auto_now_add=True)
campaign_id = models.CharField(max_length=50)
content = models.TextField()
name = models.CharField(max_length=255)
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
objects = CampaignManager()
class Meta:
ordering = ['-sent_date']
permissions = [('can_view', 'Can view Mailchimp information'),
('can_send', 'Can send Mailchimp newsletters')]
verbose_name = _('Mailchimp Log')
verbose_name_plural = _('Mailchimp Logs')
def get_absolute_url(self):
return reverse('mailchimp_campaign_info', kwargs={'campaign_id': self.campaign_id})
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def get_extra_info(self):
if self.extra_info:
return simplejson.loads(self.extra_info)
return []
@property
def object(self):
"""
The object might have vanished until now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
@property
def mc(self):
try:
if not hasattr(self, '_mc'):
self._mc = get_connection().get_campaign_by_id(self.campaign_id)
return self._mc
except:
return DeletedCampaign()
class Reciever(models.Model):
campaign = models.ForeignKey(Campaign, related_name='recievers')
email = models.EmailField()
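# Typical flow (a sketch only; the campaign type, ids, addresses and template
# section names below are hypothetical):
#   Queue.objects.queue('regular', {'main': '<p>Hello</p>'}, list_id='abc123',
#                       template_id=42, subject='Newsletter',
#                       from_email='[email protected]', from_name='Example',
#                       to_email='[email protected]')
#   # later, e.g. from a cron job or management command:
#   for campaign in Queue.objects.dequeue():
#       pass   # each successfully sent item is replaced by a Campaign log entry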
| bsd-3-clause | -397,447,282,722,257,540 | 37.712 | 93 | 0.626886 | false | 3.8712 | false | false | false |
bashu/fluentcms-filer | fluentcms_filer/file/south_migrations/0001_initial.py | 1 | 10131 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FilerFileItem'
db.create_table(u'contentitem_file_filerfileitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('target', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
))
db.send_create_signal(u'file', ['FilerFileItem'])
def backwards(self, orm):
# Deleting model 'FilerFileItem'
db.delete_table(u'contentitem_file_filerfileitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'file.filerfileitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'FilerFileItem', 'db_table': "u'contentitem_file_filerfileitem'", '_ormbases': ['fluent_contents.ContentItem']},
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['file'] | apache-2.0 | -6,681,037,321,661,970,000 | 82.04918 | 210 | 0.566183 | false | 3.562236 | false | false | false |
WilliamDiakite/ExperimentationsACA | processing/lsa.py | 1 | 3364 |
import os
import sys
import itertools
import operator
import nltk
import numpy as np
import matplotlib.pyplot as plt
from nltk.util import ngrams
from collections import Counter
from spell_checker import SpellChecker
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
sys.path.insert(0, '/Users/diakite_w/Documents/Dev/ExperimentationsACA/FrenchLefffLemmatizer')
from FrenchLefffLemmatizer import FrenchLefffLemmatizer
def extract_ngrams(documents, n):
'''
    Return a Counter of n-grams (tuples of n tokens) with their frequencies across all documents
'''
chained_documents = list(itertools.chain.from_iterable(documents))
return Counter(ngrams(chained_documents, n))
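# For example (note that the documents are chained together first, so n-grams
# can span the boundary between two documents):
#   extract_ngrams([['the', 'cat'], ['the', 'dog']], 2)
#   -> Counter({('the', 'cat'): 1, ('cat', 'the'): 1, ('the', 'dog'): 1})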
def tokenize(text):
fll = FrenchLefffLemmatizer()
splck = SpellChecker()
contracted_pronouns = ["l'", "m'", "n'", "d'", "c'", "j'", "qu'", "s'"]
dictionnary = []
stopwords = [w.rstrip() for w in open('stopwords-fr.txt')]
# Put everything to lower case
text = text.lower()
# Tokenize text
tokens = nltk.tokenize.word_tokenize(text)
print('Nombre de tokens dans le texte :', len(tokens))
#tokens = [splck.correct(t) if t not in dictionnary else t for t in tokens]
# Remove contacted pronous from tokens
tokens = [t[2:] if t[:2] in contracted_pronouns else t for t in tokens]
tokens = [t for t in tokens if len(t) > 2]
tokens = [t for t in tokens if t not in stopwords]
tokens = [fll.lemmatize(t) for t in tokens]
print('Nombre de tokens apres traitement :', len(tokens), '\n')
return tokens
def tokens_to_vec(tokens):
vec = np.zeros(len(word_index_map))
for token in tokens:
idx = word_index_map[token]
vec[idx] = 1
return vec
def read_txt(textfile):
with open(textfile, 'r') as f:
text = f.read()
text = text.replace('\n', ' ')
text = text.replace('- ', '')
text = text.replace('.', '')
text = text.replace('-', '')
text = text.replace("‘l'", 'ï')
return text
def get_all_doc(directory):
'''
Read all txt documents and append them in string
'''
documents = []
counter = 1
for filename in os.listdir(directory):
if filename.endswith('.txt'):
print('\n[...] Reading document', counter)
filename = 'data/' + filename
documents.append(read_txt(filename))
counter += 1
return documents
documents = get_all_doc('data/')
all_tokens = [tokenize(doc) for doc in documents]
vocabulary = list(set(itertools.chain.from_iterable(all_tokens)))
print ('\nVocab size:', len(vocabulary))
# Computing n-grams
bigrams = extract_ngrams(all_tokens, 2)
trigrams = extract_ngrams(all_tokens, 3)
[print(t) for t in trigrams.most_common(5)]
print('\n')
[print(t) for t in bigrams.most_common(10)]
'''
# Key: word - value: index
word_index_map = {j: i for i, j in enumerate(vocabulary)}
# Key: index - value: word
index_word_map = sorted(word_index_map.items(), key=operator.itemgetter(1))
index_word_map = [t[0] for t in index_word_map]
N = len(documents)
D = len(word_index_map)
X = np.zeros((D,N))
i = 0
for tokens in all_tokens:
X[:,i] = tokens_to_vec(tokens)
i += 1
print(X.shape)
svd = TruncatedSVD()
Z = svd.fit_transform(X)
print('Z shape', Z.shape)
plt.scatter(Z[:,0], Z[:,1])
print('D:', D)
for i in range(D):
plt.annotate(s=index_word_map[i], xy=(Z[i,0], Z[i,1]))
plt.show()
'''
| mit | -8,408,729,182,994,486,000 | 20.96732 | 94 | 0.664683 | false | 2.995544 | false | false | false |
babble/babble | include/jython/Lib/asyncore.py | 1 | 17033 | # -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import exceptions
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN
try:
socket_map
except NameError:
socket_map = {}
class ExitNow(exceptions.Exception):
pass
def read(obj):
try:
obj.handle_read_event()
except ExitNow:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
if obj.readable():
r.append(fd)
if obj.writable():
w.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
def poll2(timeout=0.0, map=None):
import poll
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
if map:
l = []
for fd, obj in map.items():
flags = 0
if obj.readable():
flags = poll.POLLIN
if obj.writable():
flags = flags | poll.POLLOUT
if flags:
l.append((fd, flags))
r = poll.poll(l, timeout)
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
def poll3(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags = select.POLLIN
if obj.writable():
flags = flags | select.POLLOUT
if flags:
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
def loop(timeout=30.0, use_poll=0, map=None):
if map is None:
map = socket_map
if use_poll:
if hasattr(select, 'poll'):
poll_fun = poll3
else:
poll_fun = poll2
else:
poll_fun = poll
while map:
poll_fun(timeout, map)
class dispatcher:
debug = 0
connected = 0
accepting = 0
closing = 0
addr = None
def __init__(self, sock=None, map=None):
if sock:
self.set_socket(sock, map)
# I think it should inherit this anyway
self.socket.setblocking(0)
self.connected = 1
# XXX Does the constructor require that the socket passed
# be connected?
try:
self.addr = sock.getpeername()
except socket.error:
# The addr isn't crucial
pass
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
# On some systems (RH10) id() can be a negative number.
# work around this.
MAX = 2L*sys.maxint+1
return '<%s at %#x>' % (' '.join(status), id(self)&MAX)
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
if hasattr(self, '_map'):
map = self._map
del self._map
else:
map = socket_map
if not hasattr(self, '_fileno'):
self._fileno = self.socket.fileno()
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = socket_map
if map.has_key(fd):
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
def create_socket(self, family, type):
self.family_and_type = family, type
self.socket = socket.socket(family, type)
self.socket.setblocking(0)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
if sock.fileno():
self.add_channel(map)
else:
self._map = map
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
if os.name == 'mac':
# The macintosh will select a listening socket for
# write if you let it. What might this mean?
def writable(self):
return not self.accepting
else:
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = 1
if os.name == 'nt' and num > 5:
num = 1
ret = self.socket.listen(num)
self.add_channel()
return ret
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = 0
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.add_channel()
self.addr = address
self.connected = 1
self.handle_connect()
else:
raise socket.error, err
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
self.add_channel()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise socket.error, why
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise socket.error, why
return 0
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise socket.error, why
def close(self):
self.del_channel()
self.socket.close()
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
return getattr(self.socket, attr)
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if __debug__ or type != 'info':
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
if not self.connected:
self.connected = 1
self.handle_accept()
elif not self.connected:
self.handle_connect()
self.connected = 1
self.handle_read()
else:
self.handle_read()
def handle_write_event(self):
# getting a write implies that we are connected
if not self.connected:
self.handle_connect()
self.connected = 1
self.handle_write()
def handle_expt_event(self):
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.close()
def handle_expt(self):
self.log_info('unhandled exception', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None):
dispatcher.__init__(self, sock)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
assert tb # Must have a traceback
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None):
if map is None:
map = socket_map
for x in map.values():
x.socket.close()
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# here we override just enough to make a file
# look like a socket for the purposes of asyncore.
def __init__(self, fd):
self.fd = fd
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
read = recv
write = send
def close(self):
return os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd):
dispatcher.__init__(self)
self.connected = 1
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
self.set_file(fd)
def set_file(self, fd):
self._fileno = fd
self.socket = file_wrapper(fd)
self.add_channel()
| apache-2.0 | 6,117,678,477,258,131,000 | 29.361854 | 78 | 0.540422 | false | 4.155404 | false | false | false |
pjh/vm-analyze | analyze/ip_to_fn.py | 1 | 21352 | # Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, [email protected]
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
# This file contains methods that implement a wrapper around the
# binutils "addr2line" utility, which can be used to look up instruction
# pointer values in executable files and shared object files to find
# the function (and sometimes the source code file + line number) that
# contains the ip.
# Note that each instance of "addr2line -e /path/to/binary..." will load
# that entire binary into memory while it runs; this is annoying for
# enormous binaries like firefox's libxul.so.
from util.pjh_utils import *
from analyze.vm_mapping_class import UNKNOWN_FN
import fcntl
import os
import shlex
import subprocess
import sys
import time
cache_addr2line_lookups = True
# With caching disabled, less memory will be consumed, but it will take
# 14 minutes to analyze the function lookups of a firefox trace. With
# caching enabled, the analysis only takes 2 minutes.
addr2line_prog = '/usr/bin/addr2line'
file_prog = '/usr/bin/file'
linux_code_startaddr = int("0x400000", 16)
# On x86_64 Linux anyway, all non-relocatable executables are loaded
# into virtual address space at this address, I believe.
# Given the filename of an executable file or a shared object file,
# determines if the file is relocatable. All shared object files should
# be relocatable, and most executable files are non-relocatable, but it
# is possible to build "position independent executables" (see the "-fpic"
# and "-pie" flags in gcc(1)).
#
# This method is intended to be used when determining function names
# from instruction pointers using addr2line: if the file is relocatable,
# then an absolute ip should have the address of the file's memory mapping
# subtracted from it before passing it to addr2line. If the file is not
# relocatable, then the absolute ip can be passed directly to addr2line.
# Note that this method must create a child subprocess to check the file,
# so try not to call it too often.
#
# Returns: True/False if object file is relocatable or not, or None if an
# error occurred.
def is_objfile_relocatable(name):
tag = 'is_objfile_relocatable'
global file_prog
# Command line that I think makes sense:
# file -e apptype -e ascii -e encoding -e tokens -e cdf -e elf -e tar
# -bn <filename>
# This should return one of the following strings, indicating that the
# file is relocatable or not:
# ELF 64-bit LSB shared object, x86-64, version 1 (SYSV)
# ELF 64-bit LSB executable, x86-64, version 1 (SYSV)
# (even position-independent executables will be described as "shared
# object").
filecmd = ("{} -e apptype -e ascii -e encoding -e tokens -e cdf "
"-e elf -e tar -bn {}").format(file_prog, name)
# don't use -p flag, so that output will *always* have two lines
fileargs = shlex.split(filecmd)
print_debug(tag, ("fileargs: {}").format(fileargs))
p = subprocess.Popen(fileargs, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if not p:
print_error(tag, "Popen failed for command {}".format(filecmd))
return None
# communicate() will wait for the process to terminate and will
# read its output. A "timeout" arg was added for Python 3.3, but
# verbena is only running 3.2.3 right now, so hope that the process
# will always terminate.
(out, err) = p.communicate()
#retcode = p.poll() # unnecessary, I think
#retcode = p.wait() # unnecessary, I think
retcode = p.returncode
if retcode is None:
print_error(tag, ("unexpected: got a None retcode - subprocess "
"has not terminated yet?!").format())
return None
elif retcode != 0:
print_error(tag, ("file command returned a non-zero error code: "
"{}").format(retcode))
return None
if out:
# Convert from bytes back to string:
out = out.decode('utf-8').strip()
else:
print_error(tag, "got no output from file subprocess")
return None
if err:
err = err.decode('utf-8').strip()
else:
err = ''
print_debug(tag, ("call to file subprocess succeeded, got stdout "
"{} and stderr {}").format(out, err))
# It's probably not necessary to define the expected output strings
# so strictly here, but this will cause an error if we ever e.g.
# move to a different architecture, at which point we can double-
# check this code to make sure it makes sense for non-x86-64.
# Ah, I already found one thing that's not consistent: some files
# are "version 1 (SYSV)", others are "version 1 (GNU/Linux)".
reloc_str = 'ELF 64-bit LSB shared object, x86-64, version 1'
nonreloc_str = 'ELF 64-bit LSB executable, x86-64, version 1'
if reloc_str in out:
print_debug(tag, ("relocatable: {}").format(reloc_str))
return True
elif nonreloc_str in out:
print_debug(tag, ("nonrelocatable: {}").format(nonreloc_str))
return False
print_error(tag, ("unexpected output \"{}\", doesn't match "
"expected output from file command").format(out))
print_error(tag, ("output: {}").format(repr(out)))
print_error(tag, ("reloc_str: {}").format(repr(reloc_str)))
print_error(tag, ("nonreloc_str: {}").format(repr(nonreloc_str)))
return None
##############################################################################
# Creates an addr2line instance (subprocess) for a particular code module
# (executable file or shared object file).
# This class probably shouldn't be used directly; use the ip_to_fn_converter
# class below instead.
class addr2line_module:
tag = 'addr2line_module'
# Members:
objname = None
relocatable = None
a2l = None # Popen class instance representing an addr2line subprocess
cache = None
def __init__(self, objname):
tag = "{}.__init__".format(self.tag)
if not objname:
print_error_exit(tag, "must provide an object name")
self.objname = objname
self.tag = "addr2line_module-{}".format(objname)
self.relocatable = is_objfile_relocatable(objname)
if self.relocatable is None:
#print_error_exit(tag, ("is_objfile_relocatable() returned "
# "error, not sure how to handle gracefully inside of "
# "this constructor so aborting.").format())
print_error(tag, ("is_objfile_relocatable() returned "
"error, not sure how to handle gracefully inside of "
"this constructor so aborting...").format())
return None
elif self.relocatable is True:
print_debug(tag, ("determined that object file {} is "
"relocatable, will subtract vma_start_addr from ips "
"passed to this addr2line_module").format(objname))
else:
print_debug(tag, ("determined that object file {} is "
"not relocatable, will use absolute ips that are passed "
"to this addr2line_module").format(objname))
ret = self.start_addr2line()
if ret != 0:
print_error_exit(tag, ("failed to start addr2line "
"subprocess").format())
self.cache = dict()
return
# Returns: the fn corresponding to this ip if it is found in the
# cache map, or None if not found.
def cache_lookup(self, ip):
tag = "{}.cache_lookup".format(self.tag)
try:
fn = self.cache[ip]
except KeyError:
return None
return fn
# Inserts the specified ip, fn pair into the addr2line "cache" for
# this module.
# "Cache" isn't quite the right term, as nothing is ever evicted;
# it's just a dictionary...
def cache_insert(self, ip, fn):
tag = "{}.cache_insert".format(self.tag)
try:
fn = self.cache[ip]
print_error_exit(tag, ("unexpected: already a cache entry "
"for ip {} -> {}").format(hex(ip), fn))
except KeyError:
self.cache[ip] = fn
print_debug(tag, ("cache insert {} -> {}").format(hex(ip), fn))
return
# Passes the specified ip to addr2line and returns the function that
# it corresponds to, if found.
# ip should be a base-10 integer!
# Returns: the function name if addr2line was able to lookup the ip
# successfully, or '' if addr2line was unsuccessful. Returns None
# on error.
def ip_to_fn(self, ip, vma_start_addr):
tag = "{}.ip_to_fn".format(self.tag)
global linux_code_startaddr
global cache_addr2line_lookups
if not self.a2l:
print_debug(tag, ("self.a2l is None, addr2line subprocess "
"is already terminated (or was never started)").format())
return None
if type(ip) != int:
print_error(tag, ("ip argument {} is not an int").format(ip))
return None
if vma_start_addr is None or type(vma_start_addr) != int:
print_error(tag, ("invalid vma_start_addr: {}").format(
vma_start_addr))
return None
# For relocatable object files, we must subtract the vma start
# addr (the address where the file was mapped into the process'
# address space) from the ip, which is assumed to be an absolute
# ip from an execution's userstacktrace. For non-relocatable
# executables, we directly use the absolute ip.
if self.relocatable:
#print_debug(tag, ("file {} is relocatable, so subtracting "
# "vma_start_addr {} from absolute ip {} to get ip for "
# "addr2line function lookup: {}").format(self.objname,
# hex(vma_start_addr), hex(ip), hex(ip - vma_start_addr)))
if vma_start_addr > ip:
print_error_exit(tag, ("unexpected: vma_start_addr {} "
"> ip {}").format(hex(vma_start_addr), hex(ip)))
ip -= vma_start_addr
else:
#print_debug(tag, ("file {} is not relocatable, so directly "
# "using absolute ip {} and ignoring vma_start_addr "
# "{}").format(self.objname, hex(ip), hex(vma_start_addr)))
if vma_start_addr != linux_code_startaddr:
print_error_exit(tag, ("file is non-relocatable, but "
"its start addr {} doesn't match expected value for "
"64-bit Linux, {} - is this expected?").format(
hex(vma_start_addr), hex(linux_code_startaddr)))
# See if we've already looked up this ip for this module.
# Important: this must come after the ip is offset for relocatable
# modules; ip must not change between now and when it is inserted
# into the cache below.
if cache_addr2line_lookups:
cache_lookup_ip = ip # for sanity checking
fn = self.cache_lookup(ip)
if fn:
print_debug(tag, ("cache hit: ip {} -> fn '{}'").format(
hex(ip), fn))
else:
print_debug(tag, ("cache miss: ip {}").format(hex(ip)))
# Communicate with addr2line process if cache lookups are disabled
# or the cache lookup just missed.
if not cache_addr2line_lookups or fn is None:
# Stupidly, it appears that Python's subprocess module can't
# be used to communicate multiple times with an interactive
# subprocess.
# http://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
# http://stackoverflow.com/questions/3065060/communicate-multiple-times-with-a-process-without-breaking-the-pipe
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# http://stackoverflow.com/questions/11457931/running-an-interactive-command-from-within-python
# It appears that the subprocess' stdin and stdout can just be
# written and read directly instead. It appears that the input
# string written to stdin must be converted to bytes first, and
# then any output read from stdout must be converted from a byte
# string back to a standard str as well.
#print_debug(tag, ("addr2line: lookup ip {} in object file "
# "{}").format(hex(ip), self.objname))
ip_input = """{}
""".format(hex(ip))
# send Enter keypress: to enter in vim insert mode, hit
# Ctrl-v first
self.a2l.stdin.write(bytearray(ip_input, 'utf-8'))
#print_debug(tag, "a2l.stdin.write returned")
# Read the output from addr2line:
# http://docs.python.org/3/tutorial/inputoutput.html#methods-of-file-objects
# If self.a2l.stdout.readline() is used to read lines of output
# here, then after reading all of the lines, the next call to
# readline() will block forever. A possible workaround is to
# always just call readline() exactly twice, since that's what
# we expect addr2line's output to be, but this seems fragile.
# Instead, can we just call read(), which will read "the entire
# contents of the file"? This will block as well, since there
# is no EOF at the end of the output. According to some stack
# overflow answer for providing non-blocking reads in Python,
# we may be able to use the fcntl module to mark file
# descriptors as non-blocking.
# http://stackoverflow.com/a/1810703/1230197
# This seems to work a little better, although now the problem
# is that after writing to stdin, the python script here will
# likely attempt to read stdout before addr2line has had a
# chance to write to it. The problem is that we want to block
# <a little bit>, but not forever...
# Fragile but working solution: keep reading until two newlines
# have been encountered, or until the process has terminated.
# As far as I can tell addr2line will always return two lines
# of output when started with the "-Cif" flags, even if
# gibberish input is provided.
# $ addr2line -e test-programs/hello-world -Cif
# 1234
# ??
# ??:0
# 0x4006d9
# _start
# ??:0
fd = self.a2l.stdout.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
output = ""
linecount = 0
loopcount = 0
while linecount < 2:
# In practice, it looks like this loop may run one or more
# times (e.g. 41 times) without reading anything from
# self.a2l.stdout, but then when there is data available
# for reading, it is all available at once (both lines that
# we expect).
bytestr = self.a2l.stdout.read()
if bytestr and len(bytestr) > 0:
buf = bytestr.decode('utf-8')
output += buf
linecount = len(output.splitlines())
if False:
# When this code is enabled and stderr is set to
# subprocess.PIPE when self.a2l if Popen'd, it
# didn't seem to help - stderr.read() here never
# ever returns.
bytestrerr = self.a2l.stderr.read()
if bytestrerr and len(bytestrerr) > 0:
buf = bytestrerr.decode('utf-8')
output += buf
linecount = len(output.splitlines())
print_error_exit(tag, ("stderr.read(): output={}, "
"linecount={}").format(output, linecount))
print_error_exit(tag, ("BUMMER: this code was broken for "
"some reason after upgrading from Ubuntu 12.04 to 13.04 "
"(or something else broke it, but I'm not sure what); "
"perhaps due to python3 upgrade, or maybe a change to "
"addr2line. In the loop below, the stdout.read() never "
"actually returns anything, and we will just loop "
"here forever.").format())
loopcount += 1
if loopcount % 50000 == 0:
# Lookup time appears to depend on the size of the object
# file, which makes sense I guess; for a test lookup in
# my version of libc, I saw loopcount up to 10,000.
#print_debug(tag, ("loopcount is {}, checking if "
# "addr2line is still alive").format(loopcount))
self.a2l.poll()
if self.a2l.returncode:
print_error(tag, ("addr2line subprocess has "
"terminated with retcode {}, returning error "
"from this fn").format(self.a2l.returncode))
return None
else:
print_debug(tag, ("addr2line subprocess is still "
"alive, will keep looping; output buffer so far "
"is {}").format(output))
pass
lines = output.splitlines()
# Ok, now, if addr2line was able to lookup the function name, it
# should be found in the first line of output; if not, then it
# should have printed "??".
fn = lines[0].strip()
if cache_addr2line_lookups:
if ip != cache_lookup_ip:
print_error_exit(tag, ("cache_insert ip {} doesn't match "
"cache_lookup_ip {}").format(hex(ip),
hex(cache_lookup_ip)))
self.cache_insert(ip, fn)
# This needs to happen for both the cache hit case and the
# just-looked-it-up case.
if '?' in fn:
#print_debug(tag, ("got unknown fn '{}' returned from addr2line, "
# "will return empty string from this fn").format(fn))
fn = ''
else:
#print_debug(tag, ("got fn '{}' from addr2line output {}").format(
# fn, output.replace('\n', '')))
pass
return fn
# The user should try to remember to call this function explicitly
# when done using the instance of the class, but if the user forgets,
# then the destructor (__del__) should eventually perform the same
# cleanup operations (i.e. terminating the addr2line process).
def close(self):
tag = "{}.close".format(self.tag)
self.stop_addr2line()
self.objname = None
self.relocatable = None
self.cache = None
return
# "private" method:
# Starts an instance of the addr2line program for converting ips into
# function names. Returns: 0 on success, -1 on error.
def start_addr2line(self):
tag = "{}.start_addr2line".format(self.tag)
global addr2line_prog
a2lcmd = ("{} -e {} -Cif").format(addr2line_prog, self.objname)
# don't use -p flag, so that output will *always* have two lines
a2largs = shlex.split(a2lcmd)
print_debug(tag, ("a2largs: {}").format(a2largs))
self.a2l = subprocess.Popen(a2largs, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not self.a2l:
print_error(tag, "Popen failed for command {}".format(a2lcmd))
return -1
retcode = self.a2l.poll()
if retcode:
print_error(tag, ("addr2line subprocess already "
"terminated, this is unexpected").format())
retcode = self.a2l.wait()
self.a2l = None
return -1
print_debug(tag, ("started addr2line subprocess with pid "
"{}").format(self.a2l.pid))
return 0
# "private" method:
def stop_addr2line(self):
tag = "{}.stop_addr2line".format(self.tag)
if not self.a2l:
print_debug(tag, ("self.a2l is None, addr2line subprocess "
"is already terminated (or was never started)").format())
return
# http://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
print_debug(tag, ("sending Ctrl-d to addr2line subprocess {} to "
"terminate it").format(self.a2l.pid))
stop_input = ''
# Ctrl-d: hit Ctrl-v first in vim insert mode to 'type' this
# special key
#(out, err) = self.a2l.communicate(input=stop_input)
(out, err) = self.a2l.communicate(
input=bytearray(stop_input, 'utf-8'))
# does stop_input need to be converted to bytes?? Docs appear to
# say so, but code examples don't...
if self.a2l.returncode is None:
print_error_exit(tag, ("communicate() returned, but returncode "
"is not set yet!").format())
elif self.a2l.returncode != 0:
print_warning(tag, ("terminated addr2line subprocess returned "
"error code {}").format(self.a2l.returncode))
else:
print_debug(tag, ("addr2line subprocess terminated "
"successfully").format())
self.a2l = None
return
def __del__(self):
tag = "{}.__del__".format(self.tag)
if self.a2l:
self.stop_addr2line()
return
##############################################################################
# Converts instruction pointers to function names.
# Uses one addr2line_module object per file that we perform lookups in.
class ip_to_fn_converter:
tag = 'ip_to_fn_converter'
# Members:
a2lmap = None
def __init__(self):
tag = "{}.__init__".format(self.tag)
self.a2lmap = dict()
return
# Attempts to lookup the specified instruction pointer in the specified
# file (executable file or shared object file). vma_start_addr should
# be the address (as an int) where the file was mapped into the address
# space when the ip was captured. If this address is unknown, then
# setting it to 0 will likely still work for non-relocatable executable
# files, but the lookup will likely fail (or worse, succeed incorrectly)
# for relocatable object files or position-independent executables.
# Returns: function name on success, empty string '' if the lookup
# failed, or None if there was an error.
def lookup(self, objname, ip, vma_start_addr):
tag = "{}.lookup".format(self.tag)
if (not objname or not ip or type(objname) != str or type(ip) != int
or len(objname) == 0 or vma_start_addr is None or
type(vma_start_addr) != int):
print_error(tag, ("invalid argument: objname {} must be a "
"non-empty string, ip {} must be an int, vma_start_addr "
"must be an int").format(objname, ip, vma_start_addr))
return None
# We keep one addr2line_module object per file:
try:
a2l = self.a2lmap[objname]
print_debug(tag, ("got an existing addr2line instance for "
"objname {}").format(objname))
except KeyError:
print_debug(tag, ("creating a new addr2line instance for "
"objname {}").format(objname))
a2l = addr2line_module(objname)
if not a2l:
print_error(tag, ("addr2line_module constructor "
"failed, just returning {}").format(UNKNOWN_FN))
return UNKNOWN_FN
self.a2lmap[objname] = a2l
return a2l.ip_to_fn(ip, vma_start_addr)
def close(self):
tag = "{}.close".format(self.tag)
for a2l in self.a2lmap.values():
a2l.close()
self.a2lmap = None
return
def __del__(self):
tag = "{}.__del__".format(self.tag)
if self.a2lmap:
self.close()
return
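# Example (illustrative sketch): typical use of ip_to_fn_converter. The binary path,
# instruction pointer and vma start address below are hypothetical placeholders, and
# this helper is never called from within this module.
def _example_ip_lookup():
    converter = ip_to_fn_converter()
    # For a non-relocatable executable a vma_start_addr of 0 usually works; for PIE
    # binaries and shared objects pass the actual mapping base address instead.
    fn = converter.lookup('/usr/bin/someprog', 0x401a2c, 0)
    if fn == '':
        print('lookup failed')
    else:
        print('ip resolves to function {}'.format(fn))
    converter.close()
    return fn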
if __name__ == '__main__':
print("Cannot run stand-alone")
sys.exit(1)
| bsd-3-clause | 8,448,444,619,364,010,000 | 37.747731 | 117 | 0.678344 | false | 3.225865 | true | false | false |
lampertb/RPIAFIB | Software/afib_lib.py | 1 | 4157 | import sys
import numpy as np
from scipy import signal
# For testing
import csv
defaultWindowSize=120
defaultMinSNR=2
defaultNoisePercentage=10
defaultSampleRate=250
#The find peaks function takes in an array of data
#It returns an array of the peak locations after running the wavelet transform
def findPeaks(dataArray, windowSize=defaultWindowSize):
peakIndex=signal.find_peaks_cwt(dataArray, np.arange(1, windowSize), min_snr=defaultMinSNR, noise_perc=defaultNoisePercentage)
#print peakIndex
return peakIndex
#Calcuate the time interval between samples
def getRR(peakIndex, sampleRate=defaultSampleRate):
rr_data = []
for i in range(0, len(peakIndex)-1):
diff = peakIndex[i+1]-peakIndex[i]
#print "peak1 {0} - peak2 {1} Diff {2}".format(peakIndex[i+1], peakIndex[i], diff)
timeDelay = diff/float(sampleRate) #Get the time difference between samples
rr_data.append(timeDelay)
#sum+=timeDelay #create an average
#print "Sum {0}, len {1}".format(sum, len(peakIndex))
return rr_data
#AFib Detection Algorithm
from scipy.stats import norm
def Fib_Detection( x , seglen = 128):
N = len(x)
tprmean = 0.65625; tprvar = 0.001369222
# TPR mean and variance from rozinn database
afstats = {};
afstats['avg'] = [];
afstats['rmssd'] = [];
afstats['tpr'] = [];
afstats['se'] = [];
afstats['tprstat'] = [];
afstats['count'] = [];
for i in range (0,N-seglen+1):
perc = i/N*100
j = 0
segment = x[i:i+seglen]
#******************** Remove 16 outlier ********************************
#* In the outlier removal, 8 maximum and 8 minimum values are discarded
#***********************************************************************
segment_outlier = segment[:]
for j in range (0,8):
segment_outlier.remove(max(segment_outlier))
segment_outlier.remove(min(segment_outlier))
#print segment
#print segment_outlier
# Get mean
afstats['avg'].append(np.mean(segment))
# RMSSD
difference = np.subtract(segment_outlier[2:seglen-16], segment_outlier[1:seglen-17])
afstats['rmssd'].append(np.sqrt(np.sum(np.power(difference, 2))/(seglen-17))/afstats['avg'][i-1])
# TPR
j = 0
for k in range (1,seglen-1):
if ((segment[k]-segment[k-1])*(segment[k]-segment[k+1])>0):
j = j+1
afstats['tpr'].append(j/(seglen-2.0))
# Shannon Entropy
seg_max = np.max(segment_outlier)
seg_min = np.min(segment_outlier)
step = (seg_max-seg_min)/16.0;
entropy = 0;
if (step!=0):
group1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for j in range(0,112):
z = int(np.around((segment_outlier[j]-seg_min)/step))
group1[z] = group1[z]+1
group1 = np.divide(group1,np.sum(group1)+0.0)
for j in range (0,16):
if (group1[j]>0):
entropy = entropy+group1[j]*np.log(group1[j])
afstats['se'].append(entropy/-2.7726)
# Compute the afstats
afstats['tprstat'].append(norm.cdf(afstats['tpr'][i-1], tprmean, np.sqrt(tprvar)));
if(afstats['rmssd'][i-1]>=0.1 and afstats['tprstat'][i-1]>0.0001 and afstats['tprstat'][i-1] <= 0.9999 and afstats['se'][i-1] >=0.7):
afstats['count'].append(1)
else:
afstats['count'].append(0)
return afstats
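# Example (illustrative sketch): interpreting the statistics returned by Fib_Detection.
# The RR-interval list below is synthetic and only shows the expected shapes; real
# input comes from getRR(), as in afib_dect() further down.
def _example_interpret_afstats():
    import random
    rr = [0.8 + random.uniform(-0.05, 0.05) for _ in range(300)]  # ~75 bpm with jitter
    stats = Fib_Detection(rr, seglen=128)
    # One entry per sliding 128-beat window; count[i] == 1 means window i crossed all
    # three thresholds (RMSSD, TPR statistic and Shannon entropy) and looks AF-like.
    flagged = sum(stats['count'])
    total_windows = len(stats['count'])
    return flagged, total_windows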
#AFib Detection from ECG file
def afib_dect():
inputFile="0403_Normal_tiny.csv"
ECG=[]
with open(inputFile,'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
ECG.append(float(row['ECG']))
data=np.asarray(ECG)
peakIndex=findPeaks(data, 200)
rr_data = getRR(peakIndex)
afstats = Fib_Detection(rr_data)
# Print result to result.txt file
outputFile = "result.txt"
result = "%d"%sum(afstats['count']);
fp = open(outputFile, 'r+')
fp.write(result);
fp.close()
return sum(afstats['count']) > 1
afib_dect();
| mit | 1,566,503,293,678,278,400 | 32.256 | 141 | 0.577099 | false | 3.212519 | false | false | false |
eubr-bigsea/tahiti | migrations/versions/38745782554d_adding_missing_port_interfaces.py | 1 | 5671 | # -*- coding: utf-8 -*-}
"""Adding missing port interfaces
Revision ID: 38745782554d
Revises: b2b823fe47b1
Create Date: 2017-06-07 15:16:30.224298
"""
from alembic import op
from sqlalchemy import Integer, String
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '38745782554d'
down_revision = 'b2b823fe47b1'
branch_labels = None
depends_on = None
data = [
(34, 5),
(55, 1),
(56, 1),
(57, 11),
(37, 2),
(37, 18),
# (46, 2),
# (46, 18),
(63, 1),
(64, 1),
(73, 19),
(100, 2),
(100, 19),
(161, 17)
]
def upgrade():
try:
op.execute(text('START TRANSACTION'))
insert_operation_port_interface()
insert_operation_port_interface_translation()
insert_operation_port_interface_operation_port()
insert_operation_platform()
insert_operation_translation()
except:
op.execute(text('ROLLBACK'))
raise
def insert_operation_translation():
tb = table(
'operation_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String), )
columns = ('id', 'locale', 'name', 'description')
rows_data = [
(73, 'en', 'Regression Model', 'Regression Model'),
(73, 'pt', 'Modelo de Regressão', 'Modelo de Regressão'),
(74, 'en', 'Isotonic Regression', 'Isotonic Regression'),
(74, 'pt', 'Regressão Isotônica', 'Regressão Isotônica'),
(75, 'en', 'One Hot Encoder',
'One hot encoding transforms categorical '
'features to a format that works better with '
'classification and regression algorithms.'),
(75, 'pt', 'One Hot Encoder',
'One Hot encoding é uma transformação que fazemos nos '
'dados para representarmos uma variável categórica de '
'forma binária (indica presença ou ausência de um valor).'),
(76, 'en', 'AFT Survival Regression',
'Accelerated Failure Time (AFT) Model Survival Regression'),
(76, 'pt', 'Regressão AFT Survival',
'Accelerated Failure Time (AFT) Model Survival Regression'),
(77, 'en', 'GBT Regressor',
'Gradient-Boosted Trees (GBTs) learning algorithm for '
'regression. It supports both continuous and categorical featur'),
(77, 'pt', 'Regressor GBT',
'Gradient-Boosted Trees (GBTs) learning algorithm for '
'regression. It supports both continuous and categorical feature'),
(78, 'en', 'Random Forest Regressor',
'Random Forest learning algorithm for regression. '
'It supports both continuous and categorical features.'),
(78, 'pt', 'Regressor Random Forest',
'Random Forest learning algorithm for regression. '
'It supports both continuous and categorical features.'),
(79, 'en', 'Generalized Linear Regressor',
'Generalized Linear Regressor'),
(79, 'pt', 'Regressor Linear Generalizado',
'Regressor Linear Generalizado'),
]
rows = [dict(list(zip(columns, row))) for row in rows_data]
op.bulk_insert(tb, rows)
def insert_operation_platform():
tb = table(
'operation_platform',
column('operation_id', Integer),
column('platform_id', Integer), )
columns = ('operation_id', 'platform_id')
rows_data = [
(73, 1),
(74, 1),
(75, 1),
(76, 1),
(77, 1),
(78, 1),
(79, 1),
]
rows = [dict(list(zip(columns, row))) for row in rows_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface():
tb = table(
'operation_port_interface',
column('id', Integer),
column('color', String), )
columns = ('id', 'color')
interface_data = [
(19, '#AACC22')
]
rows = [dict(list(zip(columns, row))) for row in interface_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface_translation():
tb = table(
'operation_port_interface_translation',
column('id', Integer),
column('locale', String),
column('name', String), )
columns = ('id', 'locale', 'name')
interface_data = [
(19, 'pt', 'Visualização'),
(19, 'en', 'Visualization'),
]
rows = [dict(list(zip(columns, row))) for row in interface_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface_operation_port():
tb = table(
'operation_port_interface_operation_port',
column('operation_port_id', Integer),
column('operation_port_interface_id', Integer), )
columns = ('operation_port_id', 'operation_port_interface_id')
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def downgrade():
try:
for d in data:
op.execute(
text('DELETE FROM '
'operation_port_interface_operation_port '
'WHERE operation_port_id = {} '
' AND operation_port_interface_id = {}'.format(*d)))
op.execute(text('DELETE FROM operation_port_interface_translation '
'WHERE id = 19'))
op.execute(text('DELETE FROM operation_port_interface '
'WHERE id = 19'))
op.execute(text('DELETE FROM operation_platform '
'WHERE operation_id BETWEEN 73 AND 79'))
op.execute(text('DELETE FROM operation_translation '
'WHERE id BETWEEN 73 AND 79'))
except:
op.execute(text('ROLLBACK'))
raise
| apache-2.0 | 7,238,215,253,415,339,000 | 28.915344 | 76 | 0.582773 | false | 3.744371 | false | false | false |
Barrog/C4-Datapack | data/jscript/quests/329_CuriosityOfDwarf/__init__.py | 1 | 2487 | # Made by Mr. - Version 0.3 by DrLecter
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
GOLEM_HEARTSTONE = 1346
BROKEN_HEARTSTONE = 1365
ADENA = 57
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "7437-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "7437-06.htm" :
st.exitQuest(1)
st.playSound("ItemSound.quest_finish")
return htmltext
def onTalk (Self,npc,st) :
npcId = npc.getNpcId()
htmltext = "<html><head><body>I have nothing to say you</body></html>"
id = st.getState()
if id == CREATED :
st.set("cond","0")
if int(st.get("cond"))==0 :
if st.getPlayer().getLevel() >= 33 :
htmltext = "7437-02.htm"
else:
htmltext = "7437-01.htm"
st.exitQuest(1)
else :
heart=st.getQuestItemsCount(GOLEM_HEARTSTONE)
broken=st.getQuestItemsCount(BROKEN_HEARTSTONE)
if broken+heart>0 :
st.giveItems(ADENA,50*broken+1000*heart)
st.takeItems(BROKEN_HEARTSTONE,-1)
st.takeItems(GOLEM_HEARTSTONE,-1)
htmltext = "7437-05.htm"
else:
htmltext = "7437-04.htm"
return htmltext
def onKill (self,npc,st):
npcId = npc.getNpcId()
n = st.getRandom(100)
if npcId == 85 :
if n<5 :
st.giveItems(GOLEM_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
elif n<58 :
st.giveItems(BROKEN_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
elif npcId == 83 :
if n<6 :
st.giveItems(GOLEM_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
elif n<56 :
st.giveItems(BROKEN_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(329,"329_CuriosityOfDwarf","Curiosity Of Dwarf")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(7437)
CREATED.addTalkId(7437)
STARTED.addTalkId(7437)
STARTED.addKillId(83)
STARTED.addKillId(85)
STARTED.addQuestDrop(85,BROKEN_HEARTSTONE,1)
STARTED.addQuestDrop(85,GOLEM_HEARTSTONE,1)
print "importing quests: 329: Curiosity Of Dwarf"
| gpl-2.0 | 4,741,307,329,138,383,000 | 27.918605 | 74 | 0.650985 | false | 2.729967 | false | false | false |
skuda/client-python | kubernetes/client/models/v1beta1_role_ref.py | 1 | 4581 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1RoleRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_group=None, kind=None, name=None):
"""
V1beta1RoleRef - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str'
}
self.attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name'
}
self._api_group = api_group
self._kind = kind
self._name = name
@property
def api_group(self):
"""
Gets the api_group of this V1beta1RoleRef.
APIGroup is the group for the resource being referenced
:return: The api_group of this V1beta1RoleRef.
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""
Sets the api_group of this V1beta1RoleRef.
APIGroup is the group for the resource being referenced
:param api_group: The api_group of this V1beta1RoleRef.
:type: str
"""
if api_group is None:
raise ValueError("Invalid value for `api_group`, must not be `None`")
self._api_group = api_group
@property
def kind(self):
"""
Gets the kind of this V1beta1RoleRef.
Kind is the type of resource being referenced
:return: The kind of this V1beta1RoleRef.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1RoleRef.
Kind is the type of resource being referenced
:param kind: The kind of this V1beta1RoleRef.
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`")
self._kind = kind
@property
def name(self):
"""
Gets the name of this V1beta1RoleRef.
Name is the name of resource being referenced
:return: The name of this V1beta1RoleRef.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1beta1RoleRef.
Name is the name of resource being referenced
:param name: The name of this V1beta1RoleRef.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
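# Example (illustrative sketch): building a RoleRef as used inside a RoleBinding and
# serializing it back to the dict shape the API expects. The values are placeholders.
def _example_role_ref():
    ref = V1beta1RoleRef(api_group='rbac.authorization.k8s.io',
                         kind='ClusterRole',
                         name='view')
    return ref.to_dict()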
| apache-2.0 | -5,183,765,010,237,596,000 | 25.633721 | 105 | 0.531107 | false | 4.237743 | false | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/multiprocessing/queues.py | 1 | 9842 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: queues.py
__all__ = [
'Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import atexit
import weakref
from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning
class Queue(object):
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
return
def __getstate__(self):
assert_spawning(self)
return (
self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
return
def put(self, obj, block=True, timeout=None):
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
return
def get(self, block=True, timeout=None):
if block and timeout is None:
self._rlock.acquire()
try:
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if not self._poll(block and deadline - time.time() or 0.0):
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
return
def qsize(self):
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
self._buffer.clear()
self._thread = threading.Thread(target=Queue._feed, args=(
self._buffer, self._notempty, self._send,
self._wlock, self._writer.close), name='QueueFeederThread')
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
created_by_this_process = self._opid == os.getpid()
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(self._thread, Queue._finalize_join, [
weakref.ref(self._thread)], exitpriority=-5)
self._close = Finalize(self, Queue._finalize_close, [
self._buffer, self._notempty], exitpriority=10)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
return
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
@staticmethod
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
if wacquire is None:
send(obj)
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
return
_sentinel = object()
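# Example (illustrative sketch): the Queue class above is what multiprocessing.Queue()
# returns; a typical producer/consumer round trip looks like this. The nested worker
# function relies on a fork start method, so treat this as a Unix-oriented sketch.
def _example_queue_roundtrip():
    from multiprocessing import Process, Queue as PublicQueue

    def _worker(q):
        q.put('hello from the child process')

    q = PublicQueue()
    p = Process(target=_worker, args=(q,))
    p.start()
    msg = q.get()       # blocks until the child has fed an item through the pipe
    p.join()
    q.close()
    q.join_thread()     # mirrors the feeder-thread handshake implemented above
    return msg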
class JoinableQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
self._cond.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
finally:
self._cond.release()
self._notempty.release()
return
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
class SimpleQueue(object):
def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._make_methods()
return
def empty(self):
return not self._reader.poll()
def __getstate__(self):
assert_spawning(self)
return (
self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
self._reader, self._writer, self._rlock, self._wlock = state
self._make_methods()
def _make_methods(self):
recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._wlock is None:
self.put = self._writer.send
else:
send = self._writer.send
wacquire, wrelease = self._wlock.acquire, self._wlock.release
def put(obj):
wacquire()
try:
return send(obj)
finally:
wrelease()
self.put = put
return | unlicense | 1,957,727,825,150,317,600 | 28.47006 | 106 | 0.517578 | false | 4.356795 | false | false | false |
HackerEarth/django-allauth | allauth/socialaccount/providers/twitter/views.py | 1 | 1820 | from django.utils import simplejson
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (OAuthAdapter,
OAuthLoginView,
OAuthCallbackView)
from allauth.socialaccount.models import SocialLogin, SocialAccount
from allauth.utils import get_user_model
from provider import TwitterProvider
User = get_user_model()
class TwitterAPI(OAuth):
"""
Verifying twitter credentials
"""
url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
def get_user_info(self):
user = simplejson.loads(self.query(self.url))
return user
class TwitterOAuthAdapter(OAuthAdapter):
provider_id = TwitterProvider.id
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
# Issue #42 -- this one authenticates over and over again...
# authorize_url = 'https://api.twitter.com/oauth/authorize'
authorize_url = 'https://api.twitter.com/oauth/authenticate'
def complete_login(self, request, app, token):
client = TwitterAPI(request, app.key, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
uid = extra_data['id']
user = User(username=extra_data['screen_name'])
account = SocialAccount(user=user,
uid=uid,
provider=TwitterProvider.id,
extra_data=extra_data)
return SocialLogin(account)
oauth_login = OAuthLoginView.adapter_view(TwitterOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(TwitterOAuthAdapter)
| mit | 985,280,656,928,123,300 | 36.916667 | 75 | 0.636813 | false | 4.193548 | false | false | false |
skibaa/smart-sweeper | game/dbext.py | 1 | 2811 | import logging
from google.appengine.ext import db
from google.appengine.api import datastore_errors
import cPickle
logger=logging.getLogger("smartSweeper.dbext")
class PickledProperty(db.Property):
data_type = db.Blob
def __init__(self, force_type=None, *args, **kw):
self.force_type=force_type
super(PickledProperty, self).__init__(*args, **kw)
def validate(self, value):
value = super(PickledProperty, self).validate(value)
if value is not None and self.force_type and \
not isinstance(value, self.force_type):
raise datastore_errors.BadValueError(
'Property %s must be of type "%s".' % (self.name,
self.force_type))
return value
def get_value_for_datastore(self, model_instance):
value = self.__get__(model_instance, model_instance.__class__)
if value is not None:
return db.Text(cPickle.dumps(value))
def make_value_from_datastore(self, value):
if value is not None:
return cPickle.loads(str(value))
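# Example (illustrative sketch): using PickledProperty on an App Engine db model. The
# model below is hypothetical, and an active datastore context (dev server or testbed)
# is assumed for put()/get() to succeed.
def _example_pickled_property():
    class ExampleGameState(db.Model):
        board = PickledProperty(force_type=dict)   # validate() rejects non-dict values
        history = PickledProperty()                # any picklable value is accepted

    state = ExampleGameState(board={'size': 9}, history=[(0, 0), (1, 2)])
    key = state.put()                          # values are pickled into Text blobs
    return ExampleGameState.get(key).board     # and unpickled transparently on read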
class CachedReferenceProperty(db.ReferenceProperty):
def __property_config__(self, model_class, property_name):
super(CachedReferenceProperty, self).__property_config__(model_class,
property_name)
#Just carelessly override what super made
setattr(self.reference_class,
self.collection_name,
_CachedReverseReferenceProperty(model_class, property_name,
self.collection_name))
class _CachedReverseReferenceProperty(db._ReverseReferenceProperty):
def __init__(self, model, prop, collection_name):
super(_CachedReverseReferenceProperty, self).__init__(model, prop)
self.__prop=prop
self.__collection_name = collection_name
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
logger.debug("cached reverse trying")
if self.__collection_name in model_instance.__dict__:# why does it get here at all?
return model_instance.__dict__[self.__collection_name]
logger.info("cached reverse miss %s",self.__collection_name)
query=super(_CachedReverseReferenceProperty, self).__get__(model_instance,
model_class)
#replace the attribute on the instance
res=[]
for c in query:
resolved_name='_RESOLVED_'+self.__prop #WARNING: using internal
setattr(c, resolved_name, model_instance)
res += [c]
model_instance.__dict__[self.__collection_name]=res
return res
def __delete__ (self, model_instance):
if model_instance is not None:
del model_instance.__dict__[self.__collection_name]
| apache-2.0 | -9,060,400,630,525,390,000 | 37.506849 | 91 | 0.626467 | false | 4.291603 | false | false | false |
allynt/tings | T/tings/views/api/views_api_users.py | 1 | 1138 | from rest_framework import generics, permissions
from django.contrib.auth.models import User
# from T.tings.models.models_users import TUserProfile
from T.tings.serializers.serializers_users import TUserSerializer
class TUserPermission(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# anybody can submit GET, HEAD or OPTIONS requests...
if request.method in permissions.SAFE_METHODS:
return True
# only the admin or collection owners can submit PUT, POST, or DELETE requests...
user = request.user
return user.is_superuser or user == obj
class TUserList(generics.ListCreateAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
class TUserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
| mit | 8,404,409,644,057,363,000 | 33.484848 | 89 | 0.742531 | false | 4.428016 | false | false | false |
akx/gentry | gore/api/handlers/store.py | 1 | 1862 | import base64
import json
import logging
import zlib
from datetime import datetime
from django.conf import settings
from django.db import transaction
from django.http import JsonResponse
from django.utils.encoding import force_str
from django.utils.timezone import make_aware
from pytz import UTC
from gore.auth import validate_auth_header
from gore.excs import InvalidAuth
from gore.models import Event
from gore.signals import event_received
from gore.utils.event_grouper import group_event
logger = logging.getLogger(__name__)
def store_event(request, project):
try:
auth_header = validate_auth_header(request, project)
except InvalidAuth as ia:
return JsonResponse({'error': str(ia)}, status=401)
body = request.body
if request.META.get('HTTP_CONTENT_ENCODING') == 'deflate':
body = zlib.decompress(body)
elif auth_header.get('sentry_version') == '5': # Support older versions of Raven
body = zlib.decompress(base64.b64decode(body)).decode('utf8')
body = json.loads(force_str(body))
timestamp = make_aware(datetime.fromtimestamp(float(auth_header['sentry_timestamp'])), timezone=UTC)
with transaction.atomic():
event = Event.objects.create_from_raven(project_id=project, body=body, timestamp=timestamp)
try:
with transaction.atomic():
group = group_event(event.project, event)
group.archived = False
group.cache_values()
group.save()
except: # pragma: no cover
logger.warning('event with ID %s could not be grouped' % event.id, exc_info=True)
try:
event_received.send(sender=event)
except: # pragma: no cover
logger.warning('event_received signal handling failed', exc_info=True)
if settings.DEBUG:
raise
return JsonResponse({'id': event.id}, status=201)
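# Example (illustrative sketch): what a client submission to this view can look like.
# The URL, project id and sentry_key/sentry_secret values are placeholders, and the
# header layout follows the Sentry v5+ wire protocol that validate_auth_header is
# expected to parse, so treat the exact field set as an assumption.
def _example_store_request():
    import json as jsonlib
    import time as timelib

    import requests  # assumed to be available in the client environment

    auth = ('Sentry sentry_version=7, sentry_timestamp={ts}, '
            'sentry_key=PUBLIC_KEY, sentry_secret=SECRET_KEY, '
            'sentry_client=example/0.1').format(ts=timelib.time())
    payload = {'message': 'Something broke', 'level': 'error'}
    return requests.post('https://gore.example.com/api/1/store/',
                         data=jsonlib.dumps(payload),
                         headers={'X-Sentry-Auth': auth,
                                  'Content-Type': 'application/json'})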
| mit | 4,651,106,081,188,272,000 | 33.481481 | 104 | 0.6971 | false | 3.936575 | false | false | false |
django-stars/dash2011 | presence/apps/shout/views.py | 1 | 2081 | import logging
import json
from django.shortcuts import render_to_response
from django.http import Http404
from django.template import RequestContext
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from shout.models import Shout
from shout.forms import ShoutForm
logger = logging.getLogger("presence.%s" % __name__)
@login_required
def shout_new(request):
if request.method == "POST":
form = ShoutForm(request.POST)
if form.is_valid():
shout = form.save(user=request.user)
logger.info('New %s shout from "%s"' % (('public', 'private')[shout.is_private], shout.user.username))
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'OK'}), mimetype='application/json')
return HttpResponseRedirect(reverse('shout-list'))
else:
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'ERR', 'reason': 'Shout text is required!'}), mimetype='application/json')
else:
form = ShoutForm()
data = {
'form': form,
}
return render_to_response('shout/new.html', data, RequestContext(request))
@login_required
def shout_list(request):
#custom manager to get shouts that are public, or private but owned by the requesting user
shouts = Shout.objects.filter_for_user(user=request.user)
data = {
'shouts': shouts,
}
return render_to_response('shout/list.html', data, RequestContext(request))
@login_required
def shout_detail(request, shout_id):
try:
shout = Shout.objects.get_for_user(user=request.user, id=shout_id)
except Shout.DoesNotExist:
raise Http404
data = {
'shout': shout,
}
return render_to_response('shout/detail.html', data, RequestContext(request))
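# Example (illustrative sketch): the filter_for_user()/get_for_user() helpers used in the
# views above live on a custom manager on the Shout model; a minimal version could look
# like this. The field names `is_private` and `user` are assumptions drawn from this view code.
def _example_shout_manager():
    from django.db import models
    from django.db.models import Q

    class ShoutManager(models.Manager):
        def filter_for_user(self, user):
            # public shouts, plus private shouts authored by the requesting user
            return self.filter(Q(is_private=False) | Q(user=user))

        def get_for_user(self, user, **kwargs):
            return self.filter_for_user(user).get(**kwargs)

    return ShoutManager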
| bsd-3-clause | 7,593,957,745,665,859,000 | 31.515625 | 134 | 0.682845 | false | 3.882463 | false | false | false |
JaviMerino/lisa | libs/utils/analysis/frequency_analysis.py | 1 | 24894 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
from trappy.utils import listify
from devlib.utils.misc import memoized
from collections import namedtuple
from analysis_module import AnalysisModule
# Configure logging
import logging
NON_IDLE_STATE = 4294967295
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class FrequencyAnalysis(AnalysisModule):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FrequencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_cpu_frequency_residency(self, cpu, total=True):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getCPUFrequencyResidency(cpu)
if not residency:
return None
if total:
return residency.total
return residency.active
def _dfg_cluster_frequency_residency(self, cluster, total=True):
"""
Get per-Cluster frequency residency, i.e. amount of time CLUSTER
`cluster` spent at each frequency.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getClusterFrequencyResidency(cluster)
if not residency:
return None
if total:
return residency.total
return residency.active
###############################################################################
# Plotting Methods
###############################################################################
def plotClusterFrequencies(self, title='Clusters Frequencies'):
"""
Plot frequency trend for all clusters. If sched_overutilized events are
available, the plots will also show the intervals of time where the
cluster was overutilized.
:param title: user-defined plot title
:type title: str
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
df = self._dfg_trace_event('cpu_frequency')
pd.options.mode.chained_assignment = None
# Extract LITTLE and big clusters frequencies
# and scale them to [MHz]
if len(self._platform['clusters']['little']):
lfreq = df[df.cpu == self._platform['clusters']['little'][-1]]
lfreq['frequency'] = lfreq['frequency']/1e3
else:
lfreq = []
if len(self._platform['clusters']['big']):
bfreq = df[df.cpu == self._platform['clusters']['big'][-1]]
bfreq['frequency'] = bfreq['frequency']/1e3
else:
bfreq = []
# Compute AVG frequency for LITTLE cluster
avg_lfreq = 0
if len(lfreq) > 0:
lfreq['timestamp'] = lfreq.index
lfreq['delta'] = (lfreq['timestamp'] -lfreq['timestamp'].shift()).fillna(0).shift(-1)
lfreq['cfreq'] = (lfreq['frequency'] * lfreq['delta']).fillna(0)
timespan = lfreq.iloc[-1].timestamp - lfreq.iloc[0].timestamp
avg_lfreq = lfreq['cfreq'].sum()/timespan
# Compute AVG frequency for big cluster
avg_bfreq = 0
if len(bfreq) > 0:
bfreq['timestamp'] = bfreq.index
bfreq['delta'] = (bfreq['timestamp'] - bfreq['timestamp'].shift()).fillna(0).shift(-1)
bfreq['cfreq'] = (bfreq['frequency'] * bfreq['delta']).fillna(0)
timespan = bfreq.iloc[-1].timestamp - bfreq.iloc[0].timestamp
avg_bfreq = bfreq['cfreq'].sum()/timespan
pd.options.mode.chained_assignment = 'warn'
# Setup a dual cluster plot
fig, pltaxes = plt.subplots(2, 1, figsize=(16, 8))
plt.suptitle(title, y=.97, fontsize=16, horizontalalignment='center')
# Plot Cluster frequencies
axes = pltaxes[0]
axes.set_title('big Cluster')
if avg_bfreq > 0:
axes.axhline(avg_bfreq, color='r', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['big'][0] - 100000)/1e3,
(self._platform['freqs']['big'][-1] + 100000)/1e3
)
if len(bfreq) > 0:
bfreq['frequency'].plot(style=['r-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO big CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
axes.set_xticklabels([])
axes.set_xlabel('')
self._trace.analysis.status.plotOverutilized(axes)
axes = pltaxes[1]
axes.set_title('LITTLE Cluster')
if avg_lfreq > 0:
axes.axhline(avg_lfreq, color='b', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['little'][0] - 100000)/1e3,
(self._platform['freqs']['little'][-1] + 100000)/1e3
)
if len(lfreq) > 0:
lfreq['frequency'].plot(style=['b-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO LITTLE CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
self._trace.analysis.status.plotOverutilized(axes)
# Save generated plots into datadir
figname = '{}/{}cluster_freqs.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix)
pl.savefig(figname, bbox_inches='tight')
logging.info('LITTLE cluster average frequency: %.3f GHz',
avg_lfreq/1e3)
logging.info('big cluster average frequency: %.3f GHz',
avg_bfreq/1e3)
return (avg_lfreq/1e3, avg_bfreq/1e3)
def plotCPUFrequencyResidency(self, cpus=None, pct=False, active=False):
"""
Plot per-CPU frequency residency. big CPUs are plotted first and then
LITTLEs.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param cpus: List of cpus. By default plot all CPUs
:type cpus: list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
if cpus is None:
# Generate plots only for available CPUs
cpufreq_data = self._dfg_trace_event('cpu_frequency')
_cpus = range(cpufreq_data.cpu.max()+1)
else:
_cpus = listify(cpus)
# Split between big and LITTLE CPUs ordered from higher to lower ID
_cpus.reverse()
big_cpus = [c for c in _cpus if c in self._platform['clusters']['big']]
little_cpus = [c for c in _cpus if c in
self._platform['clusters']['little']]
_cpus = big_cpus + little_cpus
# Precompute active and total time for each CPU
residencies = []
xmax = 0.0
for cpu in _cpus:
res = self._getCPUFrequencyResidency(cpu)
residencies.append(ResidencyData('CPU{}'.format(cpu), res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cpu', xmax, pct, active)
def plotClusterFrequencyResidency(self, clusters=None,
pct=False, active=False):
"""
Plot the frequency residency in a given cluster, i.e. the amount of
time cluster `cluster` spent at frequency `f_i`. By default, both 'big'
and 'LITTLE' clusters data are plotted.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param clusters: name of the clusters to be plotted (all of them by
default)
:type clusters: str ot list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU
if not self._trace.freq_coherency:
logging.warn('Cluster frequency is not coherent, plot DISABLED!')
return
# Sanitize clusters
if clusters is None:
_clusters = self._platform['clusters'].keys()
else:
_clusters = listify(clusters)
# Precompute active and total time for each cluster
residencies = []
xmax = 0.0
for cluster in _clusters:
res = self._getClusterFrequencyResidency(
self._platform['clusters'][cluster.lower()])
residencies.append(ResidencyData('{} Cluster'.format(cluster),
res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cluster', xmax, pct, active)
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
cpu_active[t] == 1 if the CPU is reported to be non-idle by the
cpu_idle events at time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
"""
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self._trace.ftrace.normalized_time:
start_time = self._trace.ftrace.basetime
if cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
return cpu_active
@memoized
def _getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
cluster_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
"""
cpu_active = {}
for cpu in cluster:
cpu_active[cpu] = self._getCPUActiveSignal(cpu)
active = pd.DataFrame(cpu_active)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
@memoized
def _getClusterFrequencyResidency(self, cluster):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
:raises: KeyError
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, '
'frequency residency computation not possible!')
return None
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'frequency residency computation not possible!')
return None
if isinstance(cluster, str):
try:
_cluster = self._platform['clusters'][cluster.lower()]
except KeyError:
logging.warn('%s cluster not found!', cluster)
return None
else:
_cluster = listify(cluster)
freq_df = self._dfg_trace_event('cpu_frequency')
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU. This assumption is verified
# by the Trace module when parsing the trace.
if len(_cluster) > 1 and not self._trace.freq_coherency:
logging.warn('Cluster frequency is NOT coherent,'
'cannot compute residency!')
return None
cluster_freqs = freq_df[freq_df.cpu == _cluster[0]]
# Compute TOTAL Time
time_intervals = cluster_freqs.index[1:] - cluster_freqs.index[:-1]
total_time = pd.DataFrame({
'time': time_intervals,
'frequency': [f/1000.0 for f in cluster_freqs.iloc[:-1].frequency]
})
total_time = total_time.groupby(['frequency']).sum()
# Compute ACTIVE Time
cluster_active = self._getClusterActiveSignal(_cluster)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
available_freqs = sorted(cluster_freqs.frequency.unique())
new_idx = sorted(cluster_freqs.index.tolist() +
cluster_active.index.tolist())
cluster_freqs = cluster_freqs.reindex(new_idx, method='ffill')
cluster_active = cluster_active.reindex(new_idx, method='ffill')
nonidle_time = []
for f in available_freqs:
freq_active = cluster_freqs.frequency.apply(
lambda x: 1 if x == f else 0
)
active_t = cluster_active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(self._trace.integrate_square_wave(active_t))
active_time = pd.DataFrame({'time': nonidle_time},
index=[f/1000.0 for f in available_freqs])
active_time.index.name = 'frequency'
return ResidencyTime(total_time, active_time)
def _getCPUFrequencyResidency(self, cpu):
"""
Get a DataFrame with per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency. Both total and active times
will be computed.
:param cpu: CPU ID
:type cpu: int
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
"""
return self._getClusterFrequencyResidency(cpu)
def _plotFrequencyResidencyAbs(self, axes, residency, n_plots,
is_first, is_last, xmax, title=''):
"""
Private method to generate frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency: tuple of total and active time dataframes
:type residency: namedtuple(ResidencyTime)
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param xmax: x-axes higher bound
:type xmax: double
:param title: title of this subplot
:type title: str
"""
yrange = 0.4 * max(6, len(residency.total)) * n_plots
residency.total.plot.barh(ax=axes, color='g',
legend=False, figsize=(16, yrange))
residency.active.plot.barh(ax=axes, color='r',
legend=False, figsize=(16, yrange))
axes.set_xlim(0, 1.05*xmax)
axes.set_ylabel('Frequency [MHz]')
axes.set_title(title)
axes.grid(True)
if is_last:
axes.set_xlabel('Time [s]')
else:
axes.set_xticklabels([])
if is_first:
# Put title on top of the figure. As of now there is no clean way
# to make the title appear always in the same position in the
# figure because figure heights may vary between different
# platforms (different number of OPPs). Hence, we use annotation
legend_y = axes.get_ylim()[1]
axes.annotate('OPP Residency Time', xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
axes.annotate('GREEN: Total', xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
color='g', fontsize=14)
axes.annotate('RED: Active', xy=(0, legend_y),
xytext=(50, 25), textcoords='offset points',
color='r', fontsize=14)
def _plotFrequencyResidencyPct(self, axes, residency_df, label,
n_plots, is_first, is_last, res_type):
"""
Private method to generate PERCENTAGE frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency_df: residency time dataframe
:type residency_df: :mod:`pandas.DataFrame`
:param label: label to be used for percentage residency dataframe
:type label: str
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param res_type: type of residency, either TOTAL or ACTIVE
:type res_type: str
"""
# Compute sum of the time intervals
duration = residency_df.time.sum()
residency_pct = pd.DataFrame(
{label: residency_df.time.apply(lambda x: x*100/duration)},
index=residency_df.index
)
yrange = 3 * n_plots
residency_pct.T.plot.barh(ax=axes, stacked=True, figsize=(16, yrange))
axes.legend(loc='lower center', ncol=7)
axes.set_xlim(0, 100)
axes.grid(True)
if is_last:
axes.set_xlabel('Residency [%]')
else:
axes.set_xticklabels([])
if is_first:
legend_y = axes.get_ylim()[1]
axes.annotate('OPP {} Residency Time'.format(res_type),
xy=(0, legend_y), xytext=(-50, 35),
textcoords='offset points', fontsize=18)
def _plotFrequencyResidency(self, residencies, entity_name, xmax,
pct, active):
"""
Generate Frequency residency plots for the given entities.
:param residencies:
:type residencies: namedtuple(ResidencyData) - tuple containing:
1) as first element, a label to be used as subplot title
2) as second element, a namedtuple(ResidencyTime)
:param entity_name: name of the entity ('cpu' or 'cluster') used in the
figure name
:type entity_name: str
:param xmax: upper bound of x-axes
:type xmax: double
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
n_plots = len(residencies)
gs = gridspec.GridSpec(n_plots, 1)
fig = plt.figure()
figtype = ""
for idx, data in enumerate(residencies):
if data.residency is None:
plt.close(fig)
return
axes = fig.add_subplot(gs[idx])
is_first = idx == 0
is_last = idx+1 == n_plots
if pct and active:
self._plotFrequencyResidencyPct(axes, data.residency.active,
data.label, n_plots,
is_first, is_last,
'ACTIVE')
figtype = "_pct_active"
continue
if pct:
self._plotFrequencyResidencyPct(axes, data.residency.total,
data.label, n_plots,
is_first, is_last,
'TOTAL')
figtype = "_pct_total"
continue
self._plotFrequencyResidencyAbs(axes, data.residency,
n_plots, is_first,
is_last, xmax,
title=data.label)
figname = '{}/{}{}_freq_residency{}.png'\
.format(self._trace.plots_dir,
self._trace.plots_prefix,
entity_name, figtype)
pl.savefig(figname, bbox_inches='tight')
# vim :set tabstop=4 shiftwidth=4 expandtab
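# Example (illustrative sketch): how this module is normally reached through a Trace
# object in LISA. The trace argument is assumed to be a libs.utils.Trace whose ftrace
# data contains cpu_frequency and cpu_idle events; the attribute name follows the
# trace.analysis.<module> convention already used above (e.g. analysis.status).
def _example_frequency_plots(trace):
    freq = trace.analysis.frequency
    freq.plotClusterFrequencies()                          # per-cluster frequency over time
    freq.plotCPUFrequencyResidency(cpus=[0, 1], pct=True)  # percentage residency per CPU
    total = freq._dfg_cpu_frequency_residency(0)           # "total" residency DataFrame for CPU0
    return total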
| apache-2.0 | -7,015,758,231,730,057,000 | 37.180982 | 98 | 0.555676 | false | 4.189498 | false | false | false |
cburmeister/flask-bones | app/commands.py | 1 | 1163 | from faker import Faker
import click
from app.database import db
from app.user.models import User
@click.option('--num_users', default=5, help='Number of users.')
def populate_db(num_users):
"""Populates the database with seed data."""
fake = Faker()
users = []
for _ in range(num_users):
users.append(
User(
username=fake.user_name(),
email=fake.email(),
password=fake.word() + fake.word(),
remote_addr=fake.ipv4()
)
)
users.append(
User(
username='cburmeister',
email='[email protected]',
password='test123',
remote_addr=fake.ipv4(),
active=True,
is_admin=True
)
)
for user in users:
db.session.add(user)
db.session.commit()
def create_db():
"""Creates the database."""
db.create_all()
def drop_db():
"""Drops the database."""
if click.confirm('Are you sure?', abort=True):
db.drop_all()
def recreate_db():
"""Same as running drop_db() and create_db()."""
drop_db()
create_db()
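# Example (illustrative sketch): one way these helpers can be exposed as Flask CLI
# commands. Where flask-bones actually registers them is not shown in this module, so
# the wiring below is an assumption; populate_db already carries its click option.
def _example_register_cli(app):
    app.cli.command('populate-db')(populate_db)
    app.cli.command('create-db')(create_db)
    app.cli.command('drop-db')(drop_db)
    app.cli.command('recreate-db')(recreate_db)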
| mit | -36,645,924,411,596,520 | 21.803922 | 64 | 0.536543 | false | 3.788274 | false | false | false |
rbuffat/pyidf | tests/test_shadingsite.py | 1 | 2163 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import ShadingSite
log = logging.getLogger(__name__)
class TestShadingSite(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_shadingsite(self):
pyidf.validation_level = ValidationLevel.error
obj = ShadingSite()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_azimuth_angle = 180.0
obj.azimuth_angle = var_azimuth_angle
# real
var_tilt_angle = 90.0
obj.tilt_angle = var_tilt_angle
# real
var_starting_x_coordinate = 4.4
obj.starting_x_coordinate = var_starting_x_coordinate
# real
var_starting_y_coordinate = 5.5
obj.starting_y_coordinate = var_starting_y_coordinate
# real
var_starting_z_coordinate = 6.6
obj.starting_z_coordinate = var_starting_z_coordinate
# real
var_length = 7.7
obj.length = var_length
# real
var_height = 8.8
obj.height = var_height
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.shadingsites[0].name, var_name)
self.assertAlmostEqual(idf2.shadingsites[0].azimuth_angle, var_azimuth_angle)
self.assertAlmostEqual(idf2.shadingsites[0].tilt_angle, var_tilt_angle)
self.assertAlmostEqual(idf2.shadingsites[0].starting_x_coordinate, var_starting_x_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_y_coordinate, var_starting_y_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_z_coordinate, var_starting_z_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].length, var_length)
self.assertAlmostEqual(idf2.shadingsites[0].height, var_height) | apache-2.0 | -7,739,266,213,189,500,000 | 31.787879 | 101 | 0.645862 | false | 3.581126 | false | false | false |
mitmedialab/MediaCloud-Web-Tools | server/views/topics/topiccreate.py | 1 | 3535 | import logging
from flask import jsonify, request
import flask_login
import mediacloud.error
from server import app, mc
from server.auth import user_mediacloud_client
from server.util.request import form_fields_required, api_error_handler, json_error_response, arguments_required
from server.views.topics.topic import topic_summary
logger = logging.getLogger(__name__)
VERSION_1 = 1
COLLECTION_US_TOP_ONLINE = 58722749
@app.route('/api/topics/create', methods=['PUT'])
@flask_login.login_required
@form_fields_required('name', 'description', 'solr_seed_query', 'start_date', 'end_date')
@api_error_handler
def topic_create():
user_mc = user_mediacloud_client()
name = request.form['name']
description = request.form['description']
solr_seed_query = request.form['solr_seed_query']
start_date = request.form['start_date']
end_date = request.form['end_date']
optional_args = {
'max_iterations': request.form['max_iterations'] if 'max_iterations' in request.form and request.form['max_iterations'] != 'null' else None,
'max_stories': request.form['max_stories'] if 'max_stories' in request.form and request.form['max_stories'] != 'null' else flask_login.current_user.profile['limits']['max_topic_stories'],
}
try:
topic_result = user_mc.topicCreate(name=name, description=description, solr_seed_query=solr_seed_query,
start_date=start_date, end_date=end_date,
media_tags_ids=[COLLECTION_US_TOP_ONLINE], # HACK: can't save without one of these in place (for now)
**optional_args,
)['topics'][0]
topics_id = topic_result['topics_id']
logger.info("Created new topic \"{}\" as {}".format(name, topics_id))
# if this includes any of the US-centric collections, add the retweet partisanship subtopic by default
    # client will either make an empty snapshot, or a spidering one
return topic_summary(topics_id)
except mediacloud.error.MCException as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(e.message, e.status_code)
except Exception as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(str(e), 500)
@app.route('/api/topics/name-exists', methods=['GET'])
@flask_login.login_required
@arguments_required('searchStr')
@api_error_handler
def topic_name_exists():
# Check if topic with name exists already
# Have to do this in a unique method, instead of in topic_search because we need to use an admin connection
# to media cloud to list all topics, but we don't want to return topics a user can't see to them.
    # :return: boolean indicating if topic with this name exists or not (case-insensitive check)
search_str = request.args['searchStr']
topics_id = int(request.args['topicId']) if 'topicId' in request.args else None
matching_topics = mc.topicList(name=search_str, limit=15)
if topics_id:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']
if t['topics_id'] != topics_id]
else:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']]
name_in_use = search_str.lower() in matching_topic_names
return jsonify({'nameInUse': name_in_use})
| apache-2.0 | -6,057,934,450,072,376,000 | 48.788732 | 195 | 0.660255 | false | 3.748674 | false | false | false |
jeffsilverm/presentation | whats_new_in_python_3.6/type_hints_complicated.py | 1 | 1308 | #! /usr/bin/python3.6
# -*- coding: utf-8 -*-
import time
import sys
assert sys.version_info.major == 3 and sys.version_info.minor == 6, "Not running python 3.6, running {}".format(
sys.version_info)
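# Demo classes: AA overrides afo_A() from A. The calls at the bottom show how the
# same methods behave with int arguments versus str arguments (str * int repeats
# the string, while str ** int raises TypeError).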
class A(object):
def __init__(self, instance_mark) -> None:
self.instance_mark_A = instance_mark
def af_A(self, input):
return input * 2
def afo_A(self, input):
return input * 4
class AA(A):
def __init__(self, instance_marker) -> None:
        super().__init__(instance_marker)
self.instance_marker = instance_marker
def aaf_AA(self, method_input):
return method_input * 20
def afo_A(self, method_input):
return method_input ** 2
class B(object):
def __init__(self):
pass
def bf_B(self, method_input):
return method_input * 9
a = A("marker a")
aa = AA("marker aa")
print("a.af_A(4) ", a.af_A(4))
print("a.afo_A(4) ", a.afo_A(4))
print("aa.aaf_AA(4) ", aa.aaf_AA(4))
print("aa.afo_A(4) ", aa.afo_A(4))
print("a.af_A('4') ", a.af_A('4'))
print("a.afo_A('4') ", a.afo_A('4'))
print("aa.aaf_AA('4') ", aa.aaf_AA('4'), flush=True)
try:
print("aa.afo_A('4') ", aa.afo_A('4'))
except TypeError as t:
time.sleep(1)
print("Exception TypeError was raised, as expected, when calling aa.afo_A('4'))", file=sys.stderr)
| gpl-2.0 | 859,038,877,987,351,700 | 22.357143 | 112 | 0.58104 | false | 2.730689 | false | false | false |
Crowdcomputer/CC | crowdcomputer/init_db.py | 1 | 1613 | '''
Created on Nov 26, 2012
@author: stefanotranquillini
'''
from django.contrib.auth.models import User, Group
from rest_framework.authtoken.models import Token
from general.models import Application
from uuid import uuid4
def init():
initAppsAndCC()
def initAppsAndCC():
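    # Create the default 'crowdcomputer' user, the 'crowdcomputer' and 'bpmn'
    # applications (each assigned a fresh UUID token), and the 'bpmn' group.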
try:
user, c = User.objects.get_or_create(username='crowdcomputer',email="[email protected]",password="this.is.spam")
user.save()
print "%s %s"%(user.username,c)
app, c = Application.objects.get_or_create(name="crowdcomputer",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
app.save()
print "%s %s" %(app.name, app.token)
app, c = Application.objects.get_or_create(name="bpmn",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
print "%s %s" %(app.name, app.token)
app.save()
bpmn, c = Group.objects.get_or_create(name='bpmn')
bpmn.save()
except Exception, e:
print e
print 'exception'
def createAdmin(username,password,email):
try:
admin, c = User.objects.get_or_create(email=email)
if c:
admin.set_password(password)
admin.username=username
admin.is_superuser = True
admin.is_staff = True
admin.save()
            print 'created'
else:
admin.set_password(password)
admin.save()
            print 'updated'
except Exception:
print 'exception'
| apache-2.0 | -2,273,612,954,416,833,300 | 28.345455 | 126 | 0.588965 | false | 3.506522 | false | false | false |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/matrixlib/defmatrix.py | 1 | 34262 | from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numpy.core.numerictypes import issubdtype
# make translation table
_numchars = '0123456789.-+jeEL'
if sys.version_info[0] >= 3:
class _NumCharTable:
def __getitem__(self, i):
if chr(i) in _numchars:
return chr(i)
else:
return None
_table = _NumCharTable()
def _eval(astr):
str_ = astr.translate(_table)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
else:
_table = [None] * 256
for k in range(256):
_table[k] = chr(k)
_table = ''.join(_table)
_todelete = []
for k in _table:
if k not in _numchars:
_todelete.append(k)
_todelete = ''.join(_todelete)
del k
def _eval(astr):
str_ = astr.translate(_table, _todelete)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
def _convert_from_string(data):
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
Unlike `matrix`, `asmatrix` does not make a copy if the input is already
a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
Parameters
----------
data : array_like
Input data.
dtype : data-type
Data-type of the output matrix.
Returns
-------
mat : matrix
`data` interpreted as a matrix.
Examples
--------
>>> x = np.array([[1, 2], [3, 4]])
>>> m = np.asmatrix(x)
>>> x[0,0] = 5
>>> m
matrix([[5, 2],
[3, 4]])
"""
return matrix(data, dtype=dtype, copy=False)
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n == 0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n < 0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n - 1):
result = N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t - q - 1] == '0':
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q + 1, t):
Z = N.dot(Z, Z)
if beta[t - k - 1] == '1':
result = N.dot(result, Z)
return result
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy:
return new.copy()
else:
return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = 'C'
if (ndim == 2) and arr.flags.fortran:
order = 'F'
if not (order or arr.flags.contiguous):
arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
newshape = self.shape
if ndim == 0:
self.shape = (1, 1)
elif ndim == 1:
self.shape = (1, newshape[0])
return
def __getitem__(self, index):
self._getitem = True
try:
out = N.ndarray.__getitem__(self, index)
finally:
self._getitem = False
if not isinstance(out, N.ndarray):
return out
if out.ndim == 0:
return out[()]
if out.ndim == 1:
sh = out.shape[0]
# Determine when we should have a column array
try:
n = len(index)
except:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
else:
out.shape = (1, sh)
return out
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)):
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__'):
return N.dot(self, other)
return NotImplemented
def __rmul__(self, other):
return N.dot(other, self)
def __imul__(self, other):
self[:] = self * other
return self
def __pow__(self, other):
return matrix_power(self, other)
def __ipow__(self, other):
self[:] = self ** other
return self
def __rpow__(self, other):
return NotImplemented
def __repr__(self):
s = repr(self.__array__()).replace('array', 'matrix')
# now, 'matrix' has 6 letters, and 'array' 5, so the columns don't
# line up anymore. We need to add a space.
l = s.splitlines()
for i in range(1, len(l)):
if l[i]:
l[i] = ' ' + l[i]
return '\n'.join(l)
def __str__(self):
return str(self.__array__())
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis == 0:
return self
elif axis == 1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]], dtype=bool)
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]], dtype=bool)
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
"""
Indexes of the maximum values along an axis.
Return the indexes of the first occurrences of the maximum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmax` for complete descriptions
See Also
--------
numpy.argmax
Notes
-----
This is the same as `ndarray.argmax`, but returns a `matrix` object
where `ndarray.argmax` would return an `ndarray`.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.argmax()
11
>>> x.argmax(0)
matrix([[2, 2, 2, 2]])
>>> x.argmax(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmax(self, axis, out)._align(axis)
def min(self, axis=None, out=None):
"""
Return the minimum value along an axis.
Parameters
----------
See `amin` for complete descriptions.
See Also
--------
amin, ndarray.min
Notes
-----
This is the same as `ndarray.min`, but returns a `matrix` object
where `ndarray.min` would return an ndarray.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.min()
-11
>>> x.min(0)
matrix([[ -8, -9, -10, -11]])
>>> x.min(1)
matrix([[ -3],
[ -7],
[-11]])
"""
return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
def argmin(self, axis=None, out=None):
"""
Indexes of the minimum values along an axis.
Return the indexes of the first occurrences of the minimum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmin` for complete descriptions.
See Also
--------
numpy.argmin
Notes
-----
This is the same as `ndarray.argmin`, but returns a `matrix` object
where `ndarray.argmin` would return an `ndarray`.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.argmin()
11
>>> x.argmin(0)
matrix([[2, 2, 2, 2]])
>>> x.argmin(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmin(self, axis, out)._align(axis)
def ptp(self, axis=None, out=None):
"""
Peak-to-peak (maximum - minimum) value along the given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp
Notes
-----
Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.ptp()
11
>>> x.ptp(0)
matrix([[8, 8, 8, 8]])
>>> x.ptp(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self))
def getA(self):
"""
Return `self` as an `ndarray` object.
Equivalent to ``np.asarray(self)``.
Parameters
----------
None
Returns
-------
ret : ndarray
`self` as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA()
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
"""
return self.__array__()
def getA1(self):
"""
Return `self` as a flattened `ndarray`.
Equivalent to ``np.asarray(x).ravel()``
Parameters
----------
None
Returns
-------
ret : ndarray
`self`, 1-D, as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return self.__array__().ravel()
def ravel(self, order='C'):
"""
Return a flattened matrix.
Refer to `numpy.ravel` for more documentation.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `m` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
ret : matrix
Return the matrix flattened to shape `(1, N)` where `N`
is the number of elements in the original matrix.
A copy is made only if necessary.
See Also
--------
matrix.flatten : returns a similar output matrix but always a copy
matrix.flat : a flat iterator on the array.
numpy.ravel : related function which returns an ndarray
"""
return N.ndarray.ravel(self, order=order)
def getT(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
def getH(self):
"""
Returns the (complex) conjugate transpose of `self`.
Equivalent to ``np.transpose(self)`` if `self` is real-valued.
Parameters
----------
None
Returns
-------
ret : matrix object
complex conjugate transpose of `self`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4)))
>>> z = x - 1j*x; z
matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
[ 1. +1.j, 5. +5.j, 9. +9.j],
[ 2. +2.j, 6. +6.j, 10.+10.j],
[ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
return self.transpose().conjugate()
else:
return self.transpose()
T = property(getT, None)
A = property(getA, None)
A1 = property(getA1, None)
H = property(getH, None)
I = property(getI, None)
def _from_string(str, gdict, ldict):
rows = str.split(';')
rowtup = []
for row in rows:
trow = row.split(',')
newrow = []
for x in trow:
newrow.extend(x.split())
trow = newrow
coltup = []
for col in trow:
col = col.strip()
try:
thismat = ldict[col]
except KeyError:
try:
thismat = gdict[col]
except KeyError:
raise KeyError("%s not found" % (col,))
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
Parameters
----------
obj : str or array_like
Input data. Names of variables in the current scope may be
referenced, even if `obj` is a string.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
Ignored if `obj` is not a string or `gdict` is `None`.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
Returns
-------
out : matrix
Returns a matrix object, which is a specialized 2-D array.
See Also
--------
matrix
Examples
--------
>>> A = np.mat('1 1; 1 1')
>>> B = np.mat('2 2; 2 2')
>>> C = np.mat('3 4; 5 6')
>>> D = np.mat('7 8; 9 0')
All the following expressions construct the same block matrix:
>>> np.bmat([[A, B], [C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat('A,B; C,D')
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
"""
if isinstance(obj, str):
if gdict is None:
# get previous frame
frame = sys._getframe().f_back
glob_dict = frame.f_globals
loc_dict = frame.f_locals
else:
glob_dict = gdict
loc_dict = ldict
return matrix(_from_string(obj, glob_dict, loc_dict))
if isinstance(obj, (tuple, list)):
# [[A,B],[C,D]]
arr_rows = []
for row in obj:
if isinstance(row, N.ndarray): # not 2-d
return matrix(concatenate(obj, axis=-1))
else:
arr_rows.append(concatenate(row, axis=-1))
return matrix(concatenate(arr_rows, axis=0))
if isinstance(obj, N.ndarray):
return matrix(obj)
mat = asmatrix
| mit | -4,329,534,605,734,874,600 | 26.38769 | 89 | 0.471222 | false | 3.861377 | false | false | false |
Septima/qgis-qlrbrowser | src/QlrBrowser/mysettings/qgissettingmanager/types/bool.py | 1 | 3112 | #-----------------------------------------------------------
#
# QGIS setting manager is a python module to easily manage read/write
# settings and set/get corresponding widgets.
#
# Copyright : (C) 2013 Denis Rouzaud
# Email : [email protected]
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt5.QtWidgets import QCheckBox
from qgis.core import QgsProject
from ..setting import Setting
from ..setting_widget import SettingWidget
from ..setting_manager import Debug
class Bool(Setting):
def __init__(self, name, scope, default_value, options={}):
Setting.__init__(self, name, scope, default_value, bool, QgsProject.instance().readBoolEntry, QgsProject.instance().writeEntryBool, options)
def check(self, value):
if type(value) != bool:
raise NameError("Setting %s must be a boolean." % self.name)
def config_widget(self, widget):
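        # Return the SettingWidget wrapper that matches the widget type: a plain
        # QCheckBox or any other widget that reports itself as checkable.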
if type(widget) == QCheckBox:
return CheckBoxBoolWidget(self, widget, self.options)
elif hasattr(widget, "isCheckable") and widget.isCheckable():
return CheckableBoolWidget(self, widget, self.options)
else:
print(type(widget))
raise NameError("SettingManager does not handle %s widgets for booleans at the moment (setting: %s)" %
(type(widget), self.name))
class CheckBoxBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.stateChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
if Debug:
print("Bool: set_widget_value: {0}{1}".format(value, self.setting.name))
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
class CheckableBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.clicked
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
def widget_test(self, value):
print('cannot test checkable groupbox at the moment')
return False | gpl-2.0 | 4,159,583,462,942,297,000 | 36.506024 | 148 | 0.643959 | false | 4.316227 | false | false | false |
vhernandez/pygtksheet | examples/complex_test.py | 1 | 11754 | import sys
sys.path += ['/usr/local/lib/python2.6/dist-packages/gtk-2.0']
import gtk
from gtk import gdk
import pango
import gtksheet
from bordercombo import BorderCombo
#from gtkextra import BorderCombo
#import gtkextra
class TestSheet(gtksheet.Sheet):
def __init__(self):
gtksheet.Sheet.__init__(self, 20, 20, "Test")
colormap = gdk.colormap_get_system()
self.default_bg_color = colormap.alloc_color("light yellow")
self.default_fg_color = colormap.alloc_color("black")
self.set_background(self.default_bg_color)
self.set_grid(colormap.alloc_color("light blue"))
for column in xrange(self.get_columns_count()):
name = chr(ord("A") + column)
self.column_button_add_label(column, name)
self.set_column_title(column, name)
self.default_font = self.style.font_desc
class TestWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
status_box = gtk.HBox(spacing=1)
status_box.set_border_width(0)
self.location = gtk.Label("")
(width, height) = self.location.size_request()
self.location.set_size_request(160, height)
status_box.pack_start(self.location, False)
self.entry = gtk.Entry()
self.entry.connect("changed", self._show_sheet_entry_cb)
status_box.pack_start(self.entry)
t = gtk.Toolbar()
ttips = gtk.Tooltips()
def add_widget_to_toolbar(widget, separator=True, tooltip=None):
ti = gtk.ToolItem()
ti.add(widget)
if tooltip is not None:
ti.set_tooltip(ttips, tooltip)
t.insert(ti, -1)
if separator:
t.insert(gtk.SeparatorToolItem(), -1)
fontbutton = gtk.FontButton()
fontbutton.connect("font-set", self._font_changed_cb)
add_widget_to_toolbar(fontbutton,
tooltip="Change the font of the selected cells");
self.fontbutton = fontbutton
items = \
(("justleft", None,
"Justify selected cells to the left",
gtk.STOCK_JUSTIFY_LEFT, self._justification_cb,
gtk.JUSTIFY_LEFT),
("justcenter", None,
"Justify selected cells to the center",
gtk.STOCK_JUSTIFY_CENTER, self._justification_cb,
gtk.JUSTIFY_CENTER),
("justright", None,
"Justify selected cells to the right",
gtk.STOCK_JUSTIFY_RIGHT, self._justification_cb,
gtk.JUSTIFY_RIGHT))
for name, label, tooltip, stock_id, cb, cb_params in items:
ti = gtk.Action(name, label, tooltip, stock_id)
ti.connect("activate", cb, cb_params)
t.insert(ti.create_tool_item(), -1)
bordercombo = BorderCombo()
bordercombo.connect("changed", self._border_changed_cb)
add_widget_to_toolbar(bordercombo,
tooltip="Change the border of the selected cells")
colormap = gdk.colormap_get_system()
colorbtn = gtk.ColorButton(colormap.alloc_color("black"))
colorbtn.connect("color-set", self._color_changed_cb, "f")
add_widget_to_toolbar(colorbtn, separator=False,
tooltip="Change the foreground color of the selected cells")
self.fgcolorbtn = colorbtn
colorbtn = gtk.ColorButton(colormap.alloc_color("light yellow"))
colorbtn.connect("color-set", self._color_changed_cb, "b")
add_widget_to_toolbar(colorbtn,
tooltip="Change the background color of the selected cells");
self.bgcolorbtn = colorbtn
self.sheet = TestSheet()
self.sheet.connect("activate", self._activate_sheet_cell_cb)
self.sheet.get_entry().connect("changed", self._show_entry_cb)
self.sheet.connect("changed", self._sheet_changed_cb)
ws = gtk.ScrolledWindow()
ws.add(self.sheet)
fd = self.sheet.default_font
fontbutton.set_font_name(fd.to_string())
vbox = gtk.VBox()
vbox.pack_start(t, False, False, 0)
vbox.pack_start(status_box, False, False, 0)
vbox.pack_start(ws, True, True, 0)
self.add(vbox)
self.set_size_request(500,400)
self.show_all()
def _sheet_changed_cb(self, sheet, row, column):
print "Sheet change at row: %d, column: %d" % (row, column)
def _show_sheet_entry_cb(self, entry):
if not entry.flags() & gtk.HAS_FOCUS:
return
sheet_entry = self.sheet.get_entry()
text = entry.get_text()
sheet_entry.set_text(text)
def _show_entry_cb(self, sheet_entry, *args):
if not sheet_entry.flags() & gtk.HAS_FOCUS:
return
text = sheet_entry.get_text()
self.entry.set_text(text)
def _activate_sheet_cell_cb(self, sheet, row, column):
title = sheet.get_column_title(column)
if title:
cell = " %s:%d " % (title, row)
else:
cell = " ROW: %d COLUMN: %d " % (row, column)
self.location.set_text(cell)
# Set attributes
attributes = sheet.get_attributes(row, column)
if attributes:
fd = attributes.font_desc if attributes.font_desc else self.sheet.default_font
fgcolor = attributes.foreground
bgcolor = attributes.background
else:
fd = self.sheet.default_font
fgcolor = self.sheet.default_fg_color
bgcolor = self.sheet.default_bg_color
self.fontbutton.set_font_name(fd.to_string())
self.fgcolorbtn.set_color(fgcolor)
self.bgcolorbtn.set_color(bgcolor)
# Set entry text
sheet_entry = sheet.get_entry()
self.entry.props.max_length = sheet_entry.props.max_length
text = sheet.cell_get_text(row, column)
if text:
self.entry.set_text(text)
else:
self.entry.set_text("")
print self.sheet.props.active_cell
def _font_changed_cb(self, widget):
r = self.sheet.props.selected_range
fd = pango.FontDescription(widget.get_font_name())
self.sheet.range_set_font(r, fd)
def _justification_cb(self, widget, data=None):
if data is None:
return
r = self.sheet.props.selected_range
if r:
self.sheet.range_set_justification(r, data)
def _border_changed_cb(self, widget):
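        # Apply the border style picked in the BorderCombo to the selected range;
        # each combo index maps to a different edge, frame or per-cell combination.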
border = widget.get_active()
range = self.sheet.props.selected_range
border_width = 3
self.sheet.range_set_border(range, 0, 0)
if border == 1:
border_mask = gtksheet.SHEET_TOP_BORDER
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 2:
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 3:
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 4:
border_mask = gtksheet.SHEET_LEFT_BORDER
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 5:
if range.col0 == range.coli:
border_mask = gtksheet.SHEET_LEFT_BORDER | gtksheet.SHEET_RIGHT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_LEFT_BORDER
auxcol = range.coli
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli = auxcol
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 6:
if range.row0 == range.rowi:
border_mask = gtksheet.SHEET_TOP_BORDER | gtksheet.SHEET_BOTTOM_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_TOP_BORDER
auxrow = range.rowi
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi = auxrow
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 7:
border_mask = gtksheet.SHEET_RIGHT_BORDER | gtksheet.SHEET_LEFT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 8:
border_mask = gtksheet.SHEET_BOTTOM_BORDER | gtksheet.SHEET_TOP_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 9:
self.sheet.range_set_border(range, 15, border_width)
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 15
                    auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask ^ gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask ^ gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask ^ gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask ^ gtksheet.SHEET_LEFT_BORDER
if border_mask != 15:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 10:
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 0
auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask | gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask | gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask | gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask | gtksheet.SHEET_LEFT_BORDER
if border_mask != 0:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 11:
border_mask = 15
self.sheet.range_set_border(range, border_mask, border_width)
def _color_changed_cb(self, widget, data=None):
# Bug in GtkSheet?: the color must be allocated with the system's
# colormap, else it is ignored
if data is None:
return
color = widget.get_color()
_range = self.sheet.props.selected_range
if data == "f":
self.sheet.range_set_foreground(_range, color)
else:
self.sheet.range_set_background(_range, color)
def main():
w = TestWindow()
w.connect("delete-event", lambda x,y: gtk.main_quit())
gtk.main()
if __name__=='__main__':
main()
| gpl-2.0 | 9,136,060,946,695,285,000 | 39.253425 | 90 | 0.561766 | false | 3.80758 | false | false | false |
nash-x/hws | nova/huawei/scheduler/filters/disk_filter.py | 1 | 2145 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.filters import disk_filter
from nova.huawei import utils as h_utils
LOG = logging.getLogger(__name__)
class HuaweiDiskFilter(disk_filter.DiskFilter):
"""Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
        # deep copy filter_properties to avoid changing the original
filter_properties_tmp = copy.deepcopy(filter_properties)
context = filter_properties_tmp['context']
instance = filter_properties_tmp['request_spec']['instance_properties']
if h_utils.is_boot_from_volume(context, instance):
# just process local disk(ephemeral and swap), so set
# root_gb to zero
filter_properties_tmp.get('instance_type')['root_gb'] = 0
# if the request disk size is zero, we should return true.
# In negative free disk size condition, the instance booted volume
        # is not created successfully.
instance_type = filter_properties.get('instance_type')
requested_disk = (1024 * (instance_type['ephemeral_gb']) +
instance_type['swap'])
if requested_disk == 0:
return True
return super(HuaweiDiskFilter, self).host_passes(host_state,
filter_properties_tmp)
| apache-2.0 | -566,471,726,499,216,700 | 39.471698 | 79 | 0.660606 | false | 4.41358 | false | false | false |
pglomski/shopnotes | drill_speed_chart.py | 1 | 2778 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Produce a custom twist drill plot'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
# set some rcParams
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.major.pad'] = 10
mpl.rcParams['xtick.direction'] = 'inout'
mpl.rcParams['xtick.labelsize'] = 26
mpl.rcParams['ytick.direction'] = 'inout'
mpl.rcParams['ytick.labelsize'] = 20
# define the constants for our chart
materials = [
('Acrylic' , 650 , 'c' , '-' ) ,
('Aluminum' , 300 , 'b' , '-' ) ,
('Brass' , 200 , 'g' , '-' ) ,
('LC Steel' , 110 , 'k' , '-' ) ,
('Wood' , 100 , 'brown' , '-' ) ,
('MC Steel' , 80 , 'darkgray' , '-' ) ,
('HC Steel' , 60 , 'lightgray' , '-' ) ,
('Stainless' , 50 , 'purple' , '-' ) ,
]
drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] #rpm
speed_lims = (200., 4000.) # rpm
max_in = 1. # in.
incr = 1./16. # in.
im_sz = 25. # in.
ratio = 8.5/11.
fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600)
fig.patch.set_alpha(0)
# generate a vector of drill bit diameter
x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in.
# calculate the drill speed curve for each material type and plot the curve
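# spindle speed (rpm) = 12 * cutting speed (ft/min) / (pi * bit diameter (in.))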
for name, speed, color, linestyle in materials:
plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle)
ax = plt.gca()
# adjust the axis tick locators to match drill press speeds
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d'))
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_ylim(speed_lims)
# set the drill diameter locators and format the ticks with LaTeX
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr))
ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim((incr, max_in))
ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' ,
r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' ,
r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' ,
r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ]
ax.xaxis.set_ticklabels(ticks)
# Add the Texts
plt.xlabel('Bit Diameter (in.)', fontsize=26)
plt.ylabel('Drill Speed (rpm)' , fontsize=26)
plt.title('Twist Drill Speeds' , fontsize=50)
plt.legend(ncol=2, loc=3, fontsize=40)
plt.grid('on')
plt.savefig('drill_speed_chart.png')
| agpl-3.0 | 6,025,454,394,184,277,000 | 35.077922 | 102 | 0.569114 | false | 2.603561 | false | false | false |
crmorse/weewx-waterflow | bin/weedb/mysql.py | 1 | 9153 | #
# Copyright (c) 2012 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision$
# $Author$
# $Date$
#
"""Driver for the MySQL database"""
import decimal
import MySQLdb
import _mysql_exceptions
from weeutil.weeutil import to_bool
import weedb
def connect(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Connect to the specified database"""
return Connection(host=host, user=user, password=password, database=database, **kwargs)
def create(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Create the specified database. If it already exists,
an exception of type weedb.DatabaseExists will be thrown."""
# Open up a connection w/o specifying the database.
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
# An exception will get thrown if the database already exists.
try:
# Now create the database.
cursor.execute("CREATE DATABASE %s" % (database,))
except _mysql_exceptions.ProgrammingError:
# The database already exists. Change the type of exception.
raise weedb.DatabaseExists("Database %s already exists" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def drop(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Drop (delete) the specified database."""
# Open up a connection
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
try:
cursor.execute("DROP DATABASE %s" % database)
except _mysql_exceptions.OperationalError:
raise weedb.NoDatabase("""Attempt to drop non-existent database %s""" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Connection(weedb.Connection):
"""A wrapper around a MySQL connection object."""
def __init__(self, host='localhost', user='', password='', database='', **kwargs):
"""Initialize an instance of Connection.
Parameters:
host: IP or hostname with the mysql database (required)
user: User name (required)
password: The password for the username (required)
database: The database to be used. (required)
kwargs: Any extra arguments you may wish to pass on to MySQL (optional)
If the operation fails, an exception of type weedb.OperationalError will be raised.
"""
try:
connection = MySQLdb.connect(host=host, user=user, passwd=password, db=database, **kwargs)
except _mysql_exceptions.OperationalError, e:
# The MySQL driver does not include the database in the
# exception information. Tack it on, in case it might be useful.
raise weedb.OperationalError(str(e) + " while opening database '%s'" % (database,))
weedb.Connection.__init__(self, connection, database, 'mysql')
# Allowing threads other than the main thread to see any transactions
# seems to require an isolation level of READ UNCOMMITTED.
self.query("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED")
def cursor(self):
"""Return a cursor object."""
# The implementation of the MySQLdb cursor is lame enough that we are
# obliged to include a wrapper around it:
return Cursor(self)
def tables(self):
"""Returns a list of tables in the database."""
table_list = list()
try:
# Get a cursor directly from MySQL
cursor = self.connection.cursor()
cursor.execute("""SHOW TABLES;""")
while True:
row = cursor.fetchone()
if row is None: break
# Extract the table name. In case it's in unicode, convert to a regular string.
table_list.append(str(row[0]))
finally:
cursor.close()
return table_list
def genSchemaOf(self, table):
"""Return a summary of the schema of the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
try:
# Get a cursor directly from MySQL:
cursor = self.connection.cursor()
# MySQL throws an exception if you try to show the columns of a
# non-existing table
try:
cursor.execute("""SHOW COLUMNS IN %s;""" % table)
except _mysql_exceptions.ProgrammingError, e:
# Table does not exist. Change the exception type:
raise weedb.OperationalError(e)
irow = 0
while True:
row = cursor.fetchone()
if row is None: break
# Append this column to the list of columns.
colname = str(row[0])
if row[1].upper()=='DOUBLE':
coltype = 'REAL'
elif row[1].upper().startswith('INT'):
coltype = 'INTEGER'
elif row[1].upper().startswith('CHAR'):
coltype = 'STR'
else:
coltype = str(row[1]).upper()
is_primary = True if row[3] == 'PRI' else False
yield (irow, colname, coltype, to_bool(row[2]), row[4], is_primary)
irow += 1
finally:
cursor.close()
def columnsOf(self, table):
"""Return a list of columns in the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
column_list = [row[1] for row in self.genSchemaOf(table)]
return column_list
def begin(self):
"""Begin a transaction."""
self.query("START TRANSACTION")
def commit(self):
try:
weedb.Connection.commit(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def rollback(self):
try:
weedb.Connection.rollback(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def query(self, *args, **kwargs):
try:
self.connection.query(*args, **kwargs)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Cursor(object):
"""A wrapper around the MySQLdb cursor object"""
def __init__(self, connection):
"""Initialize a Cursor from a connection.
connection: An instance of db.mysql.Connection"""
# Get the MySQLdb cursor and store it internally:
self.cursor = connection.connection.cursor()
def execute(self, sql_string, sql_tuple=() ):
"""Execute a SQL statement on the MySQL server.
sql_string: A SQL statement to be executed. It should use ? as
a placeholder.
sql_tuple: A tuple with the values to be used in the placeholders."""
# MySQL uses '%s' as placeholders, so replace the ?'s with %s
mysql_string = sql_string.replace('?','%s')
try:
# Convert sql_tuple to a plain old tuple, just in case it actually
# derives from tuple, but overrides the string conversion (as is the
# case with a TimeSpan object):
self.cursor.execute(mysql_string, tuple(sql_tuple))
except (_mysql_exceptions.OperationalError, _mysql_exceptions.ProgrammingError), e:
raise weedb.OperationalError(e)
return self
def fetchone(self):
# Get a result from the MySQL cursor, then run it through the massage
# filter below
return massage(self.cursor.fetchone())
def close(self):
try:
self.cursor.close()
del self.cursor
except:
pass
#
# Supplying functions __iter__ and next allows the cursor to be used as an iterator.
#
def __iter__(self):
return self
def next(self):
result = self.fetchone()
if result is None:
raise StopIteration
return result
#
# This is a utility function for converting a result set that might contain
# longs or decimal.Decimals (which MySQLdb uses) to something containing just ints.
#
def massage(seq):
# Return the massaged sequence if it exists, otherwise, return None
if seq is not None:
return [int(i) if isinstance(i, long) or isinstance(i,decimal.Decimal) else i for i in seq]
| gpl-3.0 | 4,721,289,113,010,788,000 | 36.979253 | 102 | 0.579919 | false | 4.629742 | false | false | false |
papaloizouc/peacehack | peacehack/theapp/migrations/0001_initial.py | 1 | 5492 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CrazyObject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ActionGeo_ADM1Code', models.CharField(max_length=10, null=True, blank=True)),
('ActionGeo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FeatureID', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FullName', models.CharField(max_length=200, null=True, blank=True)),
('ActionGeo_Lat', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_Long', models.TextField(null=True, blank=True)),
('ActionGeo_Type', models.TextField(null=True, blank=True)),
('Actor1Code', models.TextField(null=True, blank=True)),
('Actor1CountryCode', models.TextField(null=True, blank=True)),
('Actor1EthnicCode', models.TextField(null=True, blank=True)),
('Actor1Geo_ADM1Code', models.TextField(null=True, blank=True)),
('Actor1Geo_CountryCode', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FullName', models.TextField(null=True, blank=True)),
('Actor1Geo_Lat', models.TextField(null=True, blank=True)),
('Actor1Geo_Long', models.TextField(null=True, blank=True)),
('Actor1Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor1KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Name', models.TextField(null=True, blank=True)),
('Actor1Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2EthnicCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_ADM1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor2Geo_FullName', models.TextField(null=True, blank=True)),
('Actor2Geo_Lat', models.TextField(null=True, blank=True)),
('Actor2Geo_Long', models.TextField(null=True, blank=True)),
('Actor2Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor2KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Name', models.TextField(null=True, blank=True)),
('Actor2Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('AvgTone', models.TextField(null=True, blank=True)),
('DATEADDED', models.IntegerField(null=True, blank=True)),
('EventBaseCode', models.IntegerField(null=True, blank=True)),
('EventCode', models.IntegerField(null=True, blank=True)),
('EventRootCode', models.IntegerField(null=True, blank=True)),
('FractionDate', models.TextField(null=True, blank=True)),
('GLOBALEVENTID', models.IntegerField(null=True, blank=True)),
('GoldsteinScale', models.TextField(null=True, blank=True)),
('IsRootEvent', models.IntegerField(null=True, blank=True)),
('MonthYear', models.IntegerField(null=True, blank=True)),
('NumArticles', models.IntegerField(null=True, blank=True)),
('NumMentions', models.IntegerField(null=True, blank=True)),
('NumSources', models.IntegerField(null=True, blank=True)),
('QuadClass', models.IntegerField(null=True, blank=True)),
('SOURCEURL', models.TextField(null=True, blank=True)),
('SQLDATE', models.IntegerField(null=True, blank=True)),
('Year', models.IntegerField(null=True, blank=True)),
('Day', models.IntegerField(null=True, blank=True)),
('Month', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
| gpl-2.0 | 5,515,570,037,667,942,000 | 65.97561 | 114 | 0.599417 | false | 3.922857 | false | false | false |
ashishtilokani/Cloaking-Detection-Tool | googleBot/googleBot/spiders/scrape2.py | 1 | 1236 | from scrapy.selector import HtmlXPathSelector
from scrapy.spider import Spider
import html2text
import re
import os.path
class scrape(Spider):
name = "googleBot2"
start_urls = []
with open('/home/ashish/Desktop/CloakingDetectionTool/url.txt','r') as f:
for line in f:
l=line.replace("/", "_")
try:
          # use a separate name so the url.txt handle bound by the with-statement
          # above is not shadowed while probing for an existing results file
          done_file=open('/home/ashish/Desktop/CloakingDetectionTool/c2/'+ l + '.txt','r')
          done_file.close()
except:
start_urls.append(line)
def parse(self, response):
regex = re.compile('[^A-Za-z0-9_]')
#First parameter is the replacement, second parameter is your input string
d={}
l=(response.url).replace("/", "_")
f=open('/home/ashish/Desktop/CloakingDetectionTool/c2/'+ l + '.txt','w')
terms=[]
terms = (response.body).split()
c=0
for word in terms:
word=regex.sub('', word)
if word not in d:
d[word]=1
f.write(word)
f.write(' ')
c=1
if c==0: #empty
f.write(' ')
f.write('\n')
f.close()
| mit | -6,094,454,040,259,034,000 | 29.146341 | 88 | 0.486246 | false | 3.92381 | false | false | false |
mprinc/McMap | src/scripts/CSN_Archive/check_object_names.py | 1 | 4677 | #!/usr/bin/env python
# Copyright (c) 2015, Scott D. Peckham
#------------------------------------------------------
# S.D. Peckham
# July 9, 2015
#
# Tool to extract the object part of every CSDMS Standard
# Variable Name and generate a list of objects that
# includes those as well as all parent objects.
#
# Example of use at a Unix prompt:
#
# % ./check_object_names.py CSN_VarNames_v0.82.txt
#------------------------------------------------------
#
# Functions:
# check_objects()
#
#------------------------------------------------------
import os.path
import sys
#------------------------------------------------------
def check_objects( in_file='CSN_VarNames_v0.82.txt' ):
#--------------------------------------------------
# Open input file that contains copied names table
#--------------------------------------------------
try:
in_unit = open( in_file, 'r' )
except:
print 'SORRY: Could not open TXT file named:'
print ' ' + in_file
#-------------------------
# Open new CSV text file
#-------------------------
## pos = in_file.rfind('.')
## prefix = in_file[0:pos]
## out_file = prefix + '.ttl'
out_file = 'All_Object_Names.txt'
#-------------------------------------------
OUT_EXISTS = os.path.exists( out_file )
if (OUT_EXISTS):
print 'SORRY, A text file with the name'
print ' ' + out_file
print ' already exists.'
return
out_unit = open( out_file, 'w' )
#---------------------------
# Parse all variable names
#---------------------------
n_objects = 0
object_list1 = list()
object_list2 = list()
while (True):
#------------------------------
# Read data line from in_file
#------------------------------
line = in_unit.readline()
if (line == ''):
break
#--------------------------------------------------
# Write object and quantity fullnames to TTL file
#--------------------------------------------------
line = line.strip() # (strip leading/trailing white space)
main_parts = line.split('__')
object_fullname = main_parts[0]
# quantity_fullname = main_parts[1]
#------------------------------------
# Append object name to object_list
#------------------------------------
object_list1.append( object_fullname )
object_list2.append( object_fullname )
#------------------------------------------------
# Append all parent object names to object_list
#------------------------------------------------
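        # e.g. (illustrative name) a fullname such as "atmosphere_bottom_air"
        # also contributes its parents "atmosphere_bottom" and "atmosphere".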
object_name = object_fullname
while (True):
pos = object_name.rfind('_')
if (pos < 0):
break
object_name = object_name[:pos]
object_list2.append( object_name )
#---------------------------------------------
# Create sorted lists of unique object names
# Not fastest method, but simple.
#---------------------------------------------
old_list = sorted( set(object_list1) )
new_list = sorted( set(object_list2) )
n_objects1 = len( old_list )
n_objects2 = len( new_list )
#--------------------------------------------
# Write complete object list to output file
#--------------------------------------------
for k in xrange( n_objects2 ):
out_unit.write( new_list[k] + '\n' )
#----------------------
# Close the input file
#----------------------
in_unit.close()
#----------------------------
# Close the TXT output file
#----------------------------
out_unit.close()
print 'Finished checking all object names.'
print 'Number of old object names =', n_objects1, '.'
print 'Number of new object names =', n_objects2, '.'
print ' '
# check_objects()
#------------------------------------------------------
if (__name__ == "__main__"):
#-----------------------------------------------------
# Note: First arg in sys.argv is the command itself.
#-----------------------------------------------------
n_args = len(sys.argv)
if (n_args < 2):
print 'ERROR: This tool requires an input'
print ' text file argument.'
print 'sys.argv =', sys.argv
print ' '
elif (n_args == 2):
check_objects( sys.argv[1] )
else:
print 'ERROR: Invalid number of arguments.'
#-----------------------------------------------------------------------
| mit | -1,015,613,779,617,027,700 | 32.407143 | 94 | 0.383579 | false | 4.753049 | false | false | false |
PrFalken/exaproxy | lib/exaproxy/icap/response.py | 1 | 2403 |
class ICAPResponse (object):
def __init__ (self, version, code, status, headers, icap_header, http_header):
self.version = version
self.code = code
self.status = status
self.headers = headers
icap_len = len(icap_header)
http_len = len(http_header)
icap_end = icap_len
if http_header:
http_len_string = '%x\n' % http_len
http_string = http_len_string + http_header + '0\n'
http_offset = icap_end + len(http_len_string)
http_end = http_offset + http_len
else:
http_string = http_header
http_offset = icap_end
http_end = icap_end
self.response_view = memoryview(icap_header + http_string)
self.icap_view = self.response_view[:icap_end]
self.http_view = self.response_view[http_offset:http_end]
@property
def response_string (self):
return self.response_view.tobytes()
@property
def icap_header (self):
return self.icap_view.tobytes()
@property
def http_header (self):
return self.http_view.tobytes()
@property
def pragma (self):
return self.headers.get('pragma', {})
@property
def is_permit (self):
return False
@property
def is_modify (self):
return False
@property
def is_content (self):
return False
@property
def is_intercept (self):
return False
class ICAPRequestModification (ICAPResponse):
def __init__ (self, version, code, status, headers, icap_header, http_header, intercept_header=None):
ICAPResponse.__init__(self, version, code, status, headers, icap_header, http_header)
self.intercept_header = intercept_header
@property
def is_permit (self):
return self.code == 304
@property
def is_modify (self):
return self.code == 200 and self.intercept_header is None
@property
def is_intercept (self):
return self.code == 200 and self.intercept_header is not None
class ICAPResponseModification (ICAPResponse):
@property
def is_content (self):
return self.code == 200
class ICAPResponseFactory:
def __init__ (self, configuration):
self.configuration = configuration
def create (self, version, code, status, headers, icap_header, request_header, response_header, intercept_header=None):
if response_header:
response = ICAPResponseModification(version, code, status, headers, icap_header, response_header)
else:
response = ICAPRequestModification(version, code, status, headers, icap_header, request_header, intercept_header=intercept_header)
return response
| bsd-2-clause | -2,532,343,003,033,045,000 | 23.520408 | 133 | 0.714524 | false | 3.128906 | false | false | false |
yoe/veyepar | dj/scripts/enc.py | 1 | 23477 | #!/usr/bin/python
"""
assembles raw cuts into final, titles, tweaks audio, encodes to format for upload.
"""
import re
import os
import sys
import subprocess
import xml.etree.ElementTree
from mk_mlt import mk_mlt
import pprint
from process import process
from main.models import Client, Show, Location, Episode, Raw_File, Cut_List
class enc(process):
ready_state = 2
def mk_title_svg(self, raw_svg, texts):
"""
Make a title slide by filling in a pre-made svg with name/authors.
return: svg
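        texts maps SVG element ids in the template to replacement strings,
        e.g. (illustrative) {'title': 'My Talk', 'authors': 'J. Doe',
        'license': 'cc/by-sa.svg'}; ids missing from the template are skipped.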
"""
tree = xml.etree.ElementTree.XMLID(raw_svg)
for key in texts:
if self.options.verbose:
print("looking for:", key)
            # tolerate template where tokens have been removed
if key in tree[1]:
if key == "license":
# CC license image
if self.options.verbose:
print("found in svg:", tree[1][key])
print("replacing with:", texts[key])
t = tree[1][key]
# import code; code.interact(local=locals())
if texts[key] is None:
# del(tree[1][key])
# print tree[1].has_key(key)
tree[1][key].clear()
else:
t.set('{http://www.w3.org/1999/xlink}href', texts[key])
elif key == "date":
if self.options.verbose:
print("found in svg:", tree[1][key].text)
print("replacing with:", re.split(',',texts[key])[0]) # .encode()
tree[1][key].text = re.split(',',texts[key])[0]
else:
if self.options.verbose:
print("found in svg:", tree[1][key].text)
print("replacing with:", texts[key]) # .encode()
tree[1][key].text = texts[key]
# cooked_svg = xml.etree.ElementTree.tostring(tree[0])
# print "testing...", "license" in cooked_svg
if 'presenternames' in tree[1]:
# some people like to add spiffy text near the presenter name(s)
if texts['authors']:
# prefix = u"Featuring" if "," in texts['authors'] else "By"
# tree[1]['presenternames'].text=u"%s %s" % (prefix,texts['authors'])
tree[1]['presenternames'].text = texts['authors']
else:
                # remove the text (there is a placeholder to make editing sane)
tree[1]['presenternames'].text = ""
cooked_svg = xml.etree.ElementTree.tostring(tree[0]).decode('ascii')
return cooked_svg
def get_title_text(self, episode):
# lets try putting (stuff) on a new line
title = episode.name
authors = episode.authors
if episode.show.slug == 'write_docs_na_2016':
title = title.upper()
authors = authors.upper()
if False and episode.show.slug != 'pygotham_2015' and len(title) > 80: # crazy long titles need all the lines
title2 = ''
elif ": " in title: # the space keeps 9:00 from breaking
pos = title.index(":") + 1
title, title2 = title[:pos], title[pos:].strip()
elif " - " in title:
# error if there is more than 1.
title, title2 = title.split(' - ')
elif " -- " in title:
# error if there is more than 1.
title, title2 = title.split(' -- ')
elif " (" in title:
pos = title.index(" (")
# +1 skip space in " ("
title, title2 = title[:pos], title[pos + 1:]
elif " using " in title:
pos = title.index(" using ")
title, title2 = title[:pos], title[pos + 1:]
elif ";" in title:
pos = title.index(";") + 1
title, title2 = title[:pos], title[pos:].strip()
elif "? " in title: # ?(space) to not break on 'can you?'
pos = title.index("?") + 1
title, title2 = title[:pos], title[pos:].strip()
elif ". " in title:
pos = title.index(". ") + 1
title, title2 = title[:pos], title[pos:].strip()
else:
title2 = ""
if episode.license:
license = "cc/{}.svg".format(episode.license.lower())
else:
license = None
if episode.tags:
tags = episode.tags.split(',')
tag1 = tags[0]
else:
tags = []
tag1 = ''
"""
# split authors over two objects
# breaking on comma, not space.
if ',' in authors:
authors = authors.split(', ')
author2 = ', '.join(authors[1:])
authors = authors[0].strip()
else:
author2 = ''
"""
author2 = ''
date = episode.start.strftime("%B %-d, %Y")
# DebConf style
# date = episode.start.strftime("%Y-%m-%-d")
texts = {
'client': episode.show.client.name,
'show': episode.show.name,
'title': title,
'title2': title2,
'tag1': tag1,
'authors': authors,
'author2': author2,
'presentertitle': "",
'twitter_id': episode.twitter_id,
'date': date,
'time': episode.start.strftime("%H:%M"),
'license': license,
'room': episode.location.name,
}
return texts
def svg2png(self, svg_name, png_name, episode):
"""
Make a title slide png file.
melt uses librsvg which doesn't support flow,
        which is needed for long titles, so render it to a .png using inkscape
"""
# create png file
# inkscape does not return an error code on failure
# so clean up previous run and
# check for the existance of a new png
if os.path.exists(png_name):
os.remove(png_name)
cmd = ["inkscape", svg_name,
"--export-png", png_name,
# "--export-width", "720",
]
ret = self.run_cmds(episode, [cmd])
ret = os.path.exists(png_name)
# if self.options.verbose: print cooked_svg
if self.options.verbose:
print(png_name)
if not ret:
print("svg:", svg_name)
png_name = None
return png_name
def mk_title(self, episode):
# make a title slide
# if we find titles/custom/(slug).svg, use that
        # else make one from the template
custom_svg_name = os.path.join( "..",
"custom", "titles", episode.slug + ".svg")
if self.options.verbose: print("custom:", custom_svg_name)
abs_path = os.path.join( self.show_dir, "tmp", custom_svg_name )
if os.path.exists(abs_path):
# cooked_svg_name = custom_svg_name
cooked_svg_name = abs_path
else:
svg_name = episode.show.client.title_svg
print(svg_name)
template = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"bling",
svg_name)
raw_svg = open(template).read()
# happy_filename = episode.slug.encode('utf-8')
happy_filename = episode.slug
# happy_filename = ''.join([c for c in happy_filename if c.isalpha()])
# title_base = os.path.join(self.show_dir, "titles", happy_filename)
title_base = os.path.join("..", "titles", happy_filename)
texts = self.get_title_text(episode)
cooked_svg = self.mk_title_svg(raw_svg, texts)
# save svg to a file
# strip 'broken' chars because inkscape can't handle the truth
# output_base=''.join([ c for c in output_base if c.isalpha()])
# output_base=''.join([ c for c in output_base if ord(c)<128])
# output_base=output_base.encode('utf-8','ignore')
cooked_svg_name = os.path.join(
self.show_dir, "titles", '{}.svg'.format(episode.slug))
open(cooked_svg_name, 'w').write(cooked_svg)
png_name = os.path.join( "..",
"titles", '{}.png'.format(episode.slug))
abs_path = os.path.join( self.show_dir, "tmp", png_name )
title_img = self.svg2png(cooked_svg_name, abs_path, episode)
if title_img is None:
print("missing title png")
return False
return png_name
def get_params(self, episode, rfs, cls):
"""
assemble a dict of params to send to mk_mlt
mlt template, title screen image,
filter parameters (currently just audio)
and cutlist+raw filenames
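        Sketch of the returned dict (keys taken from the helpers below,
        values illustrative):
            {'title_img': '../titles/<slug>.png',
             'foot_img':  '../assets/<credits image>',
             'clips': [{'id', 'filename', 'in', 'out'}, ...],
             'cuts':  [{'id', 'filename', 'in', 'out', 'length',
                        'channelcopy', 'normalize', 'video_delay'}, ...]}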
"""
def get_title(episode):
# if we find show_dir/custom/titles/(slug).svg, use that
            # else make one from the template
custom_png_name = os.path.join(
self.show_dir, "custom", "titles", episode.slug + ".png")
print("custom:", custom_png_name)
if os.path.exists(custom_png_name):
title_img = custom_png_name
else:
title_img = self.mk_title(episode)
return title_img
def get_foot(episode):
credits_img = episode.show.client.credits
credits_pathname = os.path.join("..", "assets", credits_img )
return credits_pathname
def get_clips(rfs, ep):
"""
return list of possible input files
            this may get the files and store them locally.
start/end segments are under get_cuts.
ps. this is not used for encoding,
just shows in ShotCut for easy dragging onto the timeline.
"""
clips = []
for rf in rfs:
clip = {'id': rf.id }
# if rf.filename.startswith('\\'):
# rawpathname = rf.filename
# else:
raw_pathname = os.path.join( "../dv",
rf.location.slug, rf.filename)
# self.episode_dir, rf.filename)
# check for missing input file
# typically due to incorrect fs mount
abs_path = os.path.join(
self.show_dir, "tmp", raw_pathname)
if not os.path.exists(abs_path):
print(( 'raw_pathname not found: "{}"'.format(
abs_path)))
return False
clip['filename']=raw_pathname
# trim start/end based on episode start/end
if rf.start < ep.start < rf.end:
# if the ep start falls durring this clip,
# trim it
d = ep.start - rf.start
clip['in']="00:00:{}".format(d.total_seconds())
else:
clip['in']=None
# if "mkv" in rf.filename:
# import code; code.interact(local=locals())
if rf.start < ep.end < rf.end:
# if the ep end falls durring this clip,
d = ep.end - rf.start
clip['out']="00:00:{}".format(d.total_seconds())
else:
clip['out']=None
pprint.pprint(clip)
clips.append(clip)
return clips
def get_cuts(cls):
"""
gets the list of cuts.
input file, start, end, filters
ps, does not reference the clips above.
"""
def hms_to_clock(hms):
"""
Converts what media players show h:m:s
to the mlt time format h:m:s.s
for more on this:
http://mltframework.blogspot.com/2012/04/time-properties.html
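                e.g. "45" -> "0:45.0", "1:23" -> "1:23.0",
                     "1:23.5" -> "1:23.5" (already in clock form)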
"""
if not hms:
return None
if ":" not in hms:
hms = "0:" + hms
if "." not in hms:
hms = hms + ".0"
return hms
cuts = []
for cl in cls:
cut = {}
cut['id'] = cl.id
rawpathname = os.path.join( "../dv",
cl.raw_file.location.slug, cl.raw_file.filename)
# self.episode_dir, cl.raw_file.filename)
# print(rawpathname)
cut['filename'] = rawpathname
# set start/end on the clips if they are set in the db
# else None
cut['in']=hms_to_clock(cl.start)
cut['out']=hms_to_clock(cl.end)
cut['length'] = cl.duration()
if cl.episode.channelcopy:
cut['channelcopy'] = cl.episode.channelcopy
else:
cut['channelcopy']='01'
if cl.episode.normalise:
cut['normalize'] = cl.episode.normalise
else:
cut['normalize']='-12.0'
cut['video_delay']='0.0'
cuts.append(cut)
return cuts
params = {}
params['title_img'] = get_title(episode)
params['foot_img'] = get_foot(episode)
params['clips'] = get_clips(rfs, episode)
params['cuts'] = get_cuts(cls)
return params
def enc_all(self, mlt_pathname, episode):
def enc_one(ext):
out_pathname = os.path.join(
self.show_dir, ext, "%s.%s" % (episode.slug, ext))
if ext == 'webm':
parms = {
'dv_format': self.options.dv_format,
'mlt': mlt_pathname,
'out': out_pathname,
'threads': self.options.threads,
'test': '',
}
# cmds=["melt %s -profile dv_ntsc -consumer avformat:%s progress=1 acodec=libvorbis ab=128k ar=44100 vcodec=libvpx minrate=0 b=600k aspect=@4/3 maxrate=1800k g=120 qmax=42 qmin=10"% (mlt_pathname,out_pathname,)]
cmds = [
"melt -profile %(dv_format)s %(mlt)s force_aspect_ratio=@64/45 -consumer avformat:%(out)s progress=1 threads=0 ab=256k vb=2000k quality=good deadline=good deinterlace=1 deinterlace_method=yadif" % parms]
if ext == 'flv':
cmds = [
"melt %(mlt)s -progress -profile %(dv_format)s -consumer avformat:%(out)s progressive=1 acodec=libfaac ab=96k ar=44100 vcodec=libx264 b=110k vpre=/usr/share/ffmpeg/libx264-hq.ffpreset" % parms]
if ext == 'flac':
# 16kHz/mono
cmds = ["melt -verbose -progress %s -consumer avformat:%s ar=16000" %
(mlt_pathname, out_pathname)]
if ext == 'mp3':
cmds = ["melt -verbose -progress %s -consumer avformat:%s" %
(mlt_pathname, out_pathname)]
if ext == 'mp4':
# High Quality Master 720x480 NTSC
parms = {
'dv_format': self.options.dv_format,
'mlt': mlt_pathname,
'out': out_pathname,
'threads': self.options.threads,
'test': '',
}
cmd = "melt -verbose -progress "\
"-profile %(dv_format)s %(mlt)s "\
"-consumer avformat:%(out)s "\
"threads=%(threads)s "\
"progressive=1 "\
"strict=-2 "\
"properties=x264-high "\
"ab=256k "\
% parms
cmd = cmd.split()
# 2 pass causes no video track, so dumping this.
# need to figure out how to switch between good and fast
if False:
cmds = [cmd + ['pass=1'],
cmd + ['pass=2']]
if True: # even faster!
cmds[0].append('fastfirstpass=1')
else:
cmds = [cmd]
# cmds.append( ["qt-faststart", tmp_pathname, out_pathname] )
if self.options.rm_temp:
cmds.append(["rm", tmp_pathname])
if ext == 'm4v':
# iPhone
tmp_pathname = os.path.join(
self.tmp_dir, "%s.%s" % (episode.slug, ext))
# combine settings from 2 files
ffpreset = open(
'/usr/share/ffmpeg/libx264-default.ffpreset').read().split('\n')
ffpreset.extend(
open('/usr/share/ffmpeg/libx264-ipod640.ffpreset').read().split('\n'))
ffpreset = [i for i in ffpreset if i]
cmd = "melt %(mlt)s -progress -profile %(dv_format)s -consumer avformat:%(tmp)s s=432x320 aspect=@4/3 progressive=1 acodec=libfaac ar=44100 ab=128k vcodec=libx264 b=70k" % parms
cmd = cmd.split()
cmd.extend(ffpreset)
cmds = [cmd]
cmds.append(["qt-faststart", tmp_pathname, out_pathname])
if self.options.rm_temp:
cmds.append(["rm", tmp_pathname])
if ext == 'dv':
out_pathname = os.path.join(
self.tmp_dir, "%s.%s" % (episode.slug, ext))
cmds = ["melt -verbose -progress %s -consumer avformat:%s pix_fmt=yuv411p progressive=1" %
(mlt_pathname, out_pathname)]
if ext == 'ogv':
# melt/ffmpeg ogv encoder is loopy,
# so make a .dv and pass it to ffmpeg2theora
ret = enc_one("dv")
if ret:
dv_pathname = os.path.join(
self.tmp_dir, "%s.dv" % (episode.slug,))
cmds = [
"ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --channels 1 %s -o %s" % (dv_pathname, out_pathname)]
if self.options.rm_temp:
cmds.append(["rm", dv_pathname])
else:
return ret
# run encoder:
if self.options.noencode:
print("sorce files generated, skipping encode.")
if self.options.melt:
self.run_cmd(['melt', mlt_pathname])
ret = False
else:
ret = self.run_cmds(episode, cmds, )
if ret and not os.path.exists(out_pathname):
                print("melt returned %s, but no output: %s" % \
(ret, out_pathname))
ret = False
return ret
ret = True
# create all the formats for uploading
for ext in self.options.upload_formats:
print("encoding to %s" % (ext,))
ret = enc_one(ext) and ret
"""
if self.options.enc_script:
cmd = [self.options.enc_script,
self.show_dir, episode.slug]
ret = ret and self.run_cmds(episode, [cmd])
"""
return ret
def dv2theora(self, episode, dv_path_name, cls, rfs):
"""
Not used any more.
transcode dv to ogv
"""
oggpathname = os.path.join(
self.show_dir, "ogv", "%s.ogv" % episode.slug)
# cmd="ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --speedlevel 0 --optimize --keyint 256 --channels 1".split()
cmd = "ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --keyint 256 --channels 1".split()
cmd += ['--output', oggpathname]
cmd += [dv_path_name]
return cmd
def process_ep(self, episode):
ret = False
cls = Cut_List.objects.filter(
episode=episode, apply=True).order_by('sequence')
if cls:
# get list of raw footage for this episode
rfs = Raw_File.objects. \
filter(cut_list__episode=episode).\
exclude(trash=True).distinct()
# get a .mlt file for this episode (mlt_pathname)
# look for custom/slug.mlt and just use it,
# else build one from client.template_mlt
mlt_pathname = os.path.join(
self.show_dir, "custom",
"{}.mlt".format(episode.slug))
if os.path.exists(mlt_pathname):
print(("found custom/slug.mlt:\n{}".format( mlt_pathname )))
ret = True
else:
template_mlt = episode.show.client.template_mlt
mlt_pathname = os.path.join(self.show_dir,
"mlt", "%s.mlt" % episode.slug)
params = self.get_params(episode, rfs, cls )
pprint.pprint(params)
print((2, mlt_pathname))
ret = mk_mlt( template_mlt, mlt_pathname, params )
if not ret:
episode.state = 0
episode.comment += "\nenc.py mlt = self.mkmlt_1 failed.\n"
episode.save()
return False
# do the final encoding:
# using melt
ret = self.enc_all(mlt_pathname, episode)
if self.options.load_temp and self.options.rm_temp:
cmds = []
for rf in rfs:
dst_path = os.path.join(
self.tmp_dir, episode.slug, os.path.dirname(rf.filename))
rawpathname = os.path.join(
self.tmp_dir, episode.slug, rf.filename)
cmds.append(['rm', rawpathname])
cmds.append(['rmdir', dst_path])
dst_path = os.path.join(self.tmp_dir, episode.slug)
cmds.append(['rmdir', dst_path])
self.run_cmds(episode, cmds)
else:
err_msg = "No cutlist found."
episode.state = 0
episode.comment += "\nenc error: %s\n" % (err_msg,)
episode.save()
print(err_msg)
return False
if self.options.test:
ret = False
# save the episode so the test suite can get the slug
self.episode = episode
return ret
def add_more_options(self, parser):
parser.add_option('--enc-script',
help='encode shell script')
parser.add_option('--noencode', action="store_true",
help="don't encode, just make svg, png, mlt")
parser.add_option('--melt', action="store_true",
help="call melt slug.melt (only w/noencode)")
parser.add_option('--load-temp', action="store_true",
help='copy .dv to temp files')
parser.add_option('--rm-temp',
help='remove large temp files')
parser.add_option('--threads',
help='thread parameter passed to encoder')
def add_more_option_defaults(self, parser):
parser.set_defaults(threads=0)
if __name__ == '__main__':
p = enc()
p.main()
| mit | 2,883,326,822,299,327,000 | 35.06298 | 226 | 0.481407 | false | 4.092922 | false | false | false |
GoodCloud/johnny-cache | johnny/backends/memcached.py | 1 | 1842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Infinite caching memcached class. Caches forever when passed a timeout
of 0. For Django >= 1.3, this module also provides ``MemcachedCache`` and
``PyLibMCCache``, which use the backends of their respective analogs in
django's default backend modules.
"""
from django.core.cache.backends import memcached
from django.utils.encoding import smart_str
import django
class CacheClass(memcached.CacheClass):
"""By checking ``timeout is None`` rather than ``not timeout``, this
cache class allows for non-expiring cache writes on certain backends,
notably memcached."""
def _get_memcache_timeout(self, timeout=None):
if timeout == 0: return 0 #2591999
return super(CacheClass, self)._get_memcache_timeout(timeout)
if django.VERSION[:2] > (1, 2):
class MemcachedCache(memcached.MemcachedCache):
"""Infinitely Caching version of django's MemcachedCache backend."""
def _get_memcache_timeout(self, timeout=None):
if timeout == 0: return 0 #2591999
return super(MemcachedCache, self)._get_memcache_timeout(timeout)
class PyLibMCCache(memcached.PyLibMCCache):
"""PyLibMCCache version that interprets 0 to mean, roughly, 30 days.
This is because `pylibmc interprets 0 to mean literally zero seconds
<http://sendapatch.se/projects/pylibmc/misc.html#differences-from-python-memcached>`_
rather than "infinity" as memcached itself does. The maximum timeout
memcached allows before treating the timeout as a timestamp is just
under 30 days."""
def _get_memcache_timeout(self, timeout=None):
# pylibmc doesn't like our definition of 0
if timeout == 0: return 2591999
return super(PyLibMCCache, self)._get_memcache_timeout(timeout)
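# A minimal sketch (not part of this module) of selecting these backends in
# Django settings; host/port are assumptions for a local memcached instance:
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'johnny.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#           'TIMEOUT': 0,  # 0 means "cache forever" with these subclasses
#       },
#   }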
| mit | -7,832,698,847,422,093,000 | 46.230769 | 93 | 0.701412 | false | 3.978402 | false | false | false |
freifunk-darmstadt/tools | update-telemetry.py | 1 | 8987 | #!/usr/bin/env python3
import psutil
import os
import json
import re
import itertools
from contextlib import contextmanager
import pprint
import time
import socket
import subprocess
import logging
logger = logging.getLogger(__name__)
def pairwise(iterable):
"s -> (s0,s1), (s2,s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
@contextmanager
def get_socket(host, port):
sock = socket.socket()
sock.settimeout(1)
sock.connect((host, port))
yield sock
sock.close()
@contextmanager
def get_unix_socket(filename):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect(filename)
yield sock
sock.close()
def write_to_graphite(data, prefix='freifunk', hostname=socket.gethostname()):
if '.' in hostname:
hostname = hostname.split('.')[0]
now = time.time()
with get_socket('stats.darmstadt.freifunk.net', 2013) as s:
for key, value in data.items():
line = "%s.%s.%s %s %s\n" % (prefix, hostname, key, value, now)
s.sendall(line.encode('latin-1'))
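# Example for write_to_graphite (illustrative values): a call such as
#   write_to_graphite({'load.1': 0.42}, hostname='gw01')
# sends one plaintext-protocol line like
#   freifunk.gw01.load.1 0.42 1404130000.0
# to stats.darmstadt.freifunk.net:2013.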
def write_to_node_collector(filename, data, patterns, prefix='freifunk'):
patterns = [re.compile(exp) for exp in patterns]
print(data)
updates = []
for metric, value in data.items():
for pattern in patterns:
m = pattern.match(metric)
if m:
groups = m.groupdict()
if all(key in groups for key in ['key']):
updates.append([groups, value])
break
content = []
for update, value in updates:
key = update['key'].replace('.', '_')
sub_key = update.pop('sub_key', None)
if prefix:
key = '{}_{}'.format(prefix, key)
if sub_key:
key += '_' + sub_key
        params = update.copy()
params.pop('key')
params = ','.join(['{}={}'.format(k, v) for k, v in params.items()])
params = '{%s}' % (params)
content.append('{key}{params} {value}'.format(key=key, params=params, value=value))
with open(filename, 'w') as fh:
fh.write('\n'.join(content))
def read_from_fastd_socket(filename):
with get_unix_socket(filename) as client:
try:
strings = []
while True:
s = client.recv(8096)
if not s:
break
strings.append(s.decode('utf-8'))
data = json.loads(''.join(strings))
#pprint.pprint(data['statistics'])
online_peers = len([None for name, d in data['peers'].items() if d['connection']])
return {
'peers.count': len(data['peers']),
'peers.online': online_peers,
'rx.packets': data['statistics']['rx']['packets'],
'rx.bytes': data['statistics']['rx']['bytes'],
'rx.reordered.bytes': data['statistics']['rx_reordered']['bytes'],
'rx.reordered.packets': data['statistics']['rx_reordered']['packets'],
'tx.bytes': data['statistics']['tx']['bytes'],
'tx.packets': data['statistics']['tx']['packets'],
'tx.dropped.bytes': data['statistics']['tx_dropped']['bytes'],
'tx.dropped.packets': data['statistics']['tx_dropped']['packets'],
}
except Exception as e:
print(e)
return {}
def get_fastd_process_stats():
for proc in psutil.process_iter():
if proc.name() == 'fastd':
# 11905: 00000000000000000000000001000000:0035 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 4469598 2 ffff880519be5100 0
drop_count = 0
for proto in ['udp', 'udp6']:
with open('/proc/{}/net/{}'.format(proc.pid, proto), 'r') as fh:
for line in (line.strip() for line in fh.read().split('\n')):
if not line:
continue
if line.startswith('sl'):
continue
parts = line.split(' ')
drop_count += int(parts[-1])
return drop_count
return None
def get_neighbour_table_states(family=socket.AF_INET6):
if family is socket.AF_INET:
family = '-4'
elif family is socket.AF_INET6:
family = '-6'
else:
return
response = subprocess.check_output(
['/bin/ip', family, 'neigh', 'show', 'nud', 'all']
).decode()
states = {'PERMANENT': 0, 'NOARP': 0, 'REACHABLE': 0, 'STALE': 0, 'NONE': 0,
'INCOMPLETE': 0, 'DELAY': 0, 'PROBE': 0, 'FAILED': 0}
for neigh_entry in response.split('\n'):
if not neigh_entry:
continue
state = neigh_entry.split()[-1]
if state not in states:
continue
states[state] += 1
return states
def main():
fastd_sockets = (
('0', '/run/fastd-ffda-vpn.sock'),
('1', '/run/fastd-ffda-vpn1.sock'),
)
device_name_mapping = {
'freifunk': 'ffda-br',
'bat0': 'ffda-bat',
'mesh-vpn': 'ffda-vpn'
}
device_whitelist = [
'eth0',
'ffda-vpn',
'ffda-vpn-1280',
'ffda-vpn-1312',
'ffda-bat',
'ffda-br',
'ffda-transport',
'services',
]
fields = [
'bytes', 'packets', 'errs', 'drop', 'fifo',
'frame', 'compressed', 'multicast',
]
field_format = '(?P<{direction}_{field}>\d+)'
pattern = re.compile(
'^\s*(?P<device_name>[\w-]+):\s+' + '\s+'.join(
itertools.chain.from_iterable((field_format.format(direction=direction, field=field)
for field in fields) for direction in ['rx', 'tx'])
)
)
update = {}
with open('/proc/net/dev') as fh:
lines = fh.readlines()
for line in lines:
m = pattern.match(line)
if m:
groupdict = m.groupdict()
device_name = groupdict.pop('device_name')
device_name = device_name_mapping.get(device_name, device_name)
if device_name in device_whitelist or device_name.endswith('-vpn') or \
device_name.endswith('-bat') or \
device_name.endswith('-br') or \
device_name.endswith('-transport'):
for key, value in groupdict.items():
direction, metric = key.split('_')
update['%s.%s.%s' % (device_name, direction, metric)] = value
with open('/proc/loadavg', 'r') as fh:
line = fh.read()
values = line.split(' ', 3)
        # /proc/loadavg lists the 1, 5 and 15 minute averages in that order
        update['load.1'] = values[0]
        update['load.5'] = values[1]
        update['load.15'] = values[2]
for key in ['count', 'max']:
try:
with open('/proc/sys/net/netfilter/nf_conntrack_%s' % key, 'r') as fh:
update['netfilter.%s' % key] = fh.read().strip()
except IOError as e:
pass
with open('/proc/net/snmp6', 'r') as fh:
for line in fh.readlines():
key, value = line.split(' ', 1)
value = value.strip()
update['ipv6.%s' % key] = value
with open('/proc/net/snmp', 'r') as fh:
for heading, values in pairwise(fh.readlines()):
section, headings = heading.split(':')
headings = headings.strip().split(' ')
_, values = values.split(':')
values = values.strip().split(' ')
for key, value in zip(headings, values):
update['ipv4.%s.%s' % (section, key)] = value
for af, prefix in [(socket.AF_INET, 'ipv4.Neigh'),
(socket.AF_INET6, 'ipv6.Neigh')]:
for state, count in get_neighbour_table_states(af).items():
update['{0}.{1}'.format(prefix, state.lower())] = count
with open('/proc/stat', 'r') as fh:
for line in fh.readlines():
key, value = line.split(' ', 1)
if key == 'ctxt':
update['context_switches'] = value.strip()
break
for name, filename in fastd_sockets:
if not os.path.exists(filename):
continue
data = read_from_fastd_socket(filename)
if len(data) > 0:
update.update({'fastd.%s.%s' % (name, key): value for (key, value) in data.items()})
fastd_drops = get_fastd_process_stats()
if fastd_drops:
update['fastd.drops'] = fastd_drops
#pprint.pprint(update)
write_to_graphite(update)
write_to_node_collector('/dev/shm/telemetry.prom', update, patterns=[
# '^(?P<interface>[^.]+)\.(?P<key>(rx|tx).+)',
'^(?P<key>fastd)\.(?P<fast_instance>.+)\.(?P<sub_key>.+)',
# '^(?P<key>load)\.(?P<period>\d+)'
], prefix='ffda_')
if __name__ == "__main__":
main()
| agpl-3.0 | 6,942,988,969,528,593,000 | 31.327338 | 182 | 0.516858 | false | 3.766555 | false | false | false |
maferelo/saleor | saleor/account/migrations/0001_initial.py | 3 | 19366 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("auth", "0006_require_contenttypes_0002")]
replaces = [("userprofile", "0001_initial")]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text=(
"Designates that this user has all permissions "
"without explicitly assigning them."
),
verbose_name="superuser status",
),
),
("email", models.EmailField(unique=True, max_length=254)),
(
"is_staff",
models.BooleanField(default=False, verbose_name="staff status"),
),
(
"is_active",
models.BooleanField(default=False, verbose_name="active"),
),
(
"password",
models.CharField(
verbose_name="password", max_length=128, editable=False
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
editable=False,
),
),
(
"last_login",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="last login",
editable=False,
),
),
],
options={"db_table": "userprofile_user", "abstract": False},
),
migrations.CreateModel(
name="Address",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"first_name",
models.CharField(max_length=256, verbose_name="first name"),
),
(
"last_name",
models.CharField(max_length=256, verbose_name="last name"),
),
(
"company_name",
models.CharField(
max_length=256,
verbose_name="company or organization",
blank=True,
),
),
(
"street_address_1",
models.CharField(max_length=256, verbose_name="address"),
),
(
"street_address_2",
models.CharField(
max_length=256, verbose_name="address", blank=True
),
),
("city", models.CharField(max_length=256, verbose_name="city")),
(
"postal_code",
models.CharField(max_length=20, verbose_name="postal code"),
),
(
"country",
models.CharField(
max_length=2,
verbose_name="country",
choices=[
("AF", "Afghanistan"),
("AX", "\xc5land Islands"),
("AL", "Albania"),
("DZ", "Algeria"),
("AS", "American Samoa"),
("AD", "Andorra"),
("AO", "Angola"),
("AI", "Anguilla"),
("AQ", "Antarctica"),
("AG", "Antigua And Barbuda"),
("AR", "Argentina"),
("AM", "Armenia"),
("AW", "Aruba"),
("AU", "Australia"),
("AT", "Austria"),
("AZ", "Azerbaijan"),
("BS", "Bahamas"),
("BH", "Bahrain"),
("BD", "Bangladesh"),
("BB", "Barbados"),
("BY", "Belarus"),
("BE", "Belgium"),
("BZ", "Belize"),
("BJ", "Benin"),
("BM", "Bermuda"),
("BT", "Bhutan"),
("BO", "Bolivia"),
("BQ", "Bonaire, Saint Eustatius And Saba"),
("BA", "Bosnia And Herzegovina"),
("BW", "Botswana"),
("BV", "Bouvet Island"),
("BR", "Brazil"),
("IO", "British Indian Ocean Territory"),
("BN", "Brunei Darussalam"),
("BG", "Bulgaria"),
("BF", "Burkina Faso"),
("BI", "Burundi"),
("KH", "Cambodia"),
("CM", "Cameroon"),
("CA", "Canada"),
("CV", "Cape Verde"),
("KY", "Cayman Islands"),
("CF", "Central African Republic"),
("TD", "Chad"),
("CL", "Chile"),
("CN", "China"),
("CX", "Christmas Island"),
("CC", "Cocos (Keeling) Islands"),
("CO", "Colombia"),
("KM", "Comoros"),
("CG", "Congo"),
("CD", "Congo, The Democratic Republic of the"),
("CK", "Cook Islands"),
("CR", "Costa Rica"),
("CI", "C\xf4te D'Ivoire"),
("HR", "Croatia"),
("CU", "Cuba"),
("CW", "Cura\xe7o"),
("CY", "Cyprus"),
("CZ", "Czech Republic"),
("DK", "Denmark"),
("DJ", "Djibouti"),
("DM", "Dominica"),
("DO", "Dominican Republic"),
("EC", "Ecuador"),
("EG", "Egypt"),
("SV", "El Salvador"),
("GQ", "Equatorial Guinea"),
("ER", "Eritrea"),
("EE", "Estonia"),
("ET", "Ethiopia"),
("FK", "Falkland Islands (Malvinas)"),
("FO", "Faroe Islands"),
("FJ", "Fiji"),
("FI", "Finland"),
("FR", "France"),
("GF", "French Guiana"),
("PF", "French Polynesia"),
("TF", "French Southern Territories"),
("GA", "Gabon"),
("GM", "Gambia"),
("GE", "Georgia"),
("DE", "Germany"),
("GH", "Ghana"),
("GI", "Gibraltar"),
("GR", "Greece"),
("GL", "Greenland"),
("GD", "Grenada"),
("GP", "Guadeloupe"),
("GU", "Guam"),
("GT", "Guatemala"),
("GG", "Guernsey"),
("GN", "Guinea"),
("GW", "Guinea-Bissau"),
("GY", "Guyana"),
("HT", "Haiti"),
("HM", "Heard Island And Mcdonald Islands"),
("VA", "Holy See (Vatican City State)"),
("HN", "Honduras"),
("HK", "Hong Kong"),
("HU", "Hungary"),
("IS", "Iceland"),
("IN", "India"),
("ID", "Indonesia"),
("IR", "Iran, Islamic Republic of"),
("IQ", "Iraq"),
("IE", "Ireland"),
("IM", "Isle of Man"),
("IL", "Israel"),
("IT", "Italy"),
("JM", "Jamaica"),
("JP", "Japan"),
("JE", "Jersey"),
("JO", "Jordan"),
("KZ", "Kazakhstan"),
("KE", "Kenya"),
("KI", "Kiribati"),
("KP", "Korea, Democratic People's Republic of"),
("KR", "Korea, Republic of"),
("KW", "Kuwait"),
("KG", "Kyrgyzstan"),
("LA", "Lao People's Democratic Republic"),
("LV", "Latvia"),
("LB", "Lebanon"),
("LS", "Lesotho"),
("LR", "Liberia"),
("LY", "Libya"),
("LI", "Liechtenstein"),
("LT", "Lithuania"),
("LU", "Luxembourg"),
("MO", "Macao"),
("MK", "Macedonia, The Former Yugoslav Republic of"),
("MG", "Madagascar"),
("MW", "Malawi"),
("MY", "Malaysia"),
("MV", "Maldives"),
("ML", "Mali"),
("MT", "Malta"),
("MH", "Marshall Islands"),
("MQ", "Martinique"),
("MR", "Mauritania"),
("MU", "Mauritius"),
("YT", "Mayotte"),
("MX", "Mexico"),
("FM", "Micronesia, Federated States of"),
("MD", "Moldova, Republic of"),
("MC", "Monaco"),
("MN", "Mongolia"),
("ME", "Montenegro"),
("MS", "Montserrat"),
("MA", "Morocco"),
("MZ", "Mozambique"),
("MM", "Myanmar"),
("NA", "Namibia"),
("NR", "Nauru"),
("NP", "Nepal"),
("NL", "Netherlands"),
("NC", "New Caledonia"),
("NZ", "New Zealand"),
("NI", "Nicaragua"),
("NE", "Niger"),
("NG", "Nigeria"),
("NU", "Niue"),
("NF", "Norfolk Island"),
("MP", "Northern Mariana Islands"),
("NO", "Norway"),
("OM", "Oman"),
("PK", "Pakistan"),
("PW", "Palau"),
("PS", "Palestinian Territory, Occupied"),
("PA", "Panama"),
("PG", "Papua New Guinea"),
("PY", "Paraguay"),
("PE", "Peru"),
("PH", "Philippines"),
("PN", "Pitcairn"),
("PL", "Poland"),
("PT", "Portugal"),
("PR", "Puerto Rico"),
("QA", "Qatar"),
("RE", "R\xe9union"),
("RO", "Romania"),
("RU", "Russian Federation"),
("RW", "Rwanda"),
("BL", "Saint Barth\xe9lemy"),
("SH", "Saint Helena, Ascension And Tristan Da Cunha"),
("KN", "Saint Kitts And Nevis"),
("LC", "Saint Lucia"),
("MF", "Saint Martin (French Part)"),
("PM", "Saint Pierre And Miquelon"),
("VC", "Saint Vincent And the Grenadines"),
("WS", "Samoa"),
("SM", "San Marino"),
("ST", "Sao Tome And Principe"),
("SA", "Saudi Arabia"),
("SN", "Senegal"),
("RS", "Serbia"),
("SC", "Seychelles"),
("SL", "Sierra Leone"),
("SG", "Singapore"),
("SX", "Sint Maarten (Dutch Part)"),
("SK", "Slovakia"),
("SI", "Slovenia"),
("SB", "Solomon Islands"),
("SO", "Somalia"),
("ZA", "South Africa"),
("GS", "South Georgia and the South Sandwich Islands"),
("ES", "Spain"),
("LK", "Sri Lanka"),
("SD", "Sudan"),
("SR", "Suriname"),
("SJ", "Svalbard and Jan Mayen"),
("SZ", "Swaziland"),
("SE", "Sweden"),
("CH", "Switzerland"),
("SY", "Syria"),
("TW", "Taiwan"),
("TJ", "Tajikistan"),
("TZ", "Tanzania"),
("TH", "Thailand"),
("TL", "Timor-Leste"),
("TG", "Togo"),
("TK", "Tokelau"),
("TO", "Tonga"),
("TT", "Trinidad And Tobago"),
("TN", "Tunisia"),
("TR", "Turkey"),
("TM", "Turkmenistan"),
("TC", "Turks And Caicos Islands"),
("TV", "Tuvalu"),
("UG", "Uganda"),
("UA", "Ukraine"),
("AE", "United Arab Emirates"),
("GB", "United Kingdom"),
("US", "United States"),
("UM", "United States Minor Outlying Islands"),
("UY", "Uruguay"),
("UZ", "Uzbekistan"),
("VU", "Vanuatu"),
("VE", "Venezuela"),
("VN", "Viet Nam"),
("VG", "Virgin Islands, British"),
("VI", "Virgin Islands, U.S."),
("WF", "Wallis And Futuna"),
("EH", "Western Sahara"),
("YE", "Yemen"),
("ZM", "Zambia"),
("ZW", "Zimbabwe"),
],
),
),
(
"country_area",
models.CharField(
max_length=128, verbose_name="state or province", blank=True
),
),
(
"phone",
models.CharField(
max_length=30, verbose_name="phone number", blank=True
),
),
],
options={"db_table": "userprofile_address"},
),
migrations.AddField(
model_name="user",
name="addresses",
field=models.ManyToManyField(to="account.Address"),
),
migrations.AddField(
model_name="user",
name="default_billing_address",
field=models.ForeignKey(
related_name="+",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="default billing address",
blank=True,
to="account.Address",
null=True,
),
),
migrations.AddField(
model_name="user",
name="default_shipping_address",
field=models.ForeignKey(
related_name="+",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="default shipping address",
blank=True,
to="account.Address",
null=True,
),
),
migrations.AddField(
model_name="user",
name="groups",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Group",
blank=True,
help_text=(
"The groups this user belongs to. "
"A user will get all permissions granted to each of their groups."
),
verbose_name="groups",
),
),
migrations.AddField(
model_name="user",
name="user_permissions",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Permission",
blank=True,
help_text="Specific permissions for this user.",
verbose_name="user permissions",
),
),
]
| bsd-3-clause | 2,425,811,758,527,500,300 | 42.617117 | 86 | 0.291748 | false | 5.11381 | false | false | false |
ag-sc/QALD | 4/scripts/Evaluation.py | 1 | 26257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.dom.minidom as dom
import xml.dom
from decimal import *
import sys
import os
import datetime
#from Numeric import *
# create the document
implement = xml.dom.getDOMImplementation()
################### Global variables ###################
task=None
choosen_tag={}
system_time=0
filename_out_html = None
filename_out_txt = None
system_name=None
configuration=None
testing=False
################### Functions ##########################
def set_system_name(name):
global system_name
system_name=name
def set_configuration(name):
global configuration
configuration=name
def _ausgabe_(ausgabe):
print ausgabe
def set_filename_txt_out(time):
global filename_out_txt
filename_out_txt="upload/out"+str(time)+".txt"
def set_filename_out(time):
global filename_out_html
filename_out_html="upload/out"+str(time)+".html"
def _knoten_auslesen(knoten):
try:
string = knoten.firstChild.data.strip().encode("utf-8")
# print "knoten_auslesen: "+string
return string
except:
# print "Unexpected error:", sys.exc_info()[0]
pass
#def _knoten_auslesen(knoten):
# return eval("%s('%s')" % (knoten.getAttribute("typ"),
# knoten.firstChild.data.strip()))
def lade_musterloesung(dateiname):
d = {}
global choosen_tag
#baum = dom.parse(dateiname.encode( "utf-8" ))
baum = dom.parse(dateiname)
zaehler=1
for eintrag in baum.firstChild.childNodes:
if eintrag.nodeName == "question":
id=(eintrag.attributes["id"]).value
question_text = query = None
answer=[]
for knoten in eintrag.childNodes:
if knoten.nodeName == "text" or knoten.nodeName == "string":
if (knoten.attributes["lang"]).value == "en":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "de":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "es":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "it":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "fr":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "nl":
question_text = _knoten_auslesen(knoten)
# elif knoten.nodeName == "query":
# query=knoten.firstChild.data.strip()
if knoten.nodeName=="answers":
answer_elem_1=[]
for knoten_answer in knoten.childNodes:
                        # here I have to check for optional.
if knoten_answer.nodeName=="answer":
answer_elem=[]
for knoten_answer1 in knoten_answer.childNodes:
for id_loesung,tag_loesung in choosen_tag.iteritems():
if(id==id_loesung):
###########################
#
#
# In QALD3 only uri/boolean/number and date are allowed, so string is "turned off"
#
#
###########################
if knoten_answer1.nodeName == "string" and choosen_tag[id]=="string":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "boolean" and choosen_tag[id]=="boolean":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "number"and choosen_tag[id]=="number":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "date" and choosen_tag[id]=="date":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "uri" and choosen_tag[id]=="uri":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
answer_elem_1.append(answer_elem)
answer.append(answer_elem_1)
# print(answer)
d[id] = [query,question_text,answer]
# print str(d)
return d
def bearbeite_baum(dateiname):
    # insert line breaks so that the parser can cope with the document more easily later on
fobj = open(dateiname, "r")
string=""
for line1 in fobj:
line=str(line1)
line=line.replace('<question','\n<question')
#line=line.replace('<string>','\n<string>')
line=line.replace('</string>','</string>\n')
line=line.replace('</keywords>','</keywords>\n')
line=line.replace('</query>','</query>\n')
line=line.replace('<answers>','<answers>\n')
line=line.replace('<answer>','<answer>\n')
line=line.replace('</answer>','</answer>\n')
line=line.replace('</answers>','</answers>\n')
line=line.replace('</uri>','</uri>\n')
line=line.replace('</boolean>','</boolean>\n')
line=line.replace('</number>','</number>\n')
line=line.replace('</date>','</date>\n')
#line=line.replace('&','&')
string+=line
fobj.close()
# print string
fobj = open(dateiname, "w")
fobj.write(string)
fobj.close()
def lade_baum(dateiname):
d = {}
bearbeite_baum(dateiname)
global choosen_tag
global testing
# print "after bearbeite baum"
baum = dom.parse(dateiname.encode( "utf-8" ))
zaehler=1
# print "after parsing baum"
for eintrag in baum.firstChild.childNodes:
if(zaehler==1):
knoten_id=((eintrag.parentNode).attributes["id"]).value
zaehler=2
# print "after 1"
if eintrag.nodeName == "question":
# print "in question"
id=(eintrag.attributes["id"]).value
# print "id: "+str(id)
question_text = query = None
answer=[]
for knoten in eintrag.childNodes: #
# print "in for knoten in eintrag.childNodes: "
if knoten.nodeName == "text" or knoten.nodeName == "string":
if (knoten.attributes["lang"]).value == "en":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "de":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "es":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "it":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "fr":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "nl":
question_text = _knoten_auslesen(knoten)
# print str(question_txt)
# elif knoten.nodeName == "query":
# query=knoten.firstChild.data.strip()
elif knoten.nodeName=="answers":
try:
answer_elem_1=[]
for knoten_answer in knoten.childNodes:
if knoten_answer.nodeName=="answer":
answer_elem=[]
###########################
#
#
# In QALD3 only uri/boolean/number and date are allowed, so string is "turned off"
#
#
###########################
mehr_als_ein_typ=False
eins=zwei=None
eins=((knoten_answer.childNodes).item(1)).nodeName
if((knoten_answer.childNodes).item(3)):
zwei=((knoten_answer.childNodes).item(3)).nodeName
else:
zwei= None
if(eins==zwei or zwei==None):
mehr_als_ein_typ=False
choosen_tag[id]=((knoten_answer.childNodes).item(1)).nodeName
else:
mehr_als_ein_typ=True
#choosen_tag[id]="string"
choosen_tag[id]="uri"
for knoten_answer1 in knoten_answer.childNodes:
if(knoten_answer1.nodeName!="#text"):
if knoten_answer1.nodeName == "string" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "boolean" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "number" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "date" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "uri" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
#if knoten_answer1.nodeName == choosen_tag[id] and mehr_als_ein_typ==True:
# try:
# answer_elem.append(knoten_answer1.firstChild.data.strip())
# except Exception:
# answer_elem.append(" ")
answer_elem_1.append(answer_elem)
except Exception as inst:
error= "<!doctype html> <html> <head> <title>ERROR</title></head> <body> <p>"+str(type(inst))+"</p><p>"+str(inst.args)+"</p><p>"+str(inst)+"</p><p>"+id+"</p><p>PLEASE CHECK YOUR XML FILE</p></body></html>"
outfile=open(filename_out_html,"w")
# _ausgabe_(filename_out_html)
outfile.write(error)
outfile.close()
choosen_tag[id]="string"
answer_elem_1.append("ERROR IN FILE")
# print "Unexpected error:", sys.exc_info()[0]
# print "9"
answer.append(answer_elem_1)
d[question_text] = [query,id,answer]
# print str(d)
return d
def sortedDictValues2(adict):
keys = adict.keys()
keys.sort()
    return [adict[key] for key in keys]
def _evaluation(loesung, musterloesung, task):
anzahl_bearbeiteter_fragen=0
anzahl_korrekter_antworten=0
anzahl_falscher_antworten=0
falsche_antworten=[]
anzahl_bearbeiteter_fragen=len(loesung)
bewertung_ausgabe={}
#number_answers_goldstandard = 0
number_answers_user = 0
#for question_text, query_loesung in musterloesung.iteritems():
# gold_loesung1=query_loesung[2]
# gold_loesung=gold_loesung1[0]
# number_answer_goldstandard += len(gold_loesung)
for question_text, query_loesung in loesung.iteritems():
anzahl_falscher_frageelemente=anzahl_richtiger_frageelemente=0
R=P=F=0
# print question_text
# print
# print str(query_loesung[2])
answer_loesung1=query_loesung[2]
answer_loesung=answer_loesung1[0]
number_answers_user += len(answer_loesung)
loesung_id=query_loesung[1]
answer_musterloesung1=musterloesung[loesung_id]
answer_musterloesung2=answer_musterloesung1[2]
answer_musterloesung=answer_musterloesung2[0]
#print "user: "+str(answer_loesung)
#print "gold: "+str(answer_musterloesung)
if len(answer_musterloesung) == len(answer_loesung) and len(answer_loesung) == 0:
bewertung_ausgabe[loesung_id]=[question_text,str(1.0),str(1.0),str(1.0)]
anzahl_korrekter_antworten+=1
elif(len(answer_loesung)==0):
# anzahl_falscher_fragen+=1
anzahl_falscher_antworten+=1
falsche_antworten.append(loesung_id)
R=P=F=0
bewertung_ausgabe[loesung_id]=[question_text,str(R),str(P),str(F)]
else:
if(len(answer_musterloesung)>len(answer_loesung)):
anzahl_falscher_antworten+=1
anzahl_falscher_frageelemente+=(len(answer_musterloesung)-len(answer_loesung))
falsche_antworten.append(loesung_id)
for i in range(0,len(answer_loesung)):
for j in range(0,len(answer_musterloesung)):
if(answer_loesung[i]==answer_musterloesung[j]):
anzahl_richtiger_frageelemente+=1
break
if(anzahl_richtiger_frageelemente==0):
R=F=P=0
else:
R1=Decimal(anzahl_richtiger_frageelemente)
R2=Decimal(len(answer_musterloesung))
R=round((R1/R2),5)
P1=R1
P2=Decimal(len(answer_loesung))
P=round((P1/P2),5)
F=round(((2*P*R)/(R+P)),5)
bewertung_ausgabe[loesung_id]=[question_text,str(R),str(P),str(F)]
else:
for i in range(0,len(answer_loesung)):
for j in range(0,len(answer_musterloesung)):
if(answer_loesung[i]==answer_musterloesung[j]):
anzahl_richtiger_frageelemente+=1
break
if(anzahl_richtiger_frageelemente==len(answer_loesung)):
anzahl_korrekter_antworten+=1
else:
anzahl_falscher_antworten+=1
falsche_antworten.append(loesung_id)
if(anzahl_richtiger_frageelemente==0):
R=F=P=0
else:
R1=Decimal(anzahl_richtiger_frageelemente)
R2=Decimal(len(answer_musterloesung))
R=round((R1/R2),5)
P1=R1
P2=Decimal(len(answer_loesung))
P=round((P1/P2),5)
F=round(((2*P*R)/(R+P)),5)
bewertung_ausgabe[loesung_id]=[question_text,str(R),str(P),str(F)]
if(anzahl_korrekter_antworten==0):
fmeasure=recall=precision=0
else:
wert1=Decimal(anzahl_korrekter_antworten)
wert2=Decimal(anzahl_bearbeiteter_fragen)
recall=round(((wert1/len(musterloesung))),5)
precision=round(((wert1/wert2)),5)
fmeasure=round(((2*recall*precision)/(recall+precision)),5)
recall=str(recall)
precision=str(precision)
fmeasure=str(fmeasure)
number_correct_user_answers = anzahl_bearbeiteter_fragen
anzahl_bearbeiteter_fragen=str(anzahl_bearbeiteter_fragen)
anzahl_korrekter_antworten=str(anzahl_korrekter_antworten)
anzahl_falscher_antworten=str(anzahl_falscher_antworten)
############################################################################################
# #
    # Recall    = overall number of correct answers / overall number of gold standard answers
    # Precision = overall number of correct answers / overall number of all answers (given XML)
    # F-Measure = (2*Recall*Precision)/(Recall+Precision)
# #
############################################################################################
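    # Worked example (hypothetical numbers): Recall 0.5 and Precision 0.4
    # give F-Measure = (2*0.5*0.4)/(0.5+0.4) = 0.4/0.9 ~ 0.444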
global_precision=0.0
global_recall=0.0
global_fmeasure=0.0
for id,value in bewertung_ausgabe.iteritems():
tmp = id +";"
x = value[0]
x = x.decode("ascii","ignore")
tmp += x +";"
tmp += str(value[2])+";"
tmp += str(value[1])+";"
tmp += str(value[3])+";"
#print"tmp: "+ tmp
#tmp = (id+";"+str(value[0])+";"+str(value[2])+";"+str(value[1])+";"+str(value[3])+"\n").encode("utf-8")
string = "qald-4_"
if task == 1: string += "multilingual"
if task == 2: string += "biomedical"
if task == 3: string += "hybrid"
string += tmp
global_precision += float(value[2])
global_recall += float(value[1])
if global_recall == 0.0 or global_precision == 0.0:
global_precision = str(0)
global_recall = str(0)
global_fmeasure = str(0)
else:
global_precision = global_precision/len(musterloesung)
global_recall = global_recall/len(musterloesung)
global_fmeasure=str((2*global_recall*global_precision)/(global_precision + global_recall))
global_precision = str(global_precision)
global_recall = str(global_recall)
write_html(string,anzahl_falscher_antworten,anzahl_korrekter_antworten,anzahl_bearbeiteter_fragen,global_fmeasure,global_precision,global_recall,bewertung_ausgabe,falsche_antworten)
def write_txt(anzahl_falscher_antworten,anzahl_korrekter_antworten,anzahl_bearbeiteter_fragen,fmeasure,precision,recall,bewertung_ausgabe,falsche_antworten):
#global system_name, configuration
bla=""
bla=system_name+";"+configuration+"\n"
globale_uebersicht_txt= anzahl_bearbeiteter_fragen+";"+anzahl_korrekter_antworten+";"+anzahl_falscher_antworten+";"+recall+";"+precision+";"+fmeasure+"\n"
string=""
for id,answer in bewertung_ausgabe.iteritems():
question = answer[0]
question = question.decode("ascii","ignore")
string += id+";"+question+";"+answer[1]+";"+answer[2]+";"+answer[3]+"\n"
outfile=open(filename_out_txt,"w")
outfile.write(bla+globale_uebersicht_txt+string)
outfile.close()
_ausgabe_(filename_out_txt)
def write_html(string,anzahl_falscher_antworten,anzahl_korrekter_antworten,anzahl_bearbeiteter_fragen,fmeasure,precision,recall,bewertung_ausgabe,falsche_antworten):
tabelle3="<table class=\"eval\" border=\"1\"><tr><th>Failed questions (IDs)</th></tr>"
string_question =""
for i in range(0,len(falsche_antworten)):
string_question+="<tr><td>"+str(falsche_antworten[i])+"</td></tr>"
end_tabelle3="</table>"
start_table= "<!doctype html> <html> <head> <title>Evaluation of "+string+"</title></head> <body> <p>Evaluation</p><p>Skript Version 5.5</p>"
space="<p></p><p></p><p></p><p></p><p></p>"
tabelle1="<table class=\"eval\" border=\"1\"><tr><th>ID</th><th>Question</th><th>Recall</th><th>Precision</th><th>F-Measure</th></tr>"
tabelle2="<table class=\"eval\" border=\"1\"><tr><th>Number of constructed Queries</th><th>Number of correct Answers</th><th>Number of wrong Answers</th><th>Global Recall</th><th>Global Precision</th><th>Global F-Measure</th></tr>"
inhalt_tabelle2="<tr><td>"+anzahl_bearbeiteter_fragen+"</td><td>"+anzahl_korrekter_antworten+"</td><td>"+anzahl_falscher_antworten+"</td><td>"+recall+"</td><td>"+precision+"</td><td>"+fmeasure+"</td></tr>"
end_tabelle2="</table>"
end_tabelle1="</table>"
ende="</body> </html>"
string=""
for id,answer in bewertung_ausgabe.iteritems():
question = answer[0]
question = question.decode("ascii","ignore")
string_bla="<tr><td>"+id+"</td><td>"+question+"</td><td>"+answer[1]+"</td><td>"+answer[2]+"</td><td>"+answer[3]+"</td></tr>"
string+=string_bla
outfile=open(filename_out_html,"w")
outfile.write(start_table+space+tabelle2+inhalt_tabelle2+end_tabelle2+space+tabelle1+string+end_tabelle1+space+tabelle3+string_question+end_tabelle3+ende)
outfile.close()
_ausgabe_(filename_out_html)
################### MAIN ##################################################
def main():
global system_time, testing, task
system_time = datetime.datetime.now()
set_filename_out(system_time)
set_filename_txt_out(system_time)
#print system_time
#print filename_out_html
# Train or Test
if sys.argv[2] == "test":
testing = True
else:
testing = False
# Task
task = sys.argv[3]
# Set gold standard
gold = '../data/qald-4_'
if task == '1': gold += 'multilingual'
elif task == '2': gold += 'biomedical'
elif task == '3': gold += 'hybrid'
if testing: gold += '_test'
else: gold += '_train'
gold += '_withanswers.xml'
import urllib
dateiname=sys.argv[1]
if (len(sys.argv)>=6):
set_system_name(sys.argv[4])
set_configuration(sys.argv[5])
else:
set_system_name("None")
set_configuration("None")
loesung=None
try:
loesung=lade_baum(dateiname)
except Exception as inst:
error= "<!doctype html> <html> <head> <title>ERROR</title></head> <body> <p>"+str(type(inst))+"</p><p>"+str(inst.args)+"</p><p>"+str(inst)+"</p><p>PLEASE CHECK YOUR XML FILE</p></body></html>"
outfile=open(filename_out_html,"w")
outfile.write(error)
outfile.close()
_ausgabe_(filename_out_html)
# print "Unexpected error:", sys.exc_info()[0]
# print "8"
gstandard_importet=True
try:
musterloesung=lade_musterloesung(urllib.urlopen(gold))
except Exception as inst:
error= "<!doctype html> <html> <head> <title>ERROR</title></head> <body> <p>"+str(type(inst))+"</p><p>"+str(inst.args)+"</p><p>"+str(inst)+"</p></body></html>"
write_error(error)
# print "Unexpected error:", sys.exc_info()[0]
# print "7"
else:
_evaluation(loesung,musterloesung,task)
# print "Unexpected error:", sys.exc_info()[0]
# print "6"
def write_error(error):
global filename_out_html
outfile=open(filename_out_html,"w")
outfile.write(error)
outfile.close()
_ausgabe_(filename_out_html)
if __name__ == "__main__":
main()
| mit | -724,464,349,123,768,700 | 42.688852 | 235 | 0.473778 | false | 3.872143 | false | false | false |
Re4son/Kali-Pi | Menus/menu_pause.py | 1 | 1785 | #!/usr/bin/env python
import pygame, os, sys, subprocess, time
import RPi.GPIO as GPIO
from pygame.locals import *
from subprocess import *
if "TFT" in os.environ and os.environ["TFT"] == "0":
# No TFT screen
SCREEN=0
pass
elif "TFT" in os.environ and os.environ["TFT"] == "2":
# TFT screen with mouse
SCREEN=2
os.environ["SDL_FBDEV"] = "/dev/fb1"
elif "TFT" in os.environ and os.environ["TFT"] == "3":
# HDMI touchscreen
SCREEN=3
os.environ["SDL_FBDEV"] = "/dev/fb0"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
elif "TFT" in os.environ and os.environ["TFT"] == "4":
# Raspberry Pi 7" touchscreen
SCREEN=4
from ft5406 import Touchscreen
os.environ["SDL_FBDEV"] = "/dev/fb0"
ts = Touchscreen()
else:
# TFT touchscreen
SCREEN=1
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
# Initialize pygame modules individually (to avoid ALSA errors) and hide mouse
pygame.font.init()
pygame.display.init()
pygame.mouse.set_visible(0)
# Initialise GPIO
GPIO.setwarnings(False)
#While loop to manage touch screen inputs
state = [False for x in range(10)]
while 1:
if SCREEN==4:
for touch in ts.poll():
if state[touch.slot] != touch.valid:
if touch.valid:
sys.exit()
else:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
sys.exit()
#Debug:
#ensure there is always a safe way to end the program if the touch screen fails
##if event.type == KEYDOWN:
## if event.key == K_ESCAPE:
## sys.exit()
time.sleep(0.4)
| gpl-3.0 | 3,545,909,162,319,380,500 | 28.262295 | 87 | 0.614566 | false | 3.142606 | false | false | false |
foobarbazblarg/stayclean | stayclean-2016-april/serve-signups-with-flask.py | 1 | 8086 | #!/usr/bin/python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '4bvb7i', '4c1crs', '4c5lvg', '4ca9ff', '4cf91t', '4ckta7', '4cp4ir' ]
flaskport = 8883
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
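# The ConfigParser lookup above expects ../reddit-password-credentials.cfg to provide a
# [Reddit] section with "user" and "password" keys; the values here are placeholders:
#   [Reddit]
#   user = your_reddit_username
#   password = your_reddit_password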
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
comments += praw.helpers.flatten_tree(submission.comments)
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.permalink)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| mit | 8,488,130,493,691,722,000 | 40.255102 | 129 | 0.67351 | false | 3.771455 | true | false | false |
zhibolau/webApp | www/models.py | 1 | 1589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Zhibo Liu'
import time,uuid
from transwarp.db import next_id # a bare "from" import fails unless an __init__.py file is created in that package directory!
from transwarp.orm import Model, StringField, BooleanField, FloatField, TextField
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(updatable=False, ddl='varchar(50)')
password = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(updatable=False, default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
user_id = StringField(updatable=False, ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(200)')
content = TextField()
created_at = FloatField(updatable=False, default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(updatable=False, ddl='varchar(50)')
user_id = StringField(updatable=False, ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
content = TextField()
created_at = FloatField(updatable=False, default=time.time)
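# Minimal usage sketch. It assumes the transwarp Model base class provides insert() and
# find_first(), as in the tutorial-style ORM this app builds on; names/values are illustrative:
#   u = User(email='[email protected]', password='secret', name='Test', image='about:blank')
#   u.insert()
#   found = User.find_first('where email=?', '[email protected]')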
| gpl-2.0 | -8,860,960,697,945,917,000 | 35.162791 | 86 | 0.67717 | false | 3.206186 | false | false | false |
thomaserlang/XenBackup | src/xenbackup/XenAPI.py | 1 | 9750 | # Copyright (c) Citrix Systems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# --------------------------------------------------------------------
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import gettext
import xmlrpclib
import httplib
import socket
import sys
import ssl
translation = gettext.translation('xen-xm', fallback = True)
API_VERSION_1_1 = '1.1'
API_VERSION_1_2 = '1.2'
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception, exn:
import sys
print >>sys.stderr, exn
return "Xen-API failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
# Just a "constant" that we use to decide whether to retry the RPC
_RECONNECT_AND_RETRY = object()
class UDSHTTPConnection(httplib.HTTPConnection):
"""HTTPConnection subclass to allow HTTP over Unix domain sockets. """
def connect(self):
path = self.host.replace("_", "/")
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(path)
class UDSHTTP(httplib.HTTP):
_connection_class = UDSHTTPConnection
class UDSTransport(xmlrpclib.Transport):
def __init__(self, use_datetime=0):
self._use_datetime = use_datetime
self._extra_headers=[]
def add_extra_header(self, key, value):
self._extra_headers += [ (key,value) ]
def make_connection(self, host):
# Python 2.4 compatibility
if sys.version_info[0] <= 2 and sys.version_info[1] < 6:
return UDSHTTP(host)
else:
return UDSHTTPConnection(host)
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", handler)
for key, value in self._extra_headers:
connection.putheader(key, value)
class Session(xmlrpclib.ServerProxy):
"""A server proxy and session manager for communicating with xapi using
the Xen-API.
Example:
session = Session('http://localhost/')
session.login_with_password('me', 'mypassword')
session.xenapi.VM.start(vm_uuid)
session.xenapi.session.logout()
"""
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1):
try:
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none, context=ssl._create_unverified_context())
except AttributeError:
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose, allow_none)
self.transport = transport
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
retry_count = 0
while retry_count < 3:
full_params = (self._session,) + params
result = _parse_result(getattr(self, methodname)(*full_params))
if result is _RECONNECT_AND_RETRY:
retry_count += 1
if self.last_login_method:
self._login(self.last_login_method,
self.last_login_params)
else:
raise xmlrpclib.Fault(401, 'You must log in')
else:
return result
raise xmlrpclib.Fault(
500, 'Tried 3 times to get a valid session, but failed')
def _login(self, method, params):
result = _parse_result(getattr(self, 'session.%s' % method)(*params))
if result is _RECONNECT_AND_RETRY:
raise xmlrpclib.Fault(
500, 'Received SESSION_INVALID when logging in')
self._session = result
self.last_login_method = method
self.last_login_params = params
self.API_version = self._get_api_version()
def _logout(self):
try:
if self.last_login_method.startswith("slave_local"):
return _parse_result(self.session.local_logout(self._session))
else:
return _parse_result(self.session.logout(self._session))
finally:
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def _get_api_version(self):
pool = self.xenapi.pool.get_all()[0]
host = self.xenapi.pool.get_master(pool)
major = self.xenapi.host.get_API_version_major(host)
minor = self.xenapi.host.get_API_version_minor(host)
return "%s.%s"%(major,minor)
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.API_version, self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
else:
return xmlrpclib.ServerProxy.__getattr__(self, name)
def xapi_local():
return Session("http://_var_xapi_xapi/", transport=UDSTransport())
def _parse_result(result):
if type(result) != dict or 'Status' not in result:
raise xmlrpclib.Fault(500, 'Missing Status in response from server' + result)
if result['Status'] == 'Success':
if 'Value' in result:
return result['Value']
else:
raise xmlrpclib.Fault(500,
'Missing Value in response from server')
else:
if 'ErrorDescription' in result:
if result['ErrorDescription'][0] == 'SESSION_INVALID':
return _RECONNECT_AND_RETRY
else:
raise Failure(result['ErrorDescription'])
else:
raise xmlrpclib.Fault(
500, 'Missing ErrorDescription in response from server')
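# For reference, the response dicts handled above have these shapes (keys taken from the
# checks in _parse_result; the concrete values are invented):
#   {'Status': 'Success', 'Value': 'OpaqueRef:...'}
#   {'Status': 'Failure', 'ErrorDescription': ['SESSION_INVALID', '...']}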
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, API_version, send, name):
self.__API_version = API_version
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<XenAPI._Dispatcher for %s>' % self.__name
else:
return '<XenAPI._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__API_version, self.__send, name)
else:
return _Dispatcher(self.__API_version, self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
| mit | -7,467,321,875,946,531,000 | 38.156627 | 101 | 0.628205 | false | 4.263227 | false | false | false |
mohseniaref/PySAR-1 | pysar/pysarApp.py | 1 | 21751 | #! /usr/bin/env python
###############################################################################
#
# Project: PySAR
# Purpose: Python Module for InSAR Time-series Analysis
# Author: Heresh Fattahi
# Created: July 2013
# Modified: Yunjun Zhang, Feb 2015
###############################################################################
# Copyright (c) 2013, Heresh Fattahi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import glob
import time
import _readfile as readfile
import h5py
import subprocess
from pysar._pysar_utilities import check_variable_name
def radar_Or_geo(igramFile):
h5file=h5py.File(igramFile,'r')
igramList=h5file['interferograms'].keys()
if 'X_FIRST' in h5file['interferograms'][igramList[0]].attrs.keys():
rdr_geo='geo'
else:
rdr_geo='radar'
h5file.close()
return rdr_geo
def Usage():
print '''
*******************************************************
*******************************************************
*******************************************************
*******************************************************
********* OOOOO OOOOO O OOOO *********
********* O O O O O O O O O *********
********* OOOOO OOO OOOOO OOOOO OOOO *********
********* O O O O O O O *********
********* O OOO OOOOO O O O O *********
********* *********
*******************************************************
*******************************************************
*******************************************************
*******************************************************
A Python Module for InSAR time-series analysis.
PySAR v1.0 July 2013, InSAR Lab, RSMAS, University of Miami
usage:
pysarApp.py TEMPLATEFILE
example:
pysarApp.py /nethome/hfattahi/SanAndreasT356EnvD.template
pysarApp.py $TE/SanAndreasT356EnvD.template
*******************************************************
Template file options:
pysar.inputdata=/scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG*/filt*0*c10.unw
pysar.CorFiles = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG*/filt*0*.cor
pysar.wraped = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG*/filt*0*.int
pysar.geomap = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/GEO/geomap_12/geomap_8rlks.trans
pysar.dem = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG_20050102_20070809/radar_8lks.hgt
pysar.subset.yx = 1800:2000,700:800
pysar.seed.ll=31.5, 67 or pysar.seed.yx=257 , 151
pysar.unwrap_error = yes [no]
pysar.tropospheric_delay = yes ['no']
pysar.tropospheric_delay.method = pyaps ['height-correlation']
pysar.Numerical_Weather_Model = ECMWF ['MERRA', 'NARR']
pysar.acquisition_time = 00:00 ['06:00', '12:00', '18:00']
pysar.topo_error = yes [no]
pysar.orbit_error = yes [no]
pysar.orbit_error.method = plane ['quadratic', 'plane', 'quardatic_range', 'quadratic_azimiuth', 'plane_range', 'plane_azimuth','baselineCor','BaseTropCor']
pysar.mask=yes
pysar.mask.threshold = 0.7
pysar.geocode = yes
*******************************************************
'''
#########################################
def main(argv):
try:
templateFile = argv[1]
except:
Usage();sys.exit(1)
projectName = os.path.basename(templateFile.partition('.')[0])
try:
tssarProjectDir = os.getenv('TSSARDIR') +'/'+projectName
except:
tssarProjectDir = os.getenv('SCRATCHDIR') + '/' + projectName + "/TSSAR" # FA 7/2015: adopted for new directory structure
print "QQ " + tssarProjectDir
if not os.path.isdir(tssarProjectDir): os.mkdir(tssarProjectDir)
os.chdir(tssarProjectDir)
igramFile = 'LoadedData.h5'
Modified_igramFile = 'Modified_LoadedData.h5'
if os.path.isfile(Modified_igramFile):
print Modified_igramFile + ' already exists.'
igramFile=Modified_igramFile
template = readfile.read_template(templateFile)
Rlooks = template['Rlooks_unw']
#########################################
# Loading interferograms
#########################################
print '******************************************'
print''
if os.path.isfile(igramFile):
print igramFile + ' already exists.'
else:
loadCmd='load_data.py ' + templateFile
print loadCmd
os.system(loadCmd)
# copyDemCmd='copy_dem_trans.py ' + templateFile
# print copyDemCmd
# os.system(copyDemCmd)
print''
print '******************************************'
#########################################
# Check the subset
#########################################
try:
subset= template['pysar.subset.yx'].split(',')
print subset
print subset[0]
subsetOutName='subset_'+igramFile
subsetCmd='subset.py -f '+ igramFile + ' -y '+subset[0]+' -x '+subset[1] + ' -o ' + subsetOutName
print '*****************************************'
print 'Subset the area ...'
print subsetCmd
os.system(subsetCmd)
igramFile=subsetOutName
print '*****************************************'
except:
print '*****************************************'
print 'No Subset selected. Processing the whole area'
print '*****************************************'
#########################################
#Referencing all interferograms to the same pixel
#########################################
rdr_or_geo=radar_Or_geo(igramFile)
print '******************************************'
print''
if os.path.isfile('Seeded_'+igramFile):
igramFile = 'Seeded_'+igramFile
print igramFile + ' already exists.'
else:
print 'referencing all interferograms to the same pixel.'
if 'pysar.seed.ll' in template.keys():
print 'Checking the lat/lon reference point'
lat= template['pysar.seed.ll'].split(',')[0]
lon= template['pysar.seed.ll'].split(',')[1]
seedCmd= 'seed_data.py -f ' + igramFile + ' -l ' +lat+ ' -L '+lon
elif 'pysar.seed.yx' in template.keys():
print 'Checking y/x reference point'
y= template['pysar.seed.yx'].split(',')[0]
x= template['pysar.seed.yx'].split(',')[1]
seedCmd= 'seed_data.py -f ' + igramFile + ' -y ' +y+ ' -x '+x
else:
seedCmd= 'seed_data.py -f ' + igramFile
igramFile = 'Seeded_'+igramFile
print seedCmd
os.system(seedCmd)
print''
print '******************************************'
############################################
#unwrapping error correction based on the
# consistency of triplets of interferograms
############################################
print '******************************************'
print''
try:
template['pysar.unwrap_error']
if template['pysar.unwrap_error'] in ('y','yes','Yes','YES'):
print 'unwrapping error correction might take a while depending on the size of your data set! '
unwCmd='unwrap_error.py '+igramFile
os.system(unwCmd)
igramFile=igramFile.split('.')[0]+'_unwCor.h5'
else:
print 'No unwrapping error correction.'
except:
print 'No unwrapping error correction.'
print''
print '******************************************'
#########################################
# inversion of interferograms
########################################
print '******************************************'
print''
if os.path.isfile(igramFile.split('.')[0]+'_unwCor.h5'):
igramFile = igramFile.split('.')[0]+'_unwCor.h5'
print igramFile + ' exists.'
if os.path.isfile('timeseries.h5'):
print 'timeseries.h5 already exists, inversion is not needed.'
else:
invertCmd = 'igram_inversion.py '+ igramFile
print invertCmd
os.system(invertCmd)
timeseriesFile='timeseries.h5'
print''
print '******************************************'
##############################################
#temporal coherence:
#A parameter to evaluate the consistency of
# timeseries with the interferograms
##############################################
print '******************************************'
print''
# if os.path.isfile('temporal_coherence.h5'):
# print 'temporal_coherence.h5 already exists.'
# else:
# tempcohCmd='temporal_coherence.py '+igramFile+' '+timeseriesFile
# print tempcohCmd
# os.system(tempcohCmd)
tempcohCmd='temporal_coherence.py '+igramFile+' '+timeseriesFile
print tempcohCmd
os.system(tempcohCmd)
print''
print '******************************************'
##############################################
#update Mask based on temporal coherence
# add by Yunjun Feb 15, 2015
##############################################
print '******************************************'
print''
try:
template['pysar.mask']
if template['pysar.mask'] in ('yes','Yes','YES','y'):
print 'Updating mask according to temporal coherence'
cohT=template['pysar.mask.threshold']
maskCmd='generate_mask.py -f temporal_coherence.h5 -m '+ cohT +' -M 1.0 -o Mask.h5'
print maskCmd
os.system(maskCmd)
else:
print 'No update for mask.'
except:
print 'No update for mask.'
print''
print '******************************************'
##############################################
# Generate incident angle
# add by Yunjun Feb 15, 2015
##############################################
print '******************************************'
print''
inciCmd='incidence_angle.py -f timeseries.h5'
print inciCmd
os.system(inciCmd)
print''
print '******************************************'
##############################################
#If Satellite is Envisat and if Coordinate
#system is radar then LOD correction
##############################################
print '******************************************'
print''
h5file=h5py.File(timeseriesFile,'r')
if rdr_or_geo =='radar':
if h5file['timeseries'].attrs['PLATFORM']=='ENVISAT':
LODcmd='lod.py '+timeseriesFile
print LODcmd
os.system(LODcmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_LODcor.h5'
print''
print '******************************************'
##############################################
# Tropospheric Correction
##############################################
print '******************************************'
print''
try:
if (template['pysar.tropospheric_delay'] in ('y','yes','Yes','YES')) and template['pysar.orbit_error.method']=='BaseTropCor':
print '''
+++++++++++++++++++++++++++++++++++++++++++++++++++
WARNING:
Orbital error correction was BaseTropCor.
Tropospheric correction was already applied simultaneously with baseline error correction.
Tropospheric correction cannot be applied again.
To apply the tropospheric correction separately from baseline error correction, choose one of the other orbital error correction options.
+++++++++++++++++++++++++++++++++++++++++++++++++++
'''
template['pysar.tropospheric_delay']='no'
except:
print 'Checking the tropospheric delay correction ...'
if template['pysar.tropospheric_delay'] in ('y','yes','Yes','YES'):
# demFile='radar_'+Rlooks+'rlks.hgt'
demFile=template['pysar.dem']
demFile=check_variable_name(demFile)
# print 'DEM file: '+demFile
if not os.path.isfile(demFile):
print '++++++++++++++++++++++++++++++++++++++++++++++'
print 'Error:'
print 'DEM (radar_*rlks.hgt file) was not found!'
print 'Continue without tropospheric correction ...'
print '++++++++++++++++++++++++++++++++++++++++++++++'
else:
if template['pysar.tropospheric_delay.method'] in ['height-correlation','height_correlation','Height-Correlation','Height_Correlation']:
print 'tropospheric delay correction with height-correlation approach'
try:
polyOrder=template['pysar.trop.polyOrder']
except:
print 'Default polynomial order for tropospheric correction = 1'
polyOrder='1'
cmdTrop='tropospheric_correction.py'+ ' -f '+ timeseriesFile + ' -d '+ demFile + ' -p '+ polyOrder
os.system(cmdTrop)
timeseriesFile=timeseriesFile.split('.')[0]+'_tropCor.h5'
elif template['pysar.tropospheric_delay.method']=='pyaps':
print 'Atmospheric correction using Numerical Weather Models (using PyAPS software)'
print 'reading DEM, source of NWM and acquisition time from template file'
source_of_NWM=template['pysar.Numerical_Weather_Model']
print 'Numerical Weather Model: '+source_of_NWM
acquisition_time=template['pysar.acquisition_time']
print 'acquisition time: '+acquisition_time
# cmdTrop = ["tropcor_pyaps.py -f ",timeseriesFile," -d ",demFile," -s ",source_of_NWM," -h ",acquisition_time," -i incidence_angle.h5"]
cmdTrop = 'tropcor_pyaps.py -f '+timeseriesFile+ ' -d '+ demFile +' -s ' + source_of_NWM + ' -h '+ acquisition_time + ' -i incidence_angle.h5'
print cmdTrop
os.system(cmdTrop)
# subprocess.Popen(cmdTrop).wait()
timeseriesFile=timeseriesFile.split('.')[0]+'_'+source_of_NWM+'.h5'
else:
print 'Atmospheric correction method not recognized.'
else:
print 'No atmospheric delay correction.'
print''
print '******************************************'
##############################################
#topographic residuals
##############################################
print '******************************************'
print''
try:
template['pysar.topo_error']
if template['pysar.topo_error'] in ('yes','Yes','YES','y'):
print 'Correcting topographic residuals'
topoCmd='dem_error.py '+ timeseriesFile +' '+ igramFile
print topoCmd
os.system(topoCmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_demCor.h5'
else:
print 'No correction for topographic residuals.'
except:
print 'No correction for topographic residuals.'
print''
print '******************************************'
##############################################
#Orbit correction
##############################################
print '******************************************'
print''
try:
template['pysar.orbit_error']
if template['pysar.orbit_error'] in ('yes','Yes','YES','y'):
try:
orbit_error_method=template['pysar.orbit_error.method']
print 'orbit error correction method : '+orbit_error_method
if orbit_error_method in ['quadratic', 'plane', 'quardatic_range', 'quadratic_azimiuth', 'plane_range', 'plane_azimuth']:
orbitCmd='remove_plane.py '+timeseriesFile+' '+template['pysar.orbit_error.method'] #+ ' Mask.h5'
timeseriesFile=timeseriesFile.split('.')[0]+'_'+template['pysar.orbit_error.method']+'.h5'
print orbitCmd
os.system(orbitCmd)
elif orbit_error_method == 'baselineCor':
orbitCmd='baseline_error.py ' +timeseriesFile #+ ' Mask.h5'
print orbitCmd
try:
h5file=h5py.File(timeseriesFile,'r')
daz=float(h5file['timeseries'].attrs['AZIMUTH_PIXEL_SIZE'])
os.system(orbitCmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_'+template['pysar.orbit_error.method']+'.h5'
except:
print 'WARNING!'
print 'Skipping orbital error correction.'
print 'baselineCor method can only be applied in radar coordinate'
elif orbit_error_method =='BaseTropCor':
demfile=template['pysar.dem']
demfile=check_variable_name(demfile)
try:
polyOrder=template['pysar.trop.polyOrder']
except:
print 'Default polynomial order for tropospheric correction = 1'
polyOrder='1'
try:
h5file=h5py.File(timeseriesFile,'r')
daz=float(h5file['timeseries'].attrs['AZIMUTH_PIXEL_SIZE'])
orbitCmd='baseline_trop.py '+timeseriesFile+' '+ demfile +' '+ polyOrder +' range_and_azimuth'
print 'Joint estimation of Baseline error and tropospheric delay [height-correlation approach]'
print orbitCmd
os.system(orbitCmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_'+template['pysar.orbit_error.method']+'.h5'
except:
print 'WARNING!'
print 'Skipping orbital error correction.'
print 'baselineCor method can only be applied in radar coordinate'
else:
print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print 'WARNING!'
print 'Orbital error correction method was not recognized!'
print 'Possible options are:'
print 'quadratic, plane, quardatic_range, quadratic_azimiuth, plane_range, plane_azimuth,baselineCor,BaseTropCor'
print 'Continue without orbital errors correction...'
print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
except:
print 'No orbital errors correction.'
else:
print 'No orbital errors correction.'
except:
print 'No orbital errors correction.'
print''
print '******************************************'
#############################################
#Velocity and rmse maps
#############################################
print '******************************************'
print''
velCmd='timeseries2velocity.py '+timeseriesFile
print velCmd
os.system(velCmd)
print''
print '******************************************'
#############################################
#Masking the velocity based on the temporal
#coherence or rmse if it's specified
#############################################
print '******************************************'
print''
try:
template['pysar.mask']
if template['pysar.mask'] in ('yes','Yes','YES','y'):
try:
template['pysar.mask.threshold']
maskCmd='masking.py -f velocity.h5 -m temporal_coherence.h5 -t '+template['pysar.mask.threshold']
print 'Masking the velocity file using the temporal coherence with the threshold of '+template['pysar.mask.threshold']
except:
maskCmd='masking.py -f velocity.h5 -m temporal_coherence.h5 -t 0.7'
print 'Masking the velocity file using the temporal coherence with the threshold of 0.7'
os.system(maskCmd)
# rmCmd='rm velocity.h5'
# os.system(rmCmd)
# mvCmd='mv velocity_masked.h5 velocity.h5'
# os.system(mvCmd)
else:
print 'No masking applied'
except:
print 'No masking applied'
print''
print '******************************************'
############################################
#Geocoding
############################################
print '******************************************'
print''
try:
template['pysar.geocode']
if template['pysar.geocode'] in ('y','yes','Yes','YES'):
geomapFile='geomap_'+Rlooks+'rlks.trans'
# geoCmd = 'geocode.py '+timeseriesFile+' '+geomapFile
# print geoCmd
# os.system(geoCmd)
geoCmd = 'geocode.py velocity.h5 '+geomapFile
print geoCmd
os.system(geoCmd)
geoCmd = 'geocode.py Mask.h5 '+geomapFile
print geoCmd
os.system(geoCmd)
# maskCmd = 'Masking.py -f geo_'+timeseriesFile+' -m geo_Mask.h5'
# print maskCmd
# os.system(maskCmd)
maskCmd = 'masking.py -f geo_velocity.h5 -m geo_Mask.h5'
print maskCmd
os.system(maskCmd)
else:
print 'No geocoding applied'
except:
print 'No geocoding applied'
print''
print '******************************************'
#############################################
# PySAR v1.0 #
#############################################
print''
print '###############################################'
print ''
print 'End of PySAR processing.'
print ''
print '################################################'
if __name__ == '__main__':
main(sys.argv[:])
| mit | 6,423,405,613,165,199,000 | 37.841071 | 157 | 0.507379 | false | 3.978599 | false | false | false |
qbuat/rootpy | rootpy/tree/model.py | 1 | 5162 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import inspect
from cStringIO import StringIO
import types
import ROOT
from .. import log; log = log[__name__]
from .treetypes import Column
from .treebuffer import TreeBuffer
__all__ = [
'TreeModel',
]
class TreeModelMeta(type):
"""
Metaclass for all TreeModels
Addition/subtraction of TreeModels is handled
as set union and difference of class attributes
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
TreeModelMeta.checkattr(attr, value)
return type.__new__(cls, name, bases, dct)
def __add__(cls, other):
return type('_'.join([cls.__name__, other.__name__]),
(cls, other), {})
def __iadd__(cls, other):
return cls.__add__(other)
def __sub__(cls, other):
attrs = dict(set(cls.get_attrs()).difference(set(other.get_attrs())))
return type('_'.join([cls.__name__, other.__name__]),
(TreeModel,), attrs)
def __isub__(cls, other):
return cls.__sub__(other)
def __setattr__(cls, attr, value):
TreeModelMeta.checkattr(attr, value)
type.__setattr__(cls, attr, value)
@classmethod
def checkattr(metacls, attr, value):
"""
Only allow class attributes that are instances of
rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy
"""
if not isinstance(value, (
types.MethodType,
types.FunctionType,
classmethod,
staticmethod,
property)):
if attr in dir(type('dummy', (object,), {})) + \
['__metaclass__']:
return
if attr.startswith('_'):
raise SyntaxError(
"TreeModel attribute `{0}` "
"must not start with `_`".format(attr))
if not inspect.isclass(value):
if not isinstance(value, Column):
raise TypeError(
"TreeModel attribute `{0}` "
"must be an instance of "
"`rootpy.tree.treetypes.Column`".format(attr))
return
if not issubclass(value, (ROOT.TObject, ROOT.ObjectProxy)):
raise TypeError(
"TreeModel attribute `{0}` must inherit "
"from `ROOT.TObject` or `ROOT.ObjectProxy`".format(
attr))
def prefix(cls, name):
"""
Create a new TreeModel where class attribute
names are prefixed with ``name``
"""
attrs = dict([(name + attr, value) for attr, value in cls.get_attrs()])
return TreeModelMeta(
'_'.join([name, cls.__name__]),
(TreeModel,), attrs)
def suffix(cls, name):
"""
Create a new TreeModel where class attribute
names are suffixed with ``name``
"""
attrs = dict([(attr + name, value) for attr, value in cls.get_attrs()])
return TreeModelMeta(
'_'.join([cls.__name__, name]),
(TreeModel,), attrs)
def get_attrs(cls):
"""
Get all class attributes ordered by definition
"""
ignore = dir(type('dummy', (object,), {})) + ['__metaclass__']
attrs = [
item for item in inspect.getmembers(cls) if item[0] not in ignore
and not isinstance(
item[1], (
types.FunctionType,
types.MethodType,
classmethod,
staticmethod,
property))]
# sort by idx and use attribute name to break ties
attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
return attrs
def to_struct(cls, name=None):
"""
Convert the TreeModel into a compiled C struct
"""
if name is None:
name = cls.__name__
basic_attrs = dict([(attr_name, value)
for attr_name, value in cls.get_attrs()
if isinstance(value, Column)])
if not basic_attrs:
return None
src = 'struct {0} {{'.format(name)
for attr_name, value in basic_attrs.items():
src += '{0} {1};'.format(value.type.typename, attr_name)
src += '};'
if ROOT.gROOT.ProcessLine(src) != 0:
return None
return getattr(ROOT, name, None)
def __repr__(cls):
out = StringIO()
for name, value in cls.get_attrs():
print >> out, '{0} -> {1}'.format(name, value)
return out.getvalue()[:-1]
def __str__(cls):
return repr(cls)
class TreeModel(object):
__metaclass__ = TreeModelMeta
def __new__(cls):
"""
Return a TreeBuffer for this TreeModel
"""
treebuffer = TreeBuffer()
for name, attr in cls.get_attrs():
treebuffer[name] = attr()
return treebuffer
| gpl-3.0 | 1,867,577,259,396,475,100 | 31.465409 | 79 | 0.518016 | false | 4.45 | false | false | false |
jolid/script.module.donnie | lib/donnie/furk.py | 1 | 6496 | import urllib2, urllib, sys, os, re, random, copy
from BeautifulSoup import BeautifulSoup, Tag, NavigableString
import xbmc,xbmcplugin,xbmcgui,xbmcaddon
from t0mm0.common.net import Net
from t0mm0.common.addon import Addon
from scrapers import CommonScraper
net = Net()
try:
import json
except:
# pre-frodo and python 2.4
import simplejson as json
''' ###########################################################
Usage and helper functions
############################################################'''
class FurkServiceSracper(CommonScraper):
def __init__(self, settingsid, DB=None, REG=None):
if DB:
self.DB=DB
if REG:
self.REG=REG
self.addon_id = 'script.module.donnie'
self.service='furk'
self.name = 'furk.net'
self.raiseError = False
self.referrer = 'http://www.furk.net/'
self.base_url = 'https://api.furk.net/api/'
self.user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
self.provides = []
self.settingsid = settingsid
self._loadsettings()
self.settings_addon = self.addon
def _getShows(self, silent=False):
self.log('Do Nothing here')
def _getRecentShows(self, silent=False):
self.log('Do Nothing here')
def _getEpisodes(self, showid, show, url, pDialog, percent, silent):
self.log('Do Nothing here')
def _getMovies(self, silent=False):
self.log('Do Nothing here')
def _getRecentMovies(self, silent):
self.log('Do Nothing here')
def _setKey(self, api_key):
xbmcaddon.Addon(id='script.module.donnie').setSetting('furk-apikey', api_key)
def _getKey(self):
api_key = xbmcaddon.Addon(id='script.module.donnie').getSetting('furk-apikey')
if api_key == '':
return None
return api_key
def cleanQuery(self, query):
self.log('Cleaning furk search string')
cleaned = query
if re.search('\\(\\d\\d\\d\\d\\)$', cleaned):
cleaned = cleaned[0:len(cleaned)-7]
cleaned = cleaned.replace(":", '')
cleaned = cleaned.replace("'", '')
cleaned = cleaned.replace("-", ' ')
cleaned = cleaned.replace("_", ' ')
print cleaned
return cleaned
def _login(self):
api_key = self._getKey()
if api_key:
self.log('Using cached api key')
return api_key
loginurl = "%slogin/login" % self.base_url
login = self.getSetting('furk-username')
password = self.getSetting('furk-password')
post_dict = {"login": login, "pwd": password}
cookiejar = os.path.join(self.cookie_path,'furk.lwp')
try:
response = net.http_POST(loginurl, post_dict).content
data = json.loads(response)
status = data['status']
api_key = data['api_key']
self._setKey(api_key)
self.log("Furk response: %s", response)
if status=="ok":
net.save_cookies(cookiejar)
else:
print 'Furk Account: login failed'
return api_key
except Exception, e:
print '**** Furk Error: %s' % e
pass
def _getStreams(self, episodeid=None, movieid=None):
api_key = self._login()
query = ""
if episodeid:
row = self.DB.query("SELECT rw_shows.showname, season, episode FROM rw_episodes JOIN rw_shows ON rw_shows.showid=rw_episodes.showid WHERE episodeid=?", [episodeid])
name = row[0].replace("'", "")
if re.search('\\(\\d\\d\\d\\d\\)$', row[0]):
name = name[0:len(name)-7]
season = row[1].zfill(2)
episode = row[2].zfill(2)
query = str("%s S%sE%s" % (name, season, episode))
elif movieid:
row = self.DB.query("SELECT movie, year FROM rw_movies WHERE imdb=? LIMIT 1", [movieid])
movie = self.cleanQuery(row[0])
query = "%s %s" %(movie, row[1])
streams = []
url = "%splugins/metasearch" % self.base_url
params = {"type": "video", "filter": "cached", "api_key": api_key, "q": query}
pagedata = net.http_POST(url, params).content
if pagedata=='':
return False
data = json.loads(pagedata)
try:
files = data['files']
for f in files:
if f['type'] == 'video':
raw_url = f['id']
name = f['name']
size = int(f['size']) / (1024 * 1024)
if size > 2000:
size = size / 1024
unit = 'GB'
else :
unit = 'MB'
self.getStreamByPriority('Furk - %s ([COLOR blue]%s %s[/COLOR])' %(name, size, unit), self.service + '://' + raw_url)
except Exception, e:
self.log("********Donnie Error: %s, %s" % (self.service, e))
self.DB.commit()
def getStreamByPriority(self, link, stream):
self.log(link)
host = 'furk.net'
SQL = "INSERT INTO rw_stream_list(stream, url, priority, machineid) " \
"SELECT ?, ?, priority, ? " \
"FROM rw_providers " \
"WHERE mirror=? and provider=?"
self.DB.execute(SQL, [link, stream, self.REG.getSetting('machine-id'), host, self.service])
def _getServicePriority(self, link):
self.log(link)
host = 'furk.net'
row = self.DB.query("SELECT priority FROM rw_providers WHERE mirror=? and provider=?", [host, self.service])
return row[0]
def _resolveStream(self, stream):
raw_url = stream.replace(self.service + '://', '')
resolved_url = ''
t_files = []
t_options = []
sdialog = xbmcgui.Dialog()
api_key = self._getKey()
params = {"type": "video", "id": raw_url, "api_key": api_key, 't_files': 1}
url = "%sfile/get" % self.base_url
pagedata = net.http_POST(url, params).content
if pagedata=='':
return False
#print pagedata
data = json.loads(str(pagedata))
try:
files = data['files'][0]['t_files']
for f in files:
if re.search('^video/', f['ct']):
size = int(f['size']) / (1024 * 1024)
if size > 2000:
size = size / 1024
unit = 'GB'
else :
unit = 'MB'
t_files.append("%s ([COLOR blue]%s %s[/COLOR])" %(f['name'], size, unit))
t_options.append(f['url_dl'])
file_select = sdialog.select('Select Furk Stream', t_files)
if file_select < 0:
return resolved_url
resolved_url = str(t_options[file_select])
except Exception, e:
self.log("********Donnie Error: %s, %s" % (self.service, e))
self.log("Furk retruned: %s", resolved_url, level=0)
return resolved_url
def _resolveIMDB(self, uri): #Often needed if a site's movie index does not include IMDB links but the movie page does
imdb = ''
print uri
pagedata = self.getURL(uri, append_base_url=True)
if pagedata=='':
return
imdb = re.search('http://www.imdb.com/title/(.+?)/', pagedata).group(1)
return imdb
def whichHost(self, host): #Sometimes needed
table = { 'Watch Blah' : 'blah.com',
'Watch Blah2' : 'blah2.com',
}
try:
host_url = table[host]
return host_url
except:
return 'Unknown'
| gpl-2.0 | 5,660,529,019,860,364,000 | 28.935484 | 167 | 0.622229 | false | 2.852877 | false | false | false |
twitterdev/twitter-leaderboard | services/migrations/0001_initial.py | 1 | 1248 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-28 08:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timezone', models.CharField(blank=True, max_length=100, null=True)),
('curator_auth_token', models.CharField(blank=True, max_length=40, null=True)),
('twitter_id', models.CharField(blank=True, max_length=25, null=True)),
('twitter_access_token', models.CharField(blank=True, max_length=75, null=True)),
('twitter_access_token_secret', models.CharField(blank=True, max_length=75, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -8,509,788,362,144,956,000 | 39.258065 | 145 | 0.63141 | false | 3.961905 | false | false | false |
jayrumi/walmart-reviews | setup.py | 1 | 1197 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'walmart-reviews',
version = '1.2.0.dev1',
packages = find_packages(),
requires = ['python (>= 3.5)'],
#install_requires = ['random', 'requests', 'lxml', 'datetime', 'time'],
description = 'Parsing reviews from Walmart.com without using API',
long_description = long_description, #'A package for parsing reviews and all information about reviewers from walmart.com for specific item. For more information read README.rst', #open('README.rst').read(),
author = 'Yauheni Rumiantsau',
author_email = '[email protected]',
url = 'https://github.com/jayrumi/walmart-reviews',
#download_url = '',
license = 'MIT License',
keywords = 'walmart parsing',
classifiers = [
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| mit | 8,552,747,381,099,799,000 | 34.205882 | 211 | 0.634085 | false | 3.5 | false | false | false |
harrylewis/python-uinames | uinames/models.py | 1 | 1384 | from utils import PropertyUnavailable
class People(object):
"""
A collection of people, represented by the Person class.
"""
def __init__(self, json=None):
self._json = json or {}
self.data = [Person(identity) for identity in self._json]
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return "<People instance: {} Persons>".format(len(self.data))
class Person(object):
"""
A representation of a person identity, generated from the UINames API.
"""
def __init__(self, json=None):
self._json = json or {}
def __getattr__(self, item):
try:
obj = self._json[item]
# determine if string or dict
if isinstance(obj, str) or isinstance(obj, unicode):
return obj.encode("utf-8")
return obj
except KeyError:
raise PropertyUnavailable(
"Property '{}' is does not exist or is not available for this "
"Person.".format(item))
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return "<Person instance: {} {} from {}>".format(self.name,
self.surname,
self.region)
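# Usage sketch: any key in the JSON payload is exposed as an attribute through __getattr__
# above (name/surname/region are the fields used in __unicode__; the data is illustrative):
#   person = Person({"name": "Ada", "surname": "Lovelace", "region": "United Kingdom"})
#   print person.name, person.surname, person.region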
if __name__ == "__main__":
pass
| mit | 8,602,743,123,588,186,000 | 26.68 | 79 | 0.510116 | false | 4.644295 | false | false | false |
digwanderlust/pants | tests/python/pants_test/base/test_payload_field.py | 1 | 10723 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from hashlib import sha1
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import IvyArtifact, JarDependency
from pants.backend.python.python_requirement import PythonRequirement
from pants.base.payload import Payload
from pants.base.payload_field import (ExcludesField, FileField, FingerprintedField,
FingerprintedMixin, JarsField, PrimitiveField,
PythonRequirementsField, SourcesField, TargetListField)
from pants_test.base_test import BaseTest
class PayloadTest(BaseTest):
def test_excludes_field(self):
empty = ExcludesField()
empty_fp = empty.fingerprint()
self.assertEqual(empty_fp, empty.fingerprint())
normal = ExcludesField([Exclude('com', 'foozle'), Exclude('org')])
normal_fp = normal.fingerprint()
self.assertEqual(normal_fp, normal.fingerprint())
normal_dup = ExcludesField([Exclude('com', 'foozle'), Exclude('org')])
self.assertEqual(normal_fp, normal_dup.fingerprint())
self.assertNotEqual(empty_fp, normal_fp)
def test_jars_field_order(self):
jar1 = JarDependency('com', 'foo', '1.0.0')
jar2 = JarDependency('org', 'baz')
self.assertNotEqual(
JarsField([jar1, jar2]).fingerprint(),
JarsField([jar2, jar1]).fingerprint(),
)
def test_jars_field_artifacts(self):
jar1 = JarDependency('com', 'foo', '1.0.0').with_artifact('com', 'baz')
jar2 = JarDependency('com', 'foo', '1.0.0')
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_jars_field_artifacts_arg(self):
jar1 = JarDependency('com', 'foo', '1.0.0', artifacts=[IvyArtifact('com', 'baz')])
jar2 = JarDependency('com', 'foo', '1.0.0')
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_jars_field_artifacts_arg_vs_method(self):
jar1 = JarDependency('com', 'foo', '1.0.0', artifacts=[IvyArtifact('com', 'baz')])
jar2 = JarDependency('com', 'foo', '1.0.0').with_artifact('com', 'baz')
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
  def test_jars_field_artifacts_sets(self):
jar1 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('com', 'baz')
.with_artifact('org', 'bat'))
jar2 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('org', 'bat')
.with_artifact('com', 'baz'))
jar3 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('org', 'bat'))
jar4 = JarDependency('com', 'foo', '1.0.0')
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar3]).fingerprint(),
)
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar4]).fingerprint(),
)
self.assertNotEqual(
JarsField([jar3]).fingerprint(),
JarsField([jar4]).fingerprint(),
)
def test_jars_field_artifacts_ordering(self):
"""JarDependencies throw away ordering information about their artifacts in the cache key.
But they do not throw it away in their internal representation! In the future, this should be
fixed: either they should sort them as they are added and keep a canonical representation, or
the order information should be preserved.
"""
jar1 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('com', 'baz')
.with_artifact('org', 'bat'))
jar2 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('org', 'bat')
.with_artifact('com', 'baz'))
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_deprecated_jars_field_methods(self):
"""with_sources() and with_docs() are now no-ops. This test shows they don't affect
fingerprinting.
"""
jar1 = (JarDependency('com', 'foo', '1.0.0'))
jar2 = (JarDependency('com', 'foo', '1.0.0')
.with_sources()
.with_docs())
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_jars_field_apidocs(self):
"""apidocs are not properly rolled into the cache key right now. Is this intentional?"""
jar1 = JarDependency('com', 'foo', '1.0.0', apidocs='pantsbuild.github.io')
jar2 = JarDependency('com', 'foo', '1.0.0', apidocs='someother.pantsbuild.github.io')
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_python_requirements_field(self):
req1 = PythonRequirement('foo==1.0')
req2 = PythonRequirement('bar==1.0')
self.assertNotEqual(
PythonRequirementsField([req1]).fingerprint(),
PythonRequirementsField([req2]).fingerprint(),
)
def test_python_requirements_field_version_filter(self):
"""version_filter is a lambda and can't be hashed properly.
Since in practice this is only ever used to differentiate between py3k and py2, it should use
a tuple of strings or even just a flag instead.
"""
req1 = PythonRequirement('foo==1.0', version_filter=lambda py, pl: False)
req2 = PythonRequirement('foo==1.0')
self.assertEqual(
PythonRequirementsField([req1]).fingerprint(),
PythonRequirementsField([req2]).fingerprint(),
)
def test_primitive_field(self):
self.assertEqual(
PrimitiveField({'foo': 'bar'}).fingerprint(),
PrimitiveField({'foo': 'bar'}).fingerprint(),
)
self.assertEqual(
PrimitiveField(['foo', 'bar']).fingerprint(),
PrimitiveField(('foo', 'bar')).fingerprint(),
)
self.assertEqual(
PrimitiveField(['foo', 'bar']).fingerprint(),
PrimitiveField(('foo', 'bar')).fingerprint(),
)
self.assertEqual(
PrimitiveField('foo').fingerprint(),
PrimitiveField(b'foo').fingerprint(),
)
self.assertNotEqual(
PrimitiveField('foo').fingerprint(),
PrimitiveField('bar').fingerprint(),
)
  def test_excludes_field_ordering(self):
self.assertEqual(
ExcludesField([Exclude('com', 'foo')]).fingerprint(),
ExcludesField([Exclude('com', 'foo')]).fingerprint(),
)
self.assertEqual(
ExcludesField([]).fingerprint(),
ExcludesField().fingerprint(),
)
self.assertNotEqual(
ExcludesField([Exclude('com', 'foo')]).fingerprint(),
ExcludesField([Exclude('com')]).fingerprint(),
)
self.assertNotEqual(
ExcludesField([Exclude('com', 'foo'), Exclude('org', 'bar')]).fingerprint(),
ExcludesField([Exclude('org', 'bar'), Exclude('com', 'foo')]).fingerprint(),
)
def test_sources_field(self):
self.create_file('foo/bar/a.txt', 'a_contents')
self.create_file('foo/bar/b.txt', 'b_contents')
self.assertNotEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['b.txt'],
).fingerprint(),
)
self.assertEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
)
self.assertEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
)
self.assertEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt', 'b.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['b.txt', 'a.txt'],
).fingerprint(),
)
fp1 = SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint()
self.create_file('foo/bar/a.txt', 'a_contents_different')
fp2 = SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint()
self.assertNotEqual(fp1, fp2)
def test_fingerprinted_field(self):
class TestValue(FingerprintedMixin):
def __init__(self, test_value):
self.test_value = test_value
def fingerprint(self):
hasher = sha1()
hasher.update(self.test_value)
return hasher.hexdigest()
field1 = TestValue('field1')
field1_same = TestValue('field1')
field2 = TestValue('field2')
self.assertEquals(field1.fingerprint(), field1_same.fingerprint())
self.assertNotEquals(field1.fingerprint(), field2.fingerprint())
fingerprinted_field1 = FingerprintedField(field1)
fingerprinted_field1_same = FingerprintedField(field1_same)
fingerprinted_field2 = FingerprintedField(field2)
self.assertEquals(fingerprinted_field1.fingerprint(), fingerprinted_field1_same.fingerprint())
self.assertNotEquals(fingerprinted_field1.fingerprint(), fingerprinted_field2.fingerprint())
def test_unimplemented_fingerprinted_field(self):
class TestUnimplementedValue(FingerprintedMixin):
pass
with self.assertRaises(NotImplementedError):
FingerprintedField(TestUnimplementedValue()).fingerprint()
def test_file_field(self):
fp1 = FileField(self.create_file('foo/bar.config', contents='blah blah blah')).fingerprint()
fp2 = FileField(self.create_file('foo/bar.config', contents='meow meow meow')).fingerprint()
fp3 = FileField(self.create_file('spam/egg.config', contents='blah blah blah')).fingerprint()
self.assertNotEquals(fp1, fp2)
self.assertNotEquals(fp1, fp3)
self.assertNotEquals(fp2, fp3)
def test_target_list_field(self):
specs = [':t1', ':t2', ':t3']
payloads = [Payload() for i in range(3)]
for i, (s, p) in enumerate(zip(specs, payloads)):
p.add_field('foo', PrimitiveField(i))
self.make_target(s, payload=p)
s1, s2, s3 = specs
context = self.context()
fp1 = TargetListField([s1, s2]).fingerprint_with_context(context)
fp2 = TargetListField([s2, s1]).fingerprint_with_context(context)
fp3 = TargetListField([s1, s3]).fingerprint_with_context(context)
self.assertEquals(fp1, fp2)
self.assertNotEquals(fp1, fp3)
| apache-2.0 | 7,316,399,254,146,659,000 | 32.61442 | 98 | 0.633498 | false | 3.665983 | true | false | false |
pinax/pinax-eventlog | pinax/eventlog/migrations/0001_initial.py | 1 | 1443 | # Generated by Django 3.1 on 2020-08-15 10:08
from django.conf import settings
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
from ..compat import JSONField
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('action', models.CharField(db_index=True, max_length=50)),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('extra', JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder)),
('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.contenttype')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-timestamp'],
},
),
]
| mit | 8,742,902,873,296,838,000 | 38 | 152 | 0.634789 | false | 4.182609 | false | false | false |
NitrousPG/forkbot | server_events.py | 1 | 4992 | # LICENSE
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""server message handler"""
# Numerics aggregated from https://www.alien.net.au/irc/irc2numerics.html
class Server_Events:
"""handles events"""
def __init__(self, forkbot):
self.forkbot = forkbot
self.server_msg = ""
self.server_msg_info = ""
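        # Dispatch table mapping server commands/numerics to the handler
        # methods below; unknown keys are caught in process() and logged as
        # "Unsupported Numeric".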
self.num_switch = {
#misc
"NOTICE": self.not_implemented,
# start-up
"001": self.rpl_welcome, # welcome information
"002": self.rpl_yourhost, # server information
"004": self.not_implemented, # RPL_MYINFO, TODO
"005": self.not_implemented, # RPL_BOUNCE
# server info
"251": self.not_implemented,
"252": self.not_implemented,
"253": self.not_implemented,
"254": self.not_implemented,
"255": self.not_implemented,
"265": self.not_implemented,
"266": self.not_implemented,
# channel info
"315": self.rpl_synirc_who_end, # end of WHO
"332": self.rpl_topic, # topic of channel
"333": self.not_implemented, # ??
"352": self.rpl_synirc_who, # response to WHO
"353": self.rpl_namreply, # user list
"366": self.rpl_endofnames, # end of user list
# motd
"372": self.motd, # motd start/continuation
"375": self.not_implemented,
"376": self.motd, # end of motd
}
def not_implemented(self):
"""000"""
#placeholder function
pass
def motd(self):
"""372, 376"""
# for right now we do not
# care about the MOTD
pass
def rpl_welcome(self):
"""001"""
self.forkbot.log("welcome recieved")
self.forkbot.hostmask = self.server_msg.split(" ")[-1].replace("\r", "")
self.forkbot.log("hostmask is " + self.forkbot.hostmask)
def rpl_yourhost(self):
"""002"""
pass
def rpl_synirc_who_end(self):
"""find out what this numeric for this is"""
pass
def rpl_topic(self):
"""332"""
channel = self.server_msg_info[-1]
topic = self.server_msg[:-1]
self.forkbot.channel_topics.update({channel: topic})
def rpl_synirc_who(self):
"""who request handler"""
msg_info = self.server_msg_info
host = msg_info[5]
nick = msg_info[7]
self.forkbot.users.hostmask.update({nick: host})
def rpl_namreply(self):
"""353"""
names = self.server_msg.split(" ")[:-1]
channel = self.server_msg_info[-1]
for name in names:
name = "".join([x for x in name if x not in "@+~"])
self.forkbot.users.add_user(channel, name)
for name in names:
op_type = ""
if name.startswith("~"):
op_type = "owner"
elif name.startswith("@"):
op_type = "operator"
elif name.startswith("&"):
op_type = "admin"
elif name.startswith("+"):
op_type = "voice"
if op_type != "":
self.forkbot.users.add_op(channel, "".join(
[x for x in name if x not in "@+&~"]), op_type)
def rpl_endofnames(self):
"""366"""
pass
def process(self, msg_info, msg):
"""processes and delegates the server event to the correct function"""
self.server_msg_info = msg_info
self.server_msg = msg
# find the key given by the server, and
# execute the function that deals with that key
try:
self.num_switch[msg_info[1]]()
except KeyError as ex:
self.forkbot.log(f"Unsupported Numeric: {ex}")
def find_ops(self, nicks):
"""parse the list of nicks given by the server and
register the ops"""
owner, operator, voice, none = [], [], [], []
for nick in nicks:
if nick.startswith("~"):
owner.append(nick[1:])
elif nick.startswith("@"):
operator.append(nick[1:])
elif nick.startswith("+"):
voice.append(nick[1:])
else:
none.append(nick)
ops = {
"owner": owner,
"operator": operator,
"voice": voice,
"none": none
}
return ops
| gpl-3.0 | -7,410,502,775,914,562,000 | 29.625767 | 80 | 0.527244 | false | 3.924528 | false | false | false |
Pragmatismo/TimelapsePi-EasyControl | webcamcap_show_numpy.py | 1 | 8684 | #!/usr/bin/python
import time
import os
import sys
import pygame
import numpy
from PIL import Image, ImageDraw, ImageChops
print("")
print("")
print(" USE l=3 to take a photo every 3 somethings, try a 1000 or 2")
print(" t to take triggered photos ")
print(" cap=/home/pi/folder/ to set caps path other than current dir")
print(" ")
pi_paper = False #updates pi wall paper, use -nopaper to turn it off.
s_val = "10"
c_val = "2"
g_val = "10"
b_val = "15"
x_dim = 1600
y_dim = 896
additonal_commands = "-d/dev/video1 -w"
try:
cappath = os.getcwd()
cappath += "/"
except:
print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
cappath = "./"
print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
loc_settings = "./camera_settings.txt"
try:
with open(loc_settings, "r") as f:
for line in f:
s_item = line.split("=")
if s_item[0] == "s_val":
s_val = s_item[1].split("\n")[0]
elif s_item[0] == "c_val":
c_val = s_item[1].split("\n")[0]
elif s_item[0] == "g_val":
g_val = s_item[1].split("\n")[0]
elif s_item[0] == "b_val":
b_val = s_item[1].split("\n")[0]
elif s_item[0] == "x_dim":
x_dim = s_item[1].split("\n")[0]
elif s_item[0] == "y_dim":
y_dim = s_item[1].split("\n")[0]
elif s_item[0] == "additonal_commands":
additonal_commands = s_item[1].split("\n")[0]
except:
print("No config file for camera, using default")
print("Run cam_config.py to create one")
def photo():
# take and save photo
timenow = time.time()
timenow = str(timenow)[0:10]
filename= "cap_"+str(timenow)+".jpg"
#os.system("uvccapture "+additonal_commands+" -S"+s_val+" -C" + c_val + " -G"+ g_val +" -B"+ b_val +" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
cmd = str("uvccapture "+additonal_commands+" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
print("####")
print("####")
print cmd
print("####")
print("####")
os.system(cmd)
print("Image taken and saved to "+cappath+filename)
if pi_paper == True:
os.system("export DISPLAY=:0 && pcmanfm --set-wallpaper "+cappath+filename)
return filename
if 'wp' in sys.argv or 'wallpaper' in sys.argv:
pi_paper = True
print(" Going to try changing wall paper")
loop = False
trig = False
for argu in sys.argv[1:]:
try:
thearg = str(argu).split('=')[0]
except:
thearg = str(argu)
if thearg == 'cap' or thearg =='cappath':
cappath = str(argu).split('=')[1]
elif thearg == 'l' or thearg == 'looped':
try:
num = int(str(argu).split('=')[1])
except:
print("No speed supplied, taking every 10")
num = 10
loop = True
elif thearg == 't' or thearg == 'TRIGGERED':
trig = True
print(" Saving files to, " + str(cappath))
pygame.init()
display_width = x_dim
display_height = y_dim
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Most recent image')
black = (0,0,0)
white = (255,255,255)
clock = pygame.time.Clock()
crashed = False
import matplotlib.pyplot as plt
def show_pic(imgtaken, x=0,y=0):
gameDisplay.blit(imgtaken, (x,y))
gameDisplay.fill(white)
c_photo = photo()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
b_photo = photo()
pil_b_photo = Image.open(b_photo)
numpy_pic_b = numpy.array(pil_b_photo)
mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
mask2 = numpy_pic_b < numpy_pic - 30
lol = mask + mask2
e_pic = numpy_pic.copy()
num = 0
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
timenow = time.time()
e_photo = str(timenow).split(".")[0]
e_photo= "numpy_"+str(timenow)+".jpg"
num = num + 1
b_photo = c_photo
c_photo = photo()
numpy_pic_b = numpy_pic.copy()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
print numpy_pic.size
#print len(numpy_pic[3])
print "###"
#print numpy_pic[1:,1,1]
#a = np.arange(100)
print "##########"
#numpy_pic[1:500, range(0, len(numpy_pic[2]), 10), 1] = 0
#for x in numpy_pic[1:500, range(0, len(numpy_pic[2])), 1]:
# if x >= 100:
# x = 255
#for x in range(10,170,10):
# mask = numpy_pic < x
# numpy_pic[mask] = 255-x #numpy_pic[mask] + numpy_pic[mask]
#for x in range(200,255,5):
# mask = numpy_pic > x
# numpy_pic[mask] = 0+(x/10) # numpy_pic[mask] / numpy_pic[mask]+(numpy_pic[mask]/numpy_pic[mask])
#print numpy_pic[1:,1,1]
#print numpy_pic.min()
print "###"
#print numpy_pic.shape #Array dimensions
#print numpy_pic.ndim #Number of array dimensions
#print numpy_pic.dtype #Data type of array elements
#print numpy_pic.dtype.name #Name of data type
#print numpy_pic.mean()
#print numpy_pic.max()
#print numpy_pic.min()
#print numpy.info(numpy.ndarray.dtype)
#print numpy_pic.astype(int)
#mask = numpy_pic > numpy_pic_b
#mask = numpy_pic[:, :, 2] > 150
#numpy_pic[mask] = [0, 0, 255]
#lol = numpy_pic +
#mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
#mask2 = numpy_pic_b < numpy_pic - 30
margin = 20
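    # Per-channel change masks between the previous and the current frame:
    # maskr/g/b mark pixels that became darker by more than `margin`,
    # maskr2/g2/b2 mark pixels that became brighter (a crude motion detector).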
maskr = numpy_pic[:, :, 0] < numpy_pic_b[:, :, 0] - margin
maskg = numpy_pic[:, :, 1] < numpy_pic_b[:, :, 1] - margin
maskb = numpy_pic[:, :, 2] < numpy_pic_b[:, :, 2] - margin
maskr2 = numpy_pic[:, :, 0] > numpy_pic_b[:, :, 0] + margin
maskg2 = numpy_pic[:, :, 1] > numpy_pic_b[:, :, 1] + margin
maskb2 = numpy_pic[:, :, 2] > numpy_pic_b[:, :, 2] + margin
#numpy_pic[mask] = [0, 0, 255]
#lol_old = lol
#lol = mask + mask2
#lol = lol + lol_old
persist = 'ohhh'
if persist == 'True':
numpy_pic[maskr] = [255, 0, 0]
numpy_pic[maskg] = [0, 255, 0]
numpy_pic[maskb] = [0, 0, 255]
numpy_pic[maskb2] = [0, 0, 100]
numpy_pic[maskr2] = [100, 0, 0]
numpy_pic[maskg2] = [0, 100, 0]
Image.fromarray(numpy_pic).save(e_photo)
elif persist == 'False':
old_e = e_pic
e_pic = numpy_pic.copy()
e_pic[maskr] = [255, 0, 0]
e_pic[maskg] = [0, 255, 0]
e_pic[maskb] = [0, 0, 255]
e_pic[maskr2] = [100, 0, 0]
e_pic[maskg2] = [0, 100, 0]
e_pic[maskb2] = [0, 0, 100]
show1 = 'waa'
if show1 == '1':
e_pic = ((e_pic/4) - (numpy_pic))*3
e_pic = e_pic / 3 + old_e / 2
elif show1 == 'tripsy':
e_pic = ((e_pic/4) - (numpy_pic))*3
e_pic = e_pic - old_e / 2
elif show1 == 'waa':
e_pic = ((e_pic/4) - (numpy_pic))*3
#e_pic = old_e * 0.8 + e_pic * 0.2
Image.fromarray(e_pic).save(e_photo)
elif persist == 'ohhh':
old_e = e_pic.copy()
mask_b_pic = numpy_pic.copy()
mask_d_pic = numpy_pic.copy()
mask_b_pic[maskr] = [255, 255, 255]
mask_b_pic[maskg] = [255, 255, 255]
mask_b_pic[maskb] = [255, 255, 255]
mask_d_pic[maskr2] = [0, 0, 0]
mask_d_pic[maskg2] = [0, 0, 0]
mask_d_pic[maskb2] = [0, 0, 0]
#e_pic = e_pic/6 + old_e
e_pic = [200, 200, 0]
#e_pic = e_pic/2 - ((mask_d_pic) + (mask_b_pic))
#e_pic = e_pic/2 + ((mask_d_pic) + (mask_b_pic))
#choose one of the following
#e_pic = mask_d_pic #shows when pixel is darker than it was
#e_pic = mask_b_pic #shows when pixel is lighter than prior
e_pic = mask_d_pic - mask_b_pic #black execpt for movement
e_pic = mask_b_pic / (mask_d_pic / 100) #black execpt for movement
#e_pic = mask_d_pic + mask_b_pic #looks odd
Image.fromarray(e_pic).save(e_photo)
#plt.imshow(lol)
#plt.show()
#Image.fromarray(numpy_pic).save(e_photo)
onscreen = pygame.image.load(e_photo)
gameDisplay.blit(onscreen, (0,0))
pygame.display.update()
if trig == True:
print("Waiting for input before taking next image...")
tp = raw_input("press return to take picture; ")
if tp == "q":
print("---bye!")
exit()
clock.tick(20)
if loop == True:
pygame.time.wait(num)
clock.tick(20)
elif trig == False and loop == False:
crashed = True
#while True:
#pygame.time.wait(1000)
#clock.tick(20)
pygame.quit()
quit()
| gpl-2.0 | -2,106,105,906,145,419,300 | 30.23741 | 168 | 0.539843 | false | 2.838836 | false | false | false |
mbj4668/pyang | pyang/xpath.py | 1 | 12087 | from . import xpath_lexer
from . import xpath_parser
from .error import err_add
from .util import prefix_to_module, search_data_node, data_node_up
from .syntax import re_identifier
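# XPath function signatures: each entry maps a function name to
# (argument types, return type). A trailing '?' marks the last argument as
# optional and a trailing '*' allows any number of additional arguments
# (see chk_xpath_function below).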
core_functions = {
'last': ([], 'number'),
'position': ([], 'number'),
'count': (['node-set'], 'number'),
'id': (['object'], 'node-set'),
'local-name': (['node-set', '?'], 'string'),
'namespace-uri': (['node-set', '?'], 'string'),
'name': (['node-set', '?'], 'string'),
'string': (['object'], 'string'),
'concat': (['string', 'string', '*'], 'string'),
'starts-with': (['string', 'string'], 'boolean'),
'contains': (['string', 'string'], 'boolean'),
'substring-before': (['string', 'string'], 'string'),
'substring-after': (['string', 'string'], 'string'),
'substring': (['string', 'number', 'number', '?'], 'string'),
'string-length': (['string', '?'], 'number'),
'normalize-space': (['string', '?'], 'string'),
'translate': (['string', 'string', 'string'], 'string'),
'boolean': (['object'], 'boolean'),
'not': (['boolean'], 'boolean'),
'true': ([], 'boolean'),
'false': ([], 'boolean'),
'lang': (['string'], 'boolean'),
'number': (['object'], 'number'),
'sum': (['node-set'], 'number'),
'floor': (['number'], 'number'),
'ceiling': (['number'], 'number'),
'round': (['number'], 'number'),
}
yang_xpath_functions = {
'current': ([], 'node-set')
}
yang_1_1_xpath_functions = {
'bit-is-set': (['node-set', 'string'], 'boolean'),
'enum-value': (['string'], 'number'),
'deref': (['node-set'], 'node-set'),
'derived-from': (['node-set', 'qstring'], 'boolean'),
'derived-from-or-self': (['node-set', 'qstring'], 'boolean'),
're-match': (['string', 'string'], 'boolean'),
}
extra_xpath_functions = {
'deref': (['node-set'], 'node-set'), # pyang extension for 1.0
}
def add_extra_xpath_function(name, input_params, output_param):
extra_xpath_functions[name] = (input_params, output_param)
def add_prefix(prefix, s):
"Add `prefix` to all unprefixed names in `s`"
# tokenize the XPath expression
toks = xpath_lexer.scan(s)
# add default prefix to unprefixed names
toks2 = [_add_prefix(prefix, tok) for tok in toks]
# build a string of the patched expression
ls = [x.value for x in toks2]
return ''.join(ls)
def _add_prefix(prefix, tok):
if tok.type == 'name':
m = xpath_lexer.re_ncname.match(tok.value)
if m.group(2) is None:
tok.value = prefix + ':' + tok.value
return tok
## TODO: validate must/when after deviate
# node is the initial context node or None if it is not known
def v_xpath(ctx, stmt, node):
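    # Parse the statement's XPath argument (cached on stmt.i_xpath) and check
    # it against the schema; on lexer or parser errors an XPATH_SYNTAX_ERROR
    # is reported and i_xpath is left as None.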
try:
if hasattr(stmt, 'i_xpath') and stmt.i_xpath is not None:
q = stmt.i_xpath
else:
q = xpath_parser.parse(stmt.arg)
stmt.i_xpath = q
chk_xpath_expr(ctx, stmt.i_orig_module, stmt.pos, node, node, q, None)
except xpath_lexer.XPathError as e:
err_add(ctx.errors, stmt.pos, 'XPATH_SYNTAX_ERROR', e.msg)
stmt.i_xpath = None
except SyntaxError as e:
err_add(ctx.errors, stmt.pos, 'XPATH_SYNTAX_ERROR', e.msg)
stmt.i_xpath = None
# mod is the (sub)module where the stmt is defined, which we use to
# resolve prefixes.
def chk_xpath_expr(ctx, mod, pos, initial, node, q, t):
if isinstance(q, list):
chk_xpath_path(ctx, mod, pos, initial, node, q)
elif isinstance(q, tuple):
if q[0] == 'absolute':
chk_xpath_path(ctx, mod, pos, initial, 'root', q[1])
elif q[0] == 'relative':
chk_xpath_path(ctx, mod, pos, initial, node, q[1])
elif q[0] == 'union':
for qa in q[1]:
chk_xpath_path(ctx, mod, pos, initial, node, qa)
elif q[0] == 'comp':
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'arith':
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'bool':
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'negative':
chk_xpath_expr(ctx, mod, pos, initial, node, q[1], None)
elif q[0] == 'function_call':
chk_xpath_function(ctx, mod, pos, initial, node, q[1], q[2])
elif q[0] == 'path_expr':
chk_xpath_expr(ctx, mod, pos, initial, node, q[1], t)
elif q[0] == 'path': # q[1] == 'filter'
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'var':
# NOTE: check if the variable is known; currently we don't
# have any variables in YANG xpath expressions
err_add(ctx.errors, pos, 'XPATH_VARIABLE', q[1])
elif q[0] == 'literal':
# kind of hack to detect qnames, and mark the prefixes
# as being used in order to avoid warnings.
s = q[1]
if s[0] == s[-1] and s[0] in ("'", '"'):
s = s[1:-1]
i = s.find(':')
# make sure there is just one : present
# FIXME: more colons should possibly be reported, instead
if i != -1 and s.find(':', i + 1) == -1:
prefix = s[:i]
tag = s[i + 1:]
if (re_identifier.search(prefix) is not None and
re_identifier.search(tag) is not None):
# we don't want to report an error; just mark the
# prefix as being used.
my_errors = []
prefix_to_module(mod, prefix, pos, my_errors)
for pos0, code, arg in my_errors:
if code == 'PREFIX_NOT_DEFINED' and t == 'qstring':
# we know for sure that this is an error
err_add(ctx.errors, pos0,
'PREFIX_NOT_DEFINED', arg)
else:
# this may or may not be an error;
# report a warning
err_add(ctx.errors, pos0,
'WPREFIX_NOT_DEFINED', arg)
def chk_xpath_function(ctx, mod, pos, initial, node, func, args):
signature = None
if func in core_functions:
signature = core_functions[func]
elif func in yang_xpath_functions:
signature = yang_xpath_functions[func]
elif mod.i_version != '1' and func in yang_1_1_xpath_functions:
signature = yang_1_1_xpath_functions[func]
elif ctx.strict and func in extra_xpath_functions:
err_add(ctx.errors, pos, 'STRICT_XPATH_FUNCTION', func)
return None
elif not ctx.strict and func in extra_xpath_functions:
signature = extra_xpath_functions[func]
if signature is None:
err_add(ctx.errors, pos, 'XPATH_FUNCTION', func)
return None
# check that the number of arguments are correct
nexp = len(signature[0])
nargs = len(args)
if nexp == 0:
if nargs != 0:
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, nexp, nargs))
elif signature[0][-1] == '?':
if nargs != (nexp - 1) and nargs != (nexp - 2):
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, "%s-%s" % (nexp - 2, nexp - 1), nargs))
elif signature[0][-1] == '*':
if nargs < (nexp - 1):
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, "at least %s" % (nexp - 1), nargs))
elif nexp != nargs:
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, nexp, nargs))
# FIXME implement checks from check_function()
# check the arguments - FIXME check type
i = 0
args_signature = signature[0][:]
for arg in args:
chk_xpath_expr(ctx, mod, pos, initial, node, arg, args_signature[i])
if args_signature[i] == '*':
args_signature.append('*')
i = i + 1
return signature[1]
def chk_xpath_path(ctx, mod, pos, initial, node, path):
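    # Check the location path one step at a time: 'child' and 'parent' steps
    # are resolved against the schema tree (node may be 'root', a data node,
    # or None when the expression is checked before the tree is expanded).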
if len(path) == 0:
return
head = path[0]
if head[0] == 'var':
# check if the variable is known as a node-set
# currently we don't have any variables, so this fails
err_add(ctx.errors, pos, 'XPATH_VARIABLE', head[1])
elif head[0] == 'function_call':
func = head[1]
args = head[2]
rettype = chk_xpath_function(ctx, mod, pos, initial, node, func, args)
if rettype is not None:
# known function, check that it returns a node set
if rettype != 'node-set':
err_add(ctx.errors, pos, 'XPATH_NODE_SET_FUNC', func)
if func == 'current':
chk_xpath_path(ctx, mod, pos, initial, initial, path[1:])
elif head[0] == 'step':
axis = head[1]
nodetest = head[2]
preds = head[3]
node1 = None
if axis == 'self':
pass
elif axis == 'child' and nodetest[0] == 'name':
prefix = nodetest[1]
name = nodetest[2]
if prefix is None:
if initial is None:
pmodule = None
elif initial.keyword == 'module':
pmodule = initial
else:
pmodule = initial.i_module
else:
pmodule = prefix_to_module(mod, prefix, pos, ctx.errors)
# if node and initial are None, it means we're checking an XPath
# expression when it is defined in a grouping or augment, i.e.,
# when the full tree is not expanded. in this case we can't check
# the paths
if pmodule is not None and node is not None and initial is not None:
if node == 'root':
children = pmodule.i_children
else:
children = getattr(node, 'i_children', None) or []
child = search_data_node(children, pmodule.i_modulename, name)
if child is None and node == 'root':
err_add(ctx.errors, pos, 'XPATH_NODE_NOT_FOUND2',
(pmodule.i_modulename, name, pmodule.arg))
elif child is None and node.i_module is not None:
err_add(ctx.errors, pos, 'XPATH_NODE_NOT_FOUND1',
(pmodule.i_modulename, name,
node.i_module.i_modulename, node.arg))
elif child is None:
err_add(ctx.errors, pos, 'XPATH_NODE_NOT_FOUND2',
(pmodule.i_modulename, name, node.arg))
elif (getattr(initial, 'i_config', None) is True
and getattr(child, 'i_config', None) is False):
err_add(ctx.errors, pos, 'XPATH_REF_CONFIG_FALSE',
(pmodule.i_modulename, name))
else:
node1 = child
elif axis == 'parent' and nodetest == ('node_type', 'node'):
if node is None:
pass
elif node == 'root':
err_add(ctx.errors, pos, 'XPATH_PATH_TOO_MANY_UP', ())
else:
p = data_node_up(node)
if p is None:
err_add(ctx.errors, pos, 'XPATH_PATH_TOO_MANY_UP', ())
else:
node1 = p
else:
# we can't validate the steps on other axis, but we can validate
# functions etc.
pass
for p in preds:
chk_xpath_expr(ctx, mod, pos, initial, node1, p, None)
chk_xpath_path(ctx, mod, pos, initial, node1, path[1:])
| isc | -2,654,624,637,892,220,000 | 41.410526 | 80 | 0.514933 | false | 3.653869 | false | false | false |
simone-campagna/zapper | lib/python/zapper/lock_file.py | 1 | 1913 | #!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import os
import time
import errno
import fcntl
import contextlib
@contextlib.contextmanager
def Lock(filename, mode="r", blocking=True, timeout=10):
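    """Open `filename` and hold an exclusive fcntl lock on it for the duration
    of the with-block. With blocking=False each attempt uses LOCK_NB; failed
    attempts are retried roughly every 0.1 seconds for up to `timeout` seconds
    before the file is yielded anyway.
    """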
# enter
lock_op = fcntl.LOCK_EX
if not blocking:
lock_op += fcntl.LOCK_NB
count = 0
interval = 0.1
if timeout is not None:
count = int(round(timeout/interval, 0))
if count <= 0:
count = 1
with open(filename, mode) as f:
for i in range(count):
try:
#fcntl.fcntl(self.fileno(), lock_op, os.O_NDELAY)
                fcntl.lockf(f.fileno(), lock_op)
                break  # lock acquired
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
if timeout:
time.sleep(interval)
continue
except:
import traceback
traceback.print_exc()
time.sleep(interval)
yield f
#exit
fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
if __name__ == "__main__":
import sys
with Lock('a.lock', 'a') as f_out:
for arg in sys.argv:
f_out.write(arg + '\n')
f_out.flush()
print("sleeping...")
time.sleep(10)
print("done.")
f_out.write("finito!\n")
| apache-2.0 | 7,631,584,786,992,049,000 | 27.132353 | 74 | 0.590173 | false | 3.833667 | false | false | false |
LinguList/server | app/settings/sound_classes/asjp/asjp.py | 1 | 3293 | #! /usr/bin/env python
from __future__ import division,print_function
from lingpy.data.derive import compile_model
from scipy.spatial.distance import squareform
from time import sleep
from pickle import dump
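# Build the pairwise scoring dictionary for the ASJP sound-class model: read
# the consonant ('score') and vowel ('vows_score') matrices, add scores for
# the special symbols (gaps, swaps, missing values and digit classes), then
# pickle the scorer and compile the lingpy model.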
asjp = {}
score = open('score','r').read()
score = score.split('\n')
del score[-1]
dicto = {}
for line in score:
lin = line.split('\t')
dicto[lin[0]] = lin[1:]
letters = []
for i in range(len(score)):
score[i] = score[i].split('\t')
letters.append(score[i][0])
del score[i][0]
matrix = []
for i in range(len(score)):
for l in letters:
if i < len(dicto[l]):
matrix.append(float(dicto[l][i]))
matrix = squareform(matrix)
consonants = ['p'] + letters
consonant_matrix = matrix.copy()
score = open('vows_score','r').read()
score = score.split('\n')
del score[-1]
dicto = {}
for line in score:
lin = line.split('\t')
dicto[lin[0]] = lin[1:]
letters = []
for i in range(len(score)):
score[i] = score[i].split('\t')
letters.append(score[i][0])
del score[i][0]
matrix = []
for i in range(len(score)):
for l in letters:
if i < len(dicto[l]):
matrix.append(float(dicto[l][i]))
matrix = squareform(matrix)
vowel_matrix = matrix.copy()
vowels = ['i'] + letters
for i in range(len(vowel_matrix)):
vowel_matrix[i][i] = 40
for i in range(len(consonant_matrix)):
consonant_matrix[i][i] = 40
for i in range(31):
for j in range(31):
asjp[consonants[i],consonants[j]] = consonant_matrix[i][j]
for i in range(7):
for j in range(7):
asjp[vowels[i],vowels[j]] = vowel_matrix[i][j]
for l in vowels:
asjp[l,'X'] = 0
asjp['X',l] = 0
for l in consonants:
asjp[l,'X'] = 0
asjp['X',l] = 0
asjp['X','X'] = 0
for v in vowels:
for c in consonants:
asjp[v,c] = -20
asjp[c,v] = -20
for key in asjp.keys():
if asjp[key] == 0:
asjp[key] = 0
else:
asjp[key] = int(asjp[key]+0.5)
for v1 in vowels:
for v2 in vowels:
asjp[v1,v2] = int(asjp[v1,v2] * 0.25 + 0.5) + 10
asjp['i','y'] = -2
asjp['y','i'] = -2
asjp['u','w'] = -2
asjp['w','u'] = -2
asjp['u','v'] = -4
asjp['v','u'] = -4
asjp['u','f'] = -6
asjp['f','u'] = -6
keys = []
for keyA,keyB in asjp.keys():
keys.append((keyA,keyB))
for keyA,keyB in keys:
asjp[keyA,'+'] = -20
asjp['+',keyB] = -20
asjp[keyA,'0'] = 0
asjp['0',keyB] = 0
asjp['X','+'] = -5
asjp['+','X'] = -5
asjp['+','+'] = 0 # swaps
asjp['0','0'] = 0 # missing values
asjp['X','0'] = 0
asjp['0','X'] = 0
for i in '0123456':
for j in '0123456':
if i == j:
asjp[i,j] = 10
else:
asjp[i,j] = 5
keys = []
for keyA,keyB in asjp.keys():
keys.append((keyA,keyB))
for keyA,keyB in keys:
for i in '123456':
if keyA not in '123456' and keyB not in '123456':
asjp[keyA,i] = -20
asjp[i,keyB] = -20
asjp[keyA,'_'] = -50
asjp['_',keyB] = -50
asjp['_','_'] = 0
for x in asjp.keys():
asjp[x] = asjp[x] / 4.0
if asjp[x] > 0 and asjp[x] != 10:
asjp[x] += 0.75 * asjp[x]
elif asjp[x] < 0:
asjp[x] += 0.75 * asjp[x]
out = open('scorer.bin','wb')
dump(asjp,out)
out.close()
compile_model('asjp')
print("[i] Compilation of the ASJP model was successful!")
sleep(1)
| gpl-2.0 | 3,162,807,456,613,274,600 | 19.710692 | 66 | 0.54631 | false | 2.485283 | false | false | false |
wevoice/wesub | dev_settings_test.py | 1 | 2112 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from dev_settings import *
INSTALLED_APPS += (
'django_nose',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
CACHE_PREFIX = "testcache"
CACHE_TIMEOUT = 60
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_PLUGINS = ['utils.test_utils.plugin.UnisubsTestPlugin']
CELERY_ALWAYS_EAGER = True
YOUTUBE_CLIENT_ID = 'test-youtube-id'
YOUTUBE_CLIENT_SECRET = 'test-youtube-secret'
YOUTUBE_API_KEY = 'test-youtube-api-key'
API_ALWAYS_USE_FUTURE = True
# Use MD5 password hashing, other algorithms are purposefully slow to increase
# security. Also include the SHA1 hasher since some of the tests use it.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
)
# Let the nose CaptureLogging plugin handle logging. It doesn't display
# logging at all, except if there's a test failure.
del LOGGING
NOSE_ARGS = ['--logging-filter=test_steps, -remote_connection, '
'-selenium.webdriver.remote.remote_connection',
'--with-xunit', '--logging-level=ERROR',
'--xunit-file=nosetests.xml',
]
try:
from dev_settings_test_local import *
except ImportError:
pass
| agpl-3.0 | -7,629,318,859,687,858,000 | 30.058824 | 78 | 0.69697 | false | 3.622642 | true | false | false |
robotican/ric | ric_board/scripts/RiCConfigurator/GUI/SimulationWindow.py | 1 | 5700 | import GUI.MainWindow
__author__ = 'tom'
from PyQt4.QtGui import *
from GUI.Schemes.gazeboGui import Ui_gazebo_gui
from BAL.Interface.DeviceFrame import SERVO, BATTERY, SWITCH, IMU, PPM, GPS, RELAY, URF, CLOSE_LOP_ONE, CLOSE_LOP_TWO, \
OPEN_LOP, DIFF_CLOSE, DIFF_OPEN, EX_DEV, HOKUYO, OPRNNI, USBCAM, DIFF_CLOSE_FOUR, ROBOT_MODEL, SLAM, Keyboard, \
JOYSTICK, SMOOTHER
import rospkg
import pickle
from PyQt4.QtCore import Qt
from lxml.etree import Element, SubElement
class SimulationWindow(QDialog, Ui_gazebo_gui):
def __init__(self, parent=None):
super(SimulationWindow, self).__init__(parent)
self.setupUi(self)
self._devs = []
self.loadButton.clicked.connect(self.loadEvent)
self.launchButton.clicked.connect(self.launchEvent)
self.devList.itemClicked.connect(self.listChangeEvent)
self.loadFile()
self.showSimDetail()
def listChangeEvent(self, item):
dev = self._devs[self.devList.row(item)]
if item.checkState() > 0:
dev[1] = True
else:
dev[1] = False
def loadFile(self):
self._devs = []
pkg = rospkg.RosPack().get_path('ric_board')
fileName = QFileDialog.getOpenFileName(self, self.tr("Open file"), "%s/DATA" % pkg, self.tr("RiC File (*.RIC)"))
if fileName == '': return
devices = pickle.load(open(fileName))[2]
self.arrangeDevices(devices)
def arrangeDevices(self, devices):
for dev in devices:
if dev['type'] in [DIFF_CLOSE, IMU, OPRNNI, HOKUYO, USBCAM, URF]:
self._devs.append([dev, True])
def showSimDetail(self):
for dev in self._devs:
if dev[0]['type'] == OPRNNI:
listItem = QListWidgetItem('OpenniCamera')
else:
listItem = QListWidgetItem(dev[0]['name'])
listItem.setCheckState(Qt.Checked)
self.devList.addItem(listItem)
def clearLst(self):
size = self.devList.count()
for i in xrange(size):
self.devList.takeItem(0)
def loadEvent(self):
self.loadFile()
self.clearLst()
self.showSimDetail()
def launchEvent(self):
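        # Build a roslaunch XML tree: include Gazebo's empty_world.launch, then
        # spawn the requested number of komodo robots with sensor flags taken
        # from the devices checked in the list.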
root = Element('launch')
SubElement(root, 'arg', {
'name': 'paused',
'default': 'false'
})
SubElement(root, 'arg', {
'name': 'use_sim_time',
'default': 'true'
})
SubElement(root, 'arg', {
'name': 'gui',
'default': 'true'
})
SubElement(root, 'arg', {
'name': 'headless',
'default': 'false'
})
SubElement(root, 'arg', {
'name': 'debug',
'default': 'false'
})
world = SubElement(root, 'include', dict(file='$(find gazebo_ros)/launch/empty_world.launch'))
SubElement(world, 'arg', {
'name': 'debug',
'value': '$(arg debug)'
})
SubElement(world, 'arg', {
'name': 'gui',
'value': '$(arg gui)'
})
SubElement(world, 'arg', {
'name': 'paused',
'value': '$(arg paused)'
})
SubElement(world, 'arg', {
'name': 'use_sim_time',
'value': '$(arg use_sim_time)'
})
SubElement(world, 'arg', {
'name': 'headless',
'value': '$(arg headless)'
})
SubElement(root, 'param', {
'name': 'robot_description',
'command': "$(find xacro)/xacro.py '$(find ric_gazebo)/robots/komodo/komodo.xacro' ns:='init' color_name:='Grey'"
})
haveCam = 'false'
haveOpenNi = 'false'
haveLaser = 'false'
haveUrf = 'false'
haveDiff = 'false'
haveImu = 'false'
for dev in self._devs:
if dev[1]:
if dev[0]['type'] == DIFF_CLOSE: haveDiff = 'true'
if dev[0]['type'] == IMU: haveImu = 'true'
if dev[0]['type'] == OPRNNI: haveOpenNi = 'true'
if dev[0]['type'] == HOKUYO: haveLaser = 'true'
if dev[0]['type'] == USBCAM: haveCam = 'true'
if dev[0]['type'] == URF: haveUrf = 'true'
amount = self.numberOfRobotsSpinBox.value()
for i in xrange(amount):
robotFile = SubElement(root, 'include', {'file': '$(find ric_gazebo)/launch/spawn_komodo.launch'})
SubElement(robotFile, 'arg', dict(name='name', value='komodo_%d' % (i + 1)))
SubElement(robotFile, 'arg', dict(name='color', value='White'))
SubElement(robotFile, 'arg', dict(name='x', value='0.0'))
SubElement(robotFile, 'arg', dict(name='y', value='%d.0' % i))
SubElement(robotFile, 'arg', dict(name='z', value='0.1'))
SubElement(robotFile, 'arg', dict(name='R', value='0.0'))
SubElement(robotFile, 'arg', dict(name='P', value='0.0'))
SubElement(robotFile, 'arg', dict(name='Y', value='0.0'))
SubElement(robotFile, 'arg', dict(name='arm_camera', value='true'))
SubElement(robotFile, 'arg', dict(name='front_camera', value=haveCam))
SubElement(robotFile, 'arg', dict(name='isDiff', value=haveDiff))
SubElement(robotFile, 'arg', dict(name='depth_camera', value=haveOpenNi))
SubElement(robotFile, 'arg', dict(name='laser_scanner', value=haveLaser))
SubElement(robotFile, 'arg', dict(name='urf', value=haveUrf))
SubElement(robotFile, 'arg', dict(name='imu', value=haveImu))
open('/home/tom/test.launch', 'w').write(GUI.MainWindow.prettify(root))
| bsd-3-clause | -2,762,979,084,064,864,000 | 35.538462 | 126 | 0.542632 | false | 3.490508 | false | false | false |
andrey-yemelyanov/competitive-programming | cp-book/ch1/adhoc/chess/10849_MoveTheBishop.py | 1 | 1456 | # Problem name: 10849 Move the bishop
# Problem url: https://uva.onlinejudge.org/external/108/10849.pdf
# Author: Andrey Yemelyanov
import sys
import math
WHITE, BLACK = 0, 1
INFINITY = -1
def main():
n_test_cases = int(sys.stdin.readline().strip())
for i in range(n_test_cases):
sys.stdin.readline()
n_tests = int(sys.stdin.readline().strip())
N = int(sys.stdin.readline().strip())
for j in range(n_tests):
from_row, from_col, to_row, to_col = [int(t) for t in sys.stdin.readline().split()]
n_moves = count_bishop_moves(from_row, from_col, to_row, to_col)
if n_moves == INFINITY:
print("no move")
else:
print(n_moves)
def count_bishop_moves(from_row, from_col, to_row, to_col):
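    # 0 moves if source and target coincide; unreachable ("no move") if the
    # squares have different colours; 1 move if they share a diagonal;
    # otherwise 2 moves always suffice.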
if from_row == to_row and from_col == to_col:
return 0;
elif square_color(from_row, from_col) != square_color(to_row, to_col):
return INFINITY
elif on_the_same_diagonal(from_row, from_col, to_row, to_col):
return 1
else:
return 2
def on_the_same_diagonal(row1, col1, row2, col2):
return abs(row1 - row2) == abs(col1 - col2)
def square_color(row, col):
if row % 2 == 0:
if col % 2 == 0:
return WHITE
else:
return BLACK
else:
if col % 2 == 0:
return BLACK
else:
return WHITE
if __name__=="__main__":
main()
| mit | -4,838,296,833,532,812,000 | 27 | 95 | 0.56044 | false | 3.137931 | false | false | false |
zfrxiaxia/Code-zfr | 计蒜客/比赛/2015/light.py | 1 | 1148 | # -*- coding: utf-8 -*-
"""
Created on Sat May 28 19:17:42 2016
@author: AtoZ
"""
N,M = raw_input().split()
N,M = int(N),int(M)
while(1):
i0 = 0
while(i0<2**(M*N)):
bit = [0]*M*N
t_bit = bin(i0)
lt_bit = len(t_bit)
for j0 in range(lt_bit-2):
bit[j0] = int(t_bit[j0+2])
i0 +=1
print bit
"""
N,M = raw_input().split()
N,M = int(N),int(M)
L0 = []
L = []
time = 0
def change(h,l):
global time
time += 1
if h == N-1:
pass
else:
L[h*M+l] = not L[h*M+l]
L[(h+1)*M+l] = not L[(h+1)*M+l]
if l != 0:
L[(h+1)*M+l-1] = not L[(h+1)*M+l-1]
if l != M-1:
L[(h+1)*M+l+1] = not L[(h+1)*M+l+1]
if h != N-2:
L[(h+2)*M+l] = not L[(h+2)*M+l]
while(1):
try:
temp = raw_input()
L0.append(temp.split())
except:
break
for i in range(N):
for j in range(M):
L.append(int(L0[i][j]))
LL = [1]*N*M
j = 0
while(j<N):
for i in range(M):
ii = i+j*M
if L[ii]==0:
change(j,i)
j += 1
if L==LL:
print time
else:
print "no solution"
""" | gpl-3.0 | 7,432,343,685,599,265,000 | 16.953125 | 47 | 0.419861 | false | 2.314516 | false | false | false |