| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
import math
from readability.exceptions import ReadabilityException
class Result:
def __init__(self, score, grade_levels, ages):
self.score = score
self.grade_levels = grade_levels
self.ages = ages
def __str__(self):
return "score: {}, grade_levels: {}, ages: {}". \
format(self.score, self.grade_levels, self.ages)
class ARI:
def __init__(self, stats):
self._stats = stats
if stats.num_words < 100:
raise ReadabilityException('100 words required.')
def score(self):
score = self._score()
return Result(
score=score,
grade_levels=self._grade_levels(score),
ages=self._ages(score))
def _score(self):
s = self._stats
letters_per_word = s.num_letters / s.num_words
words_per_sent = s.num_words / s.num_sentences
return 4.71 * letters_per_word + 0.5 * words_per_sent - 21.43
def _grade_levels(self, score):
score = math.ceil(score)
if score <= 1:
return ['K']
elif score <= 2:
return ['1', '2']
elif score <= 3:
return ['3']
elif score <= 4:
return ['4']
elif score <= 5:
return ['5']
elif score <= 6:
return ['6']
elif score <= 7:
return ['7']
elif score <= 8:
return ['8']
elif score <= 9:
return ['9']
elif score <= 10:
return ['10']
elif score <= 11:
return ['11']
elif score <= 12:
return ['12']
elif score <= 13:
return ['college']
else:
return ['college_graduate']
def _ages(self, score):
score = math.ceil(score)
if score <= 1:
return [5, 6]
elif score <= 2:
return [6, 7]
elif score <= 3:
return [7, 9]
elif score <= 4:
return [9, 10]
elif score <= 5:
return [10, 11]
elif score <= 6:
return [11, 12]
elif score <= 7:
return [12, 13]
elif score <= 8:
return [13, 14]
elif score <= 9:
return [14, 15]
elif score <= 10:
return [15, 16]
elif score <= 11:
return [16, 17]
elif score <= 12:
return [17, 18]
elif score <= 13:
return [18, 24]
else:
return [24, 100]
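# Usage sketch (hedged): the real statistics object comes from elsewhere in
# py-readability-metrics; the stand-in below only shows the attributes ARI
# actually reads. Texts under 100 words raise ReadabilityException.
# class _Stats:
#     num_letters, num_words, num_sentences = 450, 120, 8
# print(ARI(_Stats()).score())  # -> roughly score: 3.73, grade_levels: ['4'], ages: [9, 10]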
|
cdimascio/py-readability-metrics
|
readability/scorers/ari.py
|
Python
|
mit
| 2,533
|
"""
__graph_MT_pre__PythonRef.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
_______________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_pre__PythonRef(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 173, 91
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([209.0, 88.0, 209.0, 88.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([38.0, 38.0, 209.0, 127.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'cyan')
self.gf5 = GraphicalForm(drawing, h, "gf5")
self.graphForms.append(self.gf5)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([105.0, 60.0, 105.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_pre__PythonRef', width = '0', justify= 'left', stipple='' )
self.gf29 = GraphicalForm(drawing, h, 'gf29', fontObject=font)
self.graphForms.append(self.gf29)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_pre__PythonRef
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/graph_MT_pre__PythonRef.py
|
Python
|
mit
| 2,620
|
"""
TriggersManager manages the instances of all the triggers in Linger
"""
from future.utils import itervalues
from LingerManagers.LingerBaseManager import LingerBaseManager
class TriggersManager(LingerBaseManager):
"""TriggersManager loads possible linger triggers,
and manages instances of them according to configuration"""
def __init__(self, configuration):
super(TriggersManager, self).__init__(configuration['dir_paths']['Triggers'])
self.configuration = configuration
self.manager_type = "Triggers"
def start(self):
"""
Start all the triggers
"""
for trigger_item in itervalues(self.plugin_instances):
if trigger_item.enabled is True:
trigger_item.start()
def stop(self):
"""
Stops all the triggers
"""
for trigger_item in itervalues(self.plugin_instances):
trigger_item.stop()
def set_enabled(self, trigger_uuid):
"""
Sets a given trigger as enabled
"""
self.plugin_instances[trigger_uuid].enabled = True
def create_trigger(self, configuration):
"""
Creates a trigger from configuration
"""
configuration_to_send = configuration.copy()
configuration_to_send.update(self.configuration)
trigger_instance = self.loaded_plugins_by_types[configuration["subtype"]].get_instance(configuration_to_send)
self.plugin_instances[configuration["uuid"]] = trigger_instance
def register_action_to_trigger(self, trigger_uuid, action):
"""
Registers an action to be associated with a trigger
"""
self.plugin_instances[trigger_uuid].register_action(action)
def get_trigger_labels_of_actions(self, trigger_uuid):
"""
Returns a labels list of all actions associated with a trigger
"""
labels = [action.label for action in self.plugin_instances[trigger_uuid].actions.values()]
return labels
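# Usage sketch (hedged): the full configuration schema lives elsewhere in
# Linger; the keys shown are only the ones this class itself reads, and the
# 'IntervalTrigger' subtype is a hypothetical plugin name.
# manager = TriggersManager({'dir_paths': {'Triggers': 'LingerPlugins/Triggers'}})
# manager.create_trigger({'subtype': 'IntervalTrigger', 'uuid': 'trigger-1'})
# manager.set_enabled('trigger-1')
# manager.start()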
|
GreenBlast/Linger
|
LingerManagers/TriggersManager.py
|
Python
|
mit
| 2,019
|
from connection import connect
import argparse
import ConfigParser
import sys
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('answers_file', nargs=1, type=argparse.FileType('r'), default=sys.stdin, help="TSV file or stdin containing assignment answers")
parser.add_argument('config_file', type=str, help="Config file containing parameters to spin the batch")
args = parser.parse_args()
config = ConfigParser.ConfigParser()
config.read(args.config_file)
mtc = connect(config.get('default', 'target'))
answers_file = pd.read_csv(args.answers_file[0], sep='\t')
for assignmentId, answer in zip(answers_file['assignmentId'], answers_file[config.get('default', 'answer_field')]):
while True:
try:
# Note: Python 2.x users should use raw_input, the equivalent of 3.x's input
print "Answer: ", answer
response = raw_input("Reject assignment (y/n)?")
except ValueError:
print("Sorry, I didn't understand that.")
#better try again... Return to the start of the loop
continue
else:
if response == 'y' or response == 'Y':
print "Rejected"
mtc.reject_assignment(assignmentId)
break
elif response == 'n' or response == 'N':
print "Not rejected"
break
else:
continue
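# Example config file (hedged sketch; only the options this script reads):
# [default]
# target = sandbox
# answer_field = answer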
|
arunchaganty/kbp-online
|
turkApi/reject_assignments.py
|
Python
|
mit
| 1,557
|
# -*- coding: utf-8 -*-
import datetime
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
try:
from django.utils import lorem_ipsum
except ImportError:
# Support Django < 1.8
from django.contrib.webdesign import lorem_ipsum
import os
import random
import re
import string
import sys
from decimal import Decimal
if sys.version_info[0] < 3:
str_ = unicode
else:
str_ = str
# backporting os.path.relpath, only available in python >= 2.6
try:
relpath = os.path.relpath
except AttributeError:
def relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.curdir
return os.path.join(*rel_list)
class Generator(object):
coerce_type = staticmethod(lambda x: x)
empty_value = None
empty_p = 0
def __init__(self, empty_p=None, coerce=None):
if empty_p is not None:
self.empty_p = empty_p
if coerce:
self.coerce_type = coerce
def coerce(self, value):
return self.coerce_type(value)
def generate(self):
raise NotImplementedError
def get_value(self):
if random.random() < self.empty_p:
return self.empty_value
value = self.generate()
return self.coerce(value)
def __call__(self):
return self.get_value()
class StaticGenerator(Generator):
def __init__(self, value, *args, **kwargs):
self.value = value
super(StaticGenerator, self).__init__(*args, **kwargs)
def generate(self):
return self.value
class CallableGenerator(Generator):
def __init__(self, value, args=None, kwargs=None, *xargs, **xkwargs):
self.value = value
self.args = args or ()
self.kwargs = kwargs or {}
super(CallableGenerator, self).__init__(*xargs, **xkwargs)
def generate(self):
return self.value(*self.args, **self.kwargs)
class NoneGenerator(Generator):
def generate(self):
return self.empty_value
class StringGenerator(Generator):
coerce_type = str_
singleline_chars = string.ascii_letters + u' '
multiline_chars = singleline_chars + u'\n'
def __init__(self, chars=None, multiline=False, min_length=1, max_length=1000, *args, **kwargs):
assert min_length >= 0
assert max_length >= 0
self.min_length = min_length
self.max_length = max_length
if chars is None:
if multiline:
self.chars = self.multiline_chars
else:
self.chars = self.singleline_chars
else:
self.chars = chars
super(StringGenerator, self).__init__(*args, **kwargs)
def generate(self):
length = random.randint(self.min_length, self.max_length)
value = u''
for x in range(length):
value += random.choice(self.chars)
return value
class SlugGenerator(StringGenerator):
def __init__(self, chars=None, *args, **kwargs):
if chars is None:
chars = string.ascii_lowercase + string.digits + '-'
super(SlugGenerator, self).__init__(chars, multiline=False, *args, **kwargs)
class LoremGenerator(Generator):
coerce_type = str_
common = True
count = 3
method = 'b'
def __init__(self, count=None, method=None, common=None, max_length=None, *args, **kwargs):
if count is not None:
self.count = count
if method is not None:
self.method = method
if common is not None:
self.common = common
self.max_length = max_length
super(LoremGenerator, self).__init__(*args, **kwargs)
def generate(self):
if self.method == 'w':
lorem = lorem_ipsum.words(self.count, common=self.common)
elif self.method == 's':
lorem = u' '.join([
lorem_ipsum.sentence()
for i in range(self.count)])
else:
paras = lorem_ipsum.paragraphs(self.count, common=self.common)
if self.method == 'p':
paras = ['<p>%s</p>' % p for p in paras]
lorem = u'\n\n'.join(paras)
if self.max_length:
length = random.randint(round(int(self.max_length) / 10),
self.max_length)
lorem = lorem[:max(1, length)]
return lorem.strip()
class LoremSentenceGenerator(LoremGenerator):
method = 's'
class LoremHTMLGenerator(LoremGenerator):
method = 'p'
class LoremWordGenerator(LoremGenerator):
count = 7
method = 'w'
class IntegerGenerator(Generator):
coerce_type = int
min_value = - 10 ** 5
max_value = 10 ** 5
def __init__(self, min_value=None, max_value=None, *args, **kwargs):
if min_value is not None:
self.min_value = min_value
if max_value is not None:
self.max_value = max_value
super(IntegerGenerator, self).__init__(*args, **kwargs)
def generate(self):
value = random.randint(self.min_value, self.max_value)
return value
class SmallIntegerGenerator(IntegerGenerator):
min_value = -2 ** 7
max_value = 2 ** 7 - 1
class PositiveIntegerGenerator(IntegerGenerator):
min_value = 0
class PositiveSmallIntegerGenerator(SmallIntegerGenerator):
min_value = 0
class FloatGenerator(IntegerGenerator):
coerce_type = float
decimal_digits = 1
def __init__(self, decimal_digits=None, *args, **kwargs):
if decimal_digits is not None:
self.decimal_digits = decimal_digits
super(FloatGenerator, self).__init__(*args, **kwargs)
def generate(self):
value = super(FloatGenerator, self).generate()
value = float(value)
if self.decimal_digits:
digits = random.randint(1, 10 ** self.decimal_digits) - 1
digits = float(digits)
value = value + digits / (10 ** self.decimal_digits)
return value
class ChoicesGenerator(Generator):
def __init__(self, choices=(), values=(), *args, **kwargs):
assert len(choices) or len(values)
self.choices = list(choices)
if not values:
self.values = [k for k, v in self.choices]
else:
self.values = list(values)
super(ChoicesGenerator, self).__init__(*args, **kwargs)
def generate(self):
return random.choice(self.values)
class BooleanGenerator(ChoicesGenerator):
def __init__(self, none=False, *args, **kwargs):
values = (True, False)
if none:
values = values + (None,)
super(BooleanGenerator, self).__init__(values=values, *args, **kwargs)
class NullBooleanGenerator(BooleanGenerator):
def __init__(self, none=True, *args, **kwargs):
super(NullBooleanGenerator, self).__init__(none=none, *args, **kwargs)
class DateTimeGenerator(Generator):
def __init__(self, min_date=None, max_date=None, *args, **kwargs):
from django.utils import timezone
if min_date is not None:
self.min_date = min_date
else:
self.min_date = timezone.now() - datetime.timedelta(365 * 5)
if max_date is not None:
self.max_date = max_date
else:
self.max_date = timezone.now() + datetime.timedelta(365 * 1)
assert self.min_date < self.max_date
super(DateTimeGenerator, self).__init__(*args, **kwargs)
def generate(self):
diff = self.max_date - self.min_date
seconds = random.randint(0, diff.days * 3600 * 24 + diff.seconds)
return self.min_date + datetime.timedelta(seconds=seconds)
class DateGenerator(Generator):
min_date = datetime.date.today() - datetime.timedelta(365 * 5)
max_date = datetime.date.today() + datetime.timedelta(365 * 1)
def __init__(self, min_date=None, max_date=None, *args, **kwargs):
if min_date is not None:
self.min_date = min_date
if max_date is not None:
self.max_date = max_date
assert self.min_date < self.max_date
super(DateGenerator, self).__init__(*args, **kwargs)
def generate(self):
diff = self.max_date - self.min_date
days = random.randint(0, diff.days)
date = self.min_date + datetime.timedelta(days=days)
return date
class DecimalGenerator(Generator):
coerce_type = Decimal
max_digits = 24
decimal_places = 10
def __init__(self, max_digits=None, decimal_places=None, *args, **kwargs):
if max_digits is not None:
self.max_digits = max_digits
if decimal_places is not None:
self.decimal_places = decimal_places
super(DecimalGenerator, self).__init__(*args, **kwargs)
def generate(self):
maxint = 10 ** self.max_digits - 1
value = (
Decimal(random.randint(-maxint, maxint)) /
10 ** self.decimal_places)
return value
class PositiveDecimalGenerator(DecimalGenerator):
def generate(self):
maxint = 10 ** self.max_digits - 1
value = (
Decimal(random.randint(0, maxint)) /
10 ** self.decimal_places)
return value
class FirstNameGenerator(Generator):
""" Generates a first name, either male or female """
male = [
'Abraham', 'Adam', 'Anthony', 'Brian', 'Bill', 'Ben', 'Calvin',
'David', 'Daniel', 'George', 'Henry', 'Isaac', 'Ian', 'Jonathan',
'Jeremy', 'Jacob', 'John', 'Jerry', 'Joseph', 'James', 'Larry',
'Michael', 'Mark', 'Paul', 'Peter', 'Phillip', 'Stephen', 'Tony',
'Titus', 'Trevor', 'Timothy', 'Victor', 'Vincent', 'Winston', 'Walt']
female = [
'Abbie', 'Anna', 'Alice', 'Beth', 'Carrie', 'Christina', 'Danielle',
'Emma', 'Emily', 'Esther', 'Felicia', 'Grace', 'Gloria', 'Helen',
'Irene', 'Joanne', 'Joyce', 'Jessica', 'Kathy', 'Katie', 'Kelly',
'Linda', 'Lydia', 'Mandy', 'Mary', 'Olivia', 'Priscilla',
'Rebecca', 'Rachel', 'Susan', 'Sarah', 'Stacey', 'Vivian']
def __init__(self, gender=None):
self.gender = gender
self.all = self.male + self.female
def generate(self):
if self.gender == 'm':
return random.choice(self.male)
elif self.gender == 'f':
return random.choice(self.female)
else:
return random.choice(self.all)
class LastNameGenerator(Generator):
""" Generates a last name """
surname = [
'Smith', 'Walker', 'Conroy', 'Stevens', 'Jones', 'Armstrong',
'Johnson', 'White', 'Stone', 'Strong', 'Olson', 'Lee', 'Forrest',
'Baker', 'Portman', 'Davis', 'Clark', 'Brown', 'Roberts', 'Ellis',
'Jackson', 'Marshall', 'Wang', 'Chen', 'Chou', 'Tang', 'Huang', 'Liu',
'Shih', 'Su', 'Song', 'Yang', 'Chan', 'Tsai', 'Wong', 'Hsu', 'Cheng',
'Chang', 'Wu', 'Lin', 'Yu', 'Yao', 'Kang', 'Park', 'Kim', 'Choi',
'Ahn', 'Mujuni']
def generate(self):
return random.choice(self.surname)
class EmailGenerator(StringGenerator):
chars = string.ascii_lowercase
def __init__(self, chars=None, max_length=30, tlds=None, static_domain=None, *args, **kwargs):
assert max_length >= 6
if chars is not None:
self.chars = chars
self.tlds = tlds
self.static_domain = static_domain
super(EmailGenerator, self).__init__(self.chars, max_length=max_length, *args, **kwargs)
def generate(self):
maxl = self.max_length - 2
if self.static_domain is None:
if self.tlds:
tld = random.choice(self.tlds)
elif maxl > 4:
tld = StringGenerator(self.chars, min_length=3, max_length=3).generate()
maxl -= len(tld)
assert maxl >= 2
else:
maxl -= len(self.static_domain)
name = StringGenerator(self.chars, min_length=1, max_length=maxl-1).generate()
maxl -= len(name)
if self.static_domain is None:
domain = StringGenerator(self.chars, min_length=1, max_length=maxl).generate()
return '%s@%s.%s' % (name, domain, tld)
else:
return '%s@%s' % (name, self.static_domain)
class URLGenerator(StringGenerator):
chars = string.ascii_lowercase
protocol = 'http'
tlds = ()
def __init__(self, chars=None, max_length=30, protocol=None, tlds=None,
*args, **kwargs):
if chars is not None:
self.chars = chars
if protocol is not None:
self.protocol = protocol
if tlds is not None:
self.tlds = tlds
assert max_length > (
len(self.protocol) + len('://') +
1 + len('.') +
max([2] + [len(tld) for tld in self.tlds if tld]))
super(URLGenerator, self).__init__(
chars=self.chars, max_length=max_length, *args, **kwargs)
def generate(self):
maxl = self.max_length - len(self.protocol) - 4 # len(://) + len(.)
if self.tlds:
tld = random.choice(self.tlds)
maxl -= len(tld)
else:
tld_max_length = 3 if maxl >= 5 else 2
tld = StringGenerator(self.chars,
min_length=2, max_length=tld_max_length).generate()
maxl -= len(tld)
domain = StringGenerator(chars=self.chars, max_length=maxl).generate()
return u'%s://%s.%s' % (self.protocol, domain, tld)
class IPAddressGenerator(Generator):
coerce_type = str_
def generate(self):
return '.'.join([str_(part) for part in [
IntegerGenerator(min_value=1, max_value=254).generate(),
IntegerGenerator(min_value=0, max_value=254).generate(),
IntegerGenerator(min_value=0, max_value=254).generate(),
IntegerGenerator(min_value=1, max_value=254).generate(),
]])
class TimeGenerator(Generator):
coerce_type = str_
def generate(self):
return u'%02d:%02d:%02d' % (
random.randint(0,23),
random.randint(0,59),
random.randint(0,59),
)
class FilePathGenerator(Generator):
coerce_type = str_
def __init__(self, path, match=None, recursive=False, max_length=None, *args, **kwargs):
self.path = path
self.match = match
self.recursive = recursive
self.max_length = max_length
super(FilePathGenerator, self).__init__(*args, **kwargs)
def generate(self):
filenames = []
if self.match:
match_re = re.compile(self.match)
if self.recursive:
for root, dirs, files in os.walk(self.path):
for f in files:
if self.match is None or match_re.search(f):
f = os.path.join(root, f)
filenames.append(f)
else:
try:
for f in os.listdir(self.path):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and \
(self.match is None or match_re.search(f)):
filenames.append(full_file)
except OSError:
pass
if self.max_length:
filenames = [fn for fn in filenames if len(fn) <= self.max_length]
return random.choice(filenames)
class MediaFilePathGenerator(FilePathGenerator):
'''
Generates a valid filename of an existing file from a subdirectory of
``settings.MEDIA_ROOT``. The returned filename is relative to
``MEDIA_ROOT``.
'''
def __init__(self, path='', *args, **kwargs):
from django.conf import settings
path = os.path.join(settings.MEDIA_ROOT, path)
super(MediaFilePathGenerator, self).__init__(path, *args, **kwargs)
def generate(self):
from django.conf import settings
filename = super(MediaFilePathGenerator, self).generate()
filename = relpath(filename, settings.MEDIA_ROOT)
return filename
class InstanceGenerator(Generator):
'''
Naive support for ``limit_choices_to``. It assigns the specified value to
the field for dict items that have one of the following forms::
fieldname: value
fieldname__exact: value
fieldname__iexact: value
'''
def __init__(self, autofixture, limit_choices_to=None, *args, **kwargs):
self.autofixture = autofixture
limit_choices_to = limit_choices_to or {}
for lookup, value in limit_choices_to.items():
bits = lookup.split('__')
if len(bits) == 1 or \
len(bits) == 2 and bits[1] in ('exact', 'iexact'):
self.autofixture.add_field_value(bits[0], StaticGenerator(value))
super(InstanceGenerator, self).__init__(*args, **kwargs)
def generate(self):
return self.autofixture.create()[0]
class MultipleInstanceGenerator(InstanceGenerator):
empty_value = []
def __init__(self, *args, **kwargs):
self.min_count = kwargs.pop('min_count', 1)
self.max_count = kwargs.pop('max_count', 10)
super(MultipleInstanceGenerator, self).__init__(*args, **kwargs)
def generate(self):
instances = []
for i in range(random.randint(self.min_count, self.max_count)):
instances.append(
super(MultipleInstanceGenerator, self).generate())
return instances
class InstanceSelector(Generator):
'''
Select one or more instances from a queryset.
'''
empty_value = []
def __init__(self, queryset, min_count=None, max_count=None, fallback=None,
limit_choices_to=None, *args, **kwargs):
from django.db.models.query import QuerySet
if not isinstance(queryset, QuerySet):
queryset = queryset._default_manager.all()
limit_choices_to = limit_choices_to or {}
self.queryset = queryset.filter(**limit_choices_to)
self.fallback = fallback
self.min_count = min_count
self.max_count = max_count
super(InstanceSelector, self).__init__(*args, **kwargs)
def generate(self):
if self.max_count is None:
try:
return self.queryset.order_by('?')[0]
except IndexError:
return self.fallback
else:
min_count = self.min_count or 0
count = random.randint(min_count, self.max_count)
return self.queryset.order_by('?')[:count]
class WeightedGenerator(Generator):
"""
Takes a list of generator objects and integer weights, of the following form:
[(generator, weight), (generator, weight),...]
and returns a value from a generator chosen randomly by weight.
"""
def __init__(self, choices):
self.choices = choices
def weighted_choice(self, choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w > r:
return c
upto += w
def generate(self):
return self.weighted_choice(self.choices).generate()
class ImageGenerator(Generator):
'''
Generates a valid placeholder image and saves it to ``settings.MEDIA_ROOT``.
The returned filename is relative to ``MEDIA_ROOT``.
'''
default_sizes = (
(100,100),
(200,300),
(400,600),
)
def __init__(self, width=None, height=None, sizes=None,
path='_autofixture', storage=None, *args, **kwargs):
self.width = width
self.height = height
self.sizes = list(sizes or self.default_sizes)
if self.width and self.height:
self.sizes.append((width, height))
self.path = path
self.storage = storage or default_storage
super(ImageGenerator, self).__init__(*args, **kwargs)
def generate_file_path(self, width, height, suffix=None):
suffix = suffix if suffix is not None else ''
filename ='{width}x{height}{suffix}.png'.format(
width=width, height=height, suffix=suffix)
return os.path.join(self.path, filename)
def generate(self):
from .placeholder import get_placeholder_image
width, height = random.choice(self.sizes)
# Ensure that _autofixture folder exists.
i = 0
path = self.generate_file_path(width, height)
while self.storage.exists(path):
i += 1
path = self.generate_file_path(width, height, '_{0}'.format(i))
return self.storage.save(
path,
ContentFile(get_placeholder_image(width, height))
)
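# Usage sketch (hedged): generators are plain callables, so they can be
# exercised directly outside the autofixture machinery.
# print(StringGenerator(min_length=5, max_length=10)())
# print(ChoicesGenerator(choices=[('a', 'A'), ('b', 'B')])())  # 'a' or 'b'
# print(EmailGenerator(static_domain='example.com')())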
|
ramcn/demo3
|
venv/lib/python3.4/site-packages/autofixture/generators.py
|
Python
|
mit
| 21,150
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='collect_stamp',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AlterField(
model_name='stamp',
name='redeemed',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AlterField(
model_name='voucher',
name='redeemed',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
andreagrandi/drf3-test
|
drftest/shop/migrations/0002_auto_20150201_1559.py
|
Python
|
mit
| 820
|
import math
import davis
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import davis_tf
from gui import Window
if __name__ == '__main__':
NUM_PARTICLES = 1000
DT = 0.0001
GAMMA = 0.01
A_perParticle = 4*math.pi / NUM_PARTICLES
cutoff = math.sqrt(A_perParticle) * 2
#cutoff = 0.1
print(("cutoff", cutoff))
binning = max(1, int(2.0/cutoff))
print("binning", binning)
world = davis_tf.World(NUM_PARTICLES, DT, GAMMA, cutoff, binning)
world.build_graph()
simu = davis.Simulation(world)
Window(simu, "Davis Sphere Simulation, N=%d, dt=%.0le" % (NUM_PARTICLES, DT))
|
tscheff/Davis
|
run_davis_tf.py
|
Python
|
mit
| 630
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^login/$', views.login, name='login'),
url(r'^home/$', views.home, name='home'),
url(r'^home/y12$', views.y12, name='y12'),
]
|
sonali0901/hackathon-studnit
|
studnit/studnit_app/urls.py
|
Python
|
mit
| 208
|
from forms.curve import koch
reload( koch )
koch1 = koch.Koch()
koch1.curve()
x = koch1.drawCurve()
koch2 = koch.Koch()
koch2.snowflake()
y = koch2.drawCurve()
print x
print y
# Result: curve1
# Result: curve2
|
davidpaulrosser/Forms
|
test/curve/koch.py
|
Python
|
mit
| 217
|
from warnings import warn
from django.template import Library
from django.utils.safestring import mark_safe
from .. import utils
register = Library()
_WARNING_MESSAGE = (
'You have specified skip_common_chunks=True but the passed context '
'doesn\'t have a request. django_webpack_loader needs a request object to '
'filter out duplicate chunks. Please see https://github.com/django-webpack'
'/django-webpack-loader#skipping-the-generation-of-multiple-common-chunks')
@register.simple_tag(takes_context=True)
def render_bundle(
context, bundle_name, extension=None, config='DEFAULT', suffix='',
attrs='', is_preload=False, skip_common_chunks=False):
tags = utils.get_as_tags(
bundle_name, extension=extension, config=config, suffix=suffix,
attrs=attrs, is_preload=is_preload)
request = context.get('request')
if request is None:
if skip_common_chunks:
warn(message=_WARNING_MESSAGE, category=RuntimeWarning)
return mark_safe('\n'.join(tags))
used_tags = getattr(request, '_webpack_loader_used_tags', None)
if not used_tags:
used_tags = request._webpack_loader_used_tags = set()
if skip_common_chunks:
tags = [tag for tag in tags if tag not in used_tags]
used_tags.update(tags)
return mark_safe('\n'.join(tags))
@register.simple_tag
def webpack_static(asset_name, config='DEFAULT'):
return utils.get_static(asset_name, config=config)
@register.simple_tag
def get_files(bundle_name, extension=None, config='DEFAULT'):
"""
Returns all chunks in the given bundle.
Example usage::
{% get_files 'editor' 'css' as editor_css_chunks %}
CKEDITOR.config.contentsCss = '{{ editor_css_chunks.0.publicPath }}';
:param bundle_name: The name of the bundle
:param extension: (optional) filter by extension
:param config: (optional) the name of the configuration
:return: a list of matching chunks
"""
return utils.get_files(bundle_name, extension=extension, config=config)
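# Template usage sketch (hedged; tag names as registered above):
# {% load webpack_loader %}
# {% render_bundle 'main' 'js' skip_common_chunks=True %}
# <img src="{% webpack_static 'logo.png' %}">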
|
ezhome/django-webpack-loader
|
webpack_loader/templatetags/webpack_loader.py
|
Python
|
mit
| 2,048
|
#!/usr/bin/env python
# :) my Led Blink Test
# Multi LED Blink Example By NetmaxIOT & Rohitkhosla
# OpenSource MIT licence by Netmax IOT Shield And Rohitkhosla
# :)
import time
from Netmaxiot import *
# Connect the Netmaxiot LED to digital port D4,D5,D6
led0 = 4
led1 = 5
led2 = 6
pinMode(led0,"OUTPUT")
pinMode(led1,"OUTPUT")
pinMode(led2,"OUTPUT")
while True:
try:
#Blink the LED
digitalWrite(led0,1) # Send HIGH to switch on LED
digitalWrite(led1,1) # Send HIGH to switch on LED
digitalWrite(led2,1) # Send HIGH to switch on LED
print ("LED ON!")
time.sleep(1)
digitalWrite(led0,0) # Send LOW to switch off LED
digitalWrite(led1,0) # Send LOW to switch off LED
digitalWrite(led2,0) # Send LOW to switch off LED
print ("LED OFF!")
time.sleep(1)
except IOError: # Print "Error" if communication error encountered
print ("Error")
|
NetmaxIOT/Netmaxiot-Shield
|
Software/Python/tests/multi_led_blink.py
|
Python
|
mit
| 950
|
import os
from django.conf import settings
ELASTICSEARCH_URL = settings.ELASTICSEARCH_URL
APACHE_TIKA_URL = settings.APACHE_TIKA_URL
os.environ["TIKA_CLIENT_ONLY"] = "true"
os.environ["TIKA_SERVER_ENDPOINT"] = settings.APACHE_TIKA_URL
|
watchdogpolska/feder
|
feder/es_search/settings.py
|
Python
|
mit
| 237
|
import numpy as np
import math
import os
from scipy.misc import imread
# In order to import caffe, one may have to add caffe in the PYTHONPATH
import caffe
# If using GPU, set to True
GPU = False
if GPU:
caffe.set_mode_gpu()
caffe.set_device(0)
else:
caffe.set_mode_cpu()
def get_features(net, locs_file):
'''
Runs the forward pass of the neural net on every image.
'''
img_cluster_locs, num_images = get_locs_info(locs_file)
num_batches = math.ceil(num_images / 32.0)
raw_features = []
batch_num = 0
with open(locs_file, 'r') as f:
curr_batch = []
for line in f:
img_path = line.split()[0]
# reads a RGB image
input_img = imread(img_path).astype(np.float32)
# convert to BGR
input_img = input_img[:, :, [2, 1, 0]]
# convert to D,H,W
input_img = np.transpose(input_img, [2, 0, 1])
# subtract the mean
mean_bgr = [103.334, 107.8797, 107.4072]
for i in xrange(0, 3):
input_img[i, :, :] = input_img[i, :, :] - mean_bgr[i]
curr_batch.append(input_img)
if len(curr_batch) == 32:
batch_num += 1
print("Batch %d/%d for %s" % (batch_num,
num_batches, locs_file))
curr_batch = np.asarray(curr_batch)
net.blobs['data'].data[...] = curr_batch
net.forward()
raw_features.append(net.blobs['conv7'].data)
curr_batch = []
if len(curr_batch) > 0:
batch_num += 1
print("Batch %d/%d for %s" % (batch_num, num_batches, locs_file))
curr_batch = np.asarray(curr_batch)
batch_size = curr_batch.shape[0]
# pad end batch
curr_batch = np.vstack((curr_batch, np.zeros((32 - batch_size, 3, 400, 400)).astype(np.float32)))
net.blobs['data'].data[...] = curr_batch
net.forward()
raw_features.append(net.blobs['conv7'].data[:batch_size])
raw_features = np.vstack(raw_features)
# average pooling
n, f, h, w = raw_features.shape
features = raw_features.reshape(n, f, h*w)
features = np.mean(features, axis=2)
return features, img_cluster_locs
def aggregate_features(features, img_cluster_locs, clusters):
'''
Aggregate features by cluster by taking the mean.
Respects the cluster ordering given by lats.npy and lons.npy.
'''
# average the features in the same cluster
conv_features = []
image_counts = []
for cluster in clusters:
cluster_mask = [(img_cluster == cluster) for img_cluster in img_cluster_locs]
cluster_mask = np.asarray(cluster_mask)
image_count = np.sum(cluster_mask)
# if count is 0, fill with a 0 feature
if image_count == 0:
mean_cluster_feature = np.zeros(features.shape[1])
else:
mean_cluster_feature = np.mean(features[cluster_mask], axis=0)
conv_features.append(mean_cluster_feature)
image_counts.append(image_count)
conv_features = np.asarray(conv_features)
image_counts = np.asarray(image_counts)
return conv_features, image_counts
def extract(net, countries, output_dir):
'''
Runs the forward pass of the CNN on every image and then
aggregates the features by cluster by taking the mean.
'''
for country in countries:
print("Extracting %s for %s" % (country, output_dir))
locs_file = os.path.join(output_dir, country, 'downloaded_locs.txt')
# compute conv features for every image
features, img_cluster_locs = get_features(net, locs_file)
# get the master cluster ordering
cluster_lats = np.load(os.path.join(output_dir, country, 'lats.npy'))
cluster_lons = np.load(os.path.join(output_dir, country, 'lons.npy'))
# bit of a hack here - cluster locations can be changed when
# writing to a file using format string
clusters = [(float("%f" % cluster_lats[i]), float("%f" % cluster_lons[i])) for i in xrange(cluster_lats.size)]
# aggregate features by cluster
conv_features, image_counts = aggregate_features(features, img_cluster_locs, clusters)
conv_features_path = os.path.join(output_dir, country, 'conv_features')
image_counts_path = os.path.join(output_dir, country, 'image_counts')
np.save(conv_features_path, conv_features)
np.save(image_counts_path, image_counts)
def get_locs_info(locs_file):
'''
Get the cluster location for each image and compute the number of
images.
'''
img_cluster_locs = []
num_images = 0
with open(locs_file, 'r') as f:
for line in f:
num_images += 1
img_path, lat, lon, cluster_lat, cluster_lon = line.split()
cluster_loc = (float(cluster_lat), float(cluster_lon))
img_cluster_locs.append(cluster_loc)
return img_cluster_locs, num_images
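# Input format (inferred from the unpacking above): each line of the locs
# file is whitespace-separated as
#   <img_path> <lat> <lon> <cluster_lat> <cluster_lon>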
if __name__ == '__main__':
model_file = '../model/predicting_poverty_deploy.prototxt'
weights_file = '../model/predicting_poverty_trained.caffemodel'
net = caffe.Net(model_file, weights_file, caffe.TEST)
# DHS
print("Extracting features for DHS")
countries = ['nigeria', 'tanzania', 'uganda', 'malawi', 'rwanda']
dhs_dir = '../data/output/DHS'
extract(net, countries, dhs_dir)
# LSMS
print("Extracting features for LSMS")
countries = ['nigeria', 'tanzania', 'uganda', 'malawi']
lsms_dir = '../data/output/LSMS'
extract(net, countries, lsms_dir)
|
nealjean/predicting-poverty
|
scripts/extract_features.py
|
Python
|
mit
| 5,211
|
from datetime import datetime
from dateutil import tz
import stores
class Data():
def __init__(self, player_id=None):
self.to_zone = tz.gettz('America/New_York')
self.from_zone = tz.gettz('UTC')
self._rawData = self.RawData()
years = self._rawData.get_years()
self._players = self._rawData.get_players()
self._matchups = {}
self._matchups_list = {}
self._predictions = {}
self._winners = {}
self._teams = {}
for year in years:
self._teams[year] = self._rawData.get_teams(year)
self._matchups[year] = self._rawData.get_matchups(year)
self._matchups_list[year] = list(self._matchups[year].values())
self._predictions[year] = self._rawData.get_predictions(year)
self._winners[year] = self._rawData.get_winners(year)
self.build_players(player_id)
def add_player_team(self, team, round, teams, rounds_teams):
if team not in teams:
teams[team] = 1
else:
teams[team] = teams[team] + 1
if team not in rounds_teams[round]:
rounds_teams[round][team] = 1
else:
rounds_teams[round][team] = rounds_teams[round][team] + 1
def add_player_team_result(self, team, has_winner, results):
if team not in results:
results[team] = {'good': 0, 'bad': 0}
if has_winner:
results[team]['good'] = results[team]['good'] + 1
else:
results[team]['bad'] = results[team]['bad'] + 1
def add_player_games(self, games, round, total_games, rounds_games):
if games != 0:
if games not in total_games:
total_games[games] = 1
else:
total_games[games] = total_games[games] + 1
if games not in rounds_games[round]:
rounds_games[round][games] = 1
else:
rounds_games[round][games] = rounds_games[round][games] + 1
def add_player_game_result(self, games, has_games, results):
if games not in results:
results[games] = {'good': 0, 'bad': 0}
if has_games:
results[games]['good'] = results[games]['good'] + 1
else:
results[games]['bad'] = results[games]['bad'] + 1
def matchup_result(self, matchup):
winner = 0
games = 0
if matchup['result']['home_win'] == 4:
winner = matchup['home']
games = matchup['result']['home_win'] + matchup['result']['away_win']
if matchup['result']['away_win'] == 4:
winner = matchup['away']
games = matchup['result']['home_win'] + matchup['result']['away_win']
return winner, games
def build_player_matchup_result(self, year, pred, winner, games, result):
if pred['winner'] == winner:
result['has_winner'] = True
result['winner_rank'] = self._teams[year][winner]['standings']['divisionRank']
if pred['games'] == games:
result['has_games'] = True
def build_players(self, player_id=None):
for p in self._players:
p['predictions'] = {}
prediction_count = 0
prediction_team_results = {}
prediction_game_results = {}
prediction_teams = {}
rounds_teams = {1: {}, 2: {}, 3: {}, 4: {}}
total_games = {}
rounds_games = {1: {}, 2: {}, 3: {}, 4: {}}
missings_predictions = []
now = self.now()
for y in self._matchups_list:
p['predictions'][y] = []
for m in self._matchups_list[y]:
if not player_id or player_id == p['id'] or self.is_matchup_started(m, now):
pred = self.find_prediction(p['id'], y, m['round'], m['home'], m['away'])
result = {'has_winner': False, 'has_games': False, 'winner_rank': 0}
if pred and pred['winner'] != 0:
prediction_count = prediction_count + 1
self.add_player_team(pred['winner'], pred['round'], prediction_teams, rounds_teams)
self.add_player_games(pred['games'], pred['round'], total_games, rounds_games)
winner, games = self.matchup_result(m)
self.build_player_matchup_result(y, pred, winner, games, result)
if winner != 0:
self.add_player_team_result(pred['winner'], result['has_winner'], prediction_team_results)
if games != 0:
self.add_player_game_result(pred['games'], result['has_games'], prediction_game_results)
if 'player' in pred:
del pred['player']
elif m['home'] != 0 and m['away'] != 0:
missings_predictions.append(m)
if 'schedule' in m:
del m['schedule']
if 'season' in m:
del m['season']
m['year'] = y
p['predictions'][y].append({'matchup': m, 'prediction': pred, 'result': result})
p['prediction_count'] = prediction_count
favorite_team = 0
if len(prediction_teams) > 0:
favorite_team = max(prediction_teams, key=prediction_teams.get)
team_results = {}
for r in prediction_team_results:
result = prediction_team_results[r]
total = result['good'] + result['bad']
team_results[r] = float(result['good']) / float(total) * 100
games_results = {}
for r in prediction_game_results:
result = prediction_game_results[r]
total = result['good'] + result['bad']
games_results[r] = float(result['good']) / float(total) * 100
p['team_results'] = team_results
p['games_results'] = games_results
p['favorite_team'] = favorite_team
p['games_stats'] = {'total': total_games, 'rounds': rounds_games}
p['missings'] = missings_predictions
p['has_winner'] = False
# pprint(self._players)
def parse_time(self, timestamp):
utc = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
utc = utc.replace(tzinfo=self.from_zone)
return utc.astimezone(self.to_zone)
def get_start(self, matchup):
if 'start' in matchup and matchup['start']:
return self.parse_time(matchup['start'])
return None
def now(self):
return datetime.now(tz.tzlocal()).astimezone(self.to_zone)
def is_round_started(self, year, round, now=None):
if not now:
now = self.now()
for matchup in self._matchups_list[year]:
if matchup['round'] == round and self.is_matchup_started(matchup, now):
return True
return False
def is_matchup_started(self, matchup, now=None):
if not now:
now = self.now()
start = self.get_start(matchup)
if start is not None:
if now > start:
return True
return False
def get_players(self):
return self._players
def find_prediction(self, player_id, year, round, home, away):
result = None
for p in self._predictions[year]:
if p['player'] == player_id and p['round'] == round and p['home'] == home and p['away'] == away:
return p.copy()
return result
def find_winner(self, player_id, year):
for w in self._winners[year]:
if w['player'] == player_id:
return w['winner']
print('not Found', player_id, year)
return 0
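# Scoring, as implemented below: 10 pts for picking the series winner plus
# the winner's division rank as a bonus, 5 pts for the exact series length,
# and (in get_results) 50 pts for picking the Stanley Cup champion.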
def calculate_result_pts(self, result):
pts = 0
if result['has_winner']:
pts = pts + 10
pts = pts + int(result['winner_rank'])
if result['has_games']:
pts = pts + 5
return pts
def get_results(self, player_id, year):
results = []
now = self.now()
for player in self._players:
if player['prediction_count'] > 0:
pts = 0
oldpts = 0
winner = 0
if not player_id or player_id == player['id'] or self.is_round_started(year, 1, now):
winner = self.find_winner(player['id'], year)
player_preds = []
for pred in player['predictions'][year]:
if pred['prediction']:
player_preds.append(pred['prediction'])
pts = pts + self.calculate_result_pts(pred['result'])
scwinner, games = self.matchup_result(self._matchups[year]['sc'])
print(scwinner, winner)
if int(scwinner) and int(scwinner) == int(winner):
pts = pts + 50
victories = {'winner_count': 0, 'games_count': 0}
result = {'player': player['name'], 'pts': pts, 'oldpts': oldpts, 'winner': winner, 'predictions': player_preds, 'victories': victories, 'favorite_team': player['favorite_team']}
result['games_stats'] = player['games_stats']
result['team_results'] = player['team_results']
results.append(result)
return results
class RawData():
def __init__(self):
_db = stores.get()
self._data = _db.backup()
def get_years(self):
return self._data['datav2'].keys()
def get_players(self):
players = self._data['players'][1]
l = list(players.items())
result = []
for player in l:
p = player[1].copy()
del p['psw']
p['id'] = player[0]
result.append(p)
return result
def get_teams(self, year):
dbteams = self._data['datav2'][year]['teams']
teams = {}
for m in dbteams.items():
teams[int(m[0])] = m[1]
return teams
def get_matchups(self, year):
return self._data['datav2'][year]['matchups']
def get_predictions(self, year):
if year in self._data['predictions']:
return self._data['predictions'][year]['matchups']
return {}
def get_winners(self, year):
if year in self._data['predictions']:
return self._data['predictions'][year]['winners']
return {}
|
fjacob21/nhlplayoffs
|
data.py
|
Python
|
mit
| 10,797
|
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from cmsplugin_articles import __version__
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, because outside the eggs aren't loaded
import tox
import shlex
args = shlex.split(self.tox_args) if self.tox_args else []
errno = tox.cmdline(args=args)
sys.exit(errno)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Programming Language :: Python :: 2.7',
]
setup(
name='cmsplugin-articles',
version=__version__,
description='Django CMS articles management plugin',
author='Anton Egorov',
author_email='anton.egoroff@gmail.com',
url='https://github.com/satyrius/cmsplugin-articles',
license='MIT',
long_description=open('README.rst').read(),
classifiers=CLASSIFIERS,
platforms=['OS Independent'],
packages=find_packages(),
include_package_data=True,
install_requires=[
'django-cms',
'Pillow',
'beautifulsoup4',
],
tests_require=['tox>=1.8'],
cmdclass={'test': Tox},
zip_safe=False,
)
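# With the Tox command registered above, `python setup.py test` runs the tox
# matrix; extra tox arguments can be passed via the --tox-args/-a option.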
|
satyrius/cmsplugin-articles
|
setup.py
|
Python
|
mit
| 1,886
|
from sys import argv
import pexpect
children = []
for i in range(3, len(argv)):
children.append(pexpect.spawn('/bin/bash'))
children[len(children) - 1].sendline(argv[i])
print(argv[i])
import time
time.sleep(int(argv[1]))
for child in children:
child.sendcontrol('c')
child.sendline('exit')
child.expect(pexpect.EOF)
# if len(children) != 0:
# open(argv[2] + '.txt', 'w').write(children[len(children) - 1].read())
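# Usage sketch (inferred from the argv indexing above):
#   python runCommands.py <seconds_to_run> <log_name> <cmd1> [<cmd2> ...]
# Each command gets its own bash session; after the sleep expires every
# session receives Ctrl-C and exits. <log_name> is only used by the
# commented-out line above.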
|
slremy/testingpubsub
|
myBallPlate/runCommands.py
|
Python
|
mit
| 462
|
"""Alpenhorn client interface."""
import datetime
import os
import sys
import click
import peewee as pw
from ch_util import data_index as di
from ch_util import ephemeris
@click.group()
def cli():
"""Client interface for alpenhorn. Use to request transfers, mount drives,
check status etc."""
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.argument("group_name", metavar="GROUP")
@click.option(
"--acq", help="Sync only this acquisition.", metavar="ACQ", type=str, default=None
)
@click.option("--force", "-f", help="proceed without confirmation", is_flag=True)
@click.option("--nice", "-n", help="nice level for transfer", default=0)
@click.option(
"--target",
metavar="TARGET_GROUP",
default=None,
type=str,
help="Only transfer files not available on this group.",
)
@click.option(
"--transport",
"-t",
is_flag=True,
help="[DEPRECATED] transport mode: only copy if fewer than two archived copies exist.",
)
@click.option("--show_acq", help="Summarise acquisitions to be copied.", is_flag=True)
@click.option("--show_files", help="Show files to be copied.", is_flag=True)
def sync(
node_name, group_name, acq, force, nice, target, transport, show_acq, show_files
):
"""Copy all files from NODE to GROUP that are not already present.
We can also use the --target option to only transfer files that are not
available on both the destination group, and the TARGET_GROUP. This is
useful for transferring data to a staging location before going to a final
archive (e.g. HPSS, transport disks).
"""
# Make sure we connect RW
di.connect_database(read_write=True)
try:
from_node = di.StorageNode.get(name=node_name)
except pw.DoesNotExist:
raise Exception('Node "%s" does not exist in the DB.' % node_name)
try:
to_group = di.StorageGroup.get(name=group_name)
except pw.DoesNotExist:
raise Exception('Group "%s" does not exist in the DB.' % group_name)
# Construct list of file copies that are available on the source node, and
# not available on any nodes at the destination. This query is quite complex
# so I've broken it up...
# First get the nodes at the destination...
nodes_at_dest = di.StorageNode.select().where(di.StorageNode.group == to_group)
# Then use this to get a list of all files at the destination...
files_at_dest = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << nodes_at_dest, di.ArchiveFileCopy.has_file == "Y"
)
)
# Then combine to get all file(copies) that are available at the source but
# not at the destination...
copy = di.ArchiveFileCopy.select().where(
di.ArchiveFileCopy.node == from_node,
di.ArchiveFileCopy.has_file == "Y",
~(di.ArchiveFileCopy.file << files_at_dest),
)
# If the target option has been specified, only copy nodes also not
# available there...
if target is not None:
# Fetch a reference to the target group
try:
target_group = di.StorageGroup.get(name=target)
except pw.DoesNotExist:
raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
# First get the nodes at the destination...
nodes_at_target = di.StorageNode.select().where(
di.StorageNode.group == target_group
)
# Then use this to get a list of all files at the destination...
files_at_target = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << nodes_at_target,
di.ArchiveFileCopy.has_file == "Y",
)
)
# Only match files that are also not available at the target
copy = copy.where(~(di.ArchiveFileCopy.file << files_at_target))
# In transport mode (DEPRECATED) we only move files that don't have an
# archive copy elsewhere...
if transport:
import warnings
warnings.warn("Transport mode is deprecated. Try to use --target instead.")
# Get list of other archive nodes
other_archive_nodes = di.StorageNode.select().where(
di.StorageNode.storage_type == "A", di.StorageNode.id != from_node
)
files_in_archive = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << other_archive_nodes,
di.ArchiveFileCopy.has_file == "Y",
)
)
copy = copy.where(~(di.ArchiveFileCopy.file << files_in_archive))
# Join onto ArchiveFile for later query parts
copy = copy.join(di.ArchiveFile)
# If requested, limit query to a specific acquisition...
if acq is not None:
# Fetch acq if specified
try:
acq = di.ArchiveAcq.get(name=acq)
except pw.DoesNotExist:
raise Exception('Acquisition "%s" does not exist in the DB.' % acq)
# Restrict files to be in the acquisition
copy = copy.where(di.ArchiveFile.acq == acq)
if not copy.count():
print("No files to copy from node %s." % (node_name))
return
# Show acquisitions based summary of files to be copied
if show_acq:
acqs = [c.file.acq.name for c in copy]
import collections
for acq, count in collections.Counter(acqs).items():
print("%s [%i files]" % (acq, count))
# Show all files to be copied
if show_files:
for c in copy:
print("%s/%s" % (c.file.acq.name, c.file.name))
size_bytes = copy.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
size_gb = int(size_bytes) / 1073741824.0
print(
"Will request that %d files (%.1f GB) be copied from node %s to group %s."
% (copy.count(), size_gb, node_name, group_name)
)
if not (force or click.confirm("Do you want to proceed?")):
print("Aborted.")
return
dtnow = datetime.datetime.now()
# Perform update in a transaction to avoid any clobbering from concurrent updates
with di.ArchiveFileCopyRequest._meta.database.atomic():
# Get a list of all the file ids for the copies we should perform
files_ids = [c.file_id for c in copy]
# Get a list of all the file ids for existing requests
requests = di.ArchiveFileCopyRequest.select().where(
di.ArchiveFileCopyRequest.group_to == to_group,
di.ArchiveFileCopyRequest.node_from == from_node,
)
req_file_ids = [req.file_id for req in requests]
# Separate the files into ones that already have requests and ones that don't
files_in = list(filter(lambda x: x in req_file_ids, files_ids))
files_out = list(filter(lambda x: x not in req_file_ids, files_ids))
sys.stdout.write(
"Updating %i existing requests and inserting %i new ones.\n"
% (len(files_in), len(files_out))
)
# Perform an update of all the existing copy requests
if len(files_in) > 0:
update = di.ArchiveFileCopyRequest.update(
nice=nice,
completed=False,
cancelled=False,
timestamp=dtnow,
n_requests=di.ArchiveFileCopyRequest.n_requests + 1,
)
update = update.where(
di.ArchiveFileCopyRequest.file << files_in,
di.ArchiveFileCopyRequest.group_to == to_group,
di.ArchiveFileCopyRequest.node_from == from_node,
)
update.execute()
# Insert any new requests
if len(files_out) > 0:
# Construct a list of all the rows to insert
insert = [
{
"file": fid,
"node_from": from_node,
"nice": 0,
"group_to": to_group,
"completed": False,
"n_requests": 1,
"timestamp": dtnow,
}
for fid in files_out
]
# Do a bulk insert of these new rows
di.ArchiveFileCopyRequest.insert_many(insert).execute()
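# CLI usage sketch (hedged; assumes the console entry point is `alpenhorn`):
#   alpenhorn sync NODE GROUP --target=TARGET_GROUP --show_files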
@cli.command()
@click.option(
"--all", help="Show the status of all nodes, not just mounted ones.", is_flag=True
)
def status(all):
"""Summarise the status of alpenhorn storage nodes."""
import tabulate
# Data to fetch from the database (node name, total files, total size)
query_info = (
di.StorageNode.name,
pw.fn.Count(di.ArchiveFileCopy.id).alias("count"),
pw.fn.Sum(di.ArchiveFile.size_b).alias("total_size"),
di.StorageNode.host,
di.StorageNode.root,
)
# Per node totals
nodes = (
di.StorageNode.select(*query_info)
.join(di.ArchiveFileCopy)
.where(di.ArchiveFileCopy.has_file == "Y")
.join(di.ArchiveFile)
.group_by(di.StorageNode)
.order_by(di.StorageNode.name)
)
if not all:
nodes = nodes.where(di.StorageNode.mounted)
# Totals for the whole archive
tot = di.ArchiveFile.select(
pw.fn.Count(di.ArchiveFile.id).alias("count"),
pw.fn.Sum(di.ArchiveFile.size_b).alias("total_size"),
).scalar(as_tuple=True)
data = [
[
node[0],
int(node[1]),
int(node[2]) / 2 ** 40.0,
100.0 * int(node[1]) / int(tot[0]),
100.0 * int(node[2]) / int(tot[1]),
"%s:%s" % (node[3], node[4]),
]
for node in nodes.tuples()
]
headers = ["Node", "Files", "Size [TB]", "Files [%]", "Size [%]", "Path"]
print(tabulate.tabulate(data, headers=headers, floatfmt=".1f"))
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("--md5", help="perform full check against md5sum", is_flag=True)
@click.option(
"--fixdb", help="fix up the database to be consistent with reality", is_flag=True
)
@click.option(
"--acq",
metavar="ACQ",
multiple=True,
help="Limit verification to specified acquisitions. Use repeated --acq flags to specify multiple acquisitions.",
)
def verify(node_name, md5, fixdb, acq):
"""Verify the archive on NODE against the database."""
import os
try:
this_node = di.StorageNode.get(di.StorageNode.name == node_name)
except pw.DoesNotExist:
print("Specified node does not exist.")
return
## Use a complicated query with a tuples construct to fetch everything we
## need in a single query. This massively speeds up the whole process versus
## fetching all the FileCopy's then querying for Files and Acqs.
lfiles = (
di.ArchiveFile.select(
di.ArchiveFile.name,
di.ArchiveAcq.name,
di.ArchiveFile.size_b,
di.ArchiveFile.md5sum,
di.ArchiveFileCopy.id,
)
.join(di.ArchiveAcq)
.switch(di.ArchiveFile)
.join(di.ArchiveFileCopy)
.where(di.ArchiveFileCopy.node == this_node, di.ArchiveFileCopy.has_file == "Y")
.tuples()
)
missing_files = []
corrupt_files = []
missing_ids = []
corrupt_ids = []
nfiles = 0
with click.progressbar(lfiles, label="Scanning files") as lfiles_iter:
for filename, acqname, filesize, md5sum, fc_id in lfiles_iter:
# Skip if not in specified acquisitions
if len(acq) > 0 and acqname not in acq:
continue
nfiles += 1
filepath = this_node.root + "/" + acqname + "/" + filename
# Check if file is plain missing
if not os.path.exists(filepath):
missing_files.append(filepath)
missing_ids.append(fc_id)
continue
if md5:
file_md5 = di.md5sum_file(filepath)
corrupt = file_md5 != md5sum
else:
corrupt = os.path.getsize(filepath) != filesize
if corrupt:
corrupt_files.append(filepath)
corrupt_ids.append(fc_id)
continue
if len(missing_files) > 0:
print()
print("=== Missing files ===")
for fname in missing_files:
print(fname)
if len(corrupt_files) > 0:
print()
print("=== Corrupt files ===")
for fname in corrupt_files:
print(fname)
print()
print("=== Summary ===")
print(" %i total files" % nfiles)
print(" %i missing files" % len(missing_files))
print(" %i corrupt files" % len(corrupt_files))
print()
# Fix up the database by marking files as missing, and marking
# corrupt files for verification by alpenhornd.
if fixdb:
# Make sure we connect RW
di.connect_database(read_write=True)
if (len(missing_files) > 0) and click.confirm("Fix missing files"):
missing_count = (
di.ArchiveFileCopy.update(has_file="N")
.where(di.ArchiveFileCopy.id << missing_ids)
.execute()
)
print(" %i marked as missing" % missing_count)
if (len(corrupt_files) > 0) and click.confirm("Fix corrupt files"):
corrupt_count = (
di.ArchiveFileCopy.update(has_file="M")
.where(di.ArchiveFileCopy.id << corrupt_ids)
.execute()
)
print(" %i corrupt files marked for verification" % corrupt_count)
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option(
"--days", "-d", help="clean files older than <days>", type=int, default=None
)
@click.option("--force", "-f", help="force cleaning on an archive node", is_flag=True)
@click.option("--now", "-n", help="force immediate removal", is_flag=True)
@click.option(
"--target",
metavar="TARGET_GROUP",
default=None,
type=str,
help="Only clean files already available in this group.",
)
@click.option(
"--acq", metavar="ACQ", default=None, type=str, help="Limit removal to acquisition"
)
def clean(node_name, days, force, now, target, acq):
"""Clean up NODE by marking older files as potentially removable.
If --target is specified we will only remove files already available in the
TARGET_GROUP. This is useful for cleaning out intermediate locations such as
transport disks.
Using the --days flag will only clean correlator and housekeeping
files which have a timestamp associated with them. It will not
touch other types. If no --days flag is given, all files will be
considered for removal.
"""
import peewee as pw
di.connect_database(read_write=True)
try:
this_node = di.StorageNode.get(di.StorageNode.name == node_name)
    except pw.DoesNotExist:
        print("Specified node does not exist.")
        return
# Check to see if we are on an archive node
if this_node.storage_type == "A":
if force or click.confirm("DANGER: run clean on archive node?"):
print("%s is an archive node. Forcing clean." % node_name)
else:
print("Cannot clean archive node %s without forcing." % node_name)
return
# Select FileCopys on this node.
files = di.ArchiveFileCopy.select(di.ArchiveFileCopy.id).where(
di.ArchiveFileCopy.node == this_node, di.ArchiveFileCopy.wants_file == "Y"
)
# Limit to acquisition
if acq is not None:
try:
acq = di.ArchiveAcq.get(name=acq)
        except pw.DoesNotExist:
raise RuntimeError("Specified acquisition %s does not exist" % acq)
files_in_acq = di.ArchiveFile.select().where(di.ArchiveFile.acq == acq)
files = files.where(di.ArchiveFileCopy.file << files_in_acq)
# If the target option has been specified, only clean files also available there...
if target is not None:
# Fetch a reference to the target group
try:
target_group = di.StorageGroup.get(name=target)
except pw.DoesNotExist:
raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
# First get the nodes at the destination...
nodes_at_target = di.StorageNode.select().where(
di.StorageNode.group == target_group
)
# Then use this to get a list of all files at the destination...
files_at_target = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << nodes_at_target,
di.ArchiveFileCopy.has_file == "Y",
)
)
# Only match files that are also available at the target
files = files.where(di.ArchiveFileCopy.file << files_at_target)
# If --days has been set we need to restrict to files older than the given
# time. This only works for a few particular file types
if days is not None and days > 0:
# Get the time for the oldest files to keep
oldest = datetime.datetime.now() - datetime.timedelta(days)
oldest_unix = ephemeris.ensure_unix(oldest)
# List of filetypes we want to update, needs a human readable name and a
# FileInfo table.
filetypes = [["correlation", di.CorrFileInfo], ["housekeeping", di.HKFileInfo]]
file_ids = []
# Iterate over file types for cleaning
for name, infotable in filetypes:
# Filter to fetch only ones with a start time older than `oldest`
oldfiles = (
files.join(di.ArchiveFile)
.join(infotable)
.where(infotable.start_time < oldest_unix)
)
local_file_ids = list(oldfiles)
# Get number of correlation files
count = oldfiles.count()
if count > 0:
size_bytes = (
di.ArchiveFileCopy.select()
.where(di.ArchiveFileCopy.id << local_file_ids)
.join(di.ArchiveFile)
.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
)
size_gb = int(size_bytes) / 2 ** 30.0
print(
"Cleaning up %i %s files (%.1f GB) from %s "
% (count, name, size_gb, node_name)
)
file_ids += local_file_ids
# If days is not set, then just select all files that meet the requirements so far
else:
file_ids = list(files)
count = files.count()
if count > 0:
size_bytes = (
di.ArchiveFileCopy.select()
.where(di.ArchiveFileCopy.id << file_ids)
.join(di.ArchiveFile)
.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
)
            size_gb = int(size_bytes) / 2 ** 30.0
print(
"Cleaning up %i files (%.1f GB) from %s " % (count, size_gb, node_name)
)
# If there are any files to clean, ask for confirmation and the mark them in
# the database for removal
if len(file_ids) > 0:
if force or click.confirm(" Are you sure?"):
print(" Marking files for cleaning.")
state = "N" if now else "M"
update = di.ArchiveFileCopy.update(wants_file=state).where(
di.ArchiveFileCopy.id << file_ids
)
n = update.execute()
print("Marked %i files for cleaning" % n)
else:
print(" Cancelled")
else:
print("No files selected for cleaning on %s." % node_name)
@cli.command()
@click.option(
"--host",
"-H",
help="use specified host rather than local machine",
type=str,
default=None,
)
def mounted(host):
"""list the nodes mounted on this, or another specified, machine"""
import socket
if host is None:
host = socket.gethostname().split(".")[0]
zero = True
for node in di.StorageNode.select().where(
di.StorageNode.host == host, di.StorageNode.mounted == True
):
n_file = (
di.ArchiveFileCopy.select().where(di.ArchiveFileCopy.node == node).count()
)
print("%-25s %-30s %5d files" % (node.name, node.root, n_file))
zero = False
if zero:
print("No nodes are mounted on host %s." % host)
@cli.command()
@click.argument("serial_num")
def format_transport(serial_num):
"""Interactive routine for formatting a transport disc as a storage
node; formats and labels the disc as necessary, the adds to the
database. The disk is specified using the manufacturers
SERIAL_NUM, which is printed on the disk.
"""
import glob
import os
if os.getuid() != 0:
print("You must be root to run mount on a transport disc. I quit.")
return
# Find the disc.
dev = glob.glob("/dev/disk/by-id/*%s" % serial_num)
if len(dev) == 0:
print("No disc with that serial number is attached.")
return
elif len(dev) > 1:
print("Confused: found more than one device matching that serial number:")
for d in dev:
print(" %s" % dev)
print("Aborting.")
return
dev = dev[0]
dev_part = "%s-part1" % dev
# Figure out if it is formatted.
print("Checking to see if disc is formatted. Please wait.")
fp = os.popen("parted -s %s print" % dev)
formatted = False
part_start = False
while True:
l = fp.readline()
if not l:
break
if l.find("Number") == 0 and l.find("Start") > 0 and l.find("File system") > 0:
part_start = True
elif l.strip() != "" and part_start:
formatted = True
fp.close()
if not formatted:
if not click.confirm("Disc is not formatted. Should I format it?"):
return
print("Creating partition. Please wait.")
os.system(
"parted -s -a optimal %s mklabel gpt -- mkpart primary 0%% 100%%" % dev
)
print("Formatting disc. Please wait.")
os.system("mkfs.ext4 %s -m 0 -L CH-%s" % (dev_part, serial_num))
else:
print("Disc is already formatted.")
e2label = get_e2label(dev_part)
name = "CH-%s" % serial_num
if e2label and e2label != name:
        print(
            "Disc label %s does not conform to labelling standard, "
            "which is CH-<serialnum>." % e2label
        )
        return
elif not e2label:
print('Labelling the disc as "%s" (using e2label) ...' % (name))
assert dev_part is not None
assert len(name) <= MAX_E2LABEL_LEN
stat = os.system("/sbin/e2label %s %s" % (dev_part, name))
if stat:
print("Failed to e2label! Stat = %s. I quit." % (stat))
exit()
# Ensure the mount path exists.
root = "/mnt/%s" % name
if not os.path.isdir(root):
print("Creating mount point %s." % root)
os.mkdir(root)
# Check to see if the disc is mounted.
fp = os.popen("df")
mounted = False
dev_part_abs = os.path.realpath(dev_part)
while 1:
l = fp.readline()
if not l:
break
if l.find(root) > 0:
            if l[: len(dev_part)] == dev_part or l[: len(dev_part_abs)] == dev_part_abs:
mounted = True
else:
                print(
                    "%s is a mount point, but %s is already mounted there."
                    % (root, l.split()[0])
                )
fp.close()
try:
node = di.StorageNode.get(name=name)
except pw.DoesNotExist:
print(
"This disc has not been registered yet as a storage node. "
"Registering now."
)
try:
group = di.StorageGroup.get(name="transport")
except pw.DoesNotExist:
print('Hmmm. Storage group "transport" does not exist. I quit.')
exit()
# We need to write to the database.
di.connect_database(read_write=True)
node = di.StorageNode.create(
name=name, root=root, group=group, storage_type="T", min_avail_gb=1
)
print("Successfully created storage node.")
print("Node created but not mounted. Run alpenhorn mount_transport for that.")
@cli.command()
@click.pass_context
@click.argument("node")
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
"--address", help="address for remote access to this node.", type=str, default=None
)
def mount_transport(ctx, node, user, address):
"""Mount a transport disk into the system and then make it available to alpenhorn."""
mnt_point = "/mnt/%s" % node
print("Mounting disc at %s" % mnt_point)
os.system("mount %s" % mnt_point)
ctx.invoke(mount, name=node, path=mnt_point, user=user, address=address)
@cli.command()
@click.pass_context
@click.argument("node")
def unmount_transport(ctx, node):
"""Mount a transport disk into the system and then make it available to alpenhorn."""
mnt_point = "/mnt/%s" % node
print("Unmounting disc at %s" % mnt_point)
os.system("umount %s" % mnt_point)
ctx.invoke(unmount, root_or_name=node)
@cli.command()
@click.argument("name")
@click.option("--path", help="Root path for this node", type=str, default=None)
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
"--address", help="address for remote access to this node.", type=str, default=None
)
@click.option(
"--hostname",
help="hostname running the alpenhornd instance for this node (set to this hostname by default).",
type=str,
default=None,
)
def mount(name, path, user, address, hostname):
"""Interactive routine for mounting a storage node located at ROOT."""
import socket
# We need to write to the database.
di.connect_database(read_write=True)
try:
node = di.StorageNode.get(name=name)
    except pw.DoesNotExist:
        print('Storage node "%s" does not exist. I quit.' % name)
        return
if node.mounted:
print('Node "%s" is already mounted.' % name)
return
# Set the default hostname if required
if hostname is None:
hostname = socket.gethostname()
print('I will set the host to "%s".' % hostname)
# Set the parameters of this node
node.username = user
node.address = address
node.mounted = True
node.host = hostname
if path is not None:
node.root = path
node.save()
print('Successfully mounted "%s".' % name)
@cli.command()
@click.argument("root_or_name")
def unmount(root_or_name):
"""Unmount a storage node with location or named ROOT_OR_NAME."""
import os
import socket
# We need to write to the database.
di.connect_database(read_write=True)
try:
node = di.StorageNode.get(name=root_or_name)
except pw.DoesNotExist:
if root_or_name[-1] == "/":
            root_or_name = root_or_name[:-1]
if not os.path.exists(root_or_name):
print("That is neither a node name, nor a path on this host. " "I quit.")
exit()
try:
node = di.StorageNode.get(root=root_or_name, host=socket.gethostname())
except pw.DoesNotExist:
print(
"That is neither a node name nor a root name that is " "known. I quit."
)
exit()
if not node.mounted:
print("There is no node mounted there any more.")
else:
node.mounted = False
node.save()
print("Node successfully unmounted.")
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("-v", "--verbose", count=True)
@click.option(
"--acq",
help="Limit import to specified acquisition directories",
multiple=True,
default=None,
)
@click.option("--dry", "-d", help="Dry run. Do not modify database.", is_flag=True)
def import_files(node_name, verbose, acq, dry):
"""Scan the current directory for known acquisition files and add them into the database for NODE.
This command is useful for manually maintaining an archive where we can run
alpenhornd in the usual manner.
"""
import glob
from ch_util import data_index as di
di.connect_database(read_write=True)
import peewee as pw
# Construct list of acqs to scan
    # With multiple=True, click passes an empty tuple when --acq is not given
    if not acq:
acqs = glob.glob("*")
else:
acqs = acq
# Keep track of state as we process the files
added_files = [] # Files we have added to the database
corrupt_files = [] # Known files which are corrupt
registered_files = [] # Files already registered in the database
unknown_files = [] # Files not known in the database
not_acqs = [] # Directories which were not known acquisitions
# Fetch a reference to the node
try:
node = di.StorageNode.select().where(di.StorageNode.name == node_name).get()
except pw.DoesNotExist:
print("Unknown node.")
return
with click.progressbar(acqs, label="Scanning acquisitions") as acq_iter:
for acq_name in acq_iter:
try:
di.parse_acq_name(acq_name)
except di.Validation:
not_acqs.append(acq_name)
continue
try:
acq = di.ArchiveAcq.select().where(di.ArchiveAcq.name == acq_name).get()
except pw.DoesNotExist:
not_acqs.append(acq_name)
continue
files = glob.glob(acq_name + "/*")
# Fetch lists of all files in this acquisition, and all
# files in this acq with local copies
file_names = [f.name for f in acq.files]
local_file_names = [
f.name
for f in acq.files.join(di.ArchiveFileCopy).where(
di.ArchiveFileCopy.node == node
)
]
for fn in files:
f_name = os.path.split(fn)[1]
# Check if file exists in database
if f_name not in file_names:
unknown_files.append(fn)
continue
# Check if file is already registered on this node
if f_name in local_file_names:
registered_files.append(fn)
else:
archive_file = (
di.ArchiveFile.select()
.where(di.ArchiveFile.name == f_name, di.ArchiveFile.acq == acq)
.get()
)
if os.path.getsize(fn) != archive_file.size_b:
corrupt_files.append(fn)
continue
added_files.append(fn)
if not dry:
di.ArchiveFileCopy.create(
file=archive_file, node=node, has_file="Y", wants_file="Y"
)
print("\n==== Summary ====")
print()
print("Added %i files" % len(added_files))
print()
print("%i corrupt files." % len(corrupt_files))
print("%i files already registered." % len(registered_files))
print("%i files not known" % len(unknown_files))
print("%i directories were not acquisitions." % len(not_acqs))
if verbose > 0:
print()
print("Added files:")
print()
for fn in added_files:
print(fn)
if verbose > 1:
print("Corrupt:")
for fn in corrupt_files:
print(fn)
print()
print("Unknown files:")
for fn in unknown_files:
print(fn)
print()
print("Unknown acquisitions:")
for fn in not_acqs:
print(fn)
print()
# A few utitly routines for dealing with filesystems
MAX_E2LABEL_LEN = 16
def get_e2label(dev):
import os
pin, pout, perr = os.popen3("/sbin/e2label %s" % dev, "r")
pin.close()
res = pout.read().strip()
err = perr.read()
pout.close()
perr.close()
if not len(err) and len(res) < MAX_E2LABEL_LEN:
return res
return None
def get_mount_device(path):
import os
p = os.popen("mount", "r")
res = p.read()
p.close()
dev = None
for l in res.split("\n"):
if not len(l):
continue
s = l.split()
assert s[1] == "on"
if s[2] == os.path.abspath(path):
dev = s[0]
return dev
|
radiocosmology/alpenhorn
|
alpenhorn/legacy/client.py
|
Python
|
mit
| 32,643
|
#!/usr/bin/env python3
""" turtle-example-suite:
tdemo_tree.py
Displays a 'breadth-first-tree' - in contrast
to the classical Logo tree drawing programs,
which use a depth-first-algorithm.
Uses:
(1) a tree-generator, where the drawing is
quasi the side-effect, whereas the generator
always yields None.
(2) Turtle-cloning: At each branching point
the current pen is cloned. So in the end
there are 1024 turtles.
"""
from turtle import Turtle, mainloop
from time import perf_counter as clock  # time.clock was removed in Python 3.8
def tree(plist, l, a, f):
""" plist is list of pens
l is length of branch
a is half of the angle between 2 branches
f is factor by which branch is shortened
from level to level."""
if l > 3:
lst = []
for p in plist:
p.forward(l)
q = p.clone()
p.left(a)
q.right(a)
lst.append(p)
lst.append(q)
for x in tree(lst, l*f, a, f):
yield None
def maketree():
p = Turtle()
p.setundobuffer(None)
p.hideturtle()
p.speed(0)
p.getscreen().tracer(30,0)
p.left(90)
p.penup()
p.forward(-210)
p.pendown()
t = tree([p], 200, 65, 0.6375)
for x in t:
pass
print(len(p.getscreen().turtles()))
def main():
a=clock()
maketree()
b=clock()
return "done: %.2f sec." % (b-a)
if __name__ == "__main__":
msg = main()
print(msg)
mainloop()
|
ArcherSys/ArcherSys
|
Lib/turtledemo/tree.py
|
Python
|
mit
| 4,415
|
import time
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(2)
try:
print driver.current_url
time.sleep(1) # wait for window open
print 'click button to show jailed devtools'
driver.find_element_by_id('showdevtools').click()
print 'wait for devtools open'
wait_window_handles(driver, 2)
print 'switch to devtools'
switch_to_devtools(driver, devtools_window=driver.window_handles[-1])
print 'click Elements panel'
devtools_click_tab(driver, 'elements')
print 'find h1'
h1 = driver.execute_script('return document.getElementById("elements-content").firstChild.shadowRoot.querySelectorAll(".webkit-html-text-node")[1]').get_attribute('textContent')
print h1
assert (h1 == 'child')
finally:
driver.quit()
|
nwjs/nw.js
|
test/sanity/issue3780-jailed-elements/test.py
|
Python
|
mit
| 1,155
|
import xml.etree.ElementTree as ET
from office365.runtime.odata.odata_base_reader import ODataBaseReader
from office365.runtime.odata.odata_model import ODataModel
class ODataV4Reader(ODataBaseReader):
"""OData v4 reader"""
_options = None
def __init__(self, options):
self._options = options
self._namespaces = {
'xmlns': 'http://docs.oasis-open.org/odata/ns/edm',
'edmx': 'http://docs.oasis-open.org/odata/ns/edmx'
}
def generate_model(self):
model = ODataModel()
root = ET.parse(self._options['inputPath']).getroot()
schema_node = root.find('edmx:DataServices/xmlns:Schema', self._namespaces)
for complex_type_node in schema_node.findall('xmlns:ComplexType', self._namespaces):
type_schema = {'namespace': schema_node.attrib['Namespace'], 'name': complex_type_node.get('Name')}
model.resolve_type(type_schema)
self._process_type_node(model, type_schema, complex_type_node)
return model
def _process_type_node(self, model, type_schema, type_node):
for prop_node in type_node.findall('xmlns:Property', self._namespaces):
name = prop_node.get('Name')
prop_schema = {'name': name}
model.resolve_property(type_schema, prop_schema)
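# A minimal usage sketch (the metadata path is illustrative; the reader only
# needs an 'inputPath' option pointing at an OData v4 $metadata XML file):
#   reader = ODataV4Reader({'inputPath': './metadata.xml'})
#   model = reader.generate_model()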
|
vgrem/SharePointOnline-REST-Python-Client
|
office365/runtime/odata/odata_v4_reader.py
|
Python
|
mit
| 1,323
|
"""Implement Agents and Environments (Chapters 1-2).
The class hierarchies are as follows:
Thing ## A physical object that can exist in an environment
Agent
Wumpus
Dirt
Wall
...
Environment ## An environment holds objects, runs simulations
XYEnvironment
VacuumEnvironment
WumpusEnvironment
An agent program is a callable instance, taking percepts and choosing actions
SimpleReflexAgentProgram
...
EnvGUI ## A window with a graphical representation of the Environment
EnvToolbar ## contains buttons for controlling EnvGUI
EnvCanvas ## Canvas to display the environment of an EnvGUI
"""
# TO DO:
# Implement grabbing correctly.
# When an object is grabbed, does it still have a location?
# What if it is released?
# What if the grabbed or the grabber is deleted?
# What if the grabber moves?
#
# Speed control in GUI does not have any effect -- fix it.
from utils import distance_squared, turn_heading
from statistics import mean
import random
import copy
import collections
# ______________________________________________________________________________
class Thing:
"""This represents any physical object that can appear in an Environment.
You subclass Thing to get the things you want. Each thing can have a
.__name__ slot (used for output only)."""
def __repr__(self):
return '<{}>'.format(getattr(self, '__name__', self.__class__.__name__))
def is_alive(self):
"""Things that are 'alive' should return true."""
return hasattr(self, 'alive') and self.alive
def show_state(self):
"""Display the agent's internal state. Subclasses should override."""
print("I don't know how to show_state.")
def display(self, canvas, x, y, width, height):
"""Display an image of this Thing on the canvas."""
# Do we need this?
pass
class Agent(Thing):
"""An Agent is a subclass of Thing with one required slot,
.program, which should hold a function that takes one argument, the
percept, and returns an action. (What counts as a percept or action
will depend on the specific environment in which the agent exists.)
Note that 'program' is a slot, not a method. If it were a method,
then the program could 'cheat' and look at aspects of the agent.
It's not supposed to do that: the program can only look at the
percepts. An agent program that needs a model of the world (and of
the agent itself) will have to build and maintain its own model.
There is an optional slot, .performance, which is a number giving
the performance measure of the agent in its environment."""
def __init__(self, program=None):
self.alive = True
self.bump = False
self.holding = []
self.performance = 0
        if program is None or not isinstance(program, collections.abc.Callable):
print("Can't find a valid program for {}, falling back to default.".format(
self.__class__.__name__))
def program(percept):
return eval(input('Percept={}; action? '.format(percept)))
self.program = program
def can_grab(self, thing):
"""Return True if this agent can grab this thing.
Override for appropriate subclasses of Agent and Thing."""
return False
def TraceAgent(agent):
"""Wrap the agent's program to print its input and output. This will let
you see what the agent is doing in the environment."""
old_program = agent.program
def new_program(percept):
action = old_program(percept)
print('{} perceives {} and does {}'.format(agent, percept, action))
return action
agent.program = new_program
return agent
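# A minimal sketch of TraceAgent in use, with the vacuum world defined below:
#   agent = TraceAgent(ModelBasedVacuumAgent())
#   env = TrivialVacuumEnvironment()
#   env.add_thing(agent)
#   env.run(5)   # each step prints "<Agent> perceives ... and does ..."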
# ______________________________________________________________________________
def TableDrivenAgentProgram(table):
"""This agent selects an action based on the percept sequence.
It is practical only for tiny domains.
To customize it, provide as table a dictionary of all
{percept_sequence:action} pairs. [Figure 2.7]"""
percepts = []
def program(percept):
percepts.append(percept)
action = table.get(tuple(percepts))
return action
return program
def RandomAgentProgram(actions):
"""An agent that chooses an action at random, ignoring all percepts.
>>> list = ['Right', 'Left', 'Suck', 'NoOp']
>>> program = RandomAgentProgram(list)
>>> agent = Agent(program)
>>> environment = TrivialVacuumEnvironment()
>>> environment.add_thing(agent)
>>> environment.run()
>>> environment.status == {(1, 0): 'Clean' , (0, 0): 'Clean'}
True
"""
return lambda percept: random.choice(actions)
# ______________________________________________________________________________
def SimpleReflexAgentProgram(rules, interpret_input):
"""This agent takes action based solely on the percept. [Figure 2.10]"""
def program(percept):
state = interpret_input(percept)
rule = rule_match(state, rules)
action = rule.action
return action
return program
def ModelBasedReflexAgentProgram(rules, update_state, model):
"""This agent takes action based on the percept and state. [Figure 2.12]"""
def program(percept):
program.state = update_state(program.state, program.action, percept, model)
rule = rule_match(program.state, rules)
action = rule.action
return action
program.state = program.action = None
return program
def rule_match(state, rules):
"""Find the first rule that matches state."""
for rule in rules:
if rule.matches(state):
return rule
# ______________________________________________________________________________
loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world
def RandomVacuumAgent():
"""Randomly choose one of the actions from the vacuum environment.
>>> agent = RandomVacuumAgent()
>>> environment = TrivialVacuumEnvironment()
>>> environment.add_thing(agent)
>>> environment.run()
>>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'}
True
"""
return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))
def TableDrivenVacuumAgent():
"""[Figure 2.3]"""
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck',
((loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck',
((loc_B, 'Dirty'), (loc_B, 'Clean')): 'Left',
((loc_A, 'Dirty'), (loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck',
((loc_B, 'Dirty'), (loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck'
}
return Agent(TableDrivenAgentProgram(table))
def ReflexVacuumAgent():
"""A reflex agent for the two-state vacuum environment. [Figure 2.8]
>>> agent = ReflexVacuumAgent()
>>> environment = TrivialVacuumEnvironment()
>>> environment.add_thing(agent)
>>> environment.run()
>>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'}
True
"""
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return Agent(program)
def ModelBasedVacuumAgent():
"""An agent that keeps track of what locations are clean or dirty.
>>> agent = ModelBasedVacuumAgent()
>>> environment = TrivialVacuumEnvironment()
>>> environment.add_thing(agent)
>>> environment.run()
>>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'}
True
"""
model = {loc_A: None, loc_B: None}
def program(percept):
"""Same as ReflexVacuumAgent, except if everything is clean, do NoOp."""
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return Agent(program)
# ______________________________________________________________________________
class Environment:
"""Abstract class representing an Environment. 'Real' Environment classes
inherit from this. Your Environment will typically need to implement:
percept: Define the percept that an agent sees.
execute_action: Define the effects of executing an action.
Also update the agent.performance slot.
The environment keeps a list of .things and .agents (which is a subset
of .things). Each agent has a .performance slot, initialized to 0.
Each thing has a .location slot, even though some environments may not
need this."""
def __init__(self):
self.things = []
self.agents = []
def thing_classes(self):
return [] # List of classes that can go into environment
def percept(self, agent):
"""Return the percept that the agent sees at this point. (Implement this.)"""
raise NotImplementedError
def execute_action(self, agent, action):
"""Change the world to reflect this action. (Implement this.)"""
raise NotImplementedError
def default_location(self, thing):
"""Default location to place a new thing with unspecified location."""
return None
def exogenous_change(self):
"""If there is spontaneous change in the world, override this."""
pass
def is_done(self):
"""By default, we're done when we can't find a live agent."""
return not any(agent.is_alive() for agent in self.agents)
def step(self):
"""Run the environment for one time step. If the
actions and exogenous changes are independent, this method will
do. If there are interactions between them, you'll need to
override this method."""
if not self.is_done():
actions = []
for agent in self.agents:
if agent.alive:
actions.append(agent.program(self.percept(agent)))
else:
actions.append("")
for (agent, action) in zip(self.agents, actions):
self.execute_action(agent, action)
self.exogenous_change()
def run(self, steps=1000):
"""Run the Environment for given number of time steps."""
for step in range(steps):
if self.is_done():
return
self.step()
def list_things_at(self, location, tclass=Thing):
"""Return all things exactly at a given location."""
return [thing for thing in self.things
if thing.location == location and isinstance(thing, tclass)]
def some_things_at(self, location, tclass=Thing):
"""Return true if at least one of the things at location
is an instance of class tclass (or a subclass)."""
return self.list_things_at(location, tclass) != []
def add_thing(self, thing, location=None):
"""Add a thing to the environment, setting its location. For
convenience, if thing is an agent program we make a new agent
for it. (Shouldn't need to override this.)"""
if not isinstance(thing, Thing):
thing = Agent(thing)
if thing in self.things:
print("Can't add the same thing twice")
else:
thing.location = location if location is not None else self.default_location(thing)
self.things.append(thing)
if isinstance(thing, Agent):
thing.performance = 0
self.agents.append(thing)
def delete_thing(self, thing):
"""Remove a thing from the environment."""
try:
self.things.remove(thing)
except ValueError as e:
print(e)
print(" in Environment delete_thing")
print(" Thing to be removed: {} at {}".format(thing, thing.location))
print(" from list: {}".format([(thing, thing.location) for thing in self.things]))
if thing in self.agents:
self.agents.remove(thing)
class Direction:
"""A direction class for agents that want to move in a 2D plane
Usage:
d = Direction("down")
To change directions:
d = d + "right" or d = d + Direction.R #Both do the same thing
Note that the argument to __add__ must be a string and not a Direction object.
Also, it (the argument) can only be right or left."""
R = "right"
L = "left"
U = "up"
D = "down"
def __init__(self, direction):
self.direction = direction
def __add__(self, heading):
"""
>>> d = Direction('right')
>>> l1 = d.__add__(Direction.L)
>>> l2 = d.__add__(Direction.R)
>>> l1.direction
'up'
>>> l2.direction
'down'
>>> d = Direction('down')
>>> l1 = d.__add__('right')
>>> l2 = d.__add__('left')
>>> l1.direction == Direction.L
True
>>> l2.direction == Direction.R
True
"""
if self.direction == self.R:
return{
self.R: Direction(self.D),
self.L: Direction(self.U),
}.get(heading, None)
elif self.direction == self.L:
return{
self.R: Direction(self.U),
self.L: Direction(self.D),
}.get(heading, None)
elif self.direction == self.U:
return{
self.R: Direction(self.R),
self.L: Direction(self.L),
}.get(heading, None)
elif self.direction == self.D:
return{
self.R: Direction(self.L),
self.L: Direction(self.R),
}.get(heading, None)
def move_forward(self, from_location):
"""
>>> d = Direction('up')
>>> l1 = d.move_forward((0, 0))
>>> l1
(0, -1)
>>> d = Direction(Direction.R)
>>> l1 = d.move_forward((0, 0))
>>> l1
(1, 0)
"""
x, y = from_location
if self.direction == self.R:
return (x + 1, y)
elif self.direction == self.L:
return (x - 1, y)
elif self.direction == self.U:
return (x, y - 1)
elif self.direction == self.D:
return (x, y + 1)
class XYEnvironment(Environment):
"""This class is for environments on a 2D plane, with locations
labelled by (x, y) points, either discrete or continuous.
Agents perceive things within a radius. Each agent in the
environment has a .location slot which should be a location such
as (0, 1), and a .holding slot, which should be a list of things
that are held."""
def __init__(self, width=10, height=10):
super().__init__()
self.width = width
self.height = height
self.observers = []
# Sets iteration start and end (no walls).
self.x_start, self.y_start = (0, 0)
self.x_end, self.y_end = (self.width, self.height)
perceptible_distance = 1
def things_near(self, location, radius=None):
"""Return all things within radius of location."""
if radius is None:
radius = self.perceptible_distance
radius2 = radius * radius
return [(thing, radius2 - distance_squared(location, thing.location))
for thing in self.things if distance_squared(
location, thing.location) <= radius2]
def percept(self, agent):
"""By default, agent perceives things within a default radius."""
return self.things_near(agent.location)
def execute_action(self, agent, action):
agent.bump = False
if action == 'TurnRight':
agent.direction += Direction.R
elif action == 'TurnLeft':
agent.direction += Direction.L
elif action == 'Forward':
agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
# elif action == 'Grab':
# things = [thing for thing in self.list_things_at(agent.location)
# if agent.can_grab(thing)]
# if things:
# agent.holding.append(things[0])
elif action == 'Release':
if agent.holding:
agent.holding.pop()
def default_location(self, thing):
        return (random.randrange(self.width), random.randrange(self.height))
def move_to(self, thing, destination):
"""Move a thing to a new location. Returns True on success or False if there is an Obstacle.
If thing is holding anything, they move with him."""
thing.bump = self.some_things_at(destination, Obstacle)
if not thing.bump:
thing.location = destination
for o in self.observers:
o.thing_moved(thing)
for t in thing.holding:
self.delete_thing(t)
self.add_thing(t, destination)
t.location = destination
return thing.bump
def add_thing(self, thing, location=(1, 1), exclude_duplicate_class_items=False):
"""Add things to the world. If (exclude_duplicate_class_items) then the item won't be
added if the location has at least one item of the same class."""
if (self.is_inbounds(location)):
if (exclude_duplicate_class_items and
any(isinstance(t, thing.__class__) for t in self.list_things_at(location))):
return
super().add_thing(thing, location)
def is_inbounds(self, location):
"""Checks to make sure that the location is inbounds (within walls if we have walls)"""
x, y = location
return not (x < self.x_start or x >= self.x_end or y < self.y_start or y >= self.y_end)
def random_location_inbounds(self, exclude=None):
"""Returns a random location that is inbounds (within walls if we have walls)"""
location = (random.randint(self.x_start, self.x_end),
random.randint(self.y_start, self.y_end))
if exclude is not None:
            while location == exclude:
location = (random.randint(self.x_start, self.x_end),
random.randint(self.y_start, self.y_end))
return location
def delete_thing(self, thing):
"""Deletes thing, and everything it is holding (if thing is an agent)"""
if isinstance(thing, Agent):
for obj in thing.holding:
super().delete_thing(obj)
for obs in self.observers:
obs.thing_deleted(obj)
super().delete_thing(thing)
for obs in self.observers:
obs.thing_deleted(thing)
def add_walls(self):
"""Put walls around the entire perimeter of the grid."""
for x in range(self.width):
self.add_thing(Wall(), (x, 0))
self.add_thing(Wall(), (x, self.height - 1))
for y in range(self.height):
self.add_thing(Wall(), (0, y))
self.add_thing(Wall(), (self.width - 1, y))
# Updates iteration start and end (with walls).
self.x_start, self.y_start = (1, 1)
self.x_end, self.y_end = (self.width - 1, self.height - 1)
def add_observer(self, observer):
"""Adds an observer to the list of observers.
An observer is typically an EnvGUI.
Each observer is notified of changes in move_to and add_thing,
by calling the observer's methods thing_moved(thing)
and thing_added(thing, loc)."""
self.observers.append(observer)
def turn_heading(self, heading, inc):
"""Return the heading to the left (inc=+1) or right (inc=-1) of heading."""
return turn_heading(heading, inc)
class Obstacle(Thing):
"""Something that can cause a bump, preventing an agent from
moving into the same square it's in."""
pass
class Wall(Obstacle):
pass
# ______________________________________________________________________________
try:
from ipythonblocks import BlockGrid
from IPython.display import HTML, display
from time import sleep
except ImportError:
pass
class GraphicEnvironment(XYEnvironment):
def __init__(self, width=10, height=10, boundary=True, color={}, display=False):
"""Define all the usual XYEnvironment characteristics,
but initialise a BlockGrid for GUI too."""
super().__init__(width, height)
self.grid = BlockGrid(width, height, fill=(200, 200, 200))
if display:
self.grid.show()
self.visible = True
else:
self.visible = False
self.bounded = boundary
self.colors = color
def get_world(self):
"""Returns all the items in the world in a format
understandable by the ipythonblocks BlockGrid."""
result = []
x_start, y_start = (0, 0)
x_end, y_end = self.width, self.height
for x in range(x_start, x_end):
row = []
for y in range(y_start, y_end):
row.append(self.list_things_at([x, y]))
result.append(row)
return result
"""
def run(self, steps=1000, delay=1):
"" "Run the Environment for given number of time steps,
but update the GUI too." ""
for step in range(steps):
sleep(delay)
if self.visible:
self.reveal()
if self.is_done():
if self.visible:
self.reveal()
return
self.step()
if self.visible:
self.reveal()
"""
def run(self, steps=1000, delay=1):
"""Run the Environment for given number of time steps,
but update the GUI too."""
for step in range(steps):
self.update(delay)
if self.is_done():
break
self.step()
self.update(delay)
def update(self, delay=1):
sleep(delay)
if self.visible:
self.conceal()
self.reveal()
else:
self.reveal()
def reveal(self):
"""Display the BlockGrid for this world - the last thing to be added
at a location defines the location color."""
self.draw_world()
self.grid.show()
self.visible = True
def draw_world(self):
self.grid[:] = (200, 200, 200)
world = self.get_world()
for x in range(0, len(world)):
for y in range(0, len(world[x])):
if len(world[x][y]):
self.grid[y, x] = self.colors[world[x][y][-1].__class__.__name__]
def conceal(self):
"""Hide the BlockGrid for this world"""
self.visible = False
display(HTML(''))
# ______________________________________________________________________________
# Continuous environment
class ContinuousWorld(Environment):
"""Model for Continuous World"""
def __init__(self, width=10, height=10):
super().__init__()
self.width = width
self.height = height
def add_obstacle(self, coordinates):
self.things.append(PolygonObstacle(coordinates))
class PolygonObstacle(Obstacle):
def __init__(self, coordinates):
"""Coordinates is a list of tuples."""
super().__init__()
self.coordinates = coordinates
# ______________________________________________________________________________
# Vacuum environment
class Dirt(Thing):
pass
class VacuumEnvironment(XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=10, height=10):
super().__init__(width, height)
self.add_walls()
def thing_classes(self):
return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (status, bump)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super().execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(Environment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super().__init__()
self.status = {loc_A: random.choice(['Clean', 'Dirty']),
loc_B: random.choice(['Clean', 'Dirty'])}
def thing_classes(self):
return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""Returns the agent's location, and the location status (Dirty/Clean)."""
return (agent.location, self.status[agent.location])
def execute_action(self, agent, action):
"""Change agent's location and/or location's status; track performance.
Score 10 for each dirt cleaned; -1 for each move."""
if action == 'Right':
agent.location = loc_B
agent.performance -= 1
elif action == 'Left':
agent.location = loc_A
agent.performance -= 1
elif action == 'Suck':
if self.status[agent.location] == 'Dirty':
agent.performance += 10
self.status[agent.location] = 'Clean'
def default_location(self, thing):
"""Agents start in either location at random."""
return random.choice([loc_A, loc_B])
# ______________________________________________________________________________
# The Wumpus World
class Gold(Thing):
def __eq__(self, rhs):
"""All Gold are equal"""
return rhs.__class__ == Gold
class Bump(Thing):
pass
class Glitter(Thing):
pass
class Pit(Thing):
pass
class Breeze(Thing):
pass
class Arrow(Thing):
pass
class Scream(Thing):
pass
class Wumpus(Agent):
screamed = False
pass
class Stench(Thing):
pass
class Explorer(Agent):
holding = []
has_arrow = True
killed_by = ""
direction = Direction("right")
def can_grab(self, thing):
"""Explorer can only grab gold"""
return thing.__class__ == Gold
class WumpusEnvironment(XYEnvironment):
pit_probability = 0.2 # Probability to spawn a pit in a location. (From Chapter 7.2)
# Room should be 4x4 grid of rooms. The extra 2 for walls
def __init__(self, agent_program, width=6, height=6):
super().__init__(width, height)
self.init_world(agent_program)
def init_world(self, program):
"""Spawn items in the world based on probabilities from the book"""
"WALLS"
self.add_walls()
"PITS"
for x in range(self.x_start, self.x_end):
for y in range(self.y_start, self.y_end):
if random.random() < self.pit_probability:
self.add_thing(Pit(), (x, y), True)
self.add_thing(Breeze(), (x - 1, y), True)
self.add_thing(Breeze(), (x, y - 1), True)
self.add_thing(Breeze(), (x + 1, y), True)
self.add_thing(Breeze(), (x, y + 1), True)
"WUMPUS"
w_x, w_y = self.random_location_inbounds(exclude=(1, 1))
self.add_thing(Wumpus(lambda x: ""), (w_x, w_y), True)
self.add_thing(Stench(), (w_x - 1, w_y), True)
self.add_thing(Stench(), (w_x + 1, w_y), True)
self.add_thing(Stench(), (w_x, w_y - 1), True)
self.add_thing(Stench(), (w_x, w_y + 1), True)
"GOLD"
self.add_thing(Gold(), self.random_location_inbounds(exclude=(1, 1)), True)
"AGENT"
self.add_thing(Explorer(program), (1, 1), True)
def get_world(self, show_walls=True):
"""Return the items in the world"""
result = []
x_start, y_start = (0, 0) if show_walls else (1, 1)
if show_walls:
x_end, y_end = self.width, self.height
else:
x_end, y_end = self.width - 1, self.height - 1
for x in range(x_start, x_end):
row = []
for y in range(y_start, y_end):
row.append(self.list_things_at((x, y)))
result.append(row)
return result
def percepts_from(self, agent, location, tclass=Thing):
"""Return percepts from a given location,
and replaces some items with percepts from chapter 7."""
thing_percepts = {
Gold: Glitter(),
Wall: Bump(),
Wumpus: Stench(),
Pit: Breeze()}
"""Agents don't need to get their percepts"""
thing_percepts[agent.__class__] = None
"""Gold only glitters in its cell"""
if location != agent.location:
thing_percepts[Gold] = None
result = [thing_percepts.get(thing.__class__, thing) for thing in self.things
if thing.location == location and isinstance(thing, tclass)]
return result if len(result) else [None]
def percept(self, agent):
"""Return things in adjacent (not diagonal) cells of the agent.
Result format: [Left, Right, Up, Down, Center / Current location]"""
x, y = agent.location
result = []
result.append(self.percepts_from(agent, (x - 1, y)))
result.append(self.percepts_from(agent, (x + 1, y)))
result.append(self.percepts_from(agent, (x, y - 1)))
result.append(self.percepts_from(agent, (x, y + 1)))
result.append(self.percepts_from(agent, (x, y)))
"""The wumpus gives out a loud scream once it's killed."""
wumpus = [thing for thing in self.things if isinstance(thing, Wumpus)]
if len(wumpus) and not wumpus[0].alive and not wumpus[0].screamed:
result[-1].append(Scream())
wumpus[0].screamed = True
return result
def execute_action(self, agent, action):
"""Modify the state of the environment based on the agent's actions.
Performance score taken directly out of the book."""
if isinstance(agent, Explorer) and self.in_danger(agent):
return
agent.bump = False
if action == 'TurnRight':
agent.direction += Direction.R
agent.performance -= 1
elif action == 'TurnLeft':
agent.direction += Direction.L
agent.performance -= 1
elif action == 'Forward':
agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
agent.performance -= 1
elif action == 'Grab':
things = [thing for thing in self.list_things_at(agent.location)
if agent.can_grab(thing)]
            if len(things):
                print("Grabbing", things[0].__class__.__name__)
                agent.holding.append(things[0])
agent.performance -= 1
elif action == 'Climb':
if agent.location == (1, 1): # Agent can only climb out of (1,1)
agent.performance += 1000 if Gold() in agent.holding else 0
self.delete_thing(agent)
elif action == 'Shoot':
"""The arrow travels straight down the path the agent is facing"""
if agent.has_arrow:
arrow_travel = agent.direction.move_forward(agent.location)
                while self.is_inbounds(arrow_travel):
                    wumpus = [thing for thing in self.list_things_at(arrow_travel)
                              if isinstance(thing, Wumpus)]
                    if len(wumpus):
                        wumpus[0].alive = False
                        break
                    arrow_travel = agent.direction.move_forward(arrow_travel)
agent.has_arrow = False
def in_danger(self, agent):
"""Check if Explorer is in danger (Pit or Wumpus), if he is, kill him"""
for thing in self.list_things_at(agent.location):
if isinstance(thing, Pit) or (isinstance(thing, Wumpus) and thing.alive):
agent.alive = False
agent.performance -= 1000
agent.killed_by = thing.__class__.__name__
return True
return False
def is_done(self):
"""The game is over when the Explorer is killed
or if he climbs out of the cave only at (1,1)."""
explorer = [agent for agent in self.agents if isinstance(agent, Explorer)]
if len(explorer):
if explorer[0].alive:
return False
else:
print("Death by {} [-1000].".format(explorer[0].killed_by))
else:
print("Explorer climbed out {}."
.format(
"with Gold [+1000]!" if Gold() not in self.things else "without Gold [+0]"))
return True
# TODO: Arrow needs to be implemented
# ______________________________________________________________________________
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
"""See how well each of several agents do in n instances of an environment.
Pass in a factory (constructor) for environments, and several for agents.
Create n instances of the environment, and run each agent in copies of
each one for steps. Return a list of (agent, average-score) tuples.
>>> environment = TrivialVacuumEnvironment
>>> agents = [ModelBasedVacuumAgent, ReflexVacuumAgent]
>>> result = compare_agents(environment, agents)
    >>> performance_ModelBasedVacuumAgent = result[0][1]
    >>> performance_ReflexVacuumAgent = result[1][1]
    >>> performance_ReflexVacuumAgent <= performance_ModelBasedVacuumAgent
True
"""
envs = [EnvFactory() for i in range(n)]
return [(A, test_agent(A, steps, copy.deepcopy(envs)))
for A in AgentFactories]
def test_agent(AgentFactory, steps, envs):
"""Return the mean score of running an agent in each of the envs, for steps
>>> def constant_prog(percept):
... return percept
...
>>> agent = Agent(constant_prog)
>>> result = agent.program(5)
>>> result == 5
True
"""
def score(env):
agent = AgentFactory()
env.add_thing(agent)
env.run(steps)
return agent.performance
return mean(map(score, envs))
# _________________________________________________________________________
__doc__ += """
>>> a = ReflexVacuumAgent()
>>> a.program((loc_A, 'Clean'))
'Right'
>>> a.program((loc_B, 'Clean'))
'Left'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> e = TrivialVacuumEnvironment()
>>> e.add_thing(ModelBasedVacuumAgent())
>>> e.run(5)
"""
|
jo-tez/aima-python
|
agents.py
|
Python
|
mit
| 35,990
|
from django.contrib import admin
from polls.models import Poll, Choice
class ChoiceInline(admin.StackedInline):
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
#fields = ['question', 'pub_date']
inlines = [ChoiceInline]
list_display = ('question', 'pub_date')
search_fields = ['question']
admin.site.register(Poll, PollAdmin)
#admin.site.register(Choice)
|
zhengwy888/rockproject
|
sandstone/polls/admin.py
|
Python
|
mit
| 391
|
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(name)s: %(message)s',
datefmt='%Y-%m-%dT%H:%M:%S%z')
def get_logger(name: str) -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
return logger
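# A minimal usage sketch (the logger name is whatever module imports this):
#   logger = get_logger(__name__)
#   logger.info("pipeline started")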
|
GoC-Spending/fuzzy-tribble
|
src/tribble/log.py
|
Python
|
mit
| 325
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test example app."""
import datetime
import os
import signal
import subprocess
import time
import pytest
@pytest.yield_fixture
def example_app():
"""Example app fixture."""
current_dir = os.getcwd()
# Go to example directory
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
exampleappdir = os.path.join(project_dir, 'examples')
os.chdir(exampleappdir)
# Start example app
webapp = subprocess.Popen(
'FLASK_APP=app.py flask run --debugger -p 5000',
stdout=subprocess.PIPE, preexec_fn=os.setsid, shell=True)
time.sleep(3)
yield webapp
# Stop server
os.killpg(webapp.pid, signal.SIGTERM)
# Return to the original directory
os.chdir(current_dir)
def test_example_app(example_app):
"""Test example app."""
# Testing get index page
cmd = 'curl http://127.0.0.1:5000/'
output = subprocess.check_output(cmd, shell=True).decode("utf-8")
expected = '\nToday is {0}\n<img src="/badge/DOI/invenio.12345.svg">' \
'</img>'.format(datetime.date.today())
assert expected == output
|
tiborsimko/invenio-formatter
|
tests/test_examples_app.py
|
Python
|
mit
| 1,342
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-08 19:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetracking', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='project',
name='tags',
field=models.ManyToManyField(blank=True, to='timetracking.Tag'),
),
]
|
rixx/tempus
|
tempus/timetracking/migrations/0002_auto_20160108_1916.py
|
Python
|
mit
| 469
|
from math import sqrt
def distance(a, b):
return sqrt(sum(map(lambda x: (x[1]-x[0])**2, zip(a, b))))
def scaler(old_low, old_high, new_low, new_high):
def func(x):
old_range = old_high - old_low
new_range = new_high - new_low
return (((x - old_low) * new_range) / old_range) + new_low
return func
def normalizer(old_low, old_high):
return scaler(old_low, old_high, 0, 1)
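# Worked examples (inputs chosen purely for illustration):
#   distance((0, 0), (3, 4))   # -> 5.0
#   scaler(0, 10, 0, 100)(5)   # -> 50.0
#   normalizer(50, 100)(75)    # -> 0.5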
|
micaiahparker/vote
|
voters/utils.py
|
Python
|
mit
| 415
|
"""This pipeline is intended to make the classification of T2W modality
features."""
from __future__ import division
import os
import numpy as np
from imblearn import under_sampling
from imblearn import over_sampling
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.ensemble import RandomForestClassifier
from protoclass.data_management import GTModality
# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# Define a list of the path where the feature are kept
t2w_features = ['dct-t2w', 'edge-t2w/kirsch', 'edge-t2w/laplacian',
'edge-t2w/prewitt', 'edge-t2w/scharr', 'edge-t2w/sobel',
'gabor-t2w', 'harlick-t2w', 'ise-t2w', 'lbp-t2w', 'lbp-t2w',
'phase-congruency-t2w']#,
# 'spatial-position-euclidean', 'spatial-dist-center',
# 'spatial-dist-contour']
# Define the extension of each features
ext_features = ['_dct_t2w.npy', '_edge_t2w.npy', '_edge_t2w.npy',
'_edge_t2w.npy', '_edge_t2w.npy', '_edge_t2w.npy',
'_gabor_t2w.npy', '_haralick_t2w.npy', '_ise_t2w.npy',
'_lbp_8_1_t2w.npy', '_lbp_16_2_t2w.npy',
'_phase_congruency_t2w.npy']#, '_spe.npy', '_spe.npy',
# '_spe.npy']
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Define the path where to store the data
path_store = '/data/prostate/balanced/mp-mri-prostate/exp-3'
N_JOBS = -1
# Create the under_samplers and over_samplers list to use
samplers = [under_sampling.InstanceHardnessThreshold(
n_jobs=N_JOBS, estimator='random-forest'),
under_sampling.NearMiss(version=1, n_jobs=N_JOBS),
under_sampling.NearMiss(version=2, n_jobs=N_JOBS),
under_sampling.NearMiss(version=3, n_jobs=N_JOBS),
under_sampling.RandomUnderSampler(),
over_sampling.SMOTE(kind='regular', n_jobs=N_JOBS),
over_sampling.SMOTE(kind='borderline1', n_jobs=N_JOBS),
over_sampling.SMOTE(kind='borderline2', n_jobs=N_JOBS),
over_sampling.RandomOverSampler()]
# Define the sub-folder to use
sub_folder = ['cc', 'iht', 'nm1', 'nm2', 'nm3', 'rus', 'smote',
'smote-b1', 'smote-b2', 'ros']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
# Append for the GT data - Note that we need a list of gt path
path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
for gt in path_gt])
# Load the data and apply the balancing
for idx_pat in range(len(id_patient_list)):
print 'Read patient {}'.format(id_patient_list[idx_pat])
    # For each patient we need to load the different features
patient_data = []
for idx_feat in range(len(t2w_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
ext_features[idx_feat])
path_data = os.path.join(path_features, t2w_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Check if this is only one dimension data
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data.append(single_feature_data)
# Concatenate the data in a single array
patient_data = np.concatenate(patient_data, axis=1)
print 'The patient data are loaded'
# Create the corresponding ground-truth
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt,
path_patients_list_gt[idx_pat])
print 'Read the GT data for the current patient ...'
# Concatenate the training data
data = patient_data
# Extract the corresponding ground-truth for the testing data
# Get the index corresponding to the ground-truth
roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
# Get the label of the gt only for the prostate ROI
gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
label = gt_cap[roi_prostate]
    print "Let's go through the different balancing methods"
for idx_s, imb_method in enumerate(samplers):
print 'Apply balancing {} over {}'.format(idx_s + 1, len(samplers))
# Make the fitting and sampling
data_resampled, label_resampled = imb_method.fit_sample(data, label)
# Store the resampled data
path_bal = os.path.join(path_store, sub_folder[idx_s])
if not os.path.exists(path_bal):
os.makedirs(path_bal)
pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
'_t2w.npz')
filename = os.path.join(path_bal, pat_chg)
np.savez(filename, data_resampled=data_resampled,
label_resampled=label_resampled)
print 'Store the data'
|
I2Cvb/mp-mri-prostate
|
pipeline/feature-balancing/pipeline_balancing_t2w.py
|
Python
|
mit
| 5,450
|
import json
import os
import subprocess
import requests
def api_url(path):
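    """Build the full API URL for *path*: plain http://127.0.0.1:3000 when
    API_HOST is local, https against the configured host otherwise."""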
host = os.environ['API_HOST']
prefix = 's'
if host == '127.0.0.1':
prefix = ''
host = '127.0.0.1:3000'
return 'http{0}://{1}{2}'.format(prefix, host, path)
def end_all():
url = api_url('/runs/end_all')
params = dict(api_key=os.environ['API_KEY'])
resp = requests.get(url=url, params=params)
print(resp)
def schedule_runs(general_params, narrow_params):
if os.environ.get('END_ALL') == 'end_all':
end_all()
url = api_url('/runs/schedule_runs.json')
params = dict(api_key=os.environ['API_KEY'])
data = json.dumps(dict(general_params=general_params, narrow_params=narrow_params))
headers = {'Content-Type': 'application/json'}
resp = requests.post(url=url, params=params, data=data, headers=headers)
print(resp)
def get_random_pending_run():
url = api_url('/runs/start_random_pending_run')
host_name = _get_host_name()
params = dict(api_key=os.environ['API_KEY'], host_name=host_name)
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
start = data['result'] == 'start'
if not start:
return None
return dict(id=data['id'], algo_params=data['algo_params'])
def report_results(run_id, output):
url = api_url('/runs/{0}/report_results'.format(run_id))
params = {'api_key': os.environ['API_KEY'], 'run[output]': output}
resp = requests.put(url=url, data=params)
print(resp)
def _get_host_name():
if os.path.exists('.host_name'):
with open('.host_name', 'r') as host_name_file:
return host_name_file.read().strip()
    return subprocess.check_output('hostname').decode().strip()
|
lukaselmer/hierarchical-paragraph-vectors
|
code/helpers/api.py
|
Python
|
mit
| 1,745
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
class Endpoint(object):
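    """Immutable description of an HTTP method and path for one API endpoint."""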
def __init__(self, method, raw_path, *args, **kwargs):
self._method = method
self._raw_path = raw_path
@property
def method(self):
return self._method
@property
def path(self):
return self._raw_path
def Get(path, *args, **kwargs):
    return _method('GET', path, *args, **kwargs)
def Post(path, *args, **kwargs):
    return _method('POST', path, *args, **kwargs)
def _method(method, path, *args, **kwargs):
endpoint = Endpoint(method, path, *args, **kwargs)
def _create_endpoint(api, *call_args, **call_kwargs):
        return api.request(endpoint, *call_args, **call_kwargs)
return _create_endpoint
# vim: filetype=python
|
ryankanno/py-api
|
py_api/endpoint.py
|
Python
|
mit
| 815
|
# ALAINA KAFKES IMPLEMENTATION - alainakafkes
## Mergesort implementation in Python
## Runtime: O(n log(n))
## Space: O(n)
## Advantages: guaranteed good complexity (no need to worry about choice of pivot as in quicksort)
## Disadvantages: more temporary data storage needed than quicksort
#def mergesort(arr):
# if len(arr) <= 1:
# return arr
# midpoint = len(arr) // 2
#    return merger(mergesort(arr[:midpoint]), mergesort(arr[midpoint:]))
#def merger(arrLeft, arrRight):
# if not arrLeft:
# return arrRight
# if not arrRight:
# return arrLeft
# if arrLeft[0] < arrRight[0]:
#        return [arrLeft[0]] + merger(arrLeft[1:], arrRight)
#    return [arrRight[0]] + merger(arrLeft, arrRight[1:])
# YASH KATARIYA IMPLEMENTATION - yashk2810
def merge(L,R,A): # Stable merge: equal numbers preserve their original position
nL=len(L)
nR=len(R)
i=0
j=0
k=0
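    # Copy the smaller head element into A until one half is exhausted,
    # then drain the leftovers of the other half below.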
while i<nL and j<nR:
        if L[i]<=R[j]:
A[k]=L[i]
i+=1
else:
A[k]=R[j]
j+=1
k+=1
while i<nL:
A[k]=L[i]
i+=1
k+=1
while j<nR:
A[k]=R[j]
j+=1
k+=1
def mergesort(A): # Time complexity is Worst Case:- O(nlogn)
n=len(A) # Space complexity is O(n)
if n<2:
return A
    mid=n//2
left=A[:mid]
right=A[mid:]
mergesort(left)
mergesort(right)
merge(left,right,A)
return A
print mergesort([2,4,1,6,8,5,3])
|
saru95/DSA
|
Python/mergesort.py
|
Python
|
mit
| 1,356
|
import os
from setuptools import setup, find_packages
description = "Multi-Author Blog for Django"
long_description = description
if os.path.exists('README.rst'):
long_description = open('README.rst').read()
setup(
name='django-blog',
version='1.0.1',
packages=find_packages(exclude=["tests"]),
install_requires=[
'markdown',
'django>=1.11'
],
author='Joe Bergantine',
author_email='joe@kinsa.us',
description=description,
long_description=long_description,
url='https://github.com/bergantine/django-blog',
download_url='https://github.com/bergantine/django-blog/tarball/1.0.1',
license='New BSD License',
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
test_suite="runtests.runtests",
include_package_data=True,
)
|
jbergantine/django-blog
|
setup.py
|
Python
|
mit
| 1,835
|
#! /usr/bin/env python
#
# Generated by PAGE version 4.2
# In conjunction with Tcl version 8.6
# Jan. 19, 2014 09:47:50 AM
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
import progress_bar_support
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = Tk()
root.title('Progress_Bar')
root.geometry('301x129+472+154')
progress_bar_support.set_Tk_var()
#set_Tk_var()
w = Progress_Bar (root)
progress_bar_support.init(root, w)
root.mainloop()
w = None
def create_Progress_Bar (root):
'''Starting point when module is imported by another program.'''
global w, w_win
w = Toplevel (root)
w.title('Progress_Bar')
w.geometry('301x129+472+154')
progress_bar_support.set_Tk_var()
w_win = Progress_Bar (w)
progress_bar_support.init(w, w_win)
return w_win
def destroy_Progress_Bar ():
global w
w.destroy()
w = None
class Progress_Bar:
def __init__(self, master=None):
_bgcolor = 'wheat' # X11 color: #f5deb3
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#b2c9f4' # Closest X11 color: 'SlateGray2'
_ana1color = '#eaf4b2' # Closest X11 color: '{pale goldenrod}'
_ana2color = '#f4bcb2' # Closest X11 color: 'RosyBrown2'
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.',background=_bgcolor)
self.style.configure('.',foreground=_fgcolor)
self.style.map('.',background=
[('selected', _compcolor), ('active',_ana2color)])
master.configure(background=_bgcolor)
master.configure(highlightbackground="wheat")
master.configure(highlightcolor="black")
self.TProgressbar1 = ttk.Progressbar (master)
self.TProgressbar1.place(relx=0.17,rely=0.47,relheight=0.15
,relwidth=0.66)
self.TProgressbar1.configure(variable=progress_bar_support.prog_var)
if __name__ == '__main__':
vp_start_gui()
|
FrauBluher/PMSM
|
Config Tool/page/examples/rework_progress_bar/progress_bar.py
|
Python
|
mit
| 2,213
|
# -*- coding: utf-8 -*-
import urllib.parse
import requests_oauthlib as roauth
import pandas as pd
from tradeking import utils
BASE_URL = 'https://api.tradeking.com/v1'
_DATE_KEYS = ('date', 'datetime', 'divexdate', 'divpaydt', 'timestamp',
'pr_date', 'wk52hidate', 'wk52lodate', 'xdate')
_FLOAT_KEYS = ('ask', 'bid', 'chg', 'cl', 'div', 'dollar_value', 'eps',
'hi', 'iad', 'idelta', 'igamma', 'imp_volatility', 'irho',
'itheta', 'ivega', 'last', 'lo', 'opn', 'opt_val', 'pchg',
'pcls', 'pe', 'phi', 'plo', 'popn', 'pr_adp_100', 'pr_adp_200',
'pr_adp_50', 'prbook', 'prchg', 'strikeprice', 'volatility12',
'vwap', 'wk52hi', 'wk52lo', 'yield')
_INT_KEYS = ('asksz', 'basis', 'bidsz', 'bidtick', 'days_to_expiration',
'incr_vl', 'openinterest', 'pr_openinterest', 'prem_mult', 'pvol',
'sho', 'tr_num', 'vl', 'xday', 'xmonth', 'xyear')
def _quotes_to_df(quotes):
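    """Build a DataFrame indexed by symbol, coercing date, int and float columns."""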
if not isinstance(quotes, list):
quotes = [quotes]
df = pd.DataFrame.from_records(quotes, index='symbol')
for col in df.keys().intersection(_DATE_KEYS):
kwargs = {}
if col == 'timestamp':
kwargs['unit'] = 's'
try:
df[col] = pd.to_datetime(df[col], **kwargs)
except ValueError:
pass
for col in df.keys().intersection(_INT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('int', errors='ignore')
for col in df.keys().intersection(_FLOAT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('float', errors='ignore')
return df
# TODO(jkoelker) Would be nice to do a proper DSL
class OptionQuery(object):
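    """Parse 'field op value' clauses into TradeKing's 'field-op:value' query syntax."""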
FIELDS = ('strikeprice', 'xdate', 'xmonth', 'xyear', 'put_call', 'unique')
OPS = {'<': 'lt', 'lt': 'lt',
'>': 'gt', 'gt': 'gt',
'>=': 'gte', 'gte': 'gte',
'<=': 'lte', 'lte': 'lte',
'=': 'eq', '==': 'eq', 'eq': 'eq'}
def __init__(self, query):
if isinstance(query, str):
query = [query]
self._query = []
for part in query:
field, op, value = part.split()
field = field.lower()
if field not in self.FIELDS or op not in self.OPS:
continue
if field == 'xdate':
value = pd.to_datetime(value).strftime('%Y%m%d')
self._query.append((field, self.OPS[op], value))
def __str__(self):
return ' AND '.join(['%s-%s:%s' % (field, op, value)
for field, op, value in self._query])
class API(object):
def __init__(self, consumer_key, consumer_secret,
oauth_token, oauth_secret):
self._api = roauth.OAuth1Session(client_key=consumer_key,
client_secret=consumer_secret,
resource_owner_key=oauth_token,
resource_owner_secret=oauth_secret)
def join(self, *paths, **kwargs):
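        """Join URL path segments; clean=True (the default) strips trailing slashes first."""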
if len(paths) == 1:
paths = paths[0]
if kwargs.get('clean', True):
paths = [p.rstrip('/') for p in paths]
return '/'.join(paths)
def request(self, method, url, format='json', decode=True, **kwargs):
if format:
url = '.'.join((url, format))
r = self._api.request(method, url, **kwargs)
if decode:
r = r.json()
return r
def get(self, url, format='json', decode=True, **kwargs):
return self.request('GET', url=url, format=format, decode=decode,
**kwargs)
def post(self, url, format='json', decode=True, **kwargs):
return self.request('POST', url=url, format=format, decode=decode,
**kwargs)
class Account(object):
def __init__(self, api, account_id):
self._api = api
self.account_id = account_id
def _get(self, what=None, **kwargs):
params = [BASE_URL, 'accounts', self.account_id]
if what is not None:
params.append(what)
path = self._api.join(params)
return self._api.get(path, **kwargs)
def _balances(self, **kwargs):
return self._get('balances', **kwargs)
def _history(self, date_range='all', transactions='all', **kwargs):
params = {'range': date_range, 'transactions': transactions}
return self._get('history', params=params, **kwargs)
def _holdings(self, **kwargs):
return self._get('holdings', **kwargs)
def _orders(self, **kwargs):
return self._get('orders', **kwargs)
@property
def balances(self):
r = self._balances()
return r['response']['accountbalance']
def history(self, date_range='all', transactions='all'):
r = self._history(date_range=date_range, transactions=transactions)
return r['response']['transactions']['transaction']
@property
def holdings(self):
r = self._holdings()
return r['response']['accountholdings']['holding']
# TODO(jkoelker)
def order(self, order, preview=True):
pass
@property
def orders(self):
r = self._orders()
return r['response']['orderstatus']
class News(object):
def __init__(self, api):
self._api = api
def _article(self, article_id, **kwargs):
path = self._api.join(BASE_URL, 'market', 'news', article_id)
return self._api.get(path, **kwargs)
def _search(self, keywords=None, symbols=None, maxhits=None,
startdate=None, enddate=None, **kwargs):
if not keywords and not symbols:
raise ValueError('Either keywords or symbols are required')
data = {}
if keywords:
if isinstance(keywords, str):
keywords = [keywords]
data['keywords'] = ','.join(keywords)
if symbols:
if isinstance(symbols, str):
symbols = [symbols]
data['symbols'] = ','.join(symbols)
if maxhits:
data['maxhits'] = maxhits
# TODO(jkoelker) calculate enddate to be now()
if (not startdate and enddate) or (not enddate and startdate):
            raise ValueError('Both startdate and enddate are required if one '
                             'is specified')
if startdate and enddate:
data['startdate'] = startdate
data['enddate'] = enddate
path = self._api.join(BASE_URL, 'market', 'news', 'search')
return self._api.post(path, data=data, **kwargs)
def article(self, article_id):
r = self._article(article_id=article_id)
return r['response']['article']
def search(self, keywords=None, symbols=None, maxhits=None, startdate=None,
enddate=None):
r = self._search(keywords=keywords, symbols=symbols, maxhits=maxhits,
startdate=startdate, enddate=enddate)
return r['response']['articles']['article']
class Options(object):
def __init__(self, api, market):
self._api = api
self._market = market
symbol = staticmethod(utils.option_symbol)
symbols = staticmethod(utils.option_symbols)
decode = staticmethod(utils.parse_option_symbol)
def _expirations(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'expirations')
return self._api.get(path, params=params, **kwargs)
def _search(self, symbol, query, fields=None, query_is_prepared=False,
**kwargs):
if not isinstance(query, OptionQuery) and not query_is_prepared:
query = OptionQuery(query)
data = {'symbol': symbol, 'query': query}
if fields is not None:
data['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'options', 'search')
return self._api.post(path, data=data, **kwargs)
def _strikes(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'strikes')
return self._api.get(path, params=params, **kwargs)
def expirations(self, symbol):
r = self._expirations(symbol=symbol)
expirations = r['response']['expirationdates']['date']
return pd.to_datetime(pd.Series(expirations))
def search(self, symbol, query, fields=None):
r = self._search(symbol=symbol, query=query, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def strikes(self, symbol):
r = self._strikes(symbol=symbol)
strikes = r['response']['prices']['price']
return pd.Series(strikes, dtype=float)
def quote(self, symbol, strikes=None, expirations=None, calls=True,
puts=True, fields=None):
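        """Quote the full option chain for *symbol*, defaulting to all strikes
        and expirations."""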
if strikes is None:
strikes = self.strikes(symbol)
if expirations is None:
expirations = self.expirations(symbol)
symbols = utils.option_symbols(symbol, expirations, strikes, calls,
puts)
return self._market.quotes(symbols=symbols, fields=fields)
class Market(object):
def __init__(self, api):
self._api = api
self.news = News(self._api)
self.options = Options(self._api, self)
def _clock(self, **kwargs):
path = self._api.join(BASE_URL, 'market', 'clock')
return self._api.get(path, **kwargs)
def _quotes(self, symbols, fields=None, **kwargs):
if isinstance(symbols, (list, tuple)):
symbols = ','.join(symbols)
params = {'symbols': symbols}
if fields is not None:
params['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'ext', 'quotes')
return self._api.post(path, data=params, **kwargs)
def _toplist(self, list_type='toppctgainers', **kwargs):
path = self._api.join(BASE_URL, 'market', 'toplists', list_type)
return self._api.get(path, **kwargs)
@property
def clock(self):
r = self._clock()
r = r['response']
del r['@id']
return r
def quotes(self, symbols, fields=None):
r = self._quotes(symbols=symbols, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def toplist(self, list_type='toppctgainers'):
r = self._toplist(list_type=list_type)
return _quotes_to_df(r['response']['quotes']['quote'])
# TODO(jkoelker) market/timesales
# TODO(jkoelker) market/quotes (iterator)
class TradeKing(object):
def __init__(self, consumer_key, consumer_secret,
oauth_token, oauth_secret):
self._api = API(consumer_key=consumer_key,
consumer_secret=consumer_secret,
oauth_token=oauth_token,
oauth_secret=oauth_secret)
self.market = Market(self._api)
def _accounts(self, **kwargs):
        path = urllib.parse.urljoin(BASE_URL + '/', 'accounts')
return self._api.get(path, **kwargs)
def account(self, account_id):
return Account(self._api, account_id)
# TODO(jkoelker) member/profile
# TODO(jkoelker) utility/status
# TODO(jkoelker) utility/version
# TODO(jkoelker) utility/version
# TODO(jkoelker) watchlists
|
jkoelker/python-tradeking
|
tradeking/api.py
|
Python
|
mit
| 11,467
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-10 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lots_admin', '0032_applicationstatus_eds_sent'),
]
operations = [
migrations.RemoveField(
model_name='applicationstatus',
name='eds_sent',
),
migrations.AddField(
model_name='application',
name='eds_sent',
field=models.BooleanField(default=False),
),
]
|
datamade/large-lots
|
lots_admin/migrations/0033_auto_20170710_1456.py
|
Python
|
mit
| 584
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: Nelson Brochado
Created: 09/07/2015
Updated: 07/03/2018
# Description
The time complexity of the fractional knapsack is O(n * log(n)), because of the
call to sort the items by value/weight ratio.
# TODO
- Add complexity analysis.
- Create a non-interactive version.
"""
import operator
from tabulate import tabulate
__all__ = ["interactive_fractional_knapsack"]
def ask_objects():
objects = []
print("Welcome to the Fractional Knapsack problem!\n\n" +
"You will tell me the objects that you have,\n" +
"their value and weight.\n\n" +
"After that, you should also tell me\n"
"how much weight you can carry with you.\n\n" +
"I will then tell you which items or\n" +
"fraction of items you should take.\n")
input("When you are ready, press ENTER.\n" + "=" * 40 + "\n\n")
while True:
name = input("Enter the name of the object: ")
cost = int(input("Enter the value of " + name + ": "))
weight = int(input("Enter the weight (in grams) of " + name + ": "))
objects.append([name, cost, weight])
yn = input("\nDo you have other items (y/n)? ")
if yn.lower() in ("n", "no"):
break
else:
print("-" * 40, end="\n\n")
for obj in objects:
        # Append as fourth property of each object its value/weight ratio.
obj.append(obj[1] / obj[2])
objects.sort(key=operator.itemgetter(3), reverse=True)
print("\n\nThe following are the items that you have:\n")
print(tabulate(objects, tablefmt="grid",
headers=("Name", "Value", "Weight", "Value/Weight Ratio")))
capacity = int(
input("\nEnter the maximum weight you can bring (in grams): "))
return objects, capacity
def output_fractional_knapsack(knapsack_objects, objects):
s = "You should take "
for i, item in enumerate(knapsack_objects):
if not isinstance(item, tuple):
s += str(objects[item][2]) + " gram(s) of " + objects[item][0]
if i < len(knapsack_objects) - 1:
s += ", "
else:
s += " and " + str(item[1]) + " gram(s) of " + objects[item[0]][
0] + "."
print("\n\n" + s)
def interactive_fractional_knapsack():
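    """Greedily take whole items in decreasing value/weight order, then a
    fraction of the next item once the remaining capacity is too small."""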
objects, capacity = ask_objects()
current_weight = 0
knapsack_objects = []
for i, obj in enumerate(objects):
if obj[2] + current_weight <= capacity:
current_weight += obj[2]
knapsack_objects.append(i)
else:
remaining_weight = capacity - current_weight
knapsack_objects.append((i, remaining_weight))
break
output_fractional_knapsack(knapsack_objects, objects)
if __name__ == "__main__":
interactive_fractional_knapsack()
|
nbro/ands
|
ands/algorithms/greedy/fractional_knapsack.py
|
Python
|
mit
| 2,887
|
#!/usr/bin/env python
# pseudoreplicator 0.0.1
# Generated by dx-app-wizard.
#
# Basic execution pattern: Your app will run on a single machine from
# beginning to end.
#
# See https://wiki.dnanexus.com/Developer-Portal for documentation and
# tutorials on how to modify this file.
#
# DNAnexus Python Bindings (dxpy) documentation:
# http://autodoc.dnanexus.com/bindings/python/current/
import subprocess
import re
import gzip
import dxpy
import common
import logging
logger = logging.getLogger(__name__)
logger.addHandler(dxpy.DXLogHandler())
logger.propagate = False
logger.setLevel(logging.INFO)
@dxpy.entry_point('main')
def main(input_tags, prefix=None):
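    """Shuffle the input tagAlign/BEDPE reads and split them into two
    pseudoreplicate tagAlign files, uploaded back to the platform."""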
input_tags_file = dxpy.DXFile(input_tags)
input_tags_filename = input_tags_file.name
dxpy.download_dxfile(input_tags_file.get_id(), input_tags_filename)
# introspect the file to determine tagAlign (thus SE) or BEDPE (thus PE)
# strip extension as appropriate
subprocess.check_output('ls', shell=True)
with gzip.open(input_tags_filename) as f:
firstline = f.readline()
logger.info('First line of input_tags:\n%s' % (firstline))
se_cols = 6
pe_cols = 10
    if re.match(r'^(\S+[\t\n]){%d}$' % (se_cols), firstline):
        paired_end = False
        # rstrip would strip a character set rather than a suffix, so slice it off
        input_tags_basename = prefix or input_tags_filename[:-len('.tagAlign.gz')]
        filename_infix = 'SE'
        logger.info("Detected single-end data")
    elif re.match(r'^(\S+[\t\n]){%d}$' % (pe_cols), firstline):
        paired_end = True
        input_tags_basename = prefix or input_tags_filename[:-len('.bedpe.gz')]
        filename_infix = 'PE2SE'
        logger.info("Detected paired-end data")
else:
raise IOError(
"%s is neither a BEDPE or tagAlign file" % (input_tags_filename))
pr_ta_filenames = \
[input_tags_basename + ".%s.pr1.tagAlign.gz" % (filename_infix),
         input_tags_basename + ".%s.pr2.tagAlign.gz" % (filename_infix)]
# count lines in the file
out, err = common.run_pipe([
'gzip -dc %s' % (input_tags_filename),
'wc -l'])
# number of lines in each split
    nlines = (int(out)+1)//2
# Shuffle and split BEDPE file into 2 equal parts
# by using the input to seed shuf we ensure multiple runs with the same
# input will produce the same output
# Produces two files named splits_prefix0n, n=1,2
splits_prefix = 'temp_split'
out, err = common.run_pipe([
'gzip -dc %s' % (input_tags_filename),
'shuf --random-source=%s' % (input_tags_filename),
'split -a 2 -d -l %d - %s' % (nlines, splits_prefix)])
# Convert read pairs to reads into standard tagAlign file
for i, index in enumerate(['00', '01']): # could be made multi-threaded
steps = ['cat %s' % (splits_prefix+index)]
if paired_end:
steps.extend([r"""awk 'BEGIN{OFS="\t"}{printf "%s\t%s\t%s\tN\t1000\t%s\n%s\t%s\t%s\tN\t1000\t%s\n",$1,$2,$3,$9,$4,$5,$6,$10}'"""])
steps.extend(['gzip -cn'])
out, err = common.run_pipe(steps, outfile=pr_ta_filenames[i])
pseudoreplicate1_file = dxpy.upload_local_file(pr_ta_filenames[0])
pseudoreplicate2_file = dxpy.upload_local_file(pr_ta_filenames[1])
output = {
"pseudoreplicate1": dxpy.dxlink(pseudoreplicate1_file),
"pseudoreplicate2": dxpy.dxlink(pseudoreplicate2_file)
}
return output
dxpy.run()
|
ENCODE-DCC/chip-seq-pipeline
|
dnanexus/pseudoreplicator/src/pseudoreplicator.py
|
Python
|
mit
| 3,390
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 James Beedy <jamesbeedy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The nginx plugin is useful for web app based parts.
- nginx-version:
(string)
The version of nginx you want this snap to run.
- pcre-version:
(string)
The version of pcre you want to compile nginx with.
- openssl-version:
(string)
The version of openssl you want to compile nginx with.
- zlib-version:
(string)
The version of zlib you want to compile nginx with.
    - use-passenger:
(boolean)
Compile nginx with passenger module. (Defaults to false)
"""
import logging
import os
from snapcraft import BasePlugin
from snapcraft.sources import Tar
logger = logging.getLogger(__name__)
class NginxPlugin(BasePlugin):
@classmethod
def schema(cls):
schema = super().schema()
schema['properties']['nginx-version'] = {
'type': 'string',
'default': '1.11.13'
}
schema['properties']['pcre-version'] = {
'type': 'string',
'default': '8.40'
}
schema['properties']['zlib-version'] = {
'type': 'string',
'default': '1.2.11'
}
schema['properties']['openssl-version'] = {
'type': 'string',
'default': '1.0.2f'
}
schema['properties']['use-passenger'] = {
'type': 'boolean',
'default': False
}
return schema
@classmethod
def get_pull_properties(cls):
# Inform Snapcraft of the properties associated with pulling. If these
# change in the YAML Snapcraft will consider the build step dirty.
return ['nginx-version', 'pcre-version', 'zlib-version', 'openssl-version']
def __init__(self, name, options, project):
super().__init__(name, options, project)
# Beta Warning
# Remove this comment and warning once nginx plugin is stable.
        logger.warning("The nginx plugin is currently in beta, "
                       "its API may break. Use at your own risk")
# NGINX bits
self._nginx_download_url = \
'http://nginx.org/download/nginx-{}.tar.gz'.format(
self.options.nginx_version)
self._nginx_part_dir = os.path.join(self.partdir, 'nginx')
self._nginx_tar = Tar(self._nginx_download_url, self._nginx_part_dir)
# PCRE
self._pcre_download_url = \
'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-{}.tar.gz'.format(
self.options.pcre_version)
self._pcre_part_dir = os.path.join(self.partdir, 'pcre')
self._pcre_tar = Tar(self._pcre_download_url, self._pcre_part_dir)
# OPENSSL
self._openssl_download_url = \
'http://www.openssl.org/source/openssl-{}.tar.gz'.format(
self.options.openssl_version)
self._openssl_part_dir = os.path.join(self.partdir, 'openssl')
self._openssl_tar = Tar(self._openssl_download_url, self._openssl_part_dir)
# ZLIB
self._zlib_download_url = \
'http://zlib.net/zlib-{}.tar.gz'.format(
self.options.zlib_version)
self._zlib_part_dir = os.path.join(self.partdir, 'zlib')
self._zlib_tar = Tar(self._zlib_download_url, self._zlib_part_dir)
# PASSENGER
if self.options.use_passenger:
self._passenger_download_url = \
'https://www.phusionpassenger.com/latest_stable_tarball'
self._passenger_part_dir = os.path.join(self.partdir, 'passenger')
self._passenger_tar = Tar(self._passenger_download_url,
self._passenger_part_dir)
self.build_packages.extend(['gcc', 'g++', 'make',
'ruby-dev', 'libcurl4-openssl-dev'])
def snap_fileset(self):
fileset = super().snap_fileset()
fileset.append('-include/')
fileset.append('-share/')
return fileset
def pull(self):
super().pull()
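        # Dependencies are fetched and built during pull so that nginx's
        # ./configure below can reference their part directories.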
# PCRE
os.makedirs(self._pcre_part_dir, exist_ok=True)
self._pcre_tar.download()
self._pcre_install(builddir=self._pcre_part_dir)
# ZLIB
os.makedirs(self._zlib_part_dir, exist_ok=True)
self._zlib_tar.download()
self._zlib_install(builddir=self._zlib_part_dir)
# OPENSSL
os.makedirs(self._openssl_part_dir, exist_ok=True)
self._openssl_tar.download()
self._openssl_install(builddir=self._openssl_part_dir)
# PASSENGER
if self.options.use_passenger:
os.makedirs(self._passenger_part_dir, exist_ok=True)
self._passenger_tar.download()
self._passenger_install(builddir=self._passenger_part_dir)
# NGINX
os.makedirs(self._nginx_part_dir, exist_ok=True)
self._nginx_tar.download()
self._nginx_install(builddir=self._nginx_part_dir)
def env(self, root):
env = super().env(root)
env.append('PATH={}:{}'.format(
os.path.join(root, 'bin'), os.environ['PATH']))
return env
def _pcre_install(self, builddir):
self._pcre_tar.provision(
builddir, clean_target=False, keep_tarball=True)
self.run(['./configure'], cwd=builddir)
self.run(['make', '-j{}'.format(self.parallel_build_count)],
cwd=builddir)
self.run(['make', 'install'], cwd=builddir)
def _zlib_install(self, builddir):
self._zlib_tar.provision(
builddir, clean_target=False, keep_tarball=True)
self.run(['./configure'], cwd=builddir)
self.run(['make', '-j{}'.format(self.parallel_build_count)],
cwd=builddir)
self.run(['make', 'install'], cwd=builddir)
def _openssl_install(self, builddir):
self._openssl_tar.provision(
builddir, clean_target=False, keep_tarball=True)
self.run(['./config', '--prefix=/'], cwd=builddir)
self.run(['make', '-j{}'.format(self.parallel_build_count)],
cwd=builddir)
self.run(['make', 'install'], cwd=builddir)
def _passenger_install(self, builddir):
self._passenger_tar.provision(
builddir, clean_target=False, keep_tarball=True)
def _nginx_install(self, builddir):
self._nginx_tar.provision(
builddir, clean_target=False, keep_tarball=True)
cmd = ['./configure',
'--sbin-path={}'.format(os.path.join(
self.installdir, 'nginx', 'sbin', 'nginx')),
'--conf-path={}'.format(os.path.join(
self.installdir, 'nginx', 'conf', 'nginx.conf')),
'--with-pcre={}'.format(self._pcre_part_dir),
'--with-zlib={}'.format(self._zlib_part_dir),
'--with-openssl={}'.format(self._openssl_part_dir),
'--with-http_gzip_static_module',
'--with-stream']
if self.options.use_passenger:
cmd.append('--add-module={}'.format(os.path.join(
self._passenger_part_dir, 'src', 'nginx_module')))
self.run(cmd, cwd=builddir)
self.run(['make', '-j{}'.format(self.parallel_build_count)],
cwd=builddir)
self.run(['make', 'install'], cwd=builddir)
|
rene4jazz/visimil
|
snap/plugins/x-nginx.py
|
Python
|
mit
| 8,003
|
from __future__ import unicode_literals
try:
from urllib import urlencode
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlencode, urlparse, parse_qs
from django.views.generic import ListView
class SortableListView(ListView):
# Defaults, you probably want to specify these when you subclass
default_sort_field = 'id'
allowed_sort_fields = {default_sort_field: {'default_direction': '-',
'verbose_name': 'ID'}}
sort_parameter = 'sort' # the get parameter e.g. ?page=1&sort=2
    del_query_parameters = ['page'] # GET parameters we don't want to preserve
# End of Defaults
@property
def sort(self):
return self.sort_order + self.sort_field
@property
def default_sort_order(self):
return self.allowed_sort_fields[
self.default_sort_field]['default_direction']
@property
def default_sort(self):
return self.default_sort_order + self.default_sort_field
def get(self, request, *args, **kwargs):
self.sort_order, self.sort_field = self.set_sort(request)
self.sort_link_list = self.get_sort_link_list(request)
return super(SortableListView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SortableListView,
self).get_context_data(**kwargs)
context['current_sort_query'] = self.get_sort_string()
context['current_querystring'] = self.get_querystring()
context['sort_link_list'] = self.sort_link_list
return context
def get_queryset(self):
qs = super(SortableListView, self).get_queryset()
qs = qs.order_by(self.sort)
return qs
def get_querystring_parameter_to_remove(self):
"""
Return list of GET parameters that should be removed from querystring
"""
return self.del_query_parameters + [self.sort_parameter]
def get_querystring(self):
"""
Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page')
"""
to_remove = self.get_querystring_parameter_to_remove()
query_string = urlparse(self.request.get_full_path()).query
query_dict = parse_qs(query_string)
for arg in to_remove:
if arg in query_dict:
del query_dict[arg]
clean_query_string = urlencode(query_dict, doseq=True)
return clean_query_string
def set_sort(self, request):
"""
Take the sort parameter from the get parameters and split it into
the field and the prefix
"""
# Look for 'sort' in get request. If not available use default.
sort_request = request.GET.get(self.sort_parameter, self.default_sort)
if sort_request.startswith('-'):
sort_order = '-'
sort_field = sort_request.split('-')[1]
else:
sort_order = ''
sort_field = sort_request
# Invalid sort requests fail silently
        if sort_field not in self.allowed_sort_fields:
sort_order = self.default_sort_order
sort_field = self.default_sort_field
return (sort_order, sort_field)
def get_sort_string(self, sort=None):
if not sort:
sort = self.sort
sort_string = ''
if not sort == self.default_sort:
sort_string = self.sort_parameter + '=' + sort
return sort_string
def get_next_sort_string(self, field):
"""
If we're already sorted by the field then the sort query
returned reverses the sort order.
"""
        # self.sort_field is the current sort field
if field == self.sort_field:
next_sort = self.toggle_sort_order() + field
else:
default_order_for_field = \
self.allowed_sort_fields[field]['default_direction']
next_sort = default_order_for_field + field
return self.get_sort_string(next_sort)
def get_sort_indicator(self, field):
"""
Returns a sort class for the active sort only. That is, if field is not
        sort_field, then nothing will be returned because the sort is not
active.
"""
indicator = ''
if field == self.sort_field:
indicator = 'sort-asc'
if self.sort_order == '-':
indicator = 'sort-desc'
return indicator
def toggle_sort_order(self):
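        """Return the sort prefix that reverses the current order ('-' <-> '')."""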
if self.sort_order == '-':
toggled_sort_order = ''
if self.sort_order == '':
toggled_sort_order = '-'
return toggled_sort_order
def get_sort_link_list(self, request):
sort_links = []
for sort_field in self.allowed_sort_fields:
sort_link = {
'attrs': sort_field,
'path': self.get_basic_sort_link(request, sort_field),
'indicator': self.get_sort_indicator(sort_field),
'title': self.allowed_sort_fields[sort_field]['verbose_name']}
sort_links.append(sort_link)
return sort_links
def get_basic_sort_link(self, request, field):
"""
Thanks to del_query_parameters and get_querystring, we build the link
with preserving interesting get parameters and removing the others
"""
query_string = self.get_querystring()
sort_string = self.get_next_sort_string(field)
if sort_string:
sort_link = request.path + '?' + sort_string
if query_string:
sort_link += '&' + query_string
else:
sort_link = request.path
if query_string:
sort_link += '?' + query_string
return sort_link
|
JanMalte/django-sortable-listview
|
sortable_listview/views.py
|
Python
|
mit
| 5,863
|
#
# Hello World client in Python
# Connects REQ socket to tcp://localhost:5555
#
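# Usage: python hwclient.py <delay-seconds> <message>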
import sys
import time
import zmq
context = zmq.Context()
# Socket to talk to server
print "Connecting to hello world server..."
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
while True:
socket.send(sys.argv[2])
# Get the reply.
message = socket.recv()
print "Received reply ", "[", message, "]"
time.sleep(float(sys.argv[1]))
|
cnu/zeromq-talk
|
hwclient.py
|
Python
|
mit
| 466
|
#!/usr/bin/python
import rospy
import roslib
import web
import signal
from os import chdir
from os.path import join
from aaf_control_ui.srv import DemandTask
from aaf_control_ui.srv import DemandTaskResponse
from strands_executive_msgs.srv import CreateTask
from strands_executive_msgs.srv import DemandTask as ExecDemandTask
from strands_executive_msgs.msg import Task
from strands_executive_msgs.srv import AddTasks
from strands_executive_msgs.srv import AddTasksRequest
#from strands_executive_msgs.srv import DemandTask as SchedulerDemandTask
#from strands_executive_msgs.srv import DemandTaskRequest as SchedulerDemandTaskRequest
### Templates
TEMPLATE_DIR = roslib.packages.get_pkg_dir('aaf_control_ui') + '/www'
WEBTOOLS_DIR = roslib.packages.get_pkg_dir('strands_webtools')
html_config = {
'rosws_suffix': ':9090',
'mjpeg_suffix': ':8181',
'rosws_protocol': 'ws'
}
render = web.template.render(TEMPLATE_DIR, base='base', globals=globals())
chdir(TEMPLATE_DIR)
class ControlServer(web.application):
def __init__(self):
urls = (
'/', 'DashboardPage',
'/tasks', 'TasksPage',
'/setup', 'SetupPage',
'/admin', 'AdminPage',
'/webtools/(.*)', 'Webtools'
)
web.application.__init__(self, urls, globals())
rospy.Service(rospy.get_name()+'/demand_task', DemandTask, self.demand_task)
signal.signal(signal.SIGINT, self.signal_handler)
self.demand_priority = 100
def run(self, port=8027, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
def signal_handler(self, signum, frame):
self.stop()
print "aaf_control_server stopped."
def create_task(self, req):
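        """Build a Task for req.action at req.waypoint via the per-action
        '<action>_create' factory service, falling back to a bare Task."""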
factory_name = '/' + req.action + "_create"
start_after = rospy.Time.now()+rospy.Duration(secs=30)
rospy.loginfo(req)
end_before = start_after + rospy.Duration(secs=req.duration)
sa = "start_after: {secs: %d, nsecs: %d}" % \
(start_after.secs, start_after.nsecs)
eb = "end_before: {secs: %d, nsecs: %d}" % \
(end_before.secs, end_before.nsecs)
sn = "start_node_id: '%s'" % req.waypoint
en = "end_node_id: '%s'" % req.waypoint
yaml = "{%s, %s, %s, %s}" % (sa, eb, sn, en)
rospy.loginfo("calling with pre-populated yaml: %s" % yaml)
try:
factory = rospy.ServiceProxy(factory_name, CreateTask)
t = factory.call(yaml).task
rospy.loginfo("got the task back: %s" % str(t))
except Exception as e:
rospy.logwarn("Couldn't instantiate task from factory %s."
"error: %s."
"This is an error." %
(factory_name, str(e)))
t = Task()
t.start_node_id = req.waypoint
t.end_node_id = req.waypoint
t.end_before = end_before
t.start_after = start_after
t.action = req.action
# use maximum duration of the one given here and the one returned from the constructor
t.max_duration.secs = max(t.max_duration.secs, req.duration)
t.max_duration.nsecs = 0
t.start_node_id = req.waypoint
t.end_node_id = req.waypoint
# allow to end this 60 seconds after the duration
# to give some slack for scheduling
#t.end_before = t.end_before + rospy.Duration(secs=60)
t.priority = self.demand_priority
return t
def add_task(self, req):
t = self.create_task(req)
tasks = [t]
rospy.loginfo('add task %s to schedule now' % t)
service_name = '/task_executor/add_tasks'
try:
rospy.wait_for_service(service_name, timeout=10)
dt = rospy.ServiceProxy(service_name, AddTasks)
rospy.loginfo(dt(tasks))
except Exception as e:
rospy.logerr("Couldn't add task to scheduler. "
"error: %s." % str(e))
t = Task()
t.action = req.action
return DemandTaskResponse()
def demand_task(self, req):
t = self.create_task(req)
rospy.loginfo('demand task %s' % t)
service_name = '/task_executor/demand_task'
try:
rospy.wait_for_service(service_name, timeout=10)
dt = rospy.ServiceProxy(service_name, ExecDemandTask)
rospy.loginfo(dt(t))
except Exception as e:
rospy.logerr("Couldn't demand task on scheduler. "
"error: %s." % str(e))
t = Task()
t.action = req.action
return DemandTaskResponse()
def set_ws_protocol():
forward = web.ctx.env.get('HTTP_X_FORWARDED_HOST','')
if 'lcas.lincoln.ac.uk' in forward:
html_config['rosws_protocol'] = 'wss'
else:
html_config['rosws_protocol'] = 'ws'
print html_config['rosws_protocol']
class DashboardPage(object):
def GET(self):
set_ws_protocol()
return render.dashboard()
class TasksPage(object):
def GET(self):
set_ws_protocol()
return render.tasks()
class SetupPage(object):
def GET(self):
set_ws_protocol()
return render.setup()
class HelpPage(object):
def GET(self):
set_ws_protocol()
return render.help()
class AdminPage(object):
def GET(self):
set_ws_protocol()
return render.admin()
class Webtools(object):
"""
proxies all requests to strands_webtools
"""
def GET(self, f):
try:
p = join(WEBTOOLS_DIR, f)
rospy.logdebug("trying to serve %s from %s", f, p)
if f.endswith('.js'):
web.header('Content-Type', 'text/javascript')
return open(p, 'r').read()
        except:
            raise web.notfound() # file not found
if __name__ == "__main__":
rospy.init_node("aaf_control_ui_server")
port = rospy.get_param('~port', 8127)
html_config['rosws_suffix'] = rospy.get_param('~rosws_suffix', "/rosws")
html_config['mjpeg_suffix'] = rospy.get_param('~mjpeg_suffix', "/video")
html_config['rosws_protocol'] = rospy.get_param('~rosws_protocol', "wss")
rospy.loginfo("aaf_control_server started.")
app = ControlServer()
app.run(port=port)
|
strands-project/aaf_deployment
|
aaf_control_ui/scripts/server.py
|
Python
|
mit
| 6,395
|
# -*- coding: utf-8 -*-
# Python 3.5.1
# Software developed by Oscar Russo
# http://github.com/odrusso/bhs-pe-inventory
# Simple program to store the different database configurations
def db_local_users():
"""Returns the config information for the users table of a local database"""
config = {
"user": "root",
"password": "root",
"host": "localhost",
"database": "users"
}
return config
def db_local_data():
"""Returns the config information for the inventory table of a local database"""
config = {
"user": "root",
"password": "root",
"host": "localhost",
"database": "data"
}
return config
def db_remote_data():
"""Returns the config information for the users table of a remote database"""
config = {
"user": "root",
"password": "5sQ-Hsd-ekt-bzS",
"host": "bhs-pe-inventory.ci3pushvdxiu.us-west-2.rds.amazonaws.com:",
"database": "test"
}
return config
def db_remote_users():
"""Returns the config information for the users table of a remote database"""
config = {
"user": "root",
"password": "5sQ-Hsd-ekt-bzS",
"host": "bhs-pe-inventory.ci3pushvdxiu.us-west-2.rds.amazonaws.com:",
"database": "users"
}
return config
|
odrusso/bhs-pe-inventory
|
src/db_configs.py
|
Python
|
mit
| 1,278
|
import datetime
from celery import task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import get_template
from register_site.models import EntriesIndex
from scanner_engine.models import WatchersIndex, RedirectionsIndex
from scanner_engine.utils.redirection.utils import run_redirection_scan, has_problems_rediretion
from scanner_engine.utils.watcher.utils import run_watcher_scan, has_problems_watcher
logger = get_task_logger(__name__)
@task()
def automatic_scan_task(should_mail_the_user=True):
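    """Scan the redirections of every registered entry and, optionally,
    e-mail a summary of the problems found."""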
logger.info("Run automatic scan...")
number_of_entries_scanned = 0
number_of_problems_found = 0
for entry in EntriesIndex.objects.all():
number_of_entries_scanned += 1
if entry.has_redirections():
            redirections = RedirectionsIndex.objects.filter(entry=entry)
            if len(redirections) < 1:
                raise ValueError('Wrong number of Redirections. Should be at least 1, found %d' % len(redirections))
for redirection in redirections:
scan_result = run_redirection_scan(redirection)
if has_problems_rediretion(scan_result):
number_of_problems_found += 1
if should_mail_the_user:
template = get_template("mail_alert/hero.html")
context = {
'date': datetime.datetime.now().date(),
'full_date': datetime.datetime.now(),
'number_of_entries': number_of_entries_scanned,
'number_of_problems': number_of_problems_found
}
mail_subject = "Automatyczny skan!"
mail_message = template.render(context)
from_email = settings.EMAIL_HOST_USER
to_emails = [settings.PRIVATE_TARGET_EMAIL]
message = EmailMessage(mail_subject, mail_message, from_email, to_emails)
message.content_subtype = "html"
message.send(fail_silently=True)
|
Josowsky/SiteScanner-Backend
|
scanner_engine/tasks.py
|
Python
|
mit
| 2,039
|
## Security-Constrained LOPF with SciGRID
#
#This Jupyter Notebook is also available to download at: <https://pypsa.readthedocs.io/en/latest/examples/scigrid-sclopf.ipynb> and can be viewed as an HTML page at: <https://pypsa.readthedocs.io/en/latest/examples/scigrid-sclopf.html>.
#
#In this example, the dispatch of generators is optimised using the security-constrained linear OPF, to guarantee that no branches are overloaded by certain branch outages.
#
#The data files for this example are in the examples folder of the github repository: <https://github.com/PyPSA/PyPSA>.
#
### Data sources and health warnings
#
#See the separate notebook at <https://pypsa.readthedocs.io/en/latest/examples/add_load_gen_trafos_to_scigrid.ipynb>.
import pypsa, os
csv_folder_name = os.path.dirname(pypsa.__file__) + "/../examples/scigrid-de/scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder_name)
#There are some infeasibilities without line extensions
for line_name in ["316","527","602"]:
network.lines.loc[line_name,"s_nom"] = 1200
now = network.snapshots[0]
branch_outages = network.lines.index[:15]
print("Performing security-constrained linear OPF:")
network.sclopf(now,branch_outages=branch_outages)
print("Objective:",network.objective)
#For the PF, set the P to the optimised P
network.generators_t.p_set = network.generators_t.p_set.reindex(columns=network.generators.index)
network.generators_t.p_set.loc[now] = network.generators_t.p.loc[now]
network.storage_units_t.p_set = network.storage_units_t.p_set.reindex(columns=network.storage_units.index)
network.storage_units_t.p_set.loc[now] = network.storage_units_t.p.loc[now]
#Check no lines are overloaded with the linear contingency analysis
p0_test = network.lpf_contingency(now,branch_outages=branch_outages)
p0_test
#check loading as per unit of s_nom in each contingency
max_loading = abs(p0_test.divide(network.passive_branches().s_nom,axis=0)).describe().loc["max"]
print(max_loading)
import numpy as np
np.testing.assert_array_almost_equal(max_loading,np.ones((len(max_loading))))
|
PyPSA/PyPSA
|
examples/scigrid-de/scigrid-sclopf.py
|
Python
|
mit
| 2,182
|
import numpy as np
import matplotlib.pyplot as plt
# Reconstructed from an interactive session: read an ASCII grid in which each
# line is a row of single-digit values into a 1024x1024 integer image.
# 'file.asc' is a hypothetical input path; the original source was elided.
with open('file.asc') as fobj:
    ascfile = fobj.read()
im = np.ones((1024, 1024)) * np.nan
for i, line in enumerate(ascfile.split("\n")[:-1]):
    im[i] = np.array(list(line)).astype(int)
print(np.unique(im))
|
DrkSephy/NOAA-Projects
|
ims/files/file.py
|
Python
|
mit
| 529
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
#
# Weitian LI
# 2017-02-12
"""
Collect YAML manifest files, and convert collected results to CSV
format for later use.
"""
import sys
import argparse
import csv
from _context import acispy
from acispy.manifest import Manifest
def main():
parser = argparse.ArgumentParser(description="Collect YAML manifest files")
parser.add_argument("-k", "--keys", dest="keys", required=True,
help="YAML keys to be collected (in order); " +
"can be comma-separated string, or a file " +
"containing the keys one-per-line")
parser.add_argument("-b", "--brief", dest="brief",
action="store_true",
help="be brief and do not print header")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true",
help="show verbose information")
parser.add_argument("-o", "--outfile", dest="outfile", default=sys.stdout,
help="output CSV file to save collected data")
parser.add_argument("-i", "--infile", dest="infile",
nargs="+", required=True,
help="list of input YAML manifest files")
args = parser.parse_args()
try:
keys = [k.strip() for k in open(args.keys).readlines()]
except FileNotFoundError:
keys = [k.strip() for k in args.keys.split(",")]
if args.verbose:
print("keys:", keys, file=sys.stderr)
print("infile:", args.infile, file=sys.stderr)
print("outfile:", args.outfile, file=sys.stderr)
results = []
for fp in args.infile:
manifest = Manifest(fp)
res = manifest.gets(keys, splitlist=True)
if args.verbose:
print("FILE:{0}: {1}".format(fp, list(res.values())),
file=sys.stderr)
results.append(res)
try:
of = open(args.outfile, "w")
except TypeError:
of = args.outfile
writer = csv.writer(of)
if not args.brief:
writer.writerow(results[0].keys())
for res in results:
writer.writerow(res.values())
if of is not sys.stdout:
of.close()
if __name__ == "__main__":
main()
|
liweitianux/chandra-acis-analysis
|
bin/collect_yaml.py
|
Python
|
mit
| 2,329
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import gettext_lazy as _
from .models import TopicNotification
class NotificationForm(forms.ModelForm):
is_active = forms.BooleanField(widget=forms.HiddenInput(), initial=True, required=False)
class Meta:
model = TopicNotification
fields = ['is_active', ]
class NotificationCreationForm(NotificationForm):
def __init__(self, user=None, topic=None, *args, **kwargs):
super(NotificationCreationForm, self).__init__(*args, **kwargs)
self.user = user
self.topic = topic
def clean(self):
cleaned_data = super(NotificationCreationForm, self).clean()
notification = TopicNotification.objects.filter(
user=self.user,
topic=self.topic
)
if notification.exists():
# Do this since some of the unique_together fields are excluded.
raise forms.ValidationError(_("This notification already exists"))
# todo: test!
comment = self.topic.comment_set.last()
if comment is None:
raise forms.ValidationError(_("You can't subscribe to a topic with no comments"))
return cleaned_data
def save(self, commit=True):
if not self.instance.pk:
self.instance.user = self.user
self.instance.topic = self.topic
self.instance.comment = self.topic.comment_set.last()
return super(NotificationCreationForm, self).save(commit)
|
nitely/Spirit
|
spirit/topic/notification/forms.py
|
Python
|
mit
| 1,524
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2016-2019
# (c) University of Strathclyde 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# 161 Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2016-2019 The James Hutton Institute
# Copyright (c) 2019-2020 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Tests for pyani graphics.
These tests are intended to be run from the repository root using:
pytest -v
print() statements will be caught by pytest unless there is an
error. They can also be recovered with the -s option.
"""
import pandas as pd
from pathlib import Path
from typing import Dict, NamedTuple
import pytest
from pyani import pyani_config, pyani_graphics
from pyani.pyani_tools import get_labels
class GraphicsTestInputs(NamedTuple):
"""Convenience struct for graphics test inputs."""
filename: Path
labels: Dict[str, str]
classes: Dict[str, str]
@pytest.fixture
def graphics_inputs(dir_graphics_in):
"""Returns namedtuple of graphics inputs."""
return GraphicsTestInputs(
dir_graphics_in / "ANIm_percentage_identity.tab",
get_labels(dir_graphics_in / "labels.tab"),
get_labels(dir_graphics_in / "classes.tab"),
)
def draw_format_method(fmt, mth, graphics_inputs, tmp_path):
"""Render graphics format and method output."""
df = pd.read_csv(graphics_inputs.filename, index_col=0, sep="\t")
fn = {"mpl": pyani_graphics.mpl.heatmap, "seaborn": pyani_graphics.sns.heatmap}
sc = {"mpl": pyani_graphics.mpl.scatter, "seaborn": pyani_graphics.sns.scatter}
params = {"mpl": pyani_config.params_mpl, "seaborn": pyani_config.params_mpl}
method_params = pyani_graphics.Params(
params[mth](df)["ANIm_percentage_identity"],
graphics_inputs.labels,
graphics_inputs.classes,
)
fn[mth](
df, tmp_path / f"{mth}.{fmt}", title=f"{mth}:{fmt} test", params=method_params
)
sc[mth](
df,
df,
tmp_path / f"{mth}.{fmt}",
"matrix1",
"matrix2",
title=f"{mth}:{fmt} test",
params=method_params,
)
def test_png_mpl(graphics_inputs, tmp_path):
"""Write .png graphics with mpl."""
draw_format_method("png", "mpl", graphics_inputs, tmp_path)
def test_svg_mpl(graphics_inputs, tmp_path):
"""Write .svg graphics with mpl."""
draw_format_method("svg", "mpl", graphics_inputs, tmp_path)
def test_pdf_mpl(graphics_inputs, tmp_path):
"""Write .pdf graphics with mpl."""
draw_format_method("pdf", "mpl", graphics_inputs, tmp_path)
def test_png_seaborn(graphics_inputs, tmp_path):
"""Write .png graphics with seaborn."""
draw_format_method("png", "seaborn", graphics_inputs, tmp_path)
def test_svg_seaborn(graphics_inputs, tmp_path):
"""Write .svg graphics with seaborn."""
draw_format_method("svg", "seaborn", graphics_inputs, tmp_path)
def test_pdf_seaborn(graphics_inputs, tmp_path):
"""Write .pdf graphics with seaborn."""
draw_format_method("pdf", "seaborn", graphics_inputs, tmp_path)
|
widdowquinn/pyani
|
tests/test_graphics.py
|
Python
|
mit
| 4,265
|
# author: bukun
#
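# Walk the working tree: record symlinked files as 'rm -f' commands in
# clean_link.sh, and delete symlinked directories immediately (also logging them).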
import os
pwd = os.getcwd()
fo = open('clean_link.sh', 'w')
for wroot, wdirs, wfiles in os.walk(pwd):
for wfile in wfiles:
test = os.path.join(wroot, wfile)
if os.path.islink(test):
print(test)
fo.write('rm -f %s\n' % test)
for wdir in wdirs:
test = os.path.join(wroot, wdir)
if os.path.islink(test):
print(test)
os.remove(test)
fo.write('rm -f %s\n' % test)
fo.close()
|
bukun/bkcase
|
clean_link.py
|
Python
|
mit
| 501
|
import pytest
import os
import subprocess
import sys
if '-nogui' not in sys.argv:
sys.argv.append('-nogui')
from .utils import pkg_setup
@pytest.mark.package_data(['examples/NeuroMLImport/', '.'])
class TestPTcell:
def test_init(self, pkg_setup):
import SimpleNet_import
|
Neurosim-lab/netpyne
|
tests/examples/test_NeuroMLImport.py
|
Python
|
mit
| 290
|
# coding: utf-8
from django.conf import settings
DEFAULT_SETTINGS = {
'upload_to': 'videos', # upload_to parameter for unconverted videos
'convert_to': 'videos/converted', # upload_to parameter for converted videos
'screens_to': 'videos/screens', # upload_to parameter for video screenshots
'num_screens': 3, # Number of video screenshots to create
'ffmpeg': {
'binary': 'ffmpeg', # path to ffmpeg binary
'convert_settings': {
'commands': {
'h264': [
# ffmpeg call for converting to h264 (first pass)
"{ffmpeg} -y -i {infile} -vcodec libx264 -vprofile high -b:v {video_bitrate} "
"-maxrate {video_max_bitrate} -bufsize {video_bufsize} "
"{video_size} -bf 2 -g 100 -an -threads 0 -pass 1 -f mp4 /dev/null",
# ffmpeg call for converting to h264 (second pass)
"{ffmpeg} -y -i {infile} -vcodec libx264 -vprofile high -b:v {video_bitrate} "
"-maxrate {video_max_bitrate} -bufsize {video_bufsize} "
"{video_size} -bf 2 -g 100 -threads 0 -pass 2 -acodec libfaac -ar 48000 "
"-b:a 128k -ac 2 -f mp4 {outfile}"
],
'ogv': [
# ffmpeg call for converting to ogv (first pass)
"{ffmpeg} -y -i {infile} -vcodec libtheora -b:v {video_bitrate} "
"-maxrate {video_max_bitrate} -bufsize {video_bufsize} "
"{video_size} -bf 2 -g 100 -an -threads 0 -pass 1 -f ogg /dev/null",
# ffmpeg call for converting to ogv (second pass)
"{ffmpeg} -y -i {infile} -vcodec libtheora -b:v {video_bitrate} "
"-maxrate {video_max_bitrate} -bufsize {video_bufsize} "
"{video_size} -bf 2 -g 100 -threads 0 -pass 2 -acodec libvorbis "
"-ar 48000 -b:a 128k -ac 2 -f ogg {outfile}",
],
'webm': [
# ffmpeg call for converting to webm (first pass)
"{ffmpeg} -y -i {infile} -codec:v libvpx -quality good -cpu-used 0 -b:v {video_bitrate} "
"-qmin 10 -qmax 42 -maxrate {video_max_bitrate} -bufsize {video_bufsize} "
"{video_size} -bf 2 -g 100 -an -threads 4 -pass 1 -f webm /dev/null",
# ffmpeg call for converting to webm (second pass)
"{ffmpeg} -y -i {infile} -codec:v libvpx -quality good -cpu-used 0 -b:v {video_bitrate} "
"-qmin 10 -qmax 42 -maxrate {video_max_bitrate} -bufsize {video_bufsize} "
"{video_size} -bf 2 -g 100 -threads 4 -pass 2 -codec:a libvorbis "
"-ar 48000 -b:a 128k -ac 2 -f webm {outfile}",
],
},
# Settings for 'original' version (will be passed to commands above)
'original': {
'video_bitrate': '1792k', # will be min(video_bitrate, original_bitrate)
'video_max_bitrate': '', # will be video_bitrate * 2
'video_bufsize': '', # will be video_bitrate * 2
'video_size': '-vf scale={original_width}:{original_height}'
},
# High Quality settings (will be passed to commands above)
'high': {
'video_bitrate': '1792k',
'video_max_bitrate': '4000k',
'video_bufsize': '4000k',
'video_size': '-vf scale=1920:1080'
},
# Semi-High Quality settings (will be passed to commands above)
'semi-high': {
'video_bitrate': '1000k',
'video_max_bitrate': '2000k',
'video_bufsize': '2000k',
'video_size': '-vf scale=1280:720'
},
# Medium Quality settings (will be passed to commands above)
'medium': {
'video_bitrate': '500k',
'video_max_bitrate': '1000k',
'video_bufsize': '1000k',
'video_size': '-vf scale=854:480'
},
# Low Quality settings (will be passed to commands above)
'low': {
'video_bitrate': '300k',
'video_max_bitrate': '600k',
'video_bufsize': '600k',
'video_size': '-vf scale=640:360'
},
},
},
'redis': {
# redis connection settings
'connection': {
'db': 0,
'host': 'localhost',
'port': 6379,
},
'eager': False, # If True, Tasks are not queued, but executed directly. Use for testing purposes only!
'queue_prefix': 'webvideo', # django_webvideo will prefix all (RQ-)Queues with this prefix.
'timeout': 3600, # rq queue timeout (in seconds)
},
'use_admin': True, # set to False to disable registering into django admin
}
def _get_setting(setting, key, *subkeys):
if len(subkeys) > 0:
while setting is not None and isinstance(setting, dict) and len(subkeys) > 0:
setting = setting.get(key)
key = subkeys[0]
subkeys = subkeys[1:]
try:
return setting.get(key)
except AttributeError:
return None
else:
return setting.get(key)
def get_setting(key, *subkeys):
    value = _get_setting(getattr(settings, 'DJANGO_WEBVIDEO_SETTINGS', {}), key, *subkeys)
if value is None:
return _get_setting(DEFAULT_SETTINGS, key, *subkeys)
else:
return value
def get_queue_name():
return "{0}:convert".format(get_setting('redis', 'queue_prefix'))
|
randomknowledge/django-webvideo
|
django_webvideo/settings.py
|
Python
|
mit
| 5,744
|
# -*- coding: utf-8 -*-
from django.conf import settings
import requests, json, time
session = None
if settings.SIMULTANEOUS_QUERY_THREADS > 1:
try:
from requests_futures.sessions import FuturesSession
from concurrent.futures import ThreadPoolExecutor
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=settings.SIMULTANEOUS_QUERY_THREADS))
    except Exception:
print("Unable to create thread pool for requests_futures. Falling back to synchronous querying.")
def page_size(num_portals):
return max(settings.MIN_PORTAL_RESULTS, settings.EXPECTED_PAGE_RESULTS // num_portals)
## Used for single-threaded querying.
class MockFuture:
def __init__(self, query_string):
self._result = requests.get(query_string)
def result(self):
return self._result
def start_query(session, query_string, portal, num_portals, page_number):
query_string = portal.url + '/api/3/action/package_search?q=' + \
query_string + \
'&rows=' + str(page_size(num_portals) + 1) + \
'&start=' + str(page_number * page_size(num_portals))
if session:
return (
session.get(query_string),
portal
)
else:
return (
MockFuture(query_string),
portal
)
def query_portals(query_string, portals, page_number):
start = time.time()
futures = [start_query(session, query_string, portal, len(portals), page_number) for portal in portals]
results = []
top_results = []
errors = []
more = False
for future, portal in futures:
try:
r = future.result()
json_result = json.loads(r.text)
if json_result['success']:
top_results.extend(json_result['result']['results'][:30])
if len(json_result['result']['results']) > page_size(len(portals)):
more = True
for r in json_result['result']['results']:
r['result_url'] = portal.url + '/dataset/' + r['name']
r['portal'] = portal
results.append(r)
except ValueError as e:
errors.append(portal)
continue
print("Time taken: " + str(time.time() - start))
return (results, top_results, errors, more)
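# Worked example of the paging maths above (values assumed, not from the
# original source): with MIN_PORTAL_RESULTS = 5 and EXPECTED_PAGE_RESULTS = 50,
# page_size(4) == max(5, 50 // 4) == 12, so each of the four portals is asked
# for 13 rows (the extra row is how query_portals detects that more pages
# exist).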
|
bellisk/opendata-multisearch
|
ord_hackday/search/query.py
|
Python
|
mit
| 2,336
|
from race_tracker_api.app import get_app
APP = get_app()
if __name__ == "__main__":
APP.run()
|
race-tracker/api
|
uwsgi.py
|
Python
|
mit
| 100
|
__all__ = [
'get_builder_name',
'get_builder_image_path',
'get_image_path',
'parse_images_parameter',
# Helper commands.
'chown',
'rsync',
]
import getpass
import grp
import pwd
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from g1.containers import models
import shipyard2
def get_builder_name(name):
return name + '-builder'
def get_builder_image_path(parameters, name):
return get_image_path(parameters, name).with_name(
shipyard2.IMAGE_DIR_BUILDER_IMAGE_FILENAME
)
def get_image_path(parameters, name):
return (
        parameters['//releases:root'] /
        foreman.get_relpath() /
name /
ASSERT.not_none(parameters['%s/version' % name]) /
shipyard2.IMAGE_DIR_IMAGE_FILENAME
)
def parse_images_parameter(value):
images = []
for v in value.split(','):
if v.startswith('id:'):
images.append(models.PodConfig.Image(id=v[len('id:'):]))
elif v.startswith('nv:'):
            _, name, version = v.split(':', maxsplit=2)
images.append(models.PodConfig.Image(name=name, version=version))
elif v.startswith('tag:'):
images.append(models.PodConfig.Image(tag=v[len('tag:'):]))
else:
ASSERT.unreachable('unknown image parameter: {}', v)
return images
def chown(path):
user = getpass.getuser()
with scripts.using_sudo():
scripts.chown(
user,
grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name,
path,
)
def rsync(src_path, dst_path, rsync_args=()):
scripts.run([
'rsync',
'--archive',
*rsync_args,
# Use the trailing slash trick.
'%s/' % src_path,
dst_path,
])
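# Example of the image-parameter syntax accepted by parse_images_parameter
# (a sketch derived from the branches above; the values are made up):
#
#   parse_images_parameter('id:abc123,nv:web-server:1.0.2,tag:latest')
#   # -> [Image(id='abc123'),
#   #     Image(name='web-server', version='1.0.2'),
#   #     Image(tag='latest')]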
|
clchiou/garage
|
shipyard2/shipyard2/rules/images/utils.py
|
Python
|
mit
| 1,787
|
# -- coding: utf-8 --
# Copyright 2015 Tim Santor
#
# This file is part of proprietary software and use of this file
# is strictly prohibited without written consent.
#
# @author Tim Santor <tsantor@xstudios.agency>
"""Uploads HTML5 banner ads."""
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import fnmatch
import logging
import os
import shlex
import sys
import time
import webbrowser
from subprocess import PIPE, Popen
from bashutils import logmsg
from .adkit import AdKitBase
# -----------------------------------------------------------------------------
class Main(AdKitBase):
"""Uploads HTML5 banner ads."""
def __init__(self):
self.logger = logging.getLogger(__name__)
super(Main, self).__init__()
def create_rsync_exclude(self):
"""Create rsync exclude params."""
params = ''
if self.exclude_list:
for f in self.exclude_list:
params += ' --exclude %s' % f
return params
def upload(self):
"""Upload HTML5 ads files."""
logmsg.header('Uploading HTML5 ad files...')
start_time = time.time()
exclude = self.create_rsync_exclude()
cmd = 'rsync -avzhP --no-perms --no-owner --no-group {exclude} "{from_dir}" {user}@{ip}:{to_dir}'.format(
exclude=exclude,
from_dir=self.input_dir,
user=self.user,
ip=self.ip,
to_dir=self.remote_dir)
self.logger.debug(cmd)
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
status = not bool(proc.returncode)
if status:
            logmsg.success('Uploaded HTML ad files (Time Elapsed: {0})'.format(
                time.time() - start_time))
# logmsg.debug(stdout.strip())
def get_links(self):
"""Get all links of the files we uploaded."""
logmsg.header('Generating links')
links = []
for root, dirnames, filenames in os.walk(self.input_dir):
for name in fnmatch.filter(filenames, 'index.html'):
url = '{0}{1}'.format(self.url, os.path.join(root, name))
links.append(url)
print(url)
return links
def get_parser(self):
"""Return the parsed command line arguments."""
parser = argparse.ArgumentParser(
description="Upload HTML5 ads and generate preview links."
)
parser.add_argument('-l', '--log', help='Enable logging',
action='store_true')
parser.add_argument('-b', '--browser', help='Open links in browser',
action='store_true')
return parser.parse_args()
def run(self):
"""Run script."""
config = self.get_config()
args = self.get_parser()
if args.log:
self.create_logger()
self.logger.debug('-' * 10)
# Set some vars
self.input_dir = os.path.join(config.get('html5', 'input')).rstrip('/')
self.user = config.get('upload', 'user')
self.ip = config.get('upload', 'ip')
self.remote_dir = config.get('upload', 'remote_dir')
self.url = config.get('upload', 'url')
self.exclude_list = self.create_list(config.get('upload', 'exclude_list'))
# Do the stuff we came here to do
# Check if the input dir exists
if not os.path.isdir(self.input_dir):
            logmsg.error('"{0}" does not exist'.format(self.input_dir))
sys.exit()
# Upload preview files
self.upload()
# Get links
links = self.get_links()
logmsg.success('%s ads uploaded' % len(links))
# Ask question
if args.browser and logmsg.confirm('Open all ads in browser'):
for l in links:
webbrowser.open(l)
# -----------------------------------------------------------------------------
def main():
"""Main script."""
script = Main()
script.run()
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
tsantor/banner-ad-toolkit
|
adkit/upload_html.py
|
Python
|
mit
| 4,265
|
########################
# lmfit - C/C++ library for Levenberg-Marquardt least-squares minimization and curve fitting
# http://joachimwuttke.de/lmfit/
########################
# download it
# run ./configure [--prefix=<path/to/install>]
# make [install]
#
# EXAMPLE
# ./configure --prefix=/home/simone/MOOV3D/libs/lmfit/build
# make install
# inside the .sconf
# extern = <the libraries path>
# incdir_lmfit = [ join(extern, 'lmfit/build/include/') ]
# libdir_lmfit = [ join(extern, 'lmfit/build/lib/') ]
from _external import *
lmfit = LibWithHeaderChecker(['lmmin'],['lmcurve.h','lmmin.h'], 'c', name='lmfit')
|
tuttleofx/sconsProject
|
autoconf/lmfit.py
|
Python
|
mit
| 616
|
#!/usr/bin/python
from subprocess import call
#3.9.2015 - Writing a script to process Tracy's experimentally evolved
#Mtb samples.
#There are 6 populations that were evolved, with a total of 18 samples.
samples = [
"ERR003100", "ERR003108", "ERR003112", "ERR003116",
"ERR004900", "ERR004908", "ERR004912", "ERR004916",
"ERR005500", "ERR005504",
"ERR007200", "ERR007204", "ERR007208",
"ERR034500", "ERR034504", "ERR034508",
"ERR054000", "ERR054004"
]
print("There are " + str(len(samples)) + " samples in the list.")
def remove_ambMap(samp):
#Remove ambiguously mapped reads from the realigned bam files
call('samtools view -q 20 -b /home/tmsmith/data/expEvo/RGA/RGAbamsBais/{samp}.realn.bam' '| samtools sort - {samp}'.format(samp=samp), shell=True)
#Convert the relevant bam files into an mpileup (by population,
#i.e. all samples from #31 together):
#samtools mpileup -B -f ./<path to ref>/MtbNCBIH37Rv.fa -q 20 \
#-Q 20 ./<path to bams - sep by space>/ > {Prefix}.pileup
#Convert the mpileups to sync files:
#java -ea -Xmx7g -jar \
#/home/peplab/src/popoolation2_12-1/mpileup2syn.jar \
#--input {Prefix}.mpileup --output {Prefix}.sync --fastq-type sanger \
#--min-qual 20 --threads 4
def callable_loci(samp):
"""Run callable loci on samples"""
# call('java -Xmx4g -jar /opt/PepPrograms/RGAPipeline/GenomeAnalysisTK.jar -T CallableLoci -I /home/tmsmith/data/expEvo/RGA/RGAbamsBais/{samp}.realn.bam -summary {samp}_defaults.summary -o {samp}_defaults.bed -R /home/mrood/data/Mtb/ref/MtbNCBIH37Rv.fa'.format(samp=samp), shell=True)
call('java -Xmx4g -jar /opt/PepPrograms/RGAPipeline/GenomeAnalysisTK.jar -T CallableLoci -I /home/tmsmith/data/expEvo/RGA/RGAbamsBais/{samp}.realn.bam -summary {samp}_strict.summary -o {samp}_strict.bed -R /home/mrood/data/Mtb/ref/MtbNCBIH37Rv.fa -frlmq 0.04 -mmq 20'.format(samp=samp), shell=True)
#Prefix = raw_input("Prefix: ")
def snp_frequency_diff(Prefix):
#Calculate allele frequency differences
call('perl /opt/PepPrograms/popoolation2_1201/snp-frequency-diff.pl --input {Prefix}.sync --output {Prefix}_mc10 --min-count 10 --min-coverage 10 --max-coverage 2%'.format(Prefix=Prefix), shell=True)
def fisher_test(Prefix):
#Estimate the significance of allele frequency differences
call('perl /opt/PepPrograms/popoolation2_1201/fisher-test.pl --input {Prefix}.sync --output {Prefix}_mc10.fet --min-count 10 --min-coverage 10 --max-coverage 2% --min-covered-fraction 1 --window-size 1 --step-size 1 --suppress-noninformative'.format(Prefix=Prefix), shell=True)
def fst_sliding(Prefix):
#Calculate Fst values using a sliding-window approach
call('perl /opt/PepPrograms/popoolation2_1201/fst-sliding.pl --input {Prefix}.sync --output {Prefix}_mc10_p10K.fst --min-count 10 --min-coverage 10 --max-coverage 2% --min-covered-fraction 1 --window-size 1 --step-size 1 --suppress-noninformative --pool-size 10000'.format(Prefix=Prefix), shell=True)
for samp in samples:
print("Processing sample: " + samp)
# remove_ambMap(samp)
callable_loci(samp)
#snp_frequency_diff(Prefix)
#fisher_test(Prefix)
#fst_sliding(Prefix)
def make_fetDict(Prefix):
fetDict = {}
with open('{Prefix}_mc10.fet'.format(Prefix=Prefix), 'r') as fetFile:
for line in fetFile:
line = line.strip().split('\t')
pos = int(line[1])
fetDict[pos] = {}
paircomps = line[5:]
for i in paircomps:
key,fet = i.split("=")
fetDict[pos][str(key)] = float(fet)
return fetDict
def make_figureFile(Prefix, fetDict):
with open("{Prefix}_mc10_p10K.fst".format(Prefix=Prefix), 'r') as fstFile, open("{Prefix}_mc10_manhatten.txt".format(Prefix=Prefix), 'w') as outFile:
outFile.write("%s\t%s\t%s\t%s\n" %
("key",
"position",
"fst",
"fet")
)
for line in fstFile:
line = line.strip().split('\t')
pos = int(line[1])
paircomps = line[5:]
for i in paircomps:
key = str(i.split("=")[0])
fst = float(i.split("=")[1])
outFile.write("%s\t%i\t%f\t%f\n" %
(key,
pos,
fst,
fetDict[pos][key])
)
#fetDict = make_fetDict(Prefix)
#make_figureFile(Prefix, fetDict)
|
tracysmith/RGAPepPipe
|
pooled/expEvo.py
|
Python
|
mit
| 4,378
|
#!/usr/bin/env python
from setuptools import setup
setup(name='cabot-alert-sms',
version='0.1',
description='An sms alert plugin for Cabot by Lyncir',
author='Lyncir',
author_email='lyncir@gmail.com',
url='http://cabotapp.com',
packages=[
'cabot_alert_sms'
],
)
|
lyncir/cabot-alert-sms
|
setup.py
|
Python
|
mit
| 319
|
import numpy as np
from kmapper.cover import Cover
# uniform data:
data = np.arange(0, 1000).reshape((1000, 1))
lens = data
cov = Cover(10, 0.5, verbose=0)
def overlap(c1, c2):
ints = set(c1).intersection(set(c2))
return len(ints) / max(len(c1), len(c2))
# Prefix'ing the data with an ID column
ids = np.array([x for x in range(lens.shape[0])])
lens = np.c_[ids, lens]
bins = cov.fit(lens)
cube_entries = cov.transform(lens, bins)
for i, hypercube in enumerate(cube_entries):
print(
"There are %s points in cube %s/%s" % (hypercube.shape[0], i, len(cube_entries))
)
print()
for i, (c1, c2) in enumerate(zip(cube_entries, cube_entries[1:])):
print("Overlap %s" % (overlap(c1[:, 0], c2[:, 0])))
|
MLWave/kepler-mapper
|
test/cover_test_script.py
|
Python
|
mit
| 730
|
# Testing Rester with HttpBin API
|
chitamoor/Rester
|
rester/test/testHttpBin/__init__.py
|
Python
|
mit
| 33
|
#!/usr/bin/env python
import unittest
import mock
class TestImapDuplicateRemover(unittest.TestCase):
@mock.patch("imaplib.IMAP4_SSL")
def test_does_nothing_if_no_mails_in_mailbox(self, imap_4):
import imap_duplicate_remover
imap_duplicate_remover.remove_duplicates()
assert not imap_4.return_value.store.called
if __name__ == "__main__":
unittest.main()
|
hjameel/imap-duplicate-remover
|
test_imap_duplicate_remover.py
|
Python
|
mit
| 393
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patches the settings of a Cloud SQL database."""
from googlecloudsdk.api_lib.sql import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.sql import flags
from googlecloudsdk.core import log
class _Result(object):
"""Run() method result object."""
def __init__(self, new, old):
self.new = new
self.old = old
class _BasePatch(object):
"""Patches the settings of a Cloud SQL database."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
flags.CHARSET_FLAG.AddToParser(parser)
flags.COLLATION_FLAG.AddToParser(parser)
flags.DATABASE_NAME_FLAG.AddToParser(parser)
flags.INSTANCE_FLAG.AddToParser(parser)
parser.add_argument(
'--diff',
action='store_true',
help='Show what changed as a result of the patch.')
def Format(self, args):
if args.diff:
return 'diff(old, new)'
fmt = self.ListFormat(args)
return 'table(new:format="{fmt}")'.format(fmt=fmt)
def Run(self, args):
"""Patches settings of a Cloud SQL database using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: A http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_database_resource = sql_client.databases.Get(
sql_messages.SqlDatabasesGetRequest(
database=args.database,
project=instance_ref.project,
instance=instance_ref.instance))
patch_database = sql_messages.Database(
project=instance_ref.project,
instance=instance_ref.instance,
name=args.database)
if hasattr(args, 'collation'):
patch_database.collation = args.collation
if hasattr(args, 'charset'):
patch_database.charset = args.charset
operation_ref = None
result_operation = sql_client.databases.Patch(
sql_messages.SqlDatabasesPatchRequest(
database=args.database,
databaseResource=patch_database,
project=instance_ref.project,
instance=instance_ref.instance))
operation_ref = resources.Create(
'sql.operations',
operation=result_operation.name,
project=instance_ref.project,
instance=instance_ref.instance)
operations.OperationsV1Beta4.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL database')
log.UpdatedResource(args.database, 'database')
changed_database_resource = sql_client.databases.Get(
sql_messages.SqlDatabasesGetRequest(
database=args.database,
project=instance_ref.project,
instance=instance_ref.instance))
return _Result(changed_database_resource, original_database_resource)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class PatchBeta(_BasePatch, base.Command):
"""Patches the settings of a Cloud SQL database."""
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/sql/databases/patch.py
|
Python
|
mit
| 4,349
|
#!/usr/bin/env python
# coding: utf-8
__author__ = 'toly'
"""
script to make per-page dictionaries according to the user's personal list of known words
"""
import re
import os
import sys
import argparse
from string import lower
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.snowball import EnglishStemmer
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
stemmer = EnglishStemmer()
lemmatizer = WordNetLemmatizer()
NO_LETTER_REGEXP = re.compile(r'[^a-zA-Z]')
PERSONAL_USER_DIR = os.path.join(os.path.expanduser('~'), '.easy_english')
UNKNOWN_STEMS_FILE = 'unknown_stems.txt'
KNOWN_STEMS_FILE = 'known_stems.txt'
STUDY_DICT_FILE = 'dictionary.txt'
PERSONAL_FILES = [UNKNOWN_STEMS_FILE, KNOWN_STEMS_FILE, STUDY_DICT_FILE]
PERSONAL_FILES = map(lambda x: os.path.join(PERSONAL_USER_DIR, x), PERSONAL_FILES)
UNKNOWN_STEMS_FILE, KNOWN_STEMS_FILE, STUDY_DICT_FILE = PERSONAL_FILES
def main():
"""
main func - entry point
"""
# if not created - make work directory for personal user lists of words
if not os.path.exists(PERSONAL_USER_DIR):
os.mkdir(PERSONAL_USER_DIR)
# make arguments parser and parse arguments
arg_parser = make_arguments_parser()
args = arg_parser.parse_args()
# main loop-for by pages in input file
big_page = ''
for page_num, page in enumerate(file_pages(args.input_file)):
big_page += page
words = tokenizer.tokenize(big_page)
words = map(lower, words)
words = list(set(words))
words = filter_non_words(words)
tesaurus = Tesaurus()
tesaurus.determine_words(words)
def make_arguments_parser():
"""
make arguments parser and set options
"""
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-i', '--input-file', type=str, required=True, help="input txt file")
argument_parser.add_argument('-o', '--output-file', type=str, help="output file (default: <input_file>_d.txt )")
return argument_parser
def filter_non_words(words):
return filter(lambda x: not NO_LETTER_REGEXP.findall(x), words)
class Tesaurus(object):
unknown_stems_file = None
known_stems_file = None
study_words_file = None
unknown_stems = None
known_stems = None
study_words = None
def __init__(self, unknown_stems_file=UNKNOWN_STEMS_FILE, known_stems_file=KNOWN_STEMS_FILE,
study_words_file=STUDY_DICT_FILE):
self.unknown_stems_file = unknown_stems_file
self.known_stems_file = known_stems_file
self.study_words_file = study_words_file
personal_files = (unknown_stems_file, known_stems_file, study_words_file)
self.unknown_stems, self.known_stems, self.study_words = map(get_user_words, personal_files)
def determine_words(self, words_list):
"""
        Determine words - known or unknown, and append to dictionary if needed
"""
# dict: lemma -> stem
dict_lemmas = {}
not_determined_words = []
total_words = len(words_list)
n = 0
for word, part_of_speech in nltk.pos_tag(words_list):
n += 1
lemma, stemm = get_base_forms(word, part_of_speech)
if stemm in self.known_stems or stemm in self.unknown_stems:
continue
not_determined_words.append(lemma)
dict_lemmas[lemma] = stemm
if len(not_determined_words) < 10:
continue
progress = 100 * float(n) / float(total_words)
print "Progress: %d/%d [%f %%]" % (n, total_words, progress)
known_words = input_known_words(not_determined_words)
unknown_words = set(not_determined_words) - set(known_words)
known_stems = map(lambda x: dict_lemmas[x], known_words)
unknown_stems = map(lambda x: dict_lemmas[x], unknown_words)
append_words(self.known_stems_file, known_stems)
append_words(self.unknown_stems_file, unknown_stems)
append_words(self.study_words_file, unknown_words)
self.known_stems += known_stems
self.unknown_stems += unknown_stems
not_determined_words = []
if not_determined_words:
known_words = input_known_words(not_determined_words)
unknown_words = set(not_determined_words) - set(known_words)
known_stems = map(lambda x: dict_lemmas[x], known_words)
unknown_stems = map(lambda x: dict_lemmas[x], unknown_words)
append_words(self.known_stems_file, known_stems)
append_words(self.unknown_stems_file, unknown_stems)
append_words(self.study_words_file, unknown_words)
def append_words(filename, words):
"""
append words to file
"""
lines = map(lambda x: '%s\n' % x, words)
with open(filename, 'a') as f:
f.writelines(lines)
def get_base_forms(word, part_of_speech):
"""
word, part_of_speech -> lemma, stemm
"""
try:
lemma = lemmatizer.lemmatize(word, lower(part_of_speech[0]))
except Exception:
lemma = lemmatizer.lemmatize(word)
stemm = stemmer.stem(lemma)
return lemma, stemm
def input_known_words(words):
"""
Determine words through user input
    list of words -> [known words]
"""
word_views = map(lambda item: '%d) %s' % item, enumerate(words))
    prompt = '\n'.join(word_views) + "\nWhich words do you know? "
not_inputed = True
while not_inputed:
try:
words_positions = raw_input(prompt)
            if not words_positions:
                words_positions = []
                break
words_positions = map(int, words_positions.split(','))
not_inputed = False
except (ValueError, ):
print "Input like a '0,3,8'"
known_words = []
for position in words_positions:
try:
known_words.append(words[position])
except IndexError:
pass
return known_words
def get_user_words(filename):
"""
get list of user words from file <filename>
or
create file if not exists
"""
if not os.path.exists(filename):
open(filename, 'a').close()
return []
def remove_end_of_line(line):
if '\n' in line:
return line.replace('\n', '')
return line
with open(filename, 'r') as f:
return map(remove_end_of_line, f.readlines())
def file_lines(filename):
"""read file line by line"""
with open(filename) as f:
for line in f:
yield line
def file_pages(filename, split_regexp=r'^===page #\d+$'):
"""read file page by page"""
page = ''
for line in file_lines(filename):
if re.match(split_regexp, line):
yield page
page = ''
continue
page += line
yield page
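# Input format sketch (assumed from split_regexp above): pages in the source
# txt file are delimited by marker lines such as
#
#   ===page #1
#   some text ...
#   ===page #2
#   more text ...
#
# Everything between two markers is yielded as a single page string.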
if __name__ == "__main__":
sys.exit(main())
|
toly/easy_english
|
main.py
|
Python
|
mit
| 6,995
|
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
import os
this_dir = os.path.dirname(__file__)
readme_filename = os.path.join(this_dir, 'README.md')
requirements_filename = os.path.join(this_dir, 'requirements.txt')
def get_project_path(*args):
return os.path.abspath(os.path.join(this_dir, *args))
PACKAGE_NAME = 'xplenty'
PACKAGE_VERSION = '3.0.0'
PACKAGE_AUTHOR = 'Xplenty'
PACKAGE_AUTHOR_EMAIL = 'opensource@xplenty.com'
PACKAGE_URL = 'https://github.com/xplenty/xplenty.py'
PACKAGES = find_packages(get_project_path())
PACKAGE_LICENSE = 'MIT'
PACKAGE_DESCRIPTION = 'Xplenty API Python SDK'
PACKAGE_INCLUDE_PACKAGE_DATA = True
PACKAGE_DATA_FILES = []
PACKAGE_CLASSIFIERS = ['Programming Language :: Python :: 3']
PYTHON_REQUIRES = '==3.7.*'
with open(readme_filename) as f:
PACKAGE_LONG_DESCRIPTION = f.read()
PACKAGE_LONG_DESCRIPTION_FORMAT = "text/markdown"
with open(requirements_filename) as f:
    PACKAGE_INSTALL_REQUIRES = [line.strip() for line in f if line.strip()]
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_AUTHOR_EMAIL,
url=PACKAGE_URL,
packages=PACKAGES,
license=PACKAGE_LICENSE,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
install_requires=PACKAGE_INSTALL_REQUIRES,
include_package_data=PACKAGE_INCLUDE_PACKAGE_DATA,
long_description_content_type=PACKAGE_LONG_DESCRIPTION_FORMAT,
data_files=PACKAGE_DATA_FILES,
entry_points={},
classifiers=PACKAGE_CLASSIFIERS,
python_requires=PYTHON_REQUIRES
)
|
xplenty/xplenty.py
|
setup.py
|
Python
|
mit
| 1,624
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# duco documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 28 12:14:26 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from duco.const import (__version__, __short_version__,
PROJECT_NAME, PROJECT_COPYRIGHT,
PROJECT_AUTHOR, PROJECT_DESCRIPTION,
PROJECT_GITHUB_USERNAME, PROJECT_GITHUB_REPOSITORY,
GITHUB_PATH)
sys.path.insert(0, os.path.abspath('_ext'))
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx_autodoc_annotation',
'edit_on_github'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = PROJECT_NAME
copyright = PROJECT_COPYRIGHT
author = PROJECT_AUTHOR
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __short_version__
# The full version, including alpha/beta/rc tags.
release = __version__
code_branch = 'dev' if 'dev' in __version__ else 'master'
# Edit on Github config
edit_on_github_project = GITHUB_PATH
edit_on_github_branch = code_branch
edit_on_github_src_path = 'docs/source/'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo_name': PROJECT_NAME,
'description': PROJECT_DESCRIPTION,
'github_user': PROJECT_GITHUB_USERNAME,
'github_repo': PROJECT_GITHUB_REPOSITORY,
'github_type': 'star',
'github_banner': True,
'travis_button': True,
# 'fixed_sidebar': True, # Re-enable when we have more content
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ducodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'duco.tex', 'duco Documentation',
'Author', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'duco', 'duco Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'duco', 'duco Documentation',
author, 'duco', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
luuloe/python-duco
|
docs/source/conf.py
|
Python
|
mit
| 6,554
|
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import time
def search():
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
start = 0
searchword = input('Enter the word(s) to search for: ')
    searchword = searchword.split()
searchaddress = input('Enter the address to search for: ')
url = 'http://www.google.com/search?q='+'+'.join(searchword)
position = 0
while True:
print('Retrieving',url)
print('Checking from article',start+1)
try:
req = Request(url, headers=headers)
html = urlopen(req).read().decode()
except Exception as e:
print(str(e))
return
soup = BeautifulSoup(html, "html.parser")
tags = soup('cite')
for tag in tags:
position += 1
if searchaddress in str(tag):
print('Address found:')
print(str(tag.string))
print('in position',position,'of Google.com index')
return
start += 10
url = 'http://www.google.com/search?q='+'+'.join(searchword) +'&start='+ str(start)
time.sleep(2)
if __name__ == '__main__':
search()
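# Example of the query URLs built above (illustrative): searching for
# "python tutorial" first fetches
#   http://www.google.com/search?q=python+tutorial
# and each following page appends &start=10, &start=20, ... until the target
# address is found in a <cite> tag.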
|
maybejose/GoogleIndexSearch
|
google_index_search.py
|
Python
|
mit
| 1,361
|
# Detecting and handling events
# from the keyboard and mouse
import World
def pressed(key):
"""listener 4 keyboard pressing"""
# zoom in
if key.char == 'q':
if World.scale_() - 2 > 0:
World.scale_(-2)
World.draw()
# zoom out
elif key.char == 'e':
World.scale_(2)
World.draw()
# move view up
if key.char == 'w':
World.move_view(y=-1)
# move view left
elif key.char == 'a':
World.move_view(x=-1)
# move view down
elif key.char == 's':
World.move_view(y=1)
# move view right
elif key.char == 'd':
World.move_view(x=1)
|
efanescent/SapidCircuits
|
Control.py
|
Python
|
mit
| 654
|
# coding: utf-8
class AdminTools(object):
""" django-admin-tools """
ADMIN_TOOLS_INDEX_DASHBOARD = '{{ project_name }}.config.dashboard.AdminIndexDashboard'
ADMIN_TOOLS_MENU = '{{ project_name }}.config.dashboard.AdminMenu'
ADMIN_TOOLS_THEMING_CSS = 'css/admin.css'
|
futurecolors/tinned-django
|
tinned_django/project_name/config/apps_config/admintools.py
|
Python
|
mit
| 284
|
from math import ceil
import argparse
import json
import os
def _get_states(i): # get state from parameter
return 'M{}'.format(i), 'I{}'.format(i), 'D{}'.format(i)
class ProfileHiddenMarkovMoldel:
def __init__(self, inputfile, output):
self.inputfile = inputfile
self.output = output
self.t_prob = {} # transition probability
self.e_prob = {} # emission probability
self.inp_strings = [] # input string
self.char_list = set()
# Open file and read input strings
with open(inputfile, 'r') as f:
for line in f.read().splitlines():
self.inp_strings.append(line)
self.char_list = self.char_list.union(set(line))
self.char_list = self.char_list - {'.'} # Clean the data
# number of input strings
self.num_of_strings = len(self.inp_strings)
# number of char in a string
self.num_of_chars = len(self.inp_strings[0])
self.frequncy_list = [{} for i in range(self.num_of_chars + 1)]
for string in self.inp_strings:
for index, char in enumerate(string):
if char in self.frequncy_list[index].keys():
self.frequncy_list[index][char] += 1
else:
self.frequncy_list[index][char] = 1
        # Which positions are match states
self.match_states = [
k for n, k in zip(self.frequncy_list, range(self.num_of_chars + 1))
if int(n.get('.', 0)) < ceil(self.num_of_strings / 2)
]
# State lists(temp)
match_state = ['M{}'.format(k)
for k in range(0, len(self.match_states) + 1)]
insert_state = ['I{}'.format(k)
for k in range(0, len(self.match_states))]
delete_state = ['D{}'.format(k)
for k in range(1, len(self.match_states))]
# formatting the transition probabilities
self.t_prob.update({key: {'strs': []} for key in match_state})
self.t_prob.update({key: {'strs': []} for key in insert_state})
self.t_prob.update({key: {'strs': []} for key in delete_state})
# put all input chars(proteins) at the begining state
self.t_prob['M0']['strs'] = [n for n in range(self.num_of_strings)]
def build_model(self):
i = 0 # counter
j = 0 # current state no
while i < self.num_of_chars + 1:
M, I, D, = _get_states(j)
nextM, nextD = _get_states(j + 1)[::2]
# If the current state is the match state
if i in self.match_states:
# D --> D list and D --> M list
deltodel, deltomatch = [], []
# I --> M List and I --> D List
instomatch, instodel = [], []
# M --> D List and M --> M List
matchtodel, matchtomatch = [], []
# D --> D and D --> M
# D(j) --> D(j+1) or D(j) --> M(j+1)
if self.t_prob.get(D, {}).get('strs', []) and i != 0:
try:
deltodel = [n for n in self.t_prob[D]['strs']
if self.inp_strings[n][i] == '.']
except IndexError:
pass
deltomatch = [
n for n in self.t_prob[D]['strs'] if n not in deltodel
]
# If deltodel is not empty
# D --> D
if deltodel:
self.t_prob[D][nextD] = {
'prob': float(len(deltodel) /
len(self.t_prob[D]['strs'])),
'strs': deltodel
}
self.t_prob[nextD]['strs'].extend(deltodel)
# If deltomatch is not empty
# D --> M
if deltomatch:
self.t_prob[D][nextM] = {
'prob': float(len(deltomatch) /
len(self.t_prob[D]['strs'])),
'strs': self.t_prob[D]['strs']
}
self.t_prob[nextM]['strs'].extend(deltomatch)
# I --> M and I --> D
# I(j) --> D(j+1) or I(j) --> M(j+1)
if self.t_prob[I]['strs'] and i != 0:
try:
instodel = list(set(
[n for n in self.t_prob[I]['strs']
if self.inp_strings[n][i] == '.']))
except IndexError:
pass
instomatch = list(set(
[n for n in self.t_prob[I]['strs']
if n not in instodel]))
# if instodel is not empty
# I --> D
if instodel:
self.t_prob[I][nextD] = {
'prob': float(len(instodel) /
len(self.t_prob[I]['strs'])),
'strs': instodel
}
self.t_prob[nextD]['strs'].extend(set(instodel))
# If instomatch is not empty
# I --> M
if instomatch:
self.t_prob[I][nextM] = {
'prob': float(len(instomatch) /
len(self.t_prob[I]['strs'])),
'strs': instomatch
}
self.t_prob[nextM]['strs'].extend(set(instomatch))
# M --> D and M --> M
# M(j) --> D(j+1) or M(j) --> M(j+1)
if self.t_prob[M]['strs']:
try:
matchtodel = [n for n in self.t_prob[M]['strs']
if self.inp_strings[n][i] == '.'
and n not in self.t_prob[I]['strs']]
except IndexError:
pass
matchtomatch = [
n for n in self.t_prob[M]['strs']
if n not in matchtodel + self.t_prob[I]['strs']
]
# If matchtodel is not empty
# M --> D
if matchtodel:
self.t_prob[M][nextD] = {
'prob': float(len(matchtodel) /
len(self.t_prob[M]['strs'])),
'strs': matchtodel
}
self.t_prob[nextD]['strs'].extend(matchtodel)
# If matchtomatch is not empty
# M --> M
if matchtomatch:
self.t_prob[M][nextM] = {
'prob': float(len(matchtomatch) /
len(self.t_prob[M]['strs'])),
'strs': matchtomatch
}
self.t_prob[nextM]['strs'].extend(matchtomatch)
j += 1
else:
insert_states = []
# This loop for going to the next insert state
while True:
insert_states.extend([n for n in range(self.num_of_strings)
if self.inp_strings[n][i] != '.'])
if i + 1 in self.match_states or i + 1 == self.num_of_chars:
                        # stop when i+1 is a match state or the last column
break
i += 1 # next insert state
# If the current insert_state is not empty
if insert_states:
# M --> I
come_from_match = [n for n in self.t_prob[M]['strs']
if n in insert_states]
# D --> I
come_from_del = [n for n in self.t_prob.get(D, {})
.get('strs', []) if n in insert_states]
# I --> I
come_from_ins = [n for n in set(insert_states) for k in
range(insert_states.count(n) - 1)]
# If the string comes from the match state
# M(j) --> I(j)
if come_from_match:
self.t_prob[M][I] = {
'prob': float(len(come_from_match) /
len(self.t_prob[M]['strs'])),
'strs': come_from_match
}
# If the string comes from the delete state
# D(j) --> I(j)
if come_from_del:
self.t_prob[D][I] = {
'prob': float(len(come_from_del) /
len(self.t_prob[D]['strs'])),
'strs': come_from_del
}
# If the string comes from the insert state
if come_from_ins:
self.t_prob[I][I] = {
'prob': float(len(come_from_ins) /
len(insert_states)),
'strs': list(set(come_from_ins))
}
self.t_prob[I]['strs'].extend(insert_states)
# get emission probabilities without '.'
num_of_dot = self.frequncy_list[i].get('.', 0)
self.e_prob[nextM] = {
n:
self.frequncy_list[i][n] / (self.num_of_strings - num_of_dot)
for n in self.frequncy_list[i] if n != '.'
}
i += 1
def create_result(self):
# write emission and transition probability
        def __delete(d):
            # delete the bookkeeping 'strs' entries, keeping only the
            # probabilities; every entry is guaranteed to have the key,
            # so no exception handling is needed
            del d['strs']
for i in d:
del d[i]['strs']
prob = d[i].pop('prob')
d[i] = prob
return d
# delete unnecessary states
self.t_prob = {
            n: __delete(self.t_prob[n])
for n in self.t_prob if self.t_prob[n]['strs']
}
result = {
'transition': self.t_prob,
'emission': self.e_prob,
'matchState': len(self.match_states)
}
os.chdir(self.output)
with open(os.path.join(self.output, 'result.phmm'), 'w') as output:
output.write(json.dumps(result, ensure_ascii=False))
# import chart
# chart.generate_chart(self.e_prob, self.char_list)
def main():
parser = argparse.ArgumentParser(
description='generate profile hidden markov')
parser.add_argument('--input', help='input file location')
parser.add_argument('--output', help='output file location')
args = parser.parse_args()
model = ProfileHiddenMarkovMoldel(args.input, args.output)
model.build_model()
model.create_result()
if __name__ == '__main__':
main()
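# Input format sketch (an assumption, based on how build_model treats '.'):
# one aligned sequence per line, with '.' marking a gap, e.g.
#
#   AG...C
#   AGG..C
#   A...GC
#
# Invocation: python profileHMM.py --input strings.txt --output ./out
# which writes the model as JSON to ./out/result.phmm.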
|
burakkose/profile-hidden-markov-models
|
src/profileHMM.py
|
Python
|
mit
| 11,453
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from widgets import DatePickerWidget
FILTER_PREFIX = 'drf__'
class DateRangeExForm(forms.Form):
def __init__(self, request, *args, **kwargs):
field_name = kwargs.pop('field_name')
self.request = request
super(DateRangeExForm, self).__init__(*args, **kwargs)
self.fields['%s%s__gte' % (FILTER_PREFIX, field_name)] = forms.DateField(
label='',
widget=DatePickerWidget(
attrs={
'class': 'date',
'placeholder': 'Desde el día'
},
format="%m/%d/%Y"
),
input_formats=('%Y/%m/%d', '%d/%m/%Y', '%m/%d/%Y'),
localize=True,
required=False
)
self.fields['%s%s__lte' % (FILTER_PREFIX, field_name)] = forms.DateField(
label='',
widget=DatePickerWidget(
attrs={
'class': 'date',
'placeholder': 'Hasta el día',
},
format="%m/%d/%Y"
),
input_formats=('%Y/%m/%d', '%d/%m/%Y', '%m/%d/%Y'),
localize=True,
required=False,
)
# end def
# end class
|
exildev/webpage
|
exile_ui/forms.py
|
Python
|
mit
| 1,290
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pollapp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
#!/usr/bin/env python2.7
# Legacy bootstrap below: execute_manager was removed in Django 1.6, so this
# block only works on very old Django versions.
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
if __name__ == "__main__":
execute_manager(settings)
|
rajmohanperiyasamy/pollapp
|
manage.py
|
Python
|
mit
| 761
|
import os
import sys
import logging
import math
import pandas as pd
import pytz
import bt
import matplotlib.pyplot as plt
from talib import RSI, MA
from stock_data_provider import create_dataframe, filter_dataframe
try:
from . import module_loader
except ImportError:
import module_loader
sys.dont_write_bytecode = True
stock_data_provider = module_loader.load_module_from_file('stock_data_provider.cn_a.vip_dataset')
load_data = module_loader.load_module_func(stock_data_provider,
'load_stock_data')
do_normalize_data = True
#all_loaded_data = load_data('600369,600999,600732,601066', do_normalize_data)
#all_loaded_data = load_data('600019,600050,600030,600584,600036,600406', do_normalize_data)
#all_loaded_data = load_data('600019,600050,600584', do_normalize_data)
#all_loaded_data = load_data('600519,000858,601318,600036,603288,600276,600900,600887', do_normalize_data)
all_loaded_data = load_data('600732', do_normalize_data)
bench_data = load_data('sh000001', do_normalize_data)
start_date = '20210501'
end_date = '20210630'
print(start_date, end_date)
all_data = panel = create_dataframe(all_loaded_data, 'close')
bench_data = create_dataframe(bench_data, 'close')
panel = filter_dataframe(panel, start_date, end_date)
bench_data = filter_dataframe(bench_data, start_date, end_date)
bt.merge(panel).plot()
plt.show()
# create the strategy
from strategy.rsi_25_75_talib import create_strategy
from strategy.r3 import create_strategy as r3_create_strategy
from strategy.sma import above_sma, long_only_ew
from strategy.macd_talib import create_strategy as macd_create_strategy
from strategy.ibs import create_strategy as ibs_create_strategy
from strategy.ibs_rsi import create_strategy as ibs_rsi_create_strategy
ss = bt.Strategy('s1', [bt.algos.RunMonthly(),
bt.algos.SelectAll(),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
s = create_strategy().get_strategy(all_loaded_data)
r3_s = r3_create_strategy().get_strategy(all_loaded_data)
macd_s = macd_create_strategy().get_strategy(all_loaded_data)
ibs_s = ibs_create_strategy().get_strategy(all_loaded_data)
ibs_rsi_s = ibs_rsi_create_strategy().get_strategy(all_loaded_data)
# create a backtest and run it
test = bt.Backtest(s, panel)
test_s = bt.Backtest(ss, panel)
test_r3_s = bt.Backtest(r3_s, panel)
test_macd_s = bt.Backtest(macd_s, panel)
test_ibs_s = bt.Backtest(ibs_s, panel)
test_ibs_rsi_s = bt.Backtest(ibs_rsi_s, panel)
sma10 = above_sma(data=panel, sma_per=10, name='sma10', start=start_date)
sma20 = above_sma(data=panel, sma_per=20, name='sma20', start=start_date)
sma40 = above_sma(data=panel, sma_per=40, name='sma40', start=start_date)
benchmark = long_only_ew(data=bench_data, name='sh000001', start=start_date)
long_only = long_only_ew(data=panel, name='bench', start=start_date)
res = bt.run(test, test_s, sma10, sma20, sma40, benchmark, test_r3_s, test_macd_s, test_ibs_s, test_ibs_rsi_s)
#res = bt.run(test_ibs_s, test, test_r3_s, test_macd_s, test_ibs_rsi_s, long_only)
#trans = res.get_transactions()
trans = []
if len(trans) > 0:
res.plot()
plt.show()
res.plot_histogram()
plt.show()
print(trans.to_string())
# ok and what about some stats?
res.display()
|
stonewell/learn-curve
|
test/test_vipset.py
|
Python
|
mit
| 3,302
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Triangle Project Code.
# Triangle analyzes the lengths of the sides of a triangle
# (represented by a, b and c) and returns the type of triangle.
#
# It returns:
# 'equilateral' if all sides are equal
# 'isosceles' if exactly 2 sides are equal
# 'scalene' if no sides are equal
#
# The tests for this method can be found in
# about_triangle_project.py
# and
# about_triangle_project_2.py
#
def triangle(a, b, c):
# DELETE 'PASS' AND WRITE THIS CODE
if a <= 0 or b <= 0 or c <= 0:
raise TriangleError
if a + b <= c or b + c <= a or c + a <= b:
raise TriangleError
if a == b and b == c and c == a:
return 'equilateral'
if a != b and b != c and c != a:
return 'scalene'
else:
return 'isosceles'
# Error class used in part 2. No need to change this code.
class TriangleError(Exception):
pass
|
kimegitee/python-koans
|
python3/koans/triangle.py
|
Python
|
mit
| 886
|
#! /usr/bin/env python
#
# File Name : generate_grid_mrf_model.py
# Created By : largelymfs
# Creation Date : [2016-01-20 14:42]
# Last Modified : [2016-01-20 14:50]
# Description : the pyscripts to generate mrf grid model
#
def output_2darray(array, fout):
for item in array:
for value in item:
print >>fout, value,
print >>fout
if __name__=="__main__":
import sys
filename_output = sys.argv[2]
cnt_variable = int(sys.argv[1])
import numpy as np
#generate phi
phi = np.random.normal(0.0, 0.1, cnt_variable * cnt_variable).reshape((cnt_variable, cnt_variable))
theta_a = np.random.normal(0.0, 1.0, cnt_variable * (cnt_variable - 1)).reshape((cnt_variable - 1, cnt_variable))
theta_b = np.random.normal(0.0, 1.0, cnt_variable * (cnt_variable - 1)).reshape((cnt_variable, cnt_variable - 1))
with open(filename_output,"w") as fout:
print >>fout, cnt_variable
print >>fout
output_2darray(phi, fout)
print >>fout
output_2darray(theta_a, fout)
print >>fout
output_2darray(theta_b, fout)
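# Output layout (derived from the writes above): the file contains the
# variable count n, a blank line, then phi (n x n), theta_a ((n-1) x n) and
# theta_b (n x (n-1)), one row per line with blank lines between the blocks.
# Example invocation: python generate_grid_mrf_model.py 4 model.txt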
|
YoungLew/NoiseContrastiveLearning
|
Grid_MRF/generate_grid_mrf_model.py
|
Python
|
mit
| 1,188
|
"""
Created on Sat Sep 16 18:32:01 2017
@author: dariocorral
"""
import os
import oandapy
import pandas as pd
class Tickers(object):
"""
Basic info about tickers available for OANDA trading
"""
#oanda_api private attribute
_oanda_api = oandapy.API(environment = os.environ['ENV'],
access_token = os.environ['TOKEN'])
@property
def dataframe(self):
"""
Tickers Dataframe with basic info:
:param : no params
:return : dataFrame object
* Pip value
* DisplayName
* Max Trade Units
* Base
* Quote
* Pip Decimals
"""
#Call dict OANDA API and transform to DataFrame
df = self._oanda_api.get_instruments(os.environ['ACCOUNT'])
df = df.get('instruments')
df = pd.DataFrame.from_dict(df)
base = df['instrument'].str.split('_', expand = True)
df = df.join(base)
df.set_index ('instrument',inplace = True)
#Rename columns
df.columns = (u'displayName', u'maxTradeUnits', u'pip', u'base',
u'quote')
#Change tick Value to float
df['pip'] = df['pip'].astype(float)
return df
def tick_value(self,ticker):
"""
Minimum tick value
:param: ticker
:type : string, list or tuple
:return: float or dataframe
"""
return self.dataframe.loc[ticker]['pip']
def display_name(self,ticker):
"""
ticker Display Name
:param: ticker
:type : string, list or tuple
:return : string or datrame
"""
return self.dataframe.loc[ticker]['displayName']
def max_trade_units(self,ticker):
"""
Max Trade Units allowed
:param: ticker
:type : string, list or tuple
:return : integer or dataframe
"""
return self.dataframe.loc[ticker]['maxTradeUnits']
def base(self,ticker):
"""
ticker base part
:param: ticker
:type : string, list or tuple
:return : string or dataframe
"""
return self.dataframe.loc[ticker]['base']
def quote(self,ticker):
"""
ticker quote part
:param: ticker
:type : string, list or tuple
:return : string or dataframe
"""
return self.dataframe.loc[ticker]['quote']
def pip_decimals(self,ticker):
"""
ticker decimals (for rounding quotes)
:param: ticker
:type : string
:return : int
"""
        # round to an integer first so float noise (e.g. 10000.000000000002)
        # does not add a spurious '0' to the count
        inverse = int(round(1 / self.tick_value(ticker)))
        return str(inverse).count('0')
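# Usage sketch (requires the ENV, TOKEN and ACCOUNT environment variables used
# above; the values shown are typical, not guaranteed):
#
#   t = Tickers()
#   t.tick_value('EUR_USD')     # e.g. 0.0001
#   t.pip_decimals('EUR_USD')   # -> 4 with the example pip above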
|
dariocorral/panoanda
|
panoanda/tickers.py
|
Python
|
mit
| 2,954
|
"""The tests for the logbook component."""
# pylint: disable=protected-access,too-many-public-methods
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.components import sun
import homeassistant.core as ha
from homeassistant.const import (
EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
ATTR_HIDDEN, STATE_NOT_HOME, STATE_ON, STATE_OFF)
import homeassistant.util.dt as dt_util
from homeassistant.components import logbook
from homeassistant.bootstrap import setup_component
from tests.common import mock_http_component, get_test_home_assistant
class TestComponentLogbook(unittest.TestCase):
"""Test the History component."""
EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_http_component(self.hass)
self.hass.config.components += ['frontend', 'recorder', 'api']
with patch('homeassistant.components.logbook.'
'register_built_in_panel'):
assert setup_component(self.hass, logbook.DOMAIN,
self.EMPTY_CONFIG)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_service_call_create_logbook_entry(self):
"""Test if service call create log book entry."""
calls = []
def event_listener(event):
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(logbook.DOMAIN, 'log', {
logbook.ATTR_NAME: 'Alarm',
logbook.ATTR_MESSAGE: 'is triggered',
logbook.ATTR_DOMAIN: 'switch',
logbook.ATTR_ENTITY_ID: 'switch.test_switch'
}, True)
self.assertEqual(1, len(calls))
last_call = calls[-1]
self.assertEqual('Alarm', last_call.data.get(logbook.ATTR_NAME))
self.assertEqual('is triggered', last_call.data.get(
logbook.ATTR_MESSAGE))
self.assertEqual('switch', last_call.data.get(logbook.ATTR_DOMAIN))
self.assertEqual('switch.test_switch', last_call.data.get(
logbook.ATTR_ENTITY_ID))
def test_service_call_create_log_book_entry_no_message(self):
"""Test if service call create log book entry without message."""
calls = []
def event_listener(event):
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(logbook.DOMAIN, 'log', {}, True)
self.assertEqual(0, len(calls))
def test_humanify_filter_sensor(self):
"""Test humanify filter too frequent sensor values."""
entity_id = 'sensor.bla'
pointA = dt_util.utcnow().replace(minute=2)
pointB = pointA.replace(minute=5)
pointC = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id, 20)
eventC = self.create_state_changed_event(pointC, entity_id, 30)
entries = list(logbook.humanify((eventA, eventB, eventC)))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], pointB, 'bla', domain='sensor', entity_id=entity_id)
self.assert_entry(
entries[1], pointC, 'bla', domain='sensor', entity_id=entity_id)
def test_filter_continuous_sensor_values(self):
"""Test remove continuous sensor events from logbook."""
entity_id = 'sensor.bla'
pointA = dt_util.utcnow()
attributes = {'unit_of_measurement': 'foo'}
eventA = self.create_state_changed_event(
pointA, entity_id, 10, attributes)
entries = list(logbook.humanify((eventA,)))
self.assertEqual(0, len(entries))
def test_exclude_events_hidden(self):
"""Test if events are excluded if entity is hidden."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10,
{ATTR_HIDDEN: 'true'})
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), self.EMPTY_CONFIG)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_exclude_events_entity(self):
"""Test if events are filtered if entity is excluded in config."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
logbook.CONF_ENTITIES: [entity_id, ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_exclude_events_domain(self):
"""Test if events are filtered if domain is excluded in config."""
entity_id = 'switch.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
logbook.CONF_DOMAINS: ['switch', ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointB, 'blu', domain='sensor',
entity_id=entity_id2)
def test_exclude_auto_groups(self):
"""Test if events of automatically generated groups are filtered."""
entity_id = 'switch.bla'
entity_id2 = 'group.switches'
pointA = dt_util.utcnow()
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointA, entity_id2, 20,
{'auto': True})
entries = list(logbook.humanify((eventA, eventB)))
self.assertEqual(1, len(entries))
self.assert_entry(entries[0], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_exclude_attribute_changes(self):
"""Test if events of attribute changes are filtered."""
entity_id = 'switch.bla'
entity_id2 = 'switch.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=1)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(
pointA, entity_id2, 20, last_changed=pointA, last_updated=pointB)
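        # a last_updated newer than last_changed marks a pure attribute
        # change, which humanify filters out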
entries = list(logbook.humanify((eventA, eventB)))
self.assertEqual(1, len(entries))
self.assert_entry(entries[0], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_entry_to_dict(self):
"""Test conversion of entry to dict."""
entry = logbook.Entry(
dt_util.utcnow(), 'Alarm', 'is triggered', 'switch', 'test_switch'
)
data = entry.as_dict()
self.assertEqual('Alarm', data.get(logbook.ATTR_NAME))
self.assertEqual('is triggered', data.get(logbook.ATTR_MESSAGE))
self.assertEqual('switch', data.get(logbook.ATTR_DOMAIN))
self.assertEqual('test_switch', data.get(logbook.ATTR_ENTITY_ID))
def test_home_assistant_start_stop_grouped(self):
"""Test if HA start and stop events are grouped.
        Events that occur in the same minute.
"""
entries = list(logbook.humanify((
ha.Event(EVENT_HOMEASSISTANT_STOP),
ha.Event(EVENT_HOMEASSISTANT_START),
)))
self.assertEqual(1, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='restarted',
domain=ha.DOMAIN)
def test_home_assistant_start(self):
"""Test if HA start is not filtered or converted into a restart."""
entity_id = 'switch.bla'
pointA = dt_util.utcnow()
entries = list(logbook.humanify((
ha.Event(EVENT_HOMEASSISTANT_START),
self.create_state_changed_event(pointA, entity_id, 10)
)))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_entry_message_from_state_device(self):
"""Test if logbook message is correctly created for switches.
Especially test if the special handling for turn on/off events is done.
"""
pointA = dt_util.utcnow()
# message for a device state change
eventA = self.create_state_changed_event(pointA, 'switch.bla', 10)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('changed to 10', message)
# message for a switch turned on
eventA = self.create_state_changed_event(pointA, 'switch.bla',
STATE_ON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('turned on', message)
# message for a switch turned off
eventA = self.create_state_changed_event(pointA, 'switch.bla',
STATE_OFF)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('turned off', message)
def test_entry_message_from_state_device_tracker(self):
"""Test if logbook message is correctly created for device tracker."""
pointA = dt_util.utcnow()
# message for a device tracker "not home" state
eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
STATE_NOT_HOME)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('is away', message)
# message for a device tracker "home" state
eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
'work')
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('is at work', message)
def test_entry_message_from_state_sun(self):
"""Test if logbook message is correctly created for sun."""
pointA = dt_util.utcnow()
# message for a sun rise
eventA = self.create_state_changed_event(pointA, 'sun.sun',
sun.STATE_ABOVE_HORIZON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('has risen', message)
# message for a sun set
eventA = self.create_state_changed_event(pointA, 'sun.sun',
sun.STATE_BELOW_HORIZON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('has set', message)
def test_process_custom_logbook_entries(self):
"""Test if custom log book entries get added as an entry."""
name = 'Nice name'
message = 'has a custom entry'
entity_id = 'sun.sun'
entries = list(logbook.humanify((
ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_ENTITY_ID: entity_id,
}),
)))
self.assertEqual(1, len(entries))
self.assert_entry(
entries[0], name=name, message=message,
domain='sun', entity_id=entity_id)
def assert_entry(self, entry, when=None, name=None, message=None,
domain=None, entity_id=None):
"""Assert an entry is what is expected."""
if when:
self.assertEqual(when, entry.when)
if name:
self.assertEqual(name, entry.name)
if message:
self.assertEqual(message, entry.message)
if domain:
self.assertEqual(domain, entry.domain)
if entity_id:
self.assertEqual(entity_id, entry.entity_id)
def create_state_changed_event(self, event_time_fired, entity_id, state,
attributes=None, last_changed=None,
last_updated=None):
"""Create state changed event."""
# Logbook only cares about state change events that
# contain an old state but will not actually act on it.
state = ha.State(entity_id, state, attributes, last_changed,
last_updated).as_dict()
return ha.Event(EVENT_STATE_CHANGED, {
'entity_id': entity_id,
'old_state': state,
'new_state': state,
}, time_fired=event_time_fired)
|
leoc/home-assistant
|
tests/components/test_logbook.py
|
Python
|
mit
| 15,111
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from telestream_cloud_qc.api.qc_api import QcApi
|
Telestream/telestream-cloud-python-sdk
|
telestream_cloud_qc_sdk/telestream_cloud_qc/api/__init__.py
|
Python
|
mit
| 136
|
# coding: utf-8
from __future__ import absolute_import
from flask import Flask, render_template, _app_ctx_stack, abort
from flask_debugtoolbar import DebugToolbarExtension
from flask_restful import Api
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session, Query as SAQuery
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
# from .settings import ProdConfig
# from .assets import assets
# from budgetApp import public, user
# database session registry object, configured from create_app factory
DbSession = scoped_session(sessionmaker(),
# __ident_func__ should be hashable, therefore used
# for recognizing different incoming requests
scopefunc=_app_ctx_stack.__ident_func__)
class BaseQuery(SAQuery):
"""
Extended SQLAlchemy Query class, provides :meth:`BaseQuery.first_or_404`
    and :meth:`BaseQuery.get_or_404` similarly to Flask-SQLAlchemy.
These methods are additional, :class:`BaseQuery` works like a normal
SQLAlchemy query class.
"""
def get_or_404(self, identity):
result = self.get(identity)
if result is None:
abort(404)
return result
def first_or_404(self):
result = self.first()
if result is None:
abort(404)
return result
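# Hypothetical usage once DbSession is configured by create_app()
# (Budget stands in for any mapped model):
#   budget = DbSession.query(Budget).get_or_404(budget_id)
# aborts the request with 404 instead of returning None when missing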
def create_app(name_handler, config_object):
"""
Application factory (http://flask.pocoo.org/docs/patterns/appfactories/)
    :param name_handler: name the application is created and bound to.
:param config_object: the configuration object to use.
"""
app = Flask(name_handler, template_folder="budgetApp/templates")
app.config.from_object(config_object)
app.engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"])
global DbSession
DbSession.configure(bind=app.engine, query_cls=BaseQuery)
@app.teardown_appcontext
def teardown(exception=None):
if isinstance(exception, NoResultFound) or \
isinstance(exception, MultipleResultsFound):
abort(404)
global DbSession
if DbSession:
DbSession.remove()
register_extensions(app)
register_api(app)
register_blueprints(app)
register_errorhandlers(app)
return app
def register_api(app):
"""
Register Flask-RESTful APIs.
"""
api = Api(app)
# users
from .views.users import UsersList, UserResource
api.add_resource(UsersList, "/users")
api.add_resource(UserResource, "/users/<int:user_id>")
# users' budgets
from .views.budgets import UsersBudgetList
api.add_resource(UsersBudgetList, "/users/<int:user_id>/budget")
# budgets
from .views.budgets import BudgetResource
api.add_resource(BudgetResource, "/budget/<int:budget_id>")
return None
def register_extensions(app):
# db.init_app(app)
# login_manager.init_app(app)
# assets.init_app(app)
DebugToolbarExtension(app)
# cache.init_app(app)
# migrate.init_app(app, db)
return None
def register_blueprints(app):
# app.register_blueprint(public.views.blueprint)
# app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
return (render_template("{0}.html".format(error.code)),
error.code)
for errcode in [401, 404, 500]:
app.register_error_handler(errcode, render_error)
return None
|
pbanaszkiewicz/budgetApp
|
budgetApp/app.py
|
Python
|
mit
| 3,510
|
import random
from urllib import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)" :
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function named *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function and call it with parameters self, @@@.",
"***.*** = '***'":
"From *** get tha *** attribute and set it to '***'."
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASES_FIRST = True
else:
PHRASES_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(word.strip())
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep doing until they hit CTRL-D
try:
while True:
snippets = PHRASES.keys()
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASES_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
Valka7a/python-playground
|
python-the-hard-way/oop-test.py
|
Python
|
mit
| 2,060
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitDaughter2Woman_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitDaughter2Woman_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitDaughter2Woman_CompleteLHS, self).__init__(name='HUnitDaughter2Woman_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitDaughter2Woman_CompleteLHS')
self["equations"] = []
# Set the node attributes
# match class Family(Fam) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Family"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Fam')
# match class Child(Child) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__Child"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Child')
# apply class Woman(Woman) node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return True"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__Woman"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Woman')
# match association null--daughters-->nullnode
self.add_node()
self.vs[3]["MT_pre__attr1"] = """return attr_value == "daughters" """
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__directLink_S"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Famassoc3Child')
# trace association null--trace-->nullnode
self.add_node()
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__trace_link"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Womanassoc4Child')
self['equations'].append(((2,'fullName'),('concat',((1,'firstName'),(0,'lastName')))))
# Add the edges
self.add_edges([
(0,3), # match class null(Fam) -> association daughters
(3,1), # association null -> match class null(Child)
(2,4), # apply class null(Child) -> backward_association
(4,1), # backward_associationnull -> match_class null(Child)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
# define evaluation methods for each apply class.
def eval_attr13(self, attr_value, this):
return True
# define evaluation methods for each match association.
def eval_attr14(self, attr_value, this):
return attr_value == "daughters"
# define evaluation methods for each apply association.
def constraint(self, PreNode, graph):
return True
|
levilucio/SyVOLT
|
ExFamToPerson/contracts/unit/HUnitDaughter2Woman_CompleteLHS.py
|
Python
|
mit
| 2,956
|
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.centralnic.com/de.com/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisCentralnicComDeComStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.centralnic.com/de.com/status_available.txt"
host = "whois.centralnic.com"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, [])
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
eq_(self.record.registrar, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(self.record.technical_contacts, [])
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_domain_id(self):
eq_(self.record.domain_id, None)
def test_expires_on(self):
eq_(self.record.expires_on, None)
def test_disclaimer(self):
eq_(self.record.disclaimer, None)
|
huyphan/pyyawhois
|
test/record/parser/test_response_whois_centralnic_com_de_com_status_available.py
|
Python
|
mit
| 2,051
|
import asyncio
import codecs
import logging
logger = logging.getLogger('process')
class StdStream:
def __init__(self, encoding, errors='replace'):
self.buffer_ = ''
self.decoder = codecs.getincrementaldecoder(encoding)(errors)
def feed_data(self, data):
self.buffer_ += self.decoder.decode(data)
def get_lines(self):
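        # everything before the final newline becomes complete lines; the
        # trailing partial line stays buffered for the next feed_data()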
*lines, self.buffer_ = self.buffer_.split('\n')
return lines
class ProcessProtocol(asyncio.SubprocessProtocol):
def __init__(self, wrapper):
self.wrapper = wrapper
self.stdout = StdStream('utf8')
self.stderr = StdStream('utf8')
@classmethod
def factory(cls, wrapper):
def factory():
return cls(wrapper)
return factory
def connection_made(self, transport):
self.process = transport
    def ev_write(self, data):
        # write to the child's stdin (pipe fd 0), utf8-encoded to match
        # the StdStream defaults above
        self.process.get_pipe_transport(0).write(data['data'].encode('utf8'))
def ev_kill(self, data):
self.process.send_signal(data['signal'])
def pipe_data_received(self, fd, data):
if fd == 1:
typ = 'stdout'
stream = self.stdout
else:
typ = 'stderr'
stream = self.stderr
stream.feed_data(data)
for line in stream.get_lines():
self.wrapper.process_output(self, typ, line)
def process_exited(self):
self.wrapper.process_exited(self, self.process.get_returncode())
|
Thezomg/gsc
|
gsc/process.py
|
Python
|
mit
| 1,432
|
# Copyright (c) Jeroen Van Steirteghem
# See LICENSE
from twisted.internet import reactor, ssl
import OpenSSL
import twunnel.local_proxy_server
import twunnel.local_proxy_server__socks5
import twunnel.logger
class SSLServerContextFactory(ssl.ContextFactory):
isClient = 0
def __init__(self, certificateFile, certificateKeyFile):
twunnel.logger.log(3, "trace: SSLServerContextFactory.__init__")
self.certificateFile = certificateFile
self.certificateKeyFile = certificateKeyFile
def getContext(self):
twunnel.logger.log(3, "trace: SSLServerContextFactory.getContext")
context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
context.set_options(OpenSSL.SSL.OP_NO_SSLv2)
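        # negotiate the best mutually supported protocol, but never SSLv2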
context.use_certificate_file(self.certificateFile)
context.use_privatekey_file(self.certificateKeyFile)
return context
class SSLInputProtocolFactory(twunnel.local_proxy_server__socks5.SOCKS5InputProtocolFactory):
def __init__(self, configuration):
twunnel.logger.log(3, "trace: SSLInputProtocolFactory.__init__")
self.configuration = configuration
configuration = {}
configuration["PROXY_SERVERS"] = self.configuration["PROXY_SERVERS"]
configuration["LOCAL_PROXY_SERVER"] = {}
configuration["LOCAL_PROXY_SERVER"]["TYPE"] = "SOCKS5"
configuration["LOCAL_PROXY_SERVER"]["ADDRESS"] = self.configuration["REMOTE_PROXY_SERVER"]["ADDRESS"]
configuration["LOCAL_PROXY_SERVER"]["PORT"] = self.configuration["REMOTE_PROXY_SERVER"]["PORT"]
configuration["LOCAL_PROXY_SERVER"]["ACCOUNTS"] = []
i = 0
while i < len(self.configuration["REMOTE_PROXY_SERVER"]["ACCOUNTS"]):
configuration["LOCAL_PROXY_SERVER"]["ACCOUNTS"].append({})
configuration["LOCAL_PROXY_SERVER"]["ACCOUNTS"][i]["NAME"] = self.configuration["REMOTE_PROXY_SERVER"]["ACCOUNTS"][i]["NAME"]
configuration["LOCAL_PROXY_SERVER"]["ACCOUNTS"][i]["PASSWORD"] = self.configuration["REMOTE_PROXY_SERVER"]["ACCOUNTS"][i]["PASSWORD"]
i = i + 1
configuration["REMOTE_PROXY_SERVERS"] = []
outputProtocolConnectionManager = twunnel.local_proxy_server.OutputProtocolConnectionManager(configuration)
twunnel.local_proxy_server__socks5.SOCKS5InputProtocolFactory.__init__(self, configuration, outputProtocolConnectionManager)
def createSSLPort(configuration):
factory = SSLInputProtocolFactory(configuration)
contextFactory = SSLServerContextFactory(configuration["REMOTE_PROXY_SERVER"]["CERTIFICATE"]["FILE"], configuration["REMOTE_PROXY_SERVER"]["CERTIFICATE"]["KEY"]["FILE"])
return ssl.Port(configuration["REMOTE_PROXY_SERVER"]["PORT"], factory, contextFactory, 50, configuration["REMOTE_PROXY_SERVER"]["ADDRESS"], reactor)
|
jvansteirteghem/twunnel
|
twunnel/remote_proxy_server__ssl.py
|
Python
|
mit
| 2,885
|
#!/usr/bin/env python
import sys
import csv
from Bio import SeqIO
#from Bio.Seq import Seq
#from Bio.SeqRecord import SeqRecord
#from collections import defaultdict
def main():
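    # argv[1] is a FASTA of all sequences; stdin is a tab-separated table
    # with 'node' and 'group' columns; one FASTA is written per group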
records = SeqIO.to_dict(SeqIO.parse(open(sys.argv[1]), 'fasta'))
reader = csv.DictReader(sys.stdin, dialect="excel-tab")
clusters = list(reader)
groups = set([c['group'] for c in clusters])
for group in groups:
print "cluster%s\t%s-cluster%s" % (group, sys.argv[1], group)
with open('%s-cluster%s' %(sys.argv[1], group), 'w') as fout:
SeqIO.write([records[i['node']] for i in clusters if i['group'] == group], fout, 'fasta')
if __name__ == '__main__':
main()
|
zibraproject/zika-pipeline
|
scripts/split-clusters.py
|
Python
|
mit
| 669
|
import numpy as np
import matplotlib.pyplot as plt
import Graphics as artist
import matplotlib.gridspec as gridspec
from awesome_print import ap
plt.xkcd()
def unique_words(aStr):
return ' '.join([word for word in set(aStr.split())])
def princomp(A,numpc=3):
# computing eigenvalues and eigenvectors of covariance matrix
M = (A-np.mean(A.T,axis=1)).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M))
p = np.size(coeff,axis=1)
idx = np.argsort(latent) # sorting the eigenvalues
    idx = idx[::-1] # reverse to descending order
# sorting eigenvectors according to the sorted eigenvalues
coeff = coeff[:,idx]
latent = latent[idx] # sorting eigenvalues
if numpc < p and numpc >= 0:
coeff = coeff[:,range(numpc)] # cutting some PCs if needed
score = np.dot(coeff.T,M) # projection of the data in the new space
return coeff,score,latent
TEXT = 1
basis_vectors = [unique_words(line.split(':')[TEXT]) for line in open('../data/lda-topics.txt','rb').read().splitlines()]
def jaccard_similarity(a,b):
a = set(a)
b = set(b)
try:
return len(a & b)/float(len(a | b))
    except ZeroDivisionError:
return 0
#ap(basis_vectors)
basis_vectors_similarity = np.array([[jaccard_similarity(one,two) for one in basis_vectors] for two in basis_vectors])
eigvecs,proj,eigvals = princomp(basis_vectors_similarity,numpc=basis_vectors_similarity.shape[1])
for name,matrix in [('eigvecs',eigvecs),('projections',proj),('eigvals',eigvals)]:
np.savetxt('../data/%s'%name,matrix,fmt='%.02f')
max_color = max(basis_vectors_similarity.max(),eigvecs.max(),np.corrcoef(eigvecs.T).max())
min_color = min(basis_vectors_similarity.min(),eigvecs.min(),np.corrcoef(eigvecs.T).min())
fig = plt.figure(figsize=(12,5))
gspec = gridspec.GridSpec(1, 3,width_ratios=[1,1,1])
non_orthogonal = plt.subplot(gspec[0])
loading_matrix = plt.subplot(gspec[2])
orthogonal = plt.subplot(gspec[1])
cno = non_orthogonal.imshow(basis_vectors_similarity,interpolation='nearest',aspect='auto',vmax=max_color,vmin=min_color)
artist.adjust_spines(non_orthogonal)
non_orthogonal.set_xlabel('Topic')
non_orthogonal.set_ylabel('Topic')
cbar_no = fig.colorbar(cno,ax=non_orthogonal)
cbar_no.set_label('Jaccard Similarity ')
cax_load = loading_matrix.imshow(eigvecs,interpolation='nearest',aspect='auto',vmax=max_color,vmin=min_color)
artist.adjust_spines(loading_matrix)
loading_matrix.set_xlabel('Eigentopic')
loading_matrix.set_ylabel('Topic')
cbar_load = fig.colorbar(cax_load,ax=loading_matrix,use_gridspec=True)
cbar_load.set_label('Loading Weight')
cax = orthogonal.imshow(np.corrcoef(eigvecs.T),interpolation='nearest',aspect='auto',vmax=max_color,vmin=min_color)
artist.adjust_spines(orthogonal)
orthogonal.set_xlabel('Eigentopic')
orthogonal.set_ylabel('Eigentopic')
cbar = fig.colorbar(cax,ax=orthogonal,use_gridspec=True)
cbar.set_label('Correlation Coefficient')
gspec.tight_layout(fig)
plt.savefig('../images/Jaccard->LDA-xkcd.png',dpi=300)
|
mac389/lovasi
|
src/orthogonalize-topics-xkcd.py
|
Python
|
mit
| 2,933
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "froide_theme.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
okfde/froide-theme
|
manage.py
|
Python
|
mit
| 315
|
from fabric.api import *
# Fill out USER and HOSTS configuration before running
env.user = ''
env.hosts = ['']
env.code_dir = '/home/%s/rtd/checkouts/readthedocs.org' % (env.user)
env.virtualenv = '/home/%s/rtd' % (env.user)
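# With USER and HOSTS filled in above, running `fab` executes the default
# install task; `fab clean` tears everything down again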
def install_prerequisites():
"""Install prerequisites."""
sudo("apt-get -y install python-dev python-pip git redis-server texlive texlive-latex-extra")
sudo("pip install virtualenv")
def create_virtualenv():
"""Create virtualenv."""
run("virtualenv --no-site-packages --distribute rtd")
def clone_repository():
"""Clone readthedocs repo"""
run("mkdir %s/checkouts" % (env.virtualenv))
with cd("%s/checkouts" % env.virtualenv):
run("git clone http://github.com/rtfd/readthedocs.org.git")
def pip_requirements():
"""Install pip requirements"""
with cd(env.code_dir):
with prefix("source %s/bin/activate" % (env.virtualenv)):
run("pip install -r pip_requirements.txt")
def build_db():
"""Build database"""
with prefix("source %s/bin/activate" % (env.virtualenv)):
run("%s/readthedocs/manage.py syncdb" % (env.code_dir))
def migrate_db():
"""Migrate database"""
with prefix("source %s/bin/activate" % (env.virtualenv)):
run("%s/readthedocs/manage.py migrate" % (env.code_dir))
def load_testprojects():
"""Load test data and update repos"""
with prefix("source %s/bin/activate" % (env.virtualenv)):
run("%s/readthedocs/manage.py loaddata test_data" % (env.code_dir))
run("%s/readthedocs/manage.py update_repos" % (env.code_dir))
@task(default=True)
def install():
"""Install readthedocs"""
install_prerequisites()
create_virtualenv()
clone_repository()
pip_requirements()
build_db()
migrate_db()
load_testprojects()
@task
def clean():
"""Clean up everything to start over"""
sudo("rm -rf %s" % (env.virtualenv))
sudo("pip uninstall virtualenv")
sudo("apt-get -y purge python-dev python-pip git redis-server texlive texlive-latex-extra")
sudo("apt-get -y autoremove --purge")
|
ojii/readthedocs.org
|
fabfile-development.py
|
Python
|
mit
| 2,084
|
# -*- coding: utf-8 -*-
import json
import psycopg2
import math
DSN = "dbname=jonsaints"
areas = {}
with psycopg2.connect(DSN) as conn:
with conn.cursor() as curs:
SQL = '''
select
uuid,
st_area(geom::geography) as area
from polygons p
'''
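        # st_area over a geography cast yields the area in square metres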
curs.execute(SQL)
for row in curs:
uuid = row[0]
area = row[1]
areas[uuid] = area
geojson = []
with open('output/village_final_geonames_unhcr_primary_names.json') as f:
geojson = json.loads(f.read())
export = {
"type": "FeatureCollection",
"features": []
}
i = 0
for feature in geojson['features']:
i += 1
f = {}
f['geometry'] = feature['geometry']
f['properties'] = {}
if feature['properties']['UUID'] in areas:
f['properties']['areasqutm'] = int(math.ceil(areas[ feature['properties']['UUID'] ]))
else:
print "no area", feature['properties']['UUID']
f['properties']['areasqutm'] = None
f['properties']['uuid'] = feature['properties']['UUID']
f['properties']['mindate'] = feature['properties']['mindate']
f['properties']['maxdate'] = feature['properties']['maxdate']
f['properties']['pr_name'] = feature['properties']['primary_name']
#f['properties']['dgpc_ls'] = int(math.ceil( float(feature['properties']['PopCount']) ))
f['properties']['dgpc_ls'] = feature['properties']['PopCount']
f['properties']['unhrname'] = ""
if len(feature['properties']['unhcr']) > 1 :
f['properties']['unhrname'] = "AGG[{}]".format( ",".join(feature['properties']['unhcr']) )
elif len(feature['properties']['unhcr']) == 1:
f['properties']['unhrname'] = feature['properties']['unhcr'][0]
f['properties']['geoname'] = ""
if len(feature['properties']['geonames']) > 1 :
f['properties']['geoname'] = "AGG[{}]".format( u','.join(feature['properties']['geonames']).encode('utf-8').strip() )
elif len(feature['properties']['geonames']) == 1:
f['properties']['geoname'] = feature['properties']['geonames'][0]
export['features'].append(f)
with open('output/final.json','wb') as f:
f.write(json.dumps(export))
|
saintsjd/geonameit
|
bin/final.py
|
Python
|
mit
| 2,204
|
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
@gen.coroutine
def test():
client = AsyncHTTPClient()
ret = yield client.fetch('http://127.0.0.1:8000')
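    # pre-3.3 coroutines cannot use `return ret.body`; Tornado's
    # gen.Return exception carries the value back to the caller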
raise gen.Return(ret.body)
@gen.coroutine
def run():
print 'run invoked'
ret = yield test()
print 'Done'
run()
run()
run()
IOLoop.instance().start()
|
ly0/pycrawler
|
others/temp.py
|
Python
|
mit
| 386
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="waterfall.textfont", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/waterfall/textfont/_colorsrc.py
|
Python
|
mit
| 421
|
"""
Tiny sample of converter
usage: updater.py --converter=tinysample
"""
def convert(xs, args):
print 'hello!', args
return xs # do nothing
|
mitou/meikan
|
converter/tinysample.py
|
Python
|
mit
| 151
|
import sys
sys.path.append(r'D:\GitHub\PythonDev\ChargingTime')
import hello
hello = reload(hello)
import hello2
hello2.hello()
import hello3
hello3.hello()
print __name__
print hello3.__name__
import hello4
hello4.hello()
hello4.test()
import sys, pprint
pprint.pprint(sys.path)
import constants
print constants.PI
import copy
pprint.pprint(dir(copy))
print [n for n in dir(copy) if not n.startswith('_')]
print copy.__all__
from copy import *
print help(copy)
print copy.__doc__
print range.__doc__
print copy.__file__
a = set(range(10))
b = set([0, 2, 3, 4, 5, 6, 7, 8, 4, 6, 887, 53, 54])
c = set(['fee', 'fie', 'foe'])
d = a | b
print d
print a.union(b)
print a.union(c)
print a & b
print a <= d
print a <= c
print a >= c
print a != c
c = a & b
print c.issubset(a)
print a.intersection(b)
print a.difference(b)
print a - b
print a.symmetric_difference(b)
print a ^ b
print a.copy()
print a.copy() is a
mySets = []
for i in range(10):
mySets.append(set(range(i, i + 5)))
print reduce(set.union, mySets)
a = set()
b = set()
print a.add(frozenset(b))
from heapq import *
from random import shuffle
data = range(10)
shuffle(data)
heap = []
for n in data:
heappush(heap, n)
print heap
heappush(heap, 0.5)
print heap
print heappop(heap)
print heappop(heap)
print heap
heap = [5, 8, 0, 3, 6, 7, 9, 1, 4, 2]
heapify(heap)
print heap
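# after heapify only the heap invariant holds (heap[0] is the minimum);
# the printed list is the heap's internal layout, not sorted order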
print heapreplace(heap, 0.5)
print heap
print heapreplace(heap, 10)
print heap
from collections import deque
q = deque(range(5))
q.append(5)
q.appendleft(6)
print q
print q.pop()
print q.popleft()
print q
q.rotate(3)
print q
q.rotate(-1)
print q
from random import *
from time import *
date1 = (2008, 1, 1, 0, 0, 0, -1, -1, -1)
time1 = mktime(date1)
date2 = (2009, 1, 1, 0, 0, 0, -1, -1, -1)
time2 = mktime(date2)
random_time = uniform(time1, time2)
print asctime(localtime(random_time))
from random import randrange
num = input('How many dice?')
sides = input('How many sides per dice?')
sum = 0
for i in range(num):
sum += randrange(sides) + 1
print 'The result is', sum
values = range(1, 11) + 'Jack Queen King'.split()
suits = 'diamonds clubs hearts spades'.split()
deck = ['%s of %s' % (v, s) for v in values for s in suits]
from random import shuffle
shuffle(deck)
from pprint import pprint
pprint(deck[:52])
while deck:
raw_input(deck.pop())
import shelve
s = shelve.open('test.dat')
s['x'] = ['a', 'b', 'c']
s['x'].append('d')
print s['x']
temp = s['x']
temp.append('d')
s['x'] = temp
print s['x']
import re
some_text = 'alpha,beta,,,,gamma delta'
print re.split('[,]+', some_text)
pat = '[a-zA-Z]+'
text = '"HmZ... Err -- are you sure?" he said, sounding insecure.'
print re.findall(pat, text)
pat = r'[.?\-",]+'
print re.findall(pat, text)
print re.split('o(o)', 'foobar')
pat = '{name}'
text = 'Dear {name}...'
print re.sub(pat, 'Mr. Lixin', text)
print re.escape('www.python.org')
m = re.match(r'www\.(.*)\..{3}', 'www.python.org')
print m.group(1)
print m.start(1)
print m.end(1)
print m.span(1)
emphasis_pattern = r'\*([^\*]+)\*'
emphasis_pattern = re.compile(r'''
\*
(
[^\*]+
)
\*
''', re.VERBOSE)
print re.sub(emphasis_pattern, r'<em>\1</em>', 'Hello, *world*!')
|
Great-Li-Xin/PythonDev
|
BatteriesIncluded/BatteriesIncluded.py
|
Python
|
mit
| 3,142
|
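# Turkish-aware casing: the builtin lower()/upper() map I <-> i, which is
# wrong for Turkish's dotted (i/İ) and dotless (ı/I) letters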
def to_lower(s):
char_map = {
u"I": u"ı",
u"İ": u"i",
}
for key, value in char_map.items():
s = s.replace(key, value)
return s.lower()
def to_upper(s):
char_map = {
u"ı": u"I",
u"i": u"İ",
}
for key, value in char_map.items():
s = s.replace(key, value)
return s.upper()
def to_title(s):
titled_s = ""
words = s.split()
for word in words:
titled_s += to_upper(word[0]) + to_lower(word[1:]) + " "
return titled_s[0:-1]
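# e.g. to_title(u"istanbul IRMAK") == u"İstanbul Irmak"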
|
th0th/metu-cafeteria-menu
|
metu_cafeteria_menu/utils.py
|
Python
|
mit
| 535
|
from django.db import models
from measurement.models import Measurement
from django.utils.encoding import smart_unicode
class Alarm(models.Model):
measurement = models.OneToOneField(Measurement, null=True)
time_created = models.DateTimeField(null=False, auto_now_add=True, auto_now=False)
is_treated = models.BooleanField(default=False, null=False)
treated_text = models.TextField(default="", blank=True)
search_tag = models.CharField(default="", blank=True, max_length=50)
reason = models.NullBooleanField(
null=True, blank=True, default=None,
help_text="If true, the alarm was created because of a too high measurement. If false, the alarm was"
" created because of a too low measurement. Otherwise NULL."
)
    class Meta:
ordering = ['is_treated', '-time_created']
def __unicode__(self):
reason = ""
if self.reason:
reason = "High"
        elif self.reason is False:
reason = "Low"
s = "Alarm: " + reason + " " + self.measurement.type + " at " + str(self.time_created)
if self.measurement:
s += " for " + self.measurement.patient.user.get_full_name()
return smart_unicode(s)
|
sigurdsa/angelika-api
|
alarm/models.py
|
Python
|
mit
| 1,235
|
# This file is exec'd from settings.py, so it has access to and can
# modify all the variables in settings.py.
# If this file is changed in development, the development server will
# have to be manually restarted because changes will not be noticed
# immediately.
import os
import dotenv
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# pull env variables from .env
env_file = os.path.join(BASE_DIR, '.env')
dotenv.read_dotenv(env_file)
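# Hypothetical .env for local development (values are placeholders):
#   DEBUG=1
#   SECRET_KEY=change-me
#   DB_NAME=api_builder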
DEBUG = int(os.environ.get('DEBUG', 0))
JS_DEBUG = int(os.environ.get('JS_DEBUG', 0))
IN_DEV = int(os.environ.get('IN_DEV', 0))
# Make these unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY')
NEVERCACHE_KEY = os.environ.get('NEVERCACHE_KEY')
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.mysql",
"NAME": os.environ.get('DB_NAME'),
"USER": os.environ.get('DB_USER_NAME'),
"PASSWORD": os.environ.get('DB_PASSWORD'),
"HOST": os.environ.get('DB_HOST'),
"PORT": os.environ.get('DB_PORT'),
}
}
###################
# DEPLOY SETTINGS #
###################
# Domains for public site
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS')
if ALLOWED_HOSTS:
ALLOWED_HOSTS = ALLOWED_HOSTS.split(',')
else:
ALLOWED_HOSTS = ['127.0.0.1']
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "DEPLOY_TOOL": "rsync", # Deploy with "git", "hg", or "rsync"
# "SSH_USER": "", # VPS SSH username
# "HOSTS": [""], # The IP address of your VPS
# "DOMAINS": ALLOWED_HOSTS, # Edit domains in ALLOWED_HOSTS
# "REQUIREMENTS_PATH": "requirements.txt", # Project's pip requirements
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
|
edilio/api_builder
|
api_builder/local_settings.py
|
Python
|
mit
| 2,017
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
version_file = os.path.join(
os.path.dirname(__file__),
'mass_api_client',
'__version__.py'
)
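# single-source the package version from mass_api_client/__version__.py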
with open(version_file, 'r') as fp:
m = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
fp.read(),
re.MULTILINE
)
version = m.group(1)
setup(name='mass_api_client',
version=version,
license='MIT',
url='https://github.com/mass-project/mass_api_client',
install_requires=['requests==2.19.1', 'marshmallow==2.15.4'],
packages=find_packages(),
)
|
mass-project/mass_api_client
|
setup.py
|
Python
|
mit
| 617
|
"""Production settings and globals."""
from os import environ
from base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
INSTALLED_APPS += ('gunicorn',)
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = {
'default': {}
}
# Update database configuration with $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
def get_cache():
try:
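        # MemCachier (e.g. on Heroku) exports MEMCACHIER_* variables, but
        # pylibmc reads MEMCACHE_*, so mirror them across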
environ['MEMCACHE_SERVERS'] = environ['MEMCACHIER_SERVERS'].replace(',', ';')
environ['MEMCACHE_USERNAME'] = environ['MEMCACHIER_USERNAME']
environ['MEMCACHE_PASSWORD'] = environ['MEMCACHIER_PASSWORD']
return {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'TIMEOUT': 500,
'BINARY': True,
'OPTIONS': { 'tcp_nodelay': True }
}
}
    except KeyError:
return {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
}
CACHES = get_cache()
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION
ALLOWED_HOSTS = ['*']
PREPEND_WWW = True
|
daviferreira/leticiastallone.com
|
leticiastallone/settings/production.py
|
Python
|
mit
| 3,050
|
from gevent import monkey
monkey.patch_all()
from werkzeug.wsgi import peek_path_info
from geventwebsocket import Resource
from lablog import config
from lablog.app import App
from lablog.controllers.dashboard import dashboard
from lablog.controllers.auth import auth
from lablog.controllers.auth.facebook import facebook
from lablog.controllers.healthcheck import hc
from lablog.controllers.api.lab import lab
from lablog.controllers.api.node import node
from lablog.controllers.api.location import location
from lablog.controllers.reports import reports
from lablog.controllers.locations import locations
from lablog.app.kilo import Kilo
import logging
logging.basicConfig(level=config.LOG_LEVEL)
def healthcheck(app, env):
if peek_path_info(env) == "healthcheck":
app.config['SERVER_NAME'] = None
else:
app.config['SERVER_NAME'] = config.SERVER_NAME
def create_app():
logging.info("Initializing")
_app = App()
### Require Auth for Web App controllers ###
dashboard.before_request(_app.user_logged_in)
facebook.before_request(_app.user_logged_in)
locations.before_request(_app.user_logged_in)
reports.before_request(_app.user_logged_in)
### Web App Controllers ###
_app.register_blueprint(dashboard)
_app.register_blueprint(auth)
_app.register_blueprint(facebook)
_app.register_blueprint(hc)
_app.register_blueprint(reports)
_app.register_blueprint(locations)
### API Controllers (OAuth protected)###
_app.register_blueprint(location)
_app.register_blueprint(lab)
_app.register_blueprint(node)
def app(env, start_response):
#healthcheck(_app, env)
return _app(env, start_response)
logging.info("Running")
return app
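# Resource maps paths to applications: '/' serves the Flask app and
# '/socket' the Kilo WebSocket application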
app = Resource(apps=[
('/', create_app()),
('/socket', Kilo),
])
|
NationalAssociationOfRealtors/LabLog
|
wsgi.py
|
Python
|
mit
| 1,820
|
"""
@brief test tree node (time=2s)
"""
import sys
import os
import unittest
from pyquickhelper.pycode import get_temp_folder
from pyquickhelper.pycode.build_helper import get_build_script, get_script_command, get_extra_script_command, _default_nofolder
from pyquickhelper.pycode.setup_helper import write_pyproj
class TestBuildScript(unittest.TestCase):
def test_build_script(self):
if sys.platform.startswith("win") and sys.version_info[0] != 2:
sc = get_build_script("pyquickhelper")
lines = sc.split("\n")
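            # a '__' left in the generated script would mean a template
            # placeholder was never substituted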
for line in lines:
if "__" in line and _default_nofolder not in line:
raise Exception("issue with '__' in:\n" +
line + "\nFULL\n" + sc)
scc = get_script_command(
"unittest", "pyquickhelper", requirements=[])
self.assertIn("setup.py", scc)
self.assertNotIn("__", scc)
sccc = get_extra_script_command(
"local_pypi", "pyquickhelper", port=8067, requirements=[])
self.assertIn("python", sccc)
if "__" in sccc:
raise Exception("'__' not in script\n" + sccc)
else:
# not yet implemented for this platform
return
def test_build_script_all(self):
project_var_name = "pyquickhelper"
requirements = None
port = 8067
if sys.platform.startswith("win"):
for c in ("build_script", "clean_space",
"write_version", "clean_pyd",
"build_sphinx", "unittests",
"unittests_LONG", "unittests_SKIP",
"copy27", "test_local_pypi",
"unittests_GUI"):
sc = get_script_command(
c, project_var_name, requirements=requirements, port=port)
self.assertTrue(len(sc) > 0)
if "__" in sc:
raise Exception(
"'__' in script\n{0}\n----------\n{1}".format(c, sc))
unit_test_folder = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."))
for c in {"notebook", "publish", "publish_doc", "local_pypi",
"setupdep", "copy_dist",
"any_setup_command", "build_dist"}:
sc = get_extra_script_command(
c, project_var_name, requirements=requirements, port=port,
unit_test_folder=unit_test_folder)
self.assertTrue(len(sc) > 0)
if "__" in sc:
raise Exception(
c + "\n\n" + "\n__##__##__##__\n".join(sc.split("__")))
if c == "run27":
if "nosetest" not in sc:
raise Exception(sc)
else:
# not yet implemented for this platform
return
@unittest.skipIf(sys.version_info[0] == 2, reason="not available on Python 2")
def test_build_pyproj(self):
temp = get_temp_folder(__file__, "temp_pyproj")
root = os.path.normpath(os.path.join(temp, "..", "..", ".."))
write_pyproj(root, temp)
with open(os.path.join(temp, "ptvs_project.pyproj"), "r", encoding="utf8") as f:
content = f.read()
if "build\\" in content:
raise Exception(content)
if "setup_helper.py" not in content:
raise Exception(content)
if __name__ == "__main__":
unittest.main()
|
sdpython/pyquickhelper
|
_unittests/ut_pycode/test_build_script.py
|
Python
|
mit
| 3,562
|
import asyncio
import os
import pytest
import re
import uuid
from arq import Worker
from buildpg import Values, asyncpg
from buildpg.asyncpg import BuildPgConnection
from foxglove import glove
from foxglove.db import PgMiddleware, prepare_database
from foxglove.db.helpers import DummyPgPool, SyncDb
from foxglove.test_server import create_dummy_server
from httpx import URL, AsyncClient
from pathlib import Path
from starlette.testclient import TestClient
from typing import Any, Callable
from src.schemas.messages import EmailSendModel, SendMethod
from src.settings import Settings
from src.worker import shutdown, startup, worker_settings
from . import dummy_server
@pytest.fixture(name='loop')
def fix_loop(settings):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
DB_DSN = os.getenv('DATABASE_URL', 'postgresql://postgres@localhost:5432/morpheus_test')
@pytest.fixture(name='settings')
def fix_settings(tmpdir):
settings = Settings(
dev_mode=False,
test_mode=True,
pg_dsn=DB_DSN,
test_output=Path(tmpdir),
delete_old_emails=True,
update_aggregation_view=True,
mandrill_url='http://localhost:8000/mandrill/',
messagebird_url='http://localhost:8000/messagebird/',
mandrill_key='good-mandrill-testing-key',
mandrill_webhook_key='testing-mandrill-api-key',
messagebird_key='good-messagebird-testing-key',
auth_key='testing-key',
secret_key='testkey',
origin='https://example.com',
)
assert not settings.dev_mode
glove._settings = settings
yield settings
glove._settings = None
@pytest.fixture(name='await_')
def fix_await(loop):
return loop.run_until_complete
@pytest.fixture(name='raw_conn')
def fix_raw_conn(settings, await_: Callable):
await_(prepare_database(settings, overwrite_existing=True, run_migrations=False))
conn = await_(asyncpg.connect_b(dsn=settings.pg_dsn, server_settings={'jit': 'off'}))
yield conn
await_(conn.close())
@pytest.fixture(name='db_conn')
def fix_db_conn(settings, raw_conn: BuildPgConnection, await_: Callable):
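    # wrap each test in a transaction that is rolled back afterwards, so
    # no test leaks rows into the shared database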
async def start():
tr_ = raw_conn.transaction()
await tr_.start()
return tr_
tr = await_(start())
yield DummyPgPool(raw_conn)
async def end():
if not raw_conn.is_closed():
await tr.rollback()
await_(end())
@pytest.fixture(name='sync_db')
def fix_sync_db(db_conn, loop):
return SyncDb(db_conn, loop)
@pytest.fixture(name='cli')
def fix_client(glove, settings: Settings, sync_db, worker):
app = settings.create_app()
app.user_middleware = []
app.add_middleware(PgMiddleware)
app.middleware_stack = app.build_middleware_stack()
app.state.webhook_auth_key = b'testing'
glove._settings = settings
with TestClient(app) as client:
yield client
class CustomAsyncClient(AsyncClient):
def __init__(self, *args, settings, local_server, **kwargs):
super().__init__(*args, **kwargs)
self.settings: Settings = settings
self.scheme, host_port = local_server.split('://')
self.host, port = host_port.split(':')
self.port = int(port)
def request(self, method, url, **kwargs):
new_url = URL(url).copy_with(scheme=self.scheme, host=self.host, port=self.port)
return super().request(method, new_url, **kwargs)
@pytest.fixture(name='dummy_server')
def _fix_dummy_server(loop, settings):
ctx = {'mandrill_subaccounts': {}}
ds = loop.run_until_complete(create_dummy_server(loop, extra_routes=dummy_server.routes, extra_context=ctx))
custom_client = CustomAsyncClient(settings=settings, local_server=ds.server_name)
glove._http = custom_client
yield ds
loop.run_until_complete(ds.stop())
class Worker4Testing(Worker):
def test_run(self, max_jobs: int = None) -> int:
return self.loop.run_until_complete(self.run_check(max_burst_jobs=max_jobs))
def test_close(self) -> None:
# pool is closed by glove, so don't want to mess with it here
self._pool = None
self.loop.run_until_complete(self.close())
@pytest.fixture(name='glove')
def fix_glove(db_conn, await_: Callable[..., Any]):
glove.pg = db_conn
async def start():
await glove.startup(run_migrations=False)
await glove.redis.flushdb()
await_(start())
yield glove
await_(glove.shutdown())
@pytest.fixture(name='worker_ctx')
def _fix_worker_ctx(loop, settings):
ctx = dict(settings=settings)
loop.run_until_complete(startup(ctx))
yield ctx
@pytest.fixture(name='worker')
def fix_worker(db_conn, glove, worker_ctx):
functions = worker_settings['functions']
worker = Worker4Testing(
functions=functions,
redis_pool=glove.redis,
on_startup=startup,
on_shutdown=shutdown,
burst=True,
poll_delay=0.001,
ctx=worker_ctx,
)
yield worker
worker.test_close()
@pytest.fixture()
def send_email(cli, worker, loop):
def _send_email(status_code=201, **extra):
data = dict(
uid=str(uuid.uuid4()),
main_template='<body>\n{{{ message }}}\n</body>',
company_code='foobar',
from_address='Sender Name <sender@example.com>',
method='email-test',
subject_template='test message',
context={'message': 'this is a test'},
recipients=[{'address': 'foobar@testing.com'}],
)
data.update(**extra)
r = cli.post('/send/email/', json=data, headers={'Authorization': 'testing-key'})
assert r.status_code == status_code
worker.test_run()
if len(data['recipients']) != 1:
return NotImplemented
else:
return re.sub(r'[^a-zA-Z0-9\-]', '', f'{data["uid"]}-{data["recipients"][0]["address"]}')
return _send_email
@pytest.fixture
def send_sms(cli, worker, loop):
def _send_message(**extra):
data = dict(
uid=str(uuid.uuid4()),
main_template='this is a test {{ variable }}',
company_code='foobar',
from_name='FooBar',
method='sms-test',
context={'variable': 'apples'},
recipients=[{'number': '07896541236'}],
)
data.update(**extra)
r = cli.post('/send/sms/', json=data, headers={'Authorization': 'testing-key'})
assert r.status_code == 201
worker.test_run()
return data['uid'] + '-447896541236'
return _send_message
@pytest.fixture(name='call_send_emails')
def _fix_call_send_emails(glove, sync_db):
def run(**kwargs):
base_kwargs = dict(
uid=str(uuid.uuid4()),
subject_template='hello',
company_code='test',
from_address='testing@example.com',
method=SendMethod.email_mandrill,
recipients=[],
)
m = EmailSendModel(**dict(base_kwargs, **kwargs))
company_id = sync_db.fetchval('insert into companies (code) values ($1) returning id', m.company_code)
group_id = sync_db.fetchval_b(
'insert into message_groups (:values__names) values :values returning id',
values=Values(
uuid=m.uid,
company_id=company_id,
message_method=m.method.value,
from_email=m.from_address.email,
from_name=m.from_address.name,
),
)
return group_id, company_id, m
return run
|
tutorcruncher/morpheus
|
tests/conftest.py
|
Python
|
mit
| 7,653
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zango.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
BitWriters/Zenith_project
|
zango/src/manage.py
|
Python
|
mit
| 248
|