| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
v-samodelkin/TowerDefence | MapObjects/Enemy.py | Python | mit | 2,746 | 0
# -*- coding: utf8 -*-
import MapModel as Mm
from MapObjects.MovingObject import MovingObject
import Statistic
class Enemy(MovingObject):
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
def __init__(self, health, width, height):
super().__init__()
self.gold = 2
self.able_to_go = {Mm.Player, Mm.Ground, Mm.Arrow,
Mm.HeartStone, Mm.Trap, Mm.Wall, Mm.SpiralTower}
self.unpretty = 10000
self.damage = 1
self.health = health
self.field = [[-1 for _ in range(height)] for _ in range(width)]
self.lazy_collision_init = self.collision_init
def on_dead(self):
Statistic.total_killed_enemies += 1
Statistic.player_gold += self.gold
return self.get_from_below()
def get_info(self):
info = "Панда\n"
info += "Здоровье: {0
|
}\n".format(self.health)
info += "Урон: {0}\n".format(self.damage)
return info
def collision_init(self):
# noinspection PyUnusedLocal
@self.collide_registrar(Mm.Ground)
def ground_collide(obj, ground):
return None, obj
@self.collide_registrar(Mm.HeartStone)
def heartstone_collide(obj, heartstone):
heartstone.attack(obj.damage * (obj.health / heartstone.defence))
return None, heartstone
@self.collide_registrar(Mm.Arrow)
def arrow_collide(obj, arrow):
obj.health -= arrow.damage
if self.health > 0:
return None, obj
else:
self.on_dead()
return None, obj.get_from_below()
@self.collide_registrar(Mm.Player)
def player_collide(obj, player):
player.health -= obj.damage * (obj.health / player.damage)
obj.health -= player.damage * (obj.health / player.damage)
if player.health > 0:
obj.on_dead()
return None, player
else:
return None, obj
@self.collide_registrar(Mm.Trap)
def trap_collide(obj, structure):
structure.act_on_movable(obj)
if obj.health > 0:
obj.from_below = structure
return None, obj
else:
obj.on_dead()
return None, structure.check()
@self.collide_registrar(Mm.SpiralTower)
@self.collide_registrar(Mm.Wall)
def wall_collide(obj, wall):
damage = obj.damage * obj.health
if damage > wall.health:
obj.health -= wall.health / obj.damage
return None, obj
else:
wall.health -= damage
return None, wall.check()
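The `collide_registrar` decorator used throughout `collision_init` registers one handler per colliding class; `MovingObject` itself is not part of this sample, so the sketch below of how such a registrar could work is an assumption, not the repo's code:

```python
class MovingObject(object):
    def __init__(self):
        self._handlers = {}  # maps colliding class -> handler function

    def collide_registrar(self, klass):
        """Decorator factory: file the handler under the colliding class."""
        def register(handler):
            self._handlers[klass] = handler
            return handler
        return register

    def collide(self, other):
        # dispatch on the concrete type of the other map object
        handler = self._handlers.get(type(other))
        if handler is None:
            raise TypeError("no collision handler for %r" % type(other))
        return handler(self, other)
```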
smiley325/accounter | ref/edmunds.py | Python | epl-1.0 | 1,537 | 0.001952
import os, requests, time
import mydropbox
edmunds = mydropbox.get_keys('edmunds')
api_key = edmunds['api_key']
api_secret = edmunds['api_secret']
vin = mydropbox.read_dropbox_file(os.path.join('Records', 'Financials', 'Car', 'VIN')).strip()
r = requests.get("https://api.edmunds.com/api/vehicle/v2/vins/%s?&fmt=json&api_key=%s" % (vin, api_key))
car = r.json()
time.sleep(1)
# Pulled from above query
styleid = str(car['years'][0]['styles'][0]['id'])
optionids = []
for optcat in car['options']:
for opt in optcat['options']:
optionids.append(str(opt['id']))
colorids = []
for colorcat in car['colors']:
for opt in colorcat['options']:
colorids.append(str(opt['id']))
# User-supplied
condition = "Clean"
mileage = "6000"
zipcode = "60613"
r = requests.get(
"https://api.edmunds.com/v1/api/tmv/tmvservice/calculateusedtmv" +
"?styleid=%s" % styleid +
''.join(map(lambda optionid: "&optionid=%s" % optionid, optionids)) +
''.join(map(lambda colorid: "&colorid=%s" % colorid, colorids)) +
"&condition=%s" % condition +
"&mileage=%s" % mileage +
"&zip=%s" % zipcode +
"&fmt=json&api_key=%s" % api_key
)
data = r.json()
totalWithOptions = data['tmv']['totalWithOptions']
disp = [
('Used Trade-in', 'usedTradeIn'),
('Used Private Party', 'usedPrivateParty'),
('Used TMV Retail', 'usedTmvRetail')
]
total = 0.0
for label, key in disp:
total += totalWithOptions[key]
print("%s: %f" % (label, totalWithOptions[key]))
total /= 3
print("Average: %f" % total)
mcallistersean/b2-issue-tracker | toucan/user_profile/notifications/email.py | Python | mit | 561 | 0.005348
from django.template.loader import get_template
from . import BaseNotification
class EmailNotification(BaseNotification):
def get_message(self):
template = get_template('user_profile/notification/email/issue.txt')
return template.render({
'issue': self.issue,
'notification_setting': self.notification
})
def send_issue_notification(self):
self.notification.user.email_user(
'New Issue #%s created: %s' % (self.issue.pk, self.issue.title),
self.get_message()
)
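`BaseNotification` comes from the package's `__init__` and is not included in this sample; a minimal sketch of what the subclass appears to rely on (attribute names inferred from the usage above, everything else assumed):

```python
class BaseNotification(object):
    def __init__(self, notification, issue):
        self.notification = notification  # carries .user and the user's settings
        self.issue = issue                # the issue the message is about
```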
naegi/dotfiles | home/spotify_status.py | Python | unlicense | 3,414 | 0.003515
#!/usr/bin/env python3
import sys
import dbus
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-t',
'--trunclen',
type=int,
metavar='trunclen'
)
parser.add_argument(
'-f',
'--format',
type=str,
metavar='custom format',
dest='custom_format'
)
parser.add_argument(
'-p',
'--playpause',
type=str,
metavar='play-pause indicator',
dest='play_pause'
)
parser.add_argument(
'-d',
'--default',
type=str,
metavar='string to return when spotify is off',
dest='default')
parser.add_argument(
'--font',
type=str,
metavar='the index of the font to use for the main label',
dest='font'
)
parser.add_argument(
'--playpause-font',
type=str,
metavar='the index of the font to use to display the playpause indicator',
dest='play_pause_font'
)
args = parser.parse_args()
def fix_string(string):
# corrects encoding for the python version used
if sys.version_info.major == 3:
return string
else:
return string.encode('utf-8')
# Default parameters
default = ""
output = fix_string(u'{play_pause} {artist}: {song}')
trunclen = 25
play_pause = fix_string(u'\u25B6,\u23F8') # first character is play, second is paused
label_with_font = '%{{T{font}}}{label}%{{T-}}'
font = args.font
play_pause_font = args.play_pause_font
# parameters can be overwritten by args
if args.trunclen is not None:
trunclen = args.trunclen
if args.custom_format is not None:
output = args.custom_format
if args.play_pause is not None:
play_pause = args.play_pause
if args.default is not None:
default = args.default
try:
session_bus = dbus.SessionBus()
spotify_bus = session_bus.get_object(
'org.mpris.MediaPlayer2.spotify',
'/org/mpris/MediaPlayer2'
)
spotify_properties = dbus.Interface(
spotify_bus,
'org.freedesktop.DBus.Properties'
)
metadata = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'Metadata')
status = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'PlaybackStatus')
# Handle play/pause label
play_pause = play_pause.split(',')
if status == 'Playing':
play_pause = play_pause[0]
elif status == 'Paused':
play_pause = play_pause[1]
else:
play_pause = str()
if play_pause_font:
play_pause = label_with_font.format(font=play_pause_font, label=play_pause)
# Handle main label
artist = fix_string(metadata['xesam:artist'][0]) if metadata['xesam:artist'] else ''
song = fix_string(metadata['xesam:title']) if metadata['xesam:title'] else ''
album = fix_string(metadata['xesam:album']) if metadata['xesam:album'] else ''
if not artist and not song and not album:
print('')
else:
if len(song) > trunclen:
song = song[0:trunclen]
song += '...'
if ('(' in song) and (')' not in song):
song += ')'
if font:
artist = label_with_font.format(font=font, label=artist)
song = label_with_font.format(font=font, label=song)
album = label_with_font.format(font=font, label=album)
print(output.format(artist=artist, song=song, play_pause=play_pause, album=album))
except Exception as e:
if isinstance(e, dbus.exceptions.DBusException):
print(default)
else:
print(e)
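The `label_with_font` template wraps text in polybar's `%{T<index>}...%{T-}` font tags; the doubled braces escape the literal `%{` for `str.format`. For example:

```python
label_with_font = '%{{T{font}}}{label}%{{T-}}'
print(label_with_font.format(font=2, label='Artist: Song'))
# -> %{T2}Artist: Song%{T-}   (switch to font 2, then back to the default)
```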
tuturto/pyherc | src/pyherc/test/unit/test_itemadder.py | Python | mit | 5,532 | 0.009219
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Tests for ItemAdder
"""
from random import Random
from hamcrest import (assert_that,
greater_than, greater_than_or_equal_to, has_length,
less_than)
from pyherc.data import Model, get_items, add_location_tag
from pyherc.data.effects import EffectHandle
from pyherc.generators.item import (ItemConfiguration, ItemConfigurations,
ItemGenerator, WeaponConfiguration)
#from pyherc.generators.level import ItemAdder, item_by_name, item_by_type
from pyherc.test.matchers import does_have_item, located_in_room
from pyherc.test.builders import LevelBuilder
#TODO: enable later
class ItemAdder():
"""
Tests for ItemAdder
"""
def __init__(self):
"""
Default constructor
"""
self.rng = None
self.level = None
self.item_generator = None
self.configuration = None
self.item_adder = None
self.floor_rock = None
self.wall_empty = None
def setup(self):
"""
Setup the test case
"""
self.floor_rock = 1
self.wall_empty = None
self.rng = Random()
self.level = (LevelBuilder()
.with_size((60, 40))
.with_floor_tile(self.floor_rock)
.with_wall_tile(self.wall_empty)
.build())
add_location_tag(self.level, (10, 10), 'room')
for x_loc in range(11, 30):
add_location_tag(self.level, (x_loc, 10), 'corridor')
item_config = ItemConfigurations(Random())
item_config.add_item(
ItemConfiguration(name = 'dagger',
cost = 2,
weight = 1,
icons = [500],
types = ['weapon',
'light weapon',
'melee',
'simple weapon'],
rarity = 'common',
weapon_configration = WeaponConfiguration(
damage = [(2, 'piercing'),
(2, 'slashing')],
critical_range = 11,
critical_damage = 2,
weapon_class = 'simple')))
item_config.add_item(
ItemConfiguration(name = 'red potion',
cost = 150,
weight = 1,
icons = [100],
types = ['potion'],
rarity = 'rare',
effect_handles = [EffectHandle(
trigger = 'on drink',
effect = 'cure medium wounds',
parameters = None,
charges = 1)]))
self.item_generator = ItemGenerator(item_config)
self.configuration = [item_by_name(3, 4, 'dagger'),
item_by_type(1, 1, 'potion')]
self.item_adder = ItemAdder(self.item_generator,
self.configuration,
self.rng)
self.item_adder.add_items(self.level)
def test_adding_items(self):
"""
Test basic case of adding items on the level
"""
assert_that(list(get_items(self.level)), has_length(greater_than(3)))
assert_that(list(get_items(self.level)), has_length(less_than(6)))
assert_that(self.level, does_have_item('dagger',
greater_than_or_equal_to(3)))
assert_that(self.level, does_have_item('red potion', 1))
def test_adding_to_location(self):
"""
Test that ItemAdder will use location types passed to it
"""
potion = [x for x in get_items(self.level)
if x.name == 'red potion'][0]
location = potion.location
assert_that(located_in_room(potion))
scommab/can-opener | cans/gmail.py | Python | apache-2.0 | 1,364 | 0.026393
from threading import Thread
import sys
import imaplib
import time
class Can(Thread):
def __init__(self, id, config, opener, key):
Thread.__init__(self)
self.key = key
self.config = config
self.id = id
self.opener = opener
self.running = True
pass
def run(self):
try:
user = self.config.get('email', 'username')
passwd = self.config.get('email', 'password')
except:
# failed lookup: abort running
return
while self.running:
m = imaplib.IMAP4_SSL('imap.gmail.com')
m.login(user, passwd)
count = m.select('Inbox')[1][0]
r, messages = m.search(None, '(UNSEEN)')
#print messages
for uid in messages[0].split(" "):
r, data = m.fetch(uid, '(ENVELOPE)')
data = data[0]
subject = data.split('"')[3]
if str(self.key) in subject:
r, body = m.fetch(uid, '(BODY[TEXT])')
body = body[0][1].strip()
#print subject
#print body
self.opener(self.id, body)
m.logout()
time.sleep(15)
def stop(self):
self.running = False
def setKey(self, key):
self.key = key
if __name__ == "__main__":
cans = {}
def opener(id, ans):
if id not in cans:
return
if ans == "quit":
cans[id].stop()
c = Can(1, None, opener, "non-real")  # config=None: the credential lookup fails and run() returns early
cans[c.id] = c
c.start()
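Extracting the subject with `data.split('"')[3]` depends on the exact quoting of the raw `ENVELOPE` response and breaks easily. A more robust sketch using only standard-library calls, fetching just the Subject header instead (a suggested alternative, not this repo's code):

```python
import email

def fetch_subject(m, uid):
    # ask the server for only the Subject header, then parse it properly
    r, data = m.fetch(uid, '(BODY[HEADER.FIELDS (SUBJECT)])')
    msg = email.message_from_string(data[0][1])
    return msg.get('Subject', '')
```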
ESOedX/edx-platform | common/test/acceptance/pages/lms/peer_grade.py | Python | agpl-3.0 | 1,101 | 0.000908
"""
Students grade peer submissions.
"""
from __future__ import absolute_import
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise
class PeerGradePage(PageObject):
"""
Students grade peer submissions.
"""
url = None
def is_browser_on_page(self):
def _is_correct_page():
is_present = (
self.q(css='div.peer-grading-tools').present or
self.q(css='div.grading-panel.current-state').present
)
return is_present, is_present
return Promise(_is_correct_page, 'On the peer grading page.').fulfill()
@property
def problem_list(self):
"""
Return the list of available problems to peer grade.
"""
return self.q(css='a.problem-button').text
def select_problem(self, problem_name):
"""
Choose the problem with `problem_name` to start grading or calibrating.
"""
index = self.problem_list.index(problem_name) + 1
self.q(css='a.problem-button:nth-of-type({})'.format(index)).first.click()
seokjunbing/cs75 | src/data_processing/read_data.py | Python | gpl-3.0 | 22,577 | 0.00186
import os, re, sys
import read_dicts
from collections import Counter
import pandas as pd
import numpy as np
import operator
import random
sys.path.append('.')
ENABLE_WRITE = 1
INDEX_NAMES_FILES = '../../data/aaindex/list_of_indices.txt'
def getscores(d, aalist, seq):
score_list = list()
char_freq = dict()
for c in seq:
if c in char_freq:
char_freq[c] += 1
else:
char_freq[c] = 1
for aa in aalist:
score = 0
for k in d[aa].iterkeys():
try:
freq = char_freq[k]
except KeyError:
freq = 0
score += d[aa][k] * freq
score_list.append(str(score))
return '|'.join(score_list)
def get_specific_label(line):
location_search = re.search(r"(.+(\[)(?P<location1>.+?)(\])( |)$)", line)
location = location_search.group('location1').rstrip().split(',')[0]
if location == 'Plasma membrane':
location = 'Membrane'
return location
def get_general_label(line):
location_search = re.search(r"(.+(\[)(?P<location1>.+?)(\])( |)$)", line)
try:
location = location_search.group('location1')
# funny looking because animal and plants formatting differs
general_location = location.split('(')[0].rstrip().split(',')[0]
except AttributeError:
print('line: ' + line)
# print('location: ' + location)
assert False
if general_location == 'Plasma membrane':
general_location = 'Membrane'
return general_location
def get_general_label_test(file_in):
d = dict()
with open(file_in, 'r') as ifile:
for line in ifile:
if line[0] == '>':
# print(line)
location = get_general_label(line)
if location in d.keys():
d[location] += 1
else:
d[location] = 1
for k, v in d.items():
print('k: %-30s v: %d' % (k, v))
def get_species(line):
location_search = re.search(r"\(sp: (?P<location1>.+?)\)", line)
location = location_search.group('location1').rstrip()
return location
def get_dict_loc_to_score(file_in, group_similar_labels=True):
score_d, corr_d = read_dicts.construct_dicts("../../data/aaindex/aaindex1.txt")
aalist = read_dicts.get_aaindex_list("../../data/aaindex/aaindex_used.txt")
d = dict()
count = 0
entry_count = 0
uniques = set()
with open(file_in, 'r') as ifile:
for i, l in enumerate(ifile):
count = i + 1
print('raw data lines: %d' % count)
with open(file_in, 'r') as ifile:
for i in range(count):
# print "%d of %d lines" % (i+1, count)
l = ifile.readline()
# if i == 1000:
# break
if l[0] == '>':
if group_similar_labels:
location = get_general_label(l)
else:
location = get_specific_label(l)
sp = get_species(l)
else:
seq = ''
seq += l.rstrip()
while True:
x = ifile.tell()
l = ifile.readline()
if l == '': # EOF
# do something
# print seq
if (location != 'NULL') and (location != '\N') and (seq not in uniques):
# try:
# d_sp[sp] += 1
# except KeyError:
# d_sp[sp] = 1
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
try:
d[location].append(scores)
except KeyError:
d[location] = [scores]
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
return d
elif l[0] == '>':
ifile.seek(x)
break
else:
seq += l.rstrip()
# if seq in uniques:
# duplicate_count += 1
# print 'found dup:' + location + ' ' + seq
# print duplicate_count
if (location != 'NULL') and ('\N' not in location) and (seq not in uniques):
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
try:
d[location].append(scores)
except KeyError:
d[location] = [scores]
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
def write_label_score_file(file_in, file_out, write_file=0, outsize='all', group_similar_labels=True,
species='all'):
print('building and writing %s' % file_out)
count = 0
entry_count = 0
duplicate_count = 0
uniques = set()
d_sp = dict()
score_d, corr_d = read_dicts.construct_dicts("../../data/aaindex/aaindex1.txt")
aalist = read_dicts.get_aaindex_list("../../data/aaindex/aaindex_used.txt")
with open(file_in, 'r') as ifile:
for i, l in enumerate(ifile):
count = i + 1
print('raw data lines: %d' % count)
with open(file_in, 'r') as ifile:
with open(file_out, 'a') as ofile:
for i in range(count):
# print "%d of %d lines" % (i+1, count)
l = ifile.readline()
# if i == 1000:
# break
if l[0] == '>':
if group_similar_labels:
location = get_general_label(l)
else:
location = get_specific_label(l)
sp = get_species(l)
else:
seq = ''
seq += l.rstrip()
while True:
x = ifile.tell()
l = ifile.readline()
if l == '': # EOF
# do something
# print seq
if (location != 'NULL') and (location != '\N') and (seq not in uniques) and (
write_file != 0):
if species == 'all' or species == sp:
try:
d_sp[sp] += 1
except KeyError:
d_sp[sp] = 1
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
ofile.write('%s|%s\n' % (location, scores))
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
return
elif l[0] == '>':
ifile.seek(x)
break
else:
seq += l.rstrip()
# if seq in uniques:
# duplicate_count += 1
# print 'found dup:' + location + ' ' + seq
# print duplicate_count
if (location != 'NULL') and ('\N' not in location) and (seq not in uniques) and (write_file != 0):
if species == 'all' or species == sp:
try:
d_sp[sp] += 1
except KeyError:
d_sp[sp] = 1
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
ofile.write('%s|%s\n' % (location, scores))
entry_count += 1
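Both functions above use the same `tell()`/`seek()` idiom: accumulate sequence lines until the next `>` header, then rewind the file so the outer loop sees that header again. A stripped-down sketch of the idiom (assuming a FASTA-like format, as the `>` headers suggest):

```python
def read_records(path):
    """Yield (header, sequence) pairs from a FASTA-like file."""
    with open(path, 'r') as f:
        line = f.readline()
        while line:
            header = line.rstrip()
            seq = ''
            while True:
                pos = f.tell()
                line = f.readline()
                if line == '' or line[0] == '>':
                    f.seek(pos)  # put the header (or EOF) back
                    break
                seq += line.rstrip()
            yield header, seq
            line = f.readline()
```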
google-research/federated | utils/keras_metrics.py | Python | apache-2.0 | 2,516 | 0.004769
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries of Keras metrics."""
import tensorflow as tf
def _apply_mask(y_true, sample_weight, masked_tokens, dtype):
if sample_weight is None:
sample_weight = tf.ones_like(y_true, dtype)
else:
sample_weight = tf.cast(sample_weight, dtype)
for token in masked_tokens:
mask = tf.cast(tf.not_equal(y_true, token), dtype)
sample_weight = sample_weight * mask
return sample_weight
class NumTokensCounter(tf.keras.metrics.Sum):
"""A `tf.keras.metrics.Metric` that counts tokens seen after masking."""
def __init__(self, masked_tokens=None, name='num_tokens', dtype=tf.int64):
self._masked_tokens = masked_tokens or []
super().__init__(name, dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
sample_weight = _apply_mask(y_true, sample_weight, self._masked_tokens,
self._dtype)
sample_weight = tf.reshape(sample_weight, [-1])
super().update_state(sample_weight)
def get_config(self):
config = super().get_config()
config['masked_tokens'] = tuple(self._masked_tokens)
return config
class MaskedCategoricalAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
"""An accuracy metric that masks some tokens."""
def __init__(self, masked_tokens=None, name='accuracy', dtype=None):
self._masked_tokens = masked_tokens or []
super().__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
sample_weight = _apply_mask(y_true, sample_weight, self._masked_tokens,
self._dtype)
num_classes = tf.shape(y_pred)[-1]
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1, num_classes])
sample_weight = tf.reshape(sample_weight, [-1])
super().update_state(y_true, y_pred, sample_weight)
def get_config(self):
config = super().get_config()
config['masked_tokens'] = tuple(self._masked_tokens)
return config
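A quick usage sketch for `MaskedCategoricalAccuracy` (treating token id 0 as padding is an assumption made for the example):

```python
metric = MaskedCategoricalAccuracy(masked_tokens=[0])
y_true = tf.constant([[1, 2, 0]])        # trailing 0 is padding
y_pred = tf.random.uniform((1, 3, 5))    # scores over 5 classes per token
metric.update_state(y_true, y_pred)
print(float(metric.result()))            # accuracy over the 2 unmasked tokens
```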
dsaldana/roomba_sensor_network | roomba_sensor/src/roomba_sensor/util/geo.py | Python | gpl-3.0 | 268 | 0.003731
from math import sqrt
def euclidean_distance(p1, p2):
"""
Compu
|
te euclidean distance for two points
:param p1:
:param p2:
:return:
"""
dx, dy = p2[0] - p1[0], p2[1] - p1[1]
# Magnitude. Coulomb law.
return sqrt(dx ** 2 + dy ** 2)
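A quick check of the formula:

```python
print(euclidean_distance((0, 0), (3, 4)))  # 5.0
```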
aleju/ImageAugmenter | test/augmentables/test_bbs.py | Python | mit | 85,736 | 0.000105
from __future__ import print_function, division, absolute_import
import warnings
import sys
# unittest.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import imgaug as ia
import imgaug.random as iarandom
from imgaug.augmentables.bbs import _LabelOnImageDrawer
from imgaug.testutils import wrap_shift_deprecation, assertWarns
class TestBoundingBox_project_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
return cba.project_(*args, **kwargs)
def test_project_same_shape(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (10, 10))
assert np.isclose(bb2.y1, 10)
assert np.isclose(bb2.x1, 20)
assert np.isclose(bb2.y2, 30)
assert np.isclose(bb2.x2, 40)
def test_project_upscale_by_2(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (20, 20))
assert np.isclose(bb2.y1, 10*2)
assert np.isclose(bb2.x1, 20*2)
assert np.isclose(bb2.y2, 30*2)
assert np.isclose(bb2.x2, 40*2)
def test_project_downscale_by_2(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (5, 5))
assert np.isclose(bb2.y1, 10*0.5)
assert np.isclose(bb2.x1, 20*0.5)
assert np.isclose(bb2.y2, 30*0.5)
assert np.isclose(bb2.x2, 40*0.5)
def test_project_onto_wider_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (10, 20))
assert np.isclose(bb2.y1, 10*1)
assert np.isclose(bb2.x1, 20*2)
assert np.isclose(bb2.y2, 30*1)
assert np.isclose(bb2.x2, 40*2)
def test_project_onto_higher_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (20, 10))
assert np.isclose(bb2.y1, 10*2)
assert np.isclose(bb2.x1, 20*1)
assert np.isclose(bb2.y2, 30*2)
assert np.isclose(bb2.x2, 40*1)
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (10, 10))
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_project(TestBoundingBox_project_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
return cba.project(*args, **kwargs)
class TestBoundingBox_extend_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
return cba.extend_(*args, **kwargs)
def test_extend_all_sides_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
def test_extend_all_sides_by_minus_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
def test_extend_top_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
def test_extend_right_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
def test_extend_bottom_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
def test_extend_left_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, all_sides=1)
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_extend(TestBoundingBox_extend_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
return cba.extend(*args, **kwargs)
class TestBoundingBox_clip_out_of_image_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
return cba.clip_out_of_image_(*args, **kwargs)
def test_clip_out_of_image_with_bb_fully_inside_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_cut = self._func(bb, (100, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
def test_clip_out_of_image_with_array_as_shape(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
image = np.zeros((100, 100, 3), dtype=np.uint8)
bb_cut = bb.clip_out_of_image(image)
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
def test_clip_out_of_image_with_bb_too_high(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_cut = self._func(bb, (20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert np.isclose(bb_cut.y2, 20)
assert bb_cut.x2 == 40
def test_clip_out_of_image_with_bb_too_wide(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_cut = self._func(bb, (100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert np.isclose(bb_cut.x2, 30)
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (100, 100, 3))
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_clip_out_of_image(TestBoundingBox_clip_out_of_image_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
return cba.clip_out_of_image(*args, **kwargs)
class TestBoundingBox_shift_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
def _func_impl():
return cba.shift_(*args, **kwargs)
return wrap_shift_deprecation(_func_impl, *args, **kwargs)
def test_shift_by_x(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, x=1)
assert bb_top.y1 == 10
assert bb_top.x1 == 20 + 1
assert bb_top.y2 == 30
assert bb_top.x2 == 40 + 1
def test_shift_by_y(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, y=1)
assert bb_top.y1 == 10 + 1
assert bb_top.x1 == 20
assert bb_top.y2 == 30 + 1
assert bb_top.x2 == 40
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, y=0)
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_shift(TestBoundingBox_shift_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
def _func_impl():
return cba.shift(*args, **kwargs)
return wrap_shift_deprecation(_func_impl, *args, **kwargs)
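Each pair of classes here follows the same mixin trick: the trailing-underscore class tests the in-place method, and the subclass overrides only `_is_inplace` and `_func` so every inherited test re-runs against the copying variant. A minimal sketch of the pattern (illustrative names, not imgaug's):

```python
import unittest

class Box(object):
    def __init__(self, x):
        self.x = x
    def shift_(self, dx):            # in-place variant
        self.x += dx
        return self
    def shift(self, dx):             # copying variant
        return Box(self.x + dx)

class TestShift_(unittest.TestCase):
    _is_inplace = True
    def _func(self, box, dx):
        return box.shift_(dx)
    def test_inplaceness(self):
        box = Box(1)
        out = self._func(box, 2)
        assert (out is box) == self._is_inplace

class TestShift(TestShift_):         # re-runs the inherited tests
    _is_inplace = False
    def _func(self, box, dx):
        return box.shift(dx)
```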
vinodchitrali/pbspro | test/fw/ptl/lib/pbs_testlib.py | Python | agpl-3.0 | 502,834 | 0.00042
# coding: utf-8
# Copyright (C) 1994-2016 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# This file is part of the PBS Professional ("PBS Pro") software.
#
# Open Source License Information:
#
# PBS Pro is free software. You can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# PBS Pro is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Commercial License Information:
#
# The PBS Pro software is licensed under the terms of the GNU Affero General
# Public License agreement ("AGPL"), except where a separate commercial license
# agreement for PBS Pro version 14 or later has been executed in writing with
# Altair.
#
# Altair’s dual-license business model allows companies, individuals, and
# organizations to create proprietary derivative works of PBS Pro and
# distribute them - whether embedded or bundled with other software - under
# a commercial license agreement.
#
# Use of Altair’s trademarks, including but not limited to "PBS™",
# "PBS Professional®", and "PBS Pro™" and Altair’s logos is subject to Altair's
# trademark licensing policies.
import sys
import os
import socket
import pwd
import grp
import logging
import time
import re
import random
import string
import tempfile
import cPickle
import copy
import datetime
import traceback
import threading
from operator import itemgetter
from collections import OrderedDict
from distutils.version import LooseVersion
try:
import psycopg2
PSYCOPG = True
except:
PSYCOPG = False
try:
from ptl.lib.pbs_ifl import *
API_OK = True
except:
try:
from ptl.lib.pbs_ifl_mock import *
except:
sys.stderr.write("failed to import pbs_ifl, run pbs_swigify " +
"to make it\n")
raise ImportError
API_OK = False
from ptl.lib.pbs_api_to_cli import api_to_cli
from ptl.utils.pbs_dshutils import DshUtils
from ptl.utils.pbs_procutils import ProcUtils
from ptl.utils.pbs_cliutils import CliUtils
from ptl.utils.pbs_fileutils import FileUtils, FILE_TAIL
# suppress logging exceptions
logging.raiseExceptions = False
# Various mappings and aliases
MGR_OBJ_VNODE = MGR_OBJ_NODE
VNODE = MGR_OBJ_VNODE
NODE = MGR_OBJ_NODE
HOST = MGR_OBJ_HOST
JOB = MGR_OBJ_JOB
RESV = MGR_OBJ_RESV
SERVER = MGR_OBJ_SERVER
QUEUE = MGR_OBJ_QUEUE
SCHED = MGR_OBJ_SCHED
HOOK = MGR_OBJ_HOOK
RSC = MGR_OBJ_RSC
PBS_HOOK = MGR_OBJ_PBS_HOOK
# the order of these symbols matters, see pbs_ifl.h
(SET, UNSET, INCR, DECR, EQ, NE, GE, GT,
LE, LT, MATCH, MATCH_RE, NOT, DFLT) = range(14)
(PTL_OR, PTL_AND) = [0, 1]
(IFL_SUBMIT, IFL_SELECT, IFL_TERMINATE, IFL_ALTER,
IFL_MSG, IFL_DELETE) = [0, 1, 2, 3, 4, 5]
(PTL_API, PTL_CLI) = ['api', 'cli']
(PTL_COUNTER, PTL_FILTER) = [0, 1]
PTL_STR_TO_OP = {
'<': LT,
'<=': LE,
'=': EQ,
'>=': GE,
'>': GT,
'!=': NE,
' set ': SET,
' unset ': UNSET,
' match ': MATCH,
'~': MATCH_RE,
'!': NOT
}
PTL_OP_TO_STR = {
LT: '<',
LE: '<=',
EQ: '=',
GE: '>=',
GT: '>',
SET: ' set ',
NE: '!=',
UNSET: ' unset ',
MATCH: ' match ',
MATCH_RE: '~',
NOT: 'is not'
}
PTL_ATTROP_TO_STR = {PTL_AND: '&&', PTL_OR: '||'}
(RESOURCES_AVAILABLE, RESOURCES_TOTAL) = [0, 1]
EXPECT_MAP = {
UNSET: 'Unset',
SET: 'Set',
EQ: 'Equal',
NE: 'Not Equal',
LT: 'Less Than',
GT: 'Greater Than',
LE: 'Less Equal Than',
GE: 'Greater Equal Than',
MATCH_RE: 'Matches regexp',
MATCH: 'Matches',
NOT: 'Not'
}
PBS_CMD_MAP = {
MGR_CMD_CREATE: 'create',
MGR_CMD_SET: 'set',
MGR_CMD_DELETE: 'delete',
MGR_CMD_UNSET: 'unset',
MGR_CMD_IMPORT: 'import',
MGR_CMD_EXPORT: 'export',
MGR_CMD_LIST: 'list',
}
PBS_CMD_TO_OP = {
MGR_CMD_SET: SET,
MGR_CMD_UNSET: UNSET,
MGR_CMD_DELETE: UNSET,
MGR_CMD_CREATE: SET,
}
PBS_OBJ_MAP = {
MGR_OBJ_NONE: 'none',
SERVER: 'server',
QUEUE: 'queue',
JOB: 'job',
NODE: 'node',
RESV: 'reservation',
RSC: 'resource',
SCHED: 'sched',
HOST: 'host',
HOOK: 'hook',
VNODE: 'node',
PBS_HOOK: 'pbshook'
}
PTL_TRUE = ('1', 'true', 't', 'yes', 'y', 'enable', 'enabled', 'True', True)
PTL_FALSE = ('0', 'false', 'f', 'no', 'n', 'disable', 'disabled', 'False',
False)
PTL_NONE = ('None', None)
PTL_FORMULA = '__formula__'
PTL_NOARG = '__noarg__'
PTL_ALL = '__ALL__'
CMD_ERROR_MAP = {
'alterjob': 'PbsAlterError',
'holdjob': 'PbsHoldError',
'sigjob': 'PbsSignalError',
'msgjob': 'PbsMessageError',
'rlsjob': 'PbsReleaseError',
'rerunjob': 'PbsRerunError',
'orderjob': 'PbsOrderError',
'runjob': 'PbsRunError',
'movejob': 'PbsMoveError',
'delete': 'PbsDeleteError',
'deljob': 'PbsDeljobError',
'delresv': 'PbsDelresvError',
'status': 'PbsStatusError',
'manager': 'PbsManagerError',
'submit': 'PbsSubmitError',
'terminate': 'PbsQtermError'
}
class PtlConfig(object):
"""
Holds configuration options
The options can be stored in a file as well as in the OS environment
variables. When set, the environment variables will override
definitions in the file. By default, on Unix-like systems, the file
read is ``/etc/ptl.conf``; the environment variable ``PTL_CONF_FILE``
can be used to set the path to the file to read.
The format of the file is a series of ``<key> = <value>`` properties.
A line that starts with a '#' is ignored and can be used for comments
:param conf: Path to PTL configuration file
:type conf: str or None
"""
logger = logging.getLogger(__name__)
def __init__(self, conf=None):
self.options = {
'PTL_SUDO_CMD': 'sudo -H',
'PTL_RSH_CMD': 'ssh',
'PTL_CP_CMD': 'scp -p',
'PTL_EXPECT_MAX_ATTEMPTS': 60,
'PTL_EXPECT_INTERVAL': 0.5,
'PTL_UPDATE_ATTRIBUTES': True,
}
self.handlers = {
'PTL_SUDO_CMD': DshUtils.set_sudo_cmd,
'PTL_RSH_CMD': DshUtils.set_rsh_cmd,
'PTL_CP_CMD': DshUtils.set_copy_cmd,
'PTL_EXPECT_MAX_ATTEMPTS': Server.set_expect_max_attempts,
'PTL_EXPECT_INTERVAL': Server.set_expect_interval,
'PTL_UPDATE_ATTRIBUTES': Server.set_update_attributes
}
if conf is None:
conf = os.environ.get('PTL_CONF_FILE', '/etc/ptl.conf')
try:
lines = open(conf).readlines()
except IOError:
lines = []
for line in lines:
line = line.strip()
if (line.startswith('#') or (line == '')):
continue
try:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
self.options[k] = v
except:
self.logger.error('Error parsing line ' + line)
for k, v in self.options.items():
if k in os.environ:
v = os.environ[k]
else:
os.environ[k] = str(v)
if k in self.handlers:
self.handlers[k](v)
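Per the docstring above, the file is a series of `<key> = <value>` properties and `#` comment lines, with environment variables taking precedence. A hypothetical `/etc/ptl.conf`:

```
# /etc/ptl.conf -- hypothetical example
PTL_SUDO_CMD = sudo -H
PTL_RSH_CMD = ssh
PTL_EXPECT_MAX_ATTEMPTS = 60
PTL_EXPECT_INTERVAL = 0.5
```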
class PtlException(Exception):
"""
Generic errors raised by PTL operations.
Sets a ``return value``, a ``return code``, and a ``message``
A post function and associated positional and named arguments
are available to perform any necessary cleanup.
:param rv: Return value set for the error that occurred during the
PTL operation
:type rv: int or None.
:param rc: Return code set for the error that occurred during the
PTL operation
:type rc: int or None.
:param msg: Message set for the error that occurred during the PTL
operation
:type msg: str or None.
mrquim/mrquimrepo | script.module.nanscrapers/lib/nanscrapers/scraperplugins/yesmovies.py | Python | gpl-2.0 | 5,703 | 0.01543
import re
import requests
import threading
from ..common import clean_title,clean_search
import xbmc
from ..scraper import Scraper
sources = []
class scrape_thread(threading.Thread):
def __init__(self,m,match,qual):
self.m = m
self.match = match
self.qual = qual
threading.Thread.__init__(self)
def run(self):
try:
qual = self.qual
url = 'https://yesmovies.to/ajax/movie_token?eid='+self.m+'&mid='+self.match
html3 = requests.get(url).content
x,y = re.findall("_x='(.+?)', _y='(.+?)'",html3)[0]
fin_url = 'https://yesmovies.to/ajax/movie_sources/'+self.m+'?x='+x+'&y='+y
h = requests.get(fin_url).content
playlink = re.findall('"file":"(.+?)"(.+?)}',h)
for p,rest in playlink:
try:
qual = re.findall('"label":"(.+?)"',str(rest))[0]
except:
qual = self.qual
p = p.replace('\\','')
if 'srt' in p:
pass
elif 'spanish' in qual:
pass
elif 'googleapis' in p:
pass
else:
if 'english' in qual:
qual = '720p'
if 'lemon' in p:
p = p+'|User-Agent=Mozilla/5.0 (Windows NT 6.3; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0&Host=streaming.lemonstream.me:1443&Referer=https://yesmovies.to'
if 'http' in p:
sources.append({'source': 'Gvideo', 'quality': qual, 'scraper': 'yesmovies', 'url': p,'direct': True})
except Exception as e:
xbmc.log('get sources: '+str(e),xbmc.LOGNOTICE)
class Yesmovies(Scraper):
domains = ['yesmovies.to']
name = "yesmovies"
def __init__(self):
self.base_link = 'https://yesmovies.to'
self.search_link = '/search/'
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_url = self.base_link+self.search_link+title.replace(' ','+')+'.html'
html = requests.get(start_url).content
match = re.compile('<div class="ml-item">.+?<a href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
for url,name in match:
if clean_title(title)+'season'+season == clean_title(name):
html2 = requests.get(url).content
match2 = re.findall('favorite\((.+?),',html2)[0]
get_ep = requests.get('https://yesmovies.to/ajax/v4_movie_episodes/'+match2).content
block = re.compile('data-id="(.+?)".+?title="(.+?)">').findall(get_ep.replace('\\',''))
for ID,name in block:
if 'Episode' in name:
ep = re.findall('Episode (.+?):',str(name))[0]
if len(episode) == 1:
episode = '0'+episode
if episode == ep:
thread = scrape_thread(ID,match2,'SD')
thread.start()
try:
thread.join()
except:
pass
return sources
except Exception as e:
xbmc.log(str(e),xbmc.LOGNOTICE)
return []
def scrape_movie(self, title, year, imdb, debrid = False):
try:
try:
start_url = self.base_link+self.search_link+title.replace(' ','+')+'.html'
title = title
m_list = self.check_for_movie(title,start_url)
except:
start_url2 = self.base_link+self.search_link+title.replace(' ','+')+'+'+year+'.html'
title = title+year
m_list = self.check_for_movie(title,start_url2)
for item in m_list:
m = item[0];match=item[1];qual=item[2]
thread = scrape_thread(m,match,qual)
thread.start()
try:
thread.join()
except:
pass
return sources
except Exception as e:
xbmc.log('scrape movie: '+str(e),xbmc.LOGNOTICE)
return[]
def check_for_movie(self,title,start_url):
try:
m_list = []
html = requests.get(start_url).content
match = re.compile('<div class="ml-item">.+?<a href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
for url,name in match:
if clean_search(title.replace(' ','')) == clean_search(name).replace(' ',''):
html = requests.get(url).content
match = re.findall('favorite\((.+?),',html)[0]
second_url = 'https://yesmovies.to/ajax/v4_movie_episodes/'+match
html2 = requests.get(second_url).content
match2 = re.compile('<li class=.+?data-id=.+?"(.+?)".+?title=.+?"(.+?)"').findall(html2)
for m,qual in match2:
m = m.replace('\\','')
qual = qual.replace('\\','').replace('HD-','')
if len(m)==6 or len(m)==7:
m_list.append((m,match,qual))
return m_list
except Exception as e:
xbmc.log('check for movie '+str(e),xbmc.LOGNOTICE)
#Yesmovies().scrape_movie('baywatch','2017','')
#Yesmovies().scrape_episode('game of thrones', '', '', '7', '7', '', '')
wujuguang/sqlalchemy | test/orm/test_query.py | Python | mit | 183,302 | 0.000005
import contextlib
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import between
from sqlalchemy import bindparam
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import collate
from sqlalchemy import column
from sqlalchemy import desc
from sqlalchemy import distinct
from sqlalchemy import exc as sa_exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import insert
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import null
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import Unicode
from sqlalchemy import union
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.orm import aliased
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import Bundle
from sqlalchemy.orm import column_property
from sqlalchemy.orm import create_session
from sqlalchemy.orm import defer
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import synonym
from sqlalchemy.orm.util import join
from sqlalchemy.orm.util import with_parent
from sqlalchemy.sql import expression
from sqlalchemy.sql import operators
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import assert_raises
from sqlalchemy.testing.assertions import assert_raises_message
from sqlalchemy.testing.assertions import eq_
from sqlalchemy.testing.assertions import eq_ignore_whitespace
from sqlalchemy.testing.assertions import expect_warnings
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from test.orm import _fixtures
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
class MiscTest(QueryTest):
run_create_tables = None
run_inserts = None
def test_with_session(self):
User = self.classes.User
s1 = Session()
s2 = Session()
q1 = s1.query(User)
q2 = q1.with_session(s2)
assert q2.session is s2
assert q1.session is s1
class OnlyReturnTuplesTest(QueryTest):
def test_single_entity_false(self):
User = self.classes.User
row = create_session().query(User).only_return_tuples(False).first()
assert isinstance(row, User)
def test_single_entity_true(self):
User = self.classes.User
row = create_session().query(User).only_return_tuples(True).first()
assert isinstance(row, tuple)
def test_multiple_entity_false(self):
User = self.classes.User
row = (
create_session()
.query(User.id, User)
.only_return_tuples(False)
.first()
)
assert isinstance(row, tuple)
def test_multiple_entity_true(self):
User = self.classes.User
row = (
create_session()
.query(User.id, User)
.only_return_tuples(True)
.first()
)
assert isinstance(row, tuple)
class RowTupleTest(QueryTest):
run_setup_mappers = None
def test_custom_names(self):
User, users = self.classes.User, self.tables.users
mapper(User, users, properties={"uname": users.c.name})
row = (
create_session()
.query(User.id, User.uname)
.filter(User.id == 7)
.first()
)
assert row.id == 7
assert row.uname == "jack"
def test_column_metadata(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses)
sess = create_session()
user_alias = aliased(User)
user_alias_id_label = user_alias.id.label("foo")
address_alias = aliased(Address, name="aalias")
fn = func.count(User.id)
name_label = User.name.label("uname")
bundle = Bundle("b1", User.id, User.name)
cte = sess.query(User.id).cte()
for q, asserted in [
(
sess.query(User),
[
{
"name": "User",
"type": User,
"aliased": False,
"expr": User,
"entity": User,
}
],
),
(
sess.query(User.id, User),
[
{
"name": "id",
"type": users.c.id.type,
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": "User",
"type": User,
"aliased": False,
"expr": User,
"entity": User,
},
],
),
(
sess.query(User.id, user_alias),
[
{
"name": "id",
"type": users.c.id.type,
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": None,
"type": User,
"aliased": True,
"expr": user_alias,
"entity": user_alias,
},
],
),
(
sess.query(user_alias.id),
[
{
"name": "id",
"type": users.c.id.type,
"aliased": True,
"expr": user_alias.id,
"entity": user_alias,
}
],
),
(
sess.query(user_alias_id_label),
[
{
"name": "foo",
"type": users.c.id.type,
"aliased": True,
"expr": user_alias_id_label,
"entity": user_alias,
}
],
),
(
sess.query(address_alias),
[
{
"name": "aalias",
"type": Address,
"aliased": True,
"expr": address_alias,
"entity": address_alias,
}
],
),
(
sess.query(name_label, fn),
[
{
"name": "uname",
"type": users.c.name.type,
"aliased": False,
"expr": name_label,
"entity": User,
},
{
"name": None,
"type": fn.type,
"aliased": False,
"expr": fn,
"entity": User,
},
HackingHabits/PersonalPasswordManager | packages/Flask/examples/minitwit/minitwit.py | Python | mit | 8,424 | 0.00095
# -*- coding: utf-8 -*-
"""
MiniTwit
~~~~~~~~
A microblogging application written with Flask and sqlite3.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import time
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from datetime import datetime
from contextlib import closing
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash
from werkzeug import check_password_hash, generate_password_hash
# configuration
DATABASE = '/tmp/minitwit.db'
PER_PAGE = 30
DEBUG = True
SECRET_KEY = 'development key'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('MINITWIT_SETTINGS', silent=True)
def connect_db():
"""Returns a new connection to the database."""
return sqlite3.connect(app.config['DATABASE'])
def init_db():
"""Creates the database tables."""
with closing(connect_db()) as db:
with app.open_resource('schema.sql') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = g.db.execute('select user_id from user where username = ?',
[username]).fetchone()
return rv[0] if rv else None
def format_datetime(timestamp):
"""Format a timestamp for display."""
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')
def gravatar_url(email, size=80):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
@app.before_request
def before_request():
"""Make sure we are connected to the database each request and look
up the current user so that we know he's there.
"""
g.db = connect_db()
g.user = None
if 'user_id' in session:
g.user = query_db('select * from user where user_id = ?',
[session['user_id']], one=True)
@app.teardown_request
def teardown_request(exception):
"""Closes the database again at the end of the request."""
if hasattr(g, 'db'):
g.db.close()
@app.route('/')
def timeline():
"""Shows a users timeline or if no user is logged in it will
redirect to the public timeline. This timeline shows the user's
messages as well as all the messages of followed users.
"""
if not g.user:
return redirect(url_for('public_timeline'))
return render_template('timeline.html', messages=query_db('''
select message.*, user.* from message, user
where message.author_id = user.user_id and (
user.user_id = ? or
user.user_id in (select whom_id from follower
where who_id = ?))
order by message.pub_date desc limit ?''',
[session['user_id'], session['user_id'], PER_PAGE]))
@app.route('/public')
def public_timeline():
"""Displays the latest messages of all users."""
return render_template('timeline.html', messages=query_db('''
select message.*, user.* from message, user
where message.author_id = user.user_id
order by message.pub_date desc limit ?''', [PER_PAGE]))
@app.route('/<username>')
def user_timeline(username):
"""Display's a users tweets."""
profile_user = query_db('select * from user where username = ?',
[username], one=True)
if profile_user is None:
abort(404)
followed = False
if g.user:
followed = query_db('''select 1 from follower where
follower.who_id = ? and follower.whom_id = ?''',
[session['user_id'], profile_user['user_id']],
one=True) is not None
return render_template('timeline.html', messages=query_db('''
select message.*, user.* from message, user where
user.user_id = message.author_id and user.user_id = ?
order by message.pub_date desc limit ?''',
[profile_user['user_id'], PER_PAGE]), followed=followed,
profile_user=profile_user)
@app.route('/<username>/follow')
def follow_user(username):
"""Adds the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
g.db.execute('insert into follower (who_id, whom_id) values (?, ?)',
[session['user_id'], whom_id])
g.db.commit()
flash('You are now following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/<username>/unfollow')
def unfollow_user(username):
"""Removes the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
g.db.execute('delete from follower where who_id=? and whom_id=?',
[session['user_id'], whom_id])
g.db.commit()
flash('You are no longer following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/add_message', methods=['POST'])
def add_message():
"""Registers a new message for the user."""
if 'user_id' not in session:
abort(401)
if request.form['text']:
g.db.execute('''insert into message (author_id, text, pub_date)
values (?, ?, ?)''', (session['user_id'], request.form['text'],
int(time.time())))
g.db.commit()
flash('Your message was recorded')
return redirect(url_for('timeline'))
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Logs the user in."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
user = query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'],
request.form['password']):
error = 'Invalid password'
else:
flash('You were logged in')
session['user_id'] = user['user_id']
return redirect(url_for('timeline'))
return render_template('login.html', error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Registers the user."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
if not request.form['username']:
error = 'You have to enter a username'
elif not request.form['email'] or \
'@' not in request.form['email']:
error = 'You have to enter a valid email address'
elif not request.form['password']:
error = 'You have to enter a password'
elif request.form['password'] != request.form['password2']:
error = 'The two passwords do not match'
elif get_user_id(request.form['username']) is not None:
error = 'The username is already taken'
else:
g.db.execute('''insert into user (
username, email, pw_hash) values (?, ?, ?)''',
[request.form['username'], request.form['email'],
generate_password_hash(request.form['password'])])
g.db.commit()
flash('You were successfully registered and can login now')
return redirect(url_for('login'))
return render_template('register.html', error=error)
@app.route('/logout')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(url_for('public_timeline'))
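`query_db` returns a list of row dicts, or a single dict (or `None`) when `one=True`. A usage sketch against the `user` table (column names taken from the queries above):

```python
user = query_db('select * from user where username = ?', ['jane'], one=True)
if user is None:
    print('no such user')
else:
    print(user['user_id'], user['email'])
```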
htem/CATMAID | django/applications/catmaid/control/common.py | Python | agpl-3.0 | 8,243 | 0.002669
import string
import random
import json
from collections import defaultdict
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from catmaid.fields import Double3D
from catmaid.models import Log, NeuronSearch, CELL_BODY_CHOICES, \
SORT_ORDERS_DICT, Relation, Class, ClassInstance, \
ClassInstanceClassInstance
def _create_relation(user, project_id, relation_id, instance_a_id, instance_b_id):
relation = ClassInstanceClassInstance()
relation.user = user
relation.project_id = project_id
relation.relation_id = relation_id
relation.class_instance_a_id = instance_a_id
relation.class_instance_b_id = instance_b_id
relation.save()
return relation
def insert_into_log(project_id, user_id, op_type, location=None, freetext=None):
""" Inserts a new entry into the log table. If the location parameter is
passed, it is expected to be an iteratable (list, tuple).
"""
# valid operation types
operation_type_array = [
"rename_root",
"create_neuron",
"rename_neuron",
"remove_neuron",
"move_neuron",
"create_group",
"rename_group",
"remove_group",
"move_group",
"create_skeleton",
"rename_skeleton",
"remove_skeleton",
"move_skeleton",
"split_skeleton",
"join_skeleton",
"reroot_skeleton",
"change_confidence"
]
if op_type not in operation_type_array:
return {'error': 'Operation type {0} not valid'.format(op_type)}
new_log = Log()
new_log.user_id = user_id
new_log.project_id = project_id
new_log.operation_type = op_type
if location is not None:
new_log.location = Double3D(*location)
if freetext is not None:
new_log.freetext = freetext
new_log.save()
# $q = $db->insertIntoId('log', $data );
# echo json_encode( array ( 'error' => "Failed to insert operation $op_type for user $uid in project %pid." ) );
# Tip from: http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/
# Required because we need a RequestContext, not just a Context - the
# former looks at TEMPLATE_CONTEXT_PROCESSORS, while the latter doesn't.
def my_render_to_response(req, *args, **kwargs):
kwargs['context_instance'] = RequestContext(req)
return render_to_response(*args, **kwargs)
def json_error_response(message):
"""
When an operation fails we should return a JSON dictionary
with the key 'error' set to an error message. This is a
helper method to return such a structure:
"""
return HttpResponse(json.dumps({'error': message}),
content_type='text/json')
def order_neurons(neurons, order_by=None):
column, reverse = 'name', False
if order_by and (order_by in SORT_ORDERS_DICT):
column, reverse, _ = SORT_ORDERS_DICT[order_by]
if column == 'name':
neurons.sort(key=lambda x: x.name)
elif column == 'gal4':
neurons.sort(key=lambda x: x.cached_sorted_lines_str)
elif column == 'cell_body':
neurons.sort(key=lambda x: x.cached_cell_body)
else:
raise Exception("Unknown column (%s) in order_neurons" % (column,))
if reverse:
neurons.reverse()
return neurons
# Both index and visual_index take a request and kwargs and then
# return a list of neurons and a NeuronSearch form:
def get_form_and_neurons(request, project_id, kwargs):
# If we've been passed parameters in a REST-style GET request,
# create a form from them. Otherwise, if it's a POST request,
# create the form from the POST parameters. Otherwise, it's a
# plain request, so create the default search form.
rest_keys = ('search', 'cell_body_location', 'order_by')
if any((x in kwargs) for x in rest_keys):
kw_search = kwargs.get('search', None) or ""
kw_cell_body_choice = kwargs.get('cell_body_location', None) or "a"
kw_order_by = kwargs.get('order_by', None) or 'name'
search_form = NeuronSearch({'search': kw_search,
'cell_body_location': kw_cell_body_choice,
'order_by': kw_order_by})
elif request.method == 'POST':
search_form = NeuronSearch(request.POST)
else:
search_form = NeuronSearch({'search': '',
'cell_body_location': 'a',
'order_by': 'name'})
if search_form.is_valid():
search = search_form.cleaned_data['search']
cell_body_location = search_form.cleaned_data['cell_body_location']
order_by = search_form.cleaned_data['order_by']
else:
search = ''
cell_body_location = 'a'
order_by = 'name'
cell_body_choices_dict = dict(CELL_BODY_CHOICES)
all_neurons = ClassInstance.objects.filter(
project__id=project_id,
class_column__class_name='neuron',
name__icontains=search).exclude(name='orphaned pre').exclude(name='orphaned post')
if cell_body_location != 'a':
location = cell_body_choices_dict[cell_body_location]
all_neurons = all_neurons.filter(
project__id=project_id,
cici_via_a__relation__relation_name='has_cell_body',
cici_via_a__class_instance_b__name=location)
cici_qs = ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='has_cell_body',
class_instance_a__class_column__class_name='neuron',
class_instance_b__class_column__class_name='cell_body_location')
neuron_id_to_cell_body_location = dict(
(x.class_instance_a.id, x.class_instance_b.name) for x in cici_qs)
neuron_id_to_driver_lines = defaultdict(list)
for cici in ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='expresses_in',
class_instance_a__class_column__class_name='driver_line',
class_instance_b__class_column__class_name='neuron'):
neuron_id_to_driver_lines[cici.class_instance_b.id].append(cici.class_instance_a)
all_neurons = list(all_neurons)
for n in all_neurons:
n.cached_sorted_lines = sorted(
neuron_id_to_driver_lines[n.id], key=lambda x: x.name)
n.cached_sorted_lines_str = ", ".join(x.name for x in n.cached_sorted_lines)
n.cached_cell_body = neuron_id_to_cell_body_location.get(n.id, 'Unknown')
all_neurons = order_neurons(all_neurons, order_by)
return (all_neurons, search_form)
# TODO After all PHP functions have been replaced and all occurrence of
# this odd behavior have been found, change callers to not depend on this
# legacy functionality.
def makeJSON_legacy_list(objects):
'''
The PHP function makeJSON, when operating on a list of rows as
results, will output a JSON list of key-values, with keys being
integers from 0 upwards. We return a dict with the same
structure so that it looks the same when used with json.dumps.
'''
res = {}
for i, o in enumerate(objects):
res[i] = o
return res
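# For illustration, the legacy structure for a two-element list:
#   makeJSON_legacy_list(['a', 'b'])  ->  {0: 'a', 1: 'b'}
# so json.dumps() yields {"0": "a", "1": "b"} rather than ["a", "b"].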
def cursor_fetch_dictionary(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
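# Shape sketch, assuming a cursor over hypothetical (id, name) rows:
#   cursor_fetch_dictionary(cursor)  ->  [{'id': 1, 'name': 'n1'}, {'id': 2, 'name': 'n2'}]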
def get_relation_to_id_map(project_id):
return {rname: ID for rname, ID in Relation.objects.filter(project=project_id).values_list("relation_name", "id")}
def get_class_to_id_map(project_id):
return {cname: ID for cname, ID in Class.objects.filter(project=project_id).values_list("class_name", "id")}
def urljoin(a, b):
""" Joins to URL parts a and b while making sure this
exactly one slash inbetween.
"""
if a[-1] != '/':
a = a + '/'
if b[0] == '/':
b = b[1:]
return a + b
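# For example, each of the following returns 'http://host/a/b':
#   urljoin('http://host/a', 'b')
#   urljoin('http://host/a/', 'b')
#   urljoin('http://host/a', '/b')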
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
""" Creates a random string of the specified length.
"""
|
hicknhack-software/buildbot-inplace-config
|
buildbot_inplace/config.py
|
Python
|
apache-2.0
| 5,400
| 0.002222
|
""" Buildbot inplace config
(C) Copyright 2015 HicknHack Software GmbH
The original code can be found at:
https://github.com/hicknhack-software/buildbot-inplace-config
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from buildbot.config import BuilderConfig
from twisted.python import log
from buildbot.process.factory import BuildFactory
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.triggerable import Triggerable
from inplace_build import InplaceBuildFactory
from project import Project
from setup_build import SetupBuildFactory
from worker import Worker
from pprint import pformat
class NamedList(list):
def named_set(self, elem):
self.named_del(elem.name)
self.append(elem)
def named_del(self, name):
# iterate over a copy so removal is safe while scanning
for elem in list(self):
if elem.name == name:
self.remove(elem)
def named_get(self, name):
for elem in self:
if elem.name == name:
return elem
def clear(self):
del self[:]
@property
def names(self):
return map(lambda elem: elem.name, self)
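# Minimal usage sketch (Item stands in for any object with a .name attribute):
#   nl = NamedList()
#   nl.named_set(Item(name='a'))   # insert
#   nl.named_set(Item(name='a'))   # replaces the existing 'a' entry
#   nl.named_get('a')              # -> the second Item
#   list(nl.names)                 # -> ['a']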
class Wrapper(dict):
""" Wrapper for the configuration dictionary """
def __init__(self, **kwargs):
super(Wrapper, self).__init__(**kwargs)
self._inplace_workers = NamedList()
self._projects = NamedList()
@property
def builders(self):
return self.named_list('builders')
@property
def schedulers(self):
return self.named_list('schedulers')
@property
def change_source(self):
return self.named_list('change_source')
@property
def workers(self):
return self.named_list('workers')
@property
def inplace_workers(self):
return self._inplace_workers
@property
def projects(self):
return self._projects
def named_list(self, key):
if key not in self:
self[key] = NamedList()
return self[key]
def load_workers(self, path):
Worker.load(path, self.inplace_workers, self.workers)
def load_projects(self, path):
Project.load(path, self.projects)
DUMMY_NAME = "Dummy"
DUMMY_TRIGGER = "Trigger_Dummy"
def setup_inplace(self):
self.builders.clear()
self.schedulers.clear()
builder_name = self.DUMMY_NAME
trigger_name = self.DUMMY_TRIGGER
worker_names = self.inplace_workers.names
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=BuildFactory()))
self.schedulers.named_set(ForceScheduler(name=trigger_name, builderNames=[builder_name]))
for project in self.projects:
builder_name = "%s_Builder" % project.name
trigger_name = "Force_%s_Build" % project.name
builder_factory = InplaceBuildFactory(self, project)
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=builder_factory))
self.schedulers.named_set(ForceScheduler(name=trigger_name, builderNames=[builder_name]))
def project_profile_worker_names(self, profile):
return [worker.name
for worker in self.inplace_workers
if set(profile.setups).issubset(set(worker.setups))
and profile.platform in worker.platforms]
def setup_project_inplace(self, project):
self.setup_inplace()
for worker in self.inplace_workers:
log.msg("Got worker '%s' for platform %s and setups %s" %
(worker.name, pformat(worker.platforms), pformat(worker.setups)),
system='Inplace Config')
for profile in project.inplace.profiles:
worker_names = self.project_profile_worker_names(profile)
if not worker_names:
log.msg("Failed to find worker for platform '%s' and setups '%s' (project '%s')" %
(profile.platform, pformat(profile.setups), project.name),
system='Inplace Config')
continue # profile not executable
builder_name = "_".join([project.name, profile.platform, profile.name])
trigger_name = _project_profile_trigger_name(project.name, profile)
build_factory = SetupBuildFactory(self, project, profile)
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=build_factory))
self.schedulers.named_set(Triggerable(name=trigger_name, builderNames=[builder_name]))
def project_trigger_names(self,
|
project):
return [
_project_profile_trigger_name(project.name, profile)
for profile in project.inplace.profiles
if self.project_profile_worker_names(profile)]
def _pr
|
oject_profile_trigger_name(project_name, profile):
return "_".join([project_name, profile.platform, profile.name, "Trigger"])
|
theoneandonly-vector/LaZagne
|
Windows/src/LaZagne/config/constant.py
|
Python
|
lgpl-3.0
| 416
| 0.057692
|
class constant():
folder_name = 'results'
MAX_HELP_POSITION = 27
CURRENT_VERSION = '0.9.1'
|
output = None
file_logger = None
|
# jitsi options
jitsi_masterpass = None
# mozilla options
manually = None
path = None
bruteforce = None
specific_path = None
mozilla_software = ''
# ie options
ie_historic = None
# total password found
nbPasswordFound = 0
passwordFound = []
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/freidman1/ws_freidman1_SVR_rbf_db2_code_gen.py
|
Python
|
bsd-3-clause
| 121
| 0.016529
|
from sklearn2sql_
|
heroku.tests.regression import generic as reg_gen
|
reg_gen.test_model("SVR_rbf" , "freidman1" , "db2")
|
Perlmint/Yuzuki
|
resource/util.py
|
Python
|
mit
| 66
| 0.015152
|
# -*- codin
|
g: utf-8 -*-
from helper.resource import Yuzuki
|
Resource
|
DineshRaghu/dstc6-track1
|
src/data_utils.py
|
Python
|
gpl-3.0
| 14,089
| 0.013273
|
from __future__ import absolute_import
import os
import re
import numpy as np
import tensorflow as tf
stop_words=set(["a","an","the"])
def load_candidates(data_dir, task_id):
assert task_id > 0 and task_id < 6
candidates=[]
candidates_f=None
candid_dic={}
#candidates_f='candidates.txt'
candidates_f='candidates' + str(task_id) + '.txt'
with open(os.path.join(data_dir,candidates_f)) as f:
for i,line in enumerate(f):
candid_dic[line.strip().split(' ',1)[1]] = i
line=tokenize(line.strip())[1:]
candidates.append(line)
# return candidates,dict((' '.join(cand),i) for i,cand in enumerate(candidates))
return candidates,candid_dic
def load_test_candidates(data_dir, task_id, test_id):
assert task_id > 0 and task_id < 6
candidates=[]
candidates_f=None
candid_dic={}
'''
if test_id == 1 or test_id == 2:
candidates_f='candidates.txt'
else:
candidates_f='candidates-ext.txt'
'''
if test_id == 1 or test_id == 2:
candidates_f='candidates' + str(task_id) + '.txt'
else:
candidates_f='candidates' + str(task_id) + '_tst'+ str(test_id) + '.txt'
with open(os.path.join(data_dir,candidates_f)) as f:
for i,line in enumerate(f):
candid_dic[line.strip().split(' ',1)[1]] = i
line=tokenize(line.strip())[1:]
candidates.append(line)
# return candidates,dict((' '.join(cand),i) for i,cand in enumerate(candidates))
return candidates,candid_dic
def load_dialog_task(data_dir, task_id, candid_dic, isOOV):
'''Load the nth task. There are 5 tasks in total.
Returns a tuple containing the training, testing and validation data for the task.
'''
assert task_id > 0 and task_id < 6
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = '-dialog-task{}'.format(task_id)
train_file = [f for f in files if s in f and 'train' in f][0]
test_file = [f for f in files if s in f and 'dev' in f][0]
val_file = [f for f in files if s in f and 'dev' in f][0]
train_data = get_dialogs(train_file,candid_dic)
test_data = get_dialogs(test_file,candid_dic)
val_data = get_dialogs(val_file,candid_dic)
return train_data, test_data, val_data
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple']
'''
sent=sent.lower()
if sent=='<silence>':
return [sent]
result=[x.strip() for x in re.split(r'(\W+)', sent) if x.strip() and x.strip() not in stop_words]
if not result:
result=['<silence>']
if result[-1]=='.' or result[-1]=='?' or result[-1]=='!':
result=result[:-1]
return result
def load_dialog_test_data(data_dir, task_id, test_id):
assert task_id > 0 and task_id < 6
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = '-dialog-task{}'.format(task_id)
t = 'tst_' + str(test_id)
test_file = [f for f in files if s in f and t in f][0]
test_data = get_test_dialogs(test_file)
return test_data
def get_test_dialogs(f):
'''Given a file name, read the file, retrieve the dialogs, and then convert the sentences into a single dialog.
'''
with open(f) as f:
return parse_test_dialogs(f.readlines())
def parse_test_dialogs(lines):
'''
Parse dialogs provided in the babi tasks format
'''
data=[]
context=[]
u=None
r=None
a=-1
dialog_id=0
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if '\t' in line:
u, r = line.split('\t')
u = tokenize(u)
r = tokenize(r)
# temporal encoding, and utterance/response encoding
# data.append((context[:],u[:],candid_dic[' '.join(r)]))
# data.append((context[:],u[:],a,dialog_id))
u.append('$u')
u.append('#'+str(nid))
r.append('$r')
r.append('#'+str(nid))
context.append(u)
context.append(r)
else:
r=tokenize(line)
r.append('$r')
r.append('#'+str(nid))
context.append(r)
else:
data.append((context[:-2],u[:],a,dialog_id))
# clear context
u=None
r=None
a=None
context=[]
dialog_id=dialog_id+1
return data
def parse_dialogs_per_response(lines,candid_dic):
'''
Parse dialogs provided in the babi tasks format
'''
data=[]
context=[]
u=None
r=None
dialog_id=0
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if '\t' in line:
u, r = line.split('\t')
a = candid_dic[r]
u = tokenize(u)
r = tokenize(r)
# temporal encoding, and utterance/response encoding
# data.appe
|
nd((context[:],u[:],candid_dic[' '.join(r)]))
data.append((context[:],u[:],a,dialog_id))
u.append('$u')
u.append('#'+str(n
|
id))
r.append('$r')
r.append('#'+str(nid))
context.append(u)
context.append(r)
else:
r=tokenize(line)
r.append('$r')
r.append('#'+str(nid))
context.append(r)
else:
dialog_id=dialog_id+1
# clear context
context=[]
return data
def get_dialogs(f,candid_dic):
'''Given a file name, read the file, retrieve the dialogs, and then convert the sentences into a single dialog.
'''
with open(f) as f:
return parse_dialogs_per_response(f.readlines(),candid_dic)
def vectorize_candidates_sparse(candidates,word_idx):
shape=(len(candidates),len(word_idx)+1)
indices=[]
values=[]
for i,candidate in enumerate(candidates):
for w in candidate:
indices.append([i,word_idx[w]])
values.append(1.0)
return tf.SparseTensor(indices,values,shape)
def vectorize_candidates(candidates,word_idx,sentence_size):
shape=(len(candidates),sentence_size)
C=[]
for i,candidate in enumerate(candidates):
lc=max(0,sentence_size-len(candidate))
C.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc)
return tf.constant(C,shape=shape)
def vectorize_data(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size, candidates, match_feature_flag):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
The answer array is returned as a one-hot encoding.
"""
atmosphere_restriction_set={'casual','romantic','business','glutenfree','vegan','vegetarian'}
S = []
Q = []
A = []
C = []
data.sort(key=lambda x:len(x[0]),reverse=True)
for i, (story, query, answer, start) in enumerate(data):
if i%batch_size==0:
memory_size=max(1,min(max_memory_size,len(story)))
ss = []
story_query_vocab = set()
for i, sentence in enumerate(story, 1):
ls = max(0, sentence_size - len(sentence))
ss.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
for w in sentence:
story_query_vocab.add(w)
# take only the most recent sentences that fit in
|
joakim-hove/ert
|
tests/utils.py
|
Python
|
gpl-3.0
| 582
| 0
|
from pathlib import Path
def source_dir():
src
|
= Path("@CMAKE_CURRENT_SOURCE_DIR@/../..")
if src.is_dir():
return src.relative_to(Path.cwd())
# If the file was not correctly configured by cmake, look for the source
# folder, assuming the build folder is inside the source folder.
current_path = Path(__file__)
while current_path != Path("/"):
if (current_path / ".git").is_dir():
return curre
|
nt_path
current_path = current_path.parent
raise RuntimeError("Cannot find the source folder")
SOURCE_DIR = source_dir()
|
yausern/stlab
|
devices/TritonDaemon/TritonDaemon.py
|
Python
|
gpl-3.0
| 4,577
| 0.007428
|
"""Triton Daemon - Communication server for Oxford Triton system
The Triton fridge already has communication capacity to directly control and read both the temperatures and other elements
of the fridge (pressure sensors, valves, compressor, ...). However, the Triton logging uses binary format files that can
only be opened in its (very flaky) proprietary program. This Daemon acts as an intermediary between the Triton system and
the measurement computer, allowing the measurement computer to send commands to the system while the Daemon maintains an ASCII log
of the system parameters. These logfiles can then be opened using cryoLogger. Note that the binary logs are still
kept in any case.
Run as::
python TritonDaemon.py
This Daemon is intended to be run on the Triton control computer but can actually be run from any system that has network access
to the triton control computer. The address will be requested when the script is started as well as the folder where the logs
should be saved. The defaults can be adjusted in the variables :code:`TRITONADDR` and :code:`LOGFOLDER` at the top of the script.
"""
from stlab.devices.Oxford_Triton import Oxford_Triton as Triton
from queue import Queue
from threading import Thread
import time
from stlab.devices.TritonDaemon.TritonLogger import TritonLogger as logger
import socket
from stlab.utils.MySocket import MySocket
import sys
import datetime
TRITONADDR = 'TCPIP::127.0.0.1::33576::SOCKET'
LOGFOLDER = 'C:/RemoteLogging/'
def command_handler(qin,addr='TCPIP::127.0.0.1::33576::SOCKET'):
mytriton = Triton(addr=addr)
while True:
nextcomm = qin.get()
if nextcomm == 0:
break
qout = nextcomm[0]
comm = nextcomm[1]
args = nextcomm[2]
ret = comm(mytriton, *args)
qin.task_done()
qout.put(ret)
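# Usage sketch (mirrors RunCommand below): a caller enqueues a
# (result_queue, unbound_method, args) tuple and blocks on the reply;
# the command string is illustrative, not a documented Triton command.
#   resultq = Queue(maxsize=0)
#   commandq.put((resultq, Triton.query, ('READ:SYS:TIME',)))
#   reply = resultq.get()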
if __name__ == '__main__':
print("StLab Temperature server for Triton. Initializing...")
'''
if len(sys.argv) >= 2:
filename = sys.argv[1]
ff = open(filename,'a')
ff.write('\n')
else:
t0 = datetime.datetime.now()
filename = 'log_' + t0.strftime('%y_%m_%d__%H_%M_%S') + '.dat'
varline = ['Time (s)'] + ['PT2 Head (K)','PT2 Plate (K)', 'Still Plate (K)','Cold Plate (K)','MC Cernox (K)','PT1 Head (K)','PT1 Plate (K)','MC Plate (K)'] + ['P%d (mbar)' % i for i in range(1,7)]
#varline = ['Time (s)'] + ['T%d (K)' % i for i in range(1,10)] + ['P%d (mbar)' % i for i in range(1,7)]
print(varline)
ff = open(filename,'
|
w')
ff.write('#' + ', '.join(varline)+'\n')
'''
logfolder = input('Enter BF log folder location (default "{}"):\n'.format(LOGFOLDER))
if logfolder == '':
logfolder = LOGFOLDER
tritonaddr = input('Enter address of Triton instrument (default "{}"):\n'.format(TRITONADDR))
if tritonaddr == '':
tritonaddr = TRITONADDR
commandq = Queue(maxsize=0)
myhandler = Thread(target=command_h
|
andler, args=(commandq,tritonaddr))
myhandler.daemon = True
myhandler.start()
loggerthread = Thread(target=logger, args=(commandq,logfolder))
loggerthread.start()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
#serversocket.bind((socket.gethostname(), 8001))
#addr = socket.gethostbyname(socket.gethostname())
addr = '0.0.0.0' #listen on all network interfaces
port = 8472
serversocket.bind((addr, port))
# become a server socket
serversocket.listen(5)
myip = socket.gethostbyname(socket.gethostname())
print("Ready. Listening on port %d and address %s" % (port,myip))
def RunCommand(sock,resultq):
ss = MySocket(sock)
word = ss.myreceive()
word = word.decode('utf_8')
commandq.put( (resultq, Triton.query, (word,)) )
xx = resultq.get()
resultq.task_done()
ss.mysend(xx.encode('utf_8'))
ss.sock.close()
return word
resultq = Queue(maxsize=0)
while True:
clientsocket = None
try:
# accept connections from outside
(clientsocket, address) = serversocket.accept()
RunCommand(clientsocket,resultq)
print("Listening on port %d and address %s" % (port,myip))
except KeyboardInterrupt:
print('Shutting down temperature server')
serversocket.close()
break
commandq.put(0)
loggerthread.join()
|
themadinventor/esptool
|
espefuse.py
|
Python
|
gpl-2.0
| 42,922
| 0.004497
|
#!/usr/bin/env python
# ESP32 efuse get/set utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2016 Espressif Systems (Shanghai) PTE LTD
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division, print_function
import argparse
import esptool
import io
import json
import os
import struct
import sys
import time
# Table of efuse values - (name, category, block, word in block, mask, write disable bit, read disable bit, type, description)
# Match values in efuse_reg.h & Efuse technical reference chapter
EFUSES = [
('WR_DIS', "efuse", 0, 0, 0x0000FFFF, 1, None, "int", "Efuse write disable mask"),
('RD_DIS', "efuse", 0, 0, 0x000F0000, 0
|
, None, "int", "Efuse read disablemask"),
('FLASH_CRYPT_CNT', "security", 0, 0, 0x07F00000, 2, None, "bitcount", "Flash encryption mode counter"),
('MAC', "identity", 0, 1, 0xFFFFFFFF, 3, None, "mac", "Factory MAC Address"),
('XPD_SDIO_FORCE', "config", 0, 4, 1 << 16, 5, None, "flag", "Ignore MTDI pin (GPIO12) for VDD_SDIO on reset"),
('
|
XPD_SDIO_REG', "config", 0, 4, 1 << 14, 5, None, "flag", "If XPD_SDIO_FORCE, enable VDD_SDIO reg on reset"),
('XPD_SDIO_TIEH', "config", 0, 4, 1 << 15, 5, None, "flag", "If XPD_SDIO_FORCE & XPD_SDIO_REG, 1=3.3V 0=1.8V"),
('CLK8M_FREQ', "config", 0, 4, 0xFF, None, None, "int", "8MHz clock freq override"),
('SPI_PAD_CONFIG_CLK', "config", 0, 5, 0x1F << 0, 6, None, "spipin", "Override SD_CLK pad (GPIO6/SPICLK)"),
('SPI_PAD_CONFIG_Q', "config", 0, 5, 0x1F << 5, 6, None, "spipin", "Override SD_DATA_0 pad (GPIO7/SPIQ)"),
('SPI_PAD_CONFIG_D', "config", 0, 5, 0x1F << 10, 6, None, "spipin", "Override SD_DATA_1 pad (GPIO8/SPID)"),
('SPI_PAD_CONFIG_HD', "config", 0, 3, 0x1F << 4, 6, None, "spipin", "Override SD_DATA_2 pad (GPIO9/SPIHD)"),
('SPI_PAD_CONFIG_CS0', "config", 0, 5, 0x1F << 15, 6, None, "spipin", "Override SD_CMD pad (GPIO11/SPICS0)"),
('FLASH_CRYPT_CONFIG', "security", 0, 5, 0x0F << 28, 10, 3, "int", "Flash encryption config (key tweak bits)"),
('CHIP_VER_REV1', "identity", 0, 3, 1 << 15, 3, None, "flag", "Silicon Revision 1"),
('CHIP_VER_REV2', "identity", 0, 5, 1 << 20, 6, None, "flag", "Silicon Revision 2"),
('BLK3_PART_RESERVE', "calibration", 0, 3, 1 << 14, 10, 3, "flag", "BLOCK3 partially served for ADC calibration data"),
('CHIP_VERSION', "identity", 0, 3, 0x03 << 12, 3, None, "int", "Reserved for future chip versions"),
('CHIP_PACKAGE', "identity", 0, 3, 0x07 << 9, 3, None, "int", "Chip package identifier"),
('CODING_SCHEME', "efuse", 0, 6, 0x3, 10, 3, "int", "Efuse variable block length scheme"),
('CONSOLE_DEBUG_DISABLE',"security", 0, 6, 1 << 2, 15, None, "flag", "Disable ROM BASIC interpreter fallback"),
('DISABLE_SDIO_HOST', "config", 0, 6, 1 << 3, None, None, "flag", "Disable SDIO host"),
('ABS_DONE_0', "security", 0, 6, 1 << 4, 12, None, "flag", "secure boot enabled for bootloader"),
('ABS_DONE_1', "security", 0, 6, 1 << 5, 13, None, "flag", "secure boot abstract 1 locked"),
('JTAG_DISABLE', "security", 0, 6, 1 << 6, 14, None, "flag", "Disable JTAG"),
('DISABLE_DL_ENCRYPT', "security", 0, 6, 1 << 7, 15, None, "flag", "Disable flash encryption in UART bootloader"),
('DISABLE_DL_DECRYPT', "security", 0, 6, 1 << 8, 15, None, "flag", "Disable flash decryption in UART bootloader"),
('DISABLE_DL_CACHE', "security", 0, 6, 1 << 9, 15, None, "flag", "Disable flash cache in UART bootloader"),
('KEY_STATUS', "efuse", 0, 6, 1 << 10, 10, 3, "flag", "Usage of efuse block 3 (reserved)"),
('ADC_VREF', "calibration", 0, 4,0x1F << 8,0, None, "vref", "Voltage reference calibration"),
('BLK1', "security", 1, 0, 0xFFFFFFFF, 7, 0, "keyblock", "Flash encryption key"),
('BLK2', "security", 2, 0, 0xFFFFFFFF, 8, 1, "keyblock", "Secure boot key"),
('BLK3', "security", 3, 0, 0xFFFFFFFF, 9, 2, "keyblock", "Variable Block 3"),
]
# if BLK3_PART_RESERVE is set, these efuse fields are in BLK3:
BLK3_PART_EFUSES = [
('ADC1_TP_LOW', "calibration", 3, 3, 0x7F << 0, 9, 2, "adc_tp", "ADC1 150mV reading"),
('ADC1_TP_HIGH', "calibration", 3, 3, 0x1FF << 7, 9, 2, "adc_tp", "ADC1 850mV reading"),
('ADC2_TP_LOW', "calibration", 3, 3, 0x7F << 16, 9, 2, "adc_tp", "ADC2 150mV reading"),
('ADC2_TP_HIGH', "calibration", 3, 3, 0x1FF << 23, 9, 2, "adc_tp", "ADC2 850mV reading"),
]
# Offsets and lengths of each of the 4 efuse blocks in register space
#
# These offsets/lens are for esptool.read_efuse(X) which takes
# a word offset (into registers) not a byte offset.
EFUSE_BLOCK_OFFS = [0, 14, 22, 30]
EFUSE_BLOCK_LEN = [7, 8, 8, 8]
# EFUSE registers & command/conf values
EFUSE_REG_CONF = 0x3FF5A0FC
EFUSE_CONF_WRITE = 0x5A5A
EFUSE_CONF_READ = 0x5AA5
EFUSE_REG_CMD = 0x3FF5A104
EFUSE_CMD_WRITE = 0x2
EFUSE_CMD_READ = 0x1
# address of first word of write registers for each efuse
EFUSE_REG_WRITE = [0x3FF5A01C, 0x3FF5A098, 0x3FF5A0B8, 0x3FF5A0D8]
# 3/4 Coding scheme warnings registers
EFUSE_REG_DEC_STATUS = 0x3FF5A11C
EFUSE_REG_DEC_STATUS_MASK = 0xFFF
# Efuse clock control
EFUSE_DAC_CONF_REG = 0x3FF5A118
EFUSE_CLK_REG = 0x3FF5A0F8
EFUSE_DAC_CLK_DIV_MASK = 0xFF
EFUSE_CLK_SEL0_MASK = 0x00FF
EFUSE_CLK_SEL1_MASK = 0xFF00
EFUSE_CLK_SETTINGS = {
# APB freq: clk_sel0, clk_sel1, dac_clk_div
# Taken from TRM chapter "eFuse Controller": Timing Configuration
26: (250, 255, 52),
40: (160, 255, 80),
80: (80, 128, 100), # this is here for completeness only as esptool never sets an 80MHz APB clock
}
EFUSE_BURN_TIMEOUT = 0.250 # seconds
# Coding Scheme values
CODING_SCHEME_NONE = 0
CODING_SCHEME_34 = 1
def confirm(action, args):
print("%s%sThis is an irreversible operation." % (action, "" if action.endswith("\n") else ". "))
if not args.do_not_confirm:
print("Type 'BURN' (all capitals) to continue.")
sys.stdout.flush() # required for Pythons which disable line buffering, ie mingw in mintty
try:
yes = raw_input() # raw_input renamed to input in Python 3
except NameError:
yes = input()
if yes != "BURN":
print("Aborting.")
sys.exit(0)
def efuse_write_reg_addr(block, word):
"""
Return the physical address of the efuse write data register
for the given block and word.
"""
return EFUSE_REG_WRITE[block] + (4 * word)
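# Worked example: block 1, word 2 maps to EFUSE_REG_WRITE[1] + 4 * 2
# = 0x3FF5A098 + 8 = 0x3FF5A0A0.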
class EspEfuses(object):
"""
Wrapper object to manage the efuse fields in a connected ESP bootloader
"""
def __init__(self, esp):
self._esp = esp
self._efuses = [EfuseField.from_tuple(self, efuse) for efuse in EFUSES]
if self["BLK3_PART_RESERVE"].get():
# add these BLK3 efuses, if the BLK3_PART_RESERVE flag is set...
self._efuses += [EfuseField.from_tuple(self, efuse) for efuse in BLK3_PART_EFUSES]
self.coding_scheme = self["CODING_SCHEME"].get()
def __getitem__(self, efuse_name):
""" Return the efuse field with the given name """
for e in self._efuses:
if efuse_name == e.register_name:
return e
raise KeyError
def __iter__(self):
return self._efuses.__iter__()
|
mistermatti/plugz
|
plugz/plugz.py
|
Python
|
bsd-3-clause
| 373
| 0.002681
|
import abc
class PluginTypeBase(object):
""" Baseclass for plugin types.
This needs to be derived from in order for plugin types to
be accepted by p
|
lugz.
|
"""
__metaclass__ = abc.ABCMeta
plugintype = None
@staticmethod
def is_valid_file(file):
""" Accept or reject files as valid plugins. """
return file.endswith('.py')
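# A minimal sketch of a concrete plugin type (names are illustrative):
#
#   class ScriptPluginType(PluginTypeBase):
#       plugintype = 'script'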
|
arg-hya/taxiCab
|
Plots/TrajectoryPlot/TrajectoryPlot.py
|
Python
|
gpl-3.0
| 1,224
| 0.013072
|
import pycrs
import mpl_toolkits.basemap.pyproj as pyproj # Import the pyproj module
import shapefile as shp
import matplotlib.pyplot as plt
shpFilePath = r"taxi_zones\taxi_zones"
sf = shp.Reader(shpFilePath)
records = sf.records()
plt.figure()
for shape in sf.shapeRecords():
x = [i[0] for i in shape.shape.points[:]]
y = [i[1] for i in shape.shape.points[:]]
plt.plot(x,y)
projobj = pycrs.loader.from_file(r'taxi_zones\taxi_zones.prj')
proj4string = projobj.to_proj4()
print(proj4string)
isn2004=pyproj.Proj(proj4string, preserve_units=True)
wgs84=pyproj.Proj("+init=EPSG:4326")
i = 0
lat = []
lon = []
#1 foot = 0.3048 meters
conv = 0.3048
with open("2013000005_sampled.traj") as f:
next(f)
for line in f:
i += 1
# print line
strings = line.split(",")
co1 = float(strings[0])
co2 = fl
|
oat(strings[1])
x2,y2 = pyproj.transform(wgs84,isn2004 ,co1,co2)
lat.append(x2)
lon.append(y2)
# if i == 14450:
# break
if i == 1169120:
break
x1 = lat
y1 = lon
plt.plot(x1, y1, 'o', color='blue', markersize=7, markeredgewidth=0.0)
plt.sho
|
w()
|
JiscPER/jper
|
service/dao.py
|
Python
|
apache-2.0
| 3,957
| 0.006318
|
"""
This module contains all the Data Access Objects for models which are persisted to Elasticsearch
at some point in their lifecycle.
Each DAO is an extension of the octopus ESDAO utility class which provides all of the ES-level heavy lifting,
so these DAOs mostly just provide information on where to persist the data, and some additional storage-layer
query methods as required
"""
from octopus.modules.es import dao
class ContentLogDAO(dao.ESDAO):
__type__ = 'contentlog'
class UnroutedNotificationDAO(dao.ESDAO):
"""
DAO for UnroutedNotifications
"""
__type__ = 'unrouted'
""" The index type to use to store these objects """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.unrouted_notification())
class RoutedNotificationDAO(dao.TimeBoxedTypeESDAO):
"""
DAO for RoutedNotification
This is an extension of the TimeBoxedTypeESDAO object, which means that a new type is created every
period (e.g. monthly) for new content. This enables rapid dropping of old index types without affecting
Elasticsearch performance, and works here because RoutedNotifications only persist for a limited time
"""
__type__ = 'routed'
""" The base index type to use to store these objects - this will be appended by the time-boxing features of the DAO with the creation timestamp """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.routed_notification())
class FailedNotificationDAO(dao.ESDAO):
"""
DAO for FailedNotifications
"""
__type__ = "failed"
""" The index type to use to store these objects """
class RepositoryConfigDAO(dao.ESDAO):
"""
DAO for RepositoryConfig
"""
__type__ = 'repo_config'
""" The index type to use to store these objects """
class MatchProvenanceDAO(dao.ESDAO):
"""
DAO for MatchProvenance
"""
__type__ = "match_prov"
""" The index type to use to store these objects """
@classmethod
def pull_by_notification(cls, notification_id, size=10):
"""
List all of the match provenance information for the requested notification
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
q = MatchProvNotificationQuery(notification_id, size=size)
return cls.object_query(q=q.query())
class MatchProvNotificationQuery(object):
"""
Query wrapper which generates an ES query for retrieving match provenance objects
based on the notification to which they are attached
"""
def __init__(self, notification_id, size=10):
"""
Set the parameters of the query
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the m
|
aximum number to return (defaults to 10)
"""
self.notification_id = notification_id
self.size = size
def query(self):
"""
generate the query as a python dictionary object
:return: a python dictionary containing the ES query, ready for JSON serialisation
"""
return {
|
"query" : {
"term" : {"notification.exact" : self.notification_id}
},
"size" : self.size
}
class RetrievalRecordDAO(dao.ESDAO):
"""
DAO for RetrievalRecord
"""
__type__ = "retrieval"
""" The index type to use to store these objects """
class AccountDAO(dao.ESDAO):
"""
DAO for Account
"""
__type__ = "account"
""" The index type to use to store these objects """
|
DMIAlumni/pydrone-game
|
pydrone/utils/matrix_generator.py
|
Python
|
bsd-2-clause
| 1,291
| 0.001549
|
import math
import fpformat
import os
from pydrone.utils.data_structures import Graph
def world_generator(size, x_end, y_end, knowledge):
# Check whether a world with knowledge of the states is requested or not
if knowledge:
world = Graph(x_end, y_end)
for i in range(size):
for j in range(size):
world.add_node_coord((i, j))
world.change_weight((i, j), float(fpformat.fix(math.sqrt(math.fabs(pow((x_end - i), 2) + pow((y_end - j), 2))), 3)))
world.change_weight((x_end, y_end), -1)
return world
else:
|
matrix = [[0 for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(size):
matrix[i][j] = float(fpformat.fix(math.sqrt(math.fabs(pow((x_end - i), 2) + pow((y_end - j), 2))), 3))
matrix[x_end][y_end] = -1
return matrix
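# Each cell (i, j) holds the Euclidean distance to the goal cell, e.g. with
# x_end=3, y_end=4 the weight at (0, 0) is sqrt(3**2 + 4**2) = 5.0; the goal
# itself is marked with -1.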
def matrix_generator(size):
matrix = [[0 for i in range(size)] for j in range(siz
|
e)]
return matrix
def print_matrix(matrix):
os.system("clear")
size = len(matrix[0])
for j in range(size):
for i in range(size):
value = matrix[i][j]
if value > 0:
print "", "*",
else:
print "", "-",
print
|
metjush/decision_tree
|
setup.py
|
Python
|
mit
| 734
| 0.042234
|
from setuptools import setup
setup(name='decision_tree',
version='0.04',
description='Practice implementation of a classification decision tree',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python
|
:: 2.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='classification decision tree machine learning random forest',
url='https://github.com/metjush/decision_tree',
author='metjush',
author_email='metjush@gmail.com',
license='MIT',
packages=['decision_tree'],
install_req
|
uires=[
'numpy',
'sklearn'
],
include_package_data=True,
zip_safe=False)
|
lehinevych/cfme_tests
|
cfme/tests/configure/test_docs.py
|
Python
|
gpl-2.0
| 5,548
| 0.001802
|
# -*- coding: utf-8 -*-
import pytest
import re
import requests
try:
# Faster, C-ext
from cStringIO import StringIO
except ImportErro
|
r:
# Slower, pure python
from StringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from cfme.configure.about import product_assistance as about
from utils import version
def pdf_get_text(file_obj, page_nums):
output
|
= StringIO()
manager = PDFResourceManager()
laparams = LAParams(all_texts=True, detect_vertical=True)
converter = TextConverter(manager, output, laparams=laparams)
interpreter = PDFPageInterpreter(manager, converter)
for page in PDFPage.get_pages(file_obj, page_nums):
interpreter.process_page(page)
converter.close()
text = output.getvalue().replace('\n', ' ')
output.close()
return text
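# Usage sketch (file name is illustrative): extract page 0 as one line of text.
#   with open('guide.pdf', 'rb') as f:
#       first_page_text = pdf_get_text(f, [0])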
@pytest.fixture(scope="module")
def guides():
return [
loc
for loc
in about.locators.iterkeys()
if loc.endswith("_guide")
and (
version.pick(about.locators[loc])
if isinstance(about.locators[loc], dict)
else about.locators[loc]
) is not None]
@pytest.mark.tier(3)
@pytest.fixture(scope="session")
def docs_info():
if version.current_version() < "5.4.0.1":
return [
'Control',
'Lifecycle and Automation',
'Quick Start',
'Settings And Operations',
'Insight',
'Integration Services'
]
elif version.current_version() < "5.5.0.12":
return [
'Insight',
'Control',
'Lifecycle and Automation',
'REST API',
'SOAP API',
'User',
'Settings and Operations'
]
elif version.appliance_is_downstream():
return [
'Monitoring Alerts Reporting',
'General Configuration',
'Virtual Machines Hosts',
'Methods For Automation',
'Infrastructure Inventory',
'Providers',
'Scripting Actions',
'Defining Policies Profiles'
]
else:
# Upstream version has no docs
return []
@pytest.mark.tier(2)
@pytest.mark.meta(blockers=[1272618])
@pytest.mark.sauce
def test_links(guides, soft_assert):
"""Test whether the PDF documents are present."""
pytest.sel.force_navigate("about")
for link in guides:
locator = getattr(about, link)
url = pytest.sel.get_attribute(locator, "href")
soft_assert(
requests.head(url, verify=False).status_code == 200,
"'{}' is not accessible".format(pytest.sel.text(locator).encode("utf-8").strip())
)
@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1272618])
def test_contents(guides, soft_assert):
"""Test contents of each document."""
pytest.sel.force_navigate("about")
precomp_noguide = re.compile("(.*) Guide")
cur_ver = version.current_version()
for link in guides:
locator = getattr(about, link)
url = pytest.sel.get_attribute(locator, "href")
data = requests.get(url, verify=False)
pdf_titlepage_text_low = pdf_get_text(StringIO(data.content), [0]).lower()
# don't include the word 'guide'
title_text_low = precomp_noguide.search(pytest.sel.text(locator)).group(1).lower()
expected = [title_text_low]
if cur_ver == version.LATEST:
expected.append('manageiq')
else:
expected.append('cloudforms')
maj_min = '{}.{}'.format(cur_ver.version[0], cur_ver.version[1])
expected.append(version.get_product_version(maj_min))
for exp_str in expected:
soft_assert(exp_str in pdf_titlepage_text_low,
"{} not in {}".format(exp_str, pdf_titlepage_text_low))
@pytest.mark.tier(3)
@pytest.mark.sauce
@pytest.mark.meta(blockers=[1232434, 1272618])
def test_info(guides, soft_assert):
pytest.sel.force_navigate("about")
for link in guides:
l_a = getattr(about, link)
# l_icon also implicitly checks for the icon url == text url
l_icon = lambda: pytest.sel.element(
"../a[contains(@href, '{}')]/img".format(
pytest.sel.get_attribute(l_a, "href").rsplit("/", 1)[-1]
),
root=l_a
)
l_icon_a = lambda: pytest.sel.element("..", root=l_icon)
soft_assert(
pytest.sel.get_attribute(l_icon, "alt") == pytest.sel.get_attribute(l_icon_a, "title"),
"Icon alt attr should match icon title attr ({})".format(pytest.sel.text(l_a))
)
soft_assert(
pytest.sel.get_attribute(l_icon_a, "href") == pytest.sel.get_attribute(l_a, "href"),
"Icon url should match text url ({})".format(pytest.sel.text(l_a))
)
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream")
@pytest.mark.meta(blockers=[1272618])
def test_all_docs_present(guides, docs_info):
pytest.sel.force_navigate("about")
docs_list = list(docs_info)
for link in guides:
for doc in docs_list:
if doc.lower() in pytest.sel.text(getattr(about, link)).lower():
break
else:
continue
docs_list.remove(doc)
assert len(docs_list) == 0, "All documents should be available ({} are missing)".format(
", ".join(docs_list)
)
|
congrieb/yikBot
|
start.py
|
Python
|
mit
| 469
| 0.004264
|
import pyak
import yikbot
import time
# Latitude and Longitude of location whe
|
re bot should
|
be localized
yLocation = pyak.Location("42.270340", "-83.742224")
yb = yikbot.YikBot("yikBot", yLocation)
print "DEBUG: Registered yikBot with handle %s and id %s" % (yb.handle, yb.id)
print "DEBUG: Going to sleep, new yakkers must wait ~90 seconds before they can act"
time.sleep(90)
print "DEBUG: yikBot instance 90 seconds after initialization"
print vars(yb)
yb.boot()
|
viswimmer1/PythonGenerator
|
data/python_files/29179833/tests.py
|
Python
|
gpl-2.0
| 3,436
| 0.009604
|
import unittest
import warnings
import datetime
from django.core.urlresolvers import reverse
from django.test import TestCase
from incuna.utils import find
from articles.models import Article
class ArticleAccessTests(TestCase):
fixtures = ['articles_data.json',]
def test_article_index(self):
response = self.client.get(reverse('article_index'))
for article in Article.objects.active():
self.assertContains(response, article.title)
def test_article_detail(self):
response = self.client.get(reverse('article_detail', args=['test-article',]))
article = Article.objects.active().get(slug='test-article')
self.assertContains(response, article.title)
class ArticleActiveTests(TestCase):
fixtures = ['articles_data.json',]
def test_article_active(self):
response = self.client.get(reverse('article_index'))
inactive_articles = Article.objects.exclude(pk__in=[a[0] for a in Article.objects.active().values_list('pk')])
assert(inactive_articles)
for article in inactive_articles:
self.assertNotContains(response, article.title)
def test_article_views_404(self):
response = self.client.get(reverse('article_detail', args=['inactive-article',]))
self.assertEquals(response.status_code, 404)
# extension related tests
class ArticleDatePublisherTests(TestCase):
fixtures = ['articles_datepublisher_data.json',]
def setUp(self, *args, **kwargs):
if bool(find(lambda f: f.name == 'publication
|
_date', Article._meta.local_fields)
|
) \
and bool(find(lambda f: f.name == 'publication_end_date', Article._meta.local_fields)):
self.skip = False
else:
warnings.warn("Skipping datepublisher tests. Extension not registered")
self.skip = True
def test_publication_date(self):
if self.skip:
return
article = Article.objects.active().get(slug='publication-date-test')
article.publication_date = datetime.datetime.now() + datetime.timedelta(1)
article.save()
response = self.client.get(reverse('article_detail', args=['publication-date-test',]))
self.assertEquals(response.status_code, 404)
article.publication_date = datetime.datetime.now() + datetime.timedelta(-1)
article.publication_end_date = datetime.datetime.now() + datetime.timedelta(-1)
article.save()
response = self.client.get(reverse('article_detail', args=['publication-date-test',]))
self.assertEquals(response.status_code, 404)
class ArticleTagsTests(TestCase):
fixtures = ['articles_tags_data.json',]
def setUp(self, *args, **kwargs):
if bool(find(lambda f: f.name == 'tags', Article._meta.many_to_many)):
self.skip = False
else:
warnings.warn("Skipping tags tests. Extension not registered")
self.skip = True
def test_tags(self):
if self.skip:
return
article = Article.objects.active().get(slug='tag-test')
article.tags.add("test", "testing")
response = self.client.get(reverse('article_tagged_list', args=['test',]))
self.assertContains(response, article.title)
def test_tags_404(self):
response = self.client.get(reverse('article_tagged_list', args=['tag_does_not_exist',]))
self.assertEquals(response.status_code, 404)
|
graalvm/mx
|
mx.py
|
Python
|
gpl-2.0
| 772,837
| 0.003661
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTA
|
BILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
|
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
r"""
mx is a command line tool for managing the development of Java code organized as suites of projects.
"""
from __future__ import print_function
import sys
if sys.version_info < (2, 7):
major, minor, micro, _, _ = sys.version_info
raise SystemExit('mx requires python 2.7+, not {0}.{1}.{2}'.format(major, minor, micro))
from abc import ABCMeta, abstractmethod, abstractproperty
if __name__ == '__main__':
# Rename this module as 'mx' so it is not re-executed when imported by other modules.
sys.modules['mx'] = sys.modules.pop('__main__')
try:
import defusedxml #pylint: disable=unused-import
from defusedxml.ElementTree import parse as etreeParse
except ImportError:
from xml.etree.ElementTree import parse as etreeParse
import os, errno, time, subprocess, shlex, zipfile, signal, tempfile, platform
import textwrap
import socket
import tarfile, gzip
import hashlib
import itertools
from functools import cmp_to_key
# TODO use defusedexpat?
import xml.parsers.expat, xml.sax.saxutils, xml.dom.minidom
from xml.dom.minidom import parseString as minidomParseString
import shutil, re
import pipes
import difflib
import glob
import filecmp
import json
from collections import OrderedDict, namedtuple, deque
from datetime import datetime, timedelta
from threading import Thread
from argparse import ArgumentParser, PARSER, REMAINDER, Namespace, HelpFormatter, ArgumentTypeError, RawTextHelpFormatter, FileType
from os.path import join, basename, dirname, exists, lexists, isabs, expandvars, isdir, islink, normpath, realpath, relpath, splitext
from tempfile import mkdtemp, mkstemp
from io import BytesIO
import fnmatch
import operator
import calendar
import random
from stat import S_IWRITE
from mx_commands import MxCommands, MxCommand
from copy import copy, deepcopy
import posixpath
_mx_commands = MxCommands("mx")
# Temporary imports and (re)definitions while porting mx from Python 2 to Python 3
if sys.version_info[0] < 3:
filter = itertools.ifilter # pylint: disable=redefined-builtin,invalid-name
def input(prompt=None): # pylint: disable=redefined-builtin
return raw_input(prompt) # pylint: disable=undefined-variable
import __builtin__ as builtins
import urllib2 # pylint: disable=unused-import
_urllib_request = urllib2
_urllib_error = urllib2
del urllib2
import urlparse as _urllib_parse
def _decode(x):
return x
def _encode(x):
return x
_unicode = unicode # pylint: disable=undefined-variable
import multiprocessing
else:
import builtins # pylint: disable=unused-import,no-name-in-module
import urllib.request as _urllib_request # pylint: disable=unused-import,no-name-in-module
import urllib.error as _urllib_error # pylint: disable=unused-import,no-name-in-module
import urllib.parse as _urllib_parse # pylint: disable=unused-import,no-name-in-module
def _decode(x):
return x.decode()
def _encode(x):
return x.encode()
_unicode = str
import multiprocessing.dummy as multiprocessing
class _DummyProcess(multiprocessing.DummyProcess):
def run(self):
try:
super(_DummyProcess, self).run()
except:
self._exitcode = 1
raise
@property
def exitcode(self):
return getattr(self, '_exitcode', super(_DummyProcess, self).exitcode)
multiprocessing.Process = _DummyProcess
### ~~~~~~~~~~~~~ _private
def _hashFromUrl(url):
logvv('Retrieving SHA1 from {}'.format(url))
hashFile = _urllib_request.urlopen(url)
try:
return hashFile.read()
except _urllib_error.URLError as e:
_suggest_http_proxy_error(e)
abort('Error while retrieving sha1 {}: {}'.format(url, str(e)))
finally:
if hashFile:
hashFile.close()
def _merge_file_contents(input_files, output_file):
for file_name in input_files:
with open(file_name, 'r') as input_file:
shutil.copyfileobj(input_file, output_file)
output_file.flush()
def _make_absolute(path, prefix):
"""
If 'path' is not absolute prefix it with 'prefix'
"""
return join(prefix, path)
def _cache_dir():
return _cygpathW2U(get_env('MX_CACHE_DIR', join(dot_mx_dir(), 'cache')))
def _global_env_file():
return _cygpathW2U(get_env('MX_GLOBAL_ENV', join(dot_mx_dir(), 'env')))
def _get_path_in_cache(name, sha1, urls, ext=None, sources=False, oldPath=False):
"""
Gets the path an artifact has (or would have) in the download cache.
"""
assert sha1 != 'NOCHECK', 'artifact for ' + name + ' cannot be cached since its sha1 is NOCHECK'
if ext is None:
for url in urls:
# Use extension of first URL whose path component ends with a non-empty extension
o = _urllib_parse.urlparse(url)
if o.path == "/remotecontent" and o.query.startswith("filepath"):
path = o.query
else:
path = o.path
ext = get_file_extension(path)
if ext:
ext = '.' + ext
break
if not ext:
abort('Could not determine a file extension from URL(s):\n ' + '\n '.join(urls))
assert os.sep not in name, name + ' cannot contain ' + os.sep
assert os.pathsep not in name, name + ' cannot contain ' + os.pathsep
if oldPath:
return join(_cache_dir(), name + ('.sources' if sources else '') + '_' + sha1 + ext) # mx < 5.176.0
filename = _map_to_maven_dist_name(name) + ('.sources' if sources else '') + ext
return join(_cache_dir(), name + '_' + sha1 + ('.dir' if not ext else ''), filename)
def _urlopen(*args, **kwargs):
timeout_attempts = [0]
timeout_retries = kwargs.pop('timeout_retries', 3)
def on_timeout():
if timeout_attempts[0] <= timeout_retries:
timeout_attempts[0] += 1
kwargs['timeout'] = kwargs.get('timeout', 5) * 2
warn("urlopen() timed out! Retrying with timeout of {}s.".format(kwargs['timeout']))
return True
return False
error500_attempts = 0
error500_limit = 5
while True:
try:
return _urllib_request.urlopen(*args, **kwargs)
except (_urllib_error.HTTPError) as e:
if e.code == 500:
if error500_attempts < error500_limit:
error500_attempts += 1
url = '?' if len(args) == 0 else args[0]
warn("Retrying after error reading from " + url + ": " + str(e))
time.sleep(0.2)
continue
raise
except _urllib_error.URLError as e:
if isinstance(e.reason, socket.error):
if e.reason.errno == errno.E
|
Shir0kamii/mongofollow
|
mongofollow.py
|
Python
|
mit
| 1,092
| 0
|
"""Tail any mongodb collection"""
from time import sleep
from bson import ObjectId
__version__ = "1.1.0"
def fetch(collection, filter, last_oid_generation_time=None):
if last_oid_generation_time is not None:
last_oid = ObjectId.from_da
|
tetime(last_oid_generation_time)
filter.update({"_id": {"$gte": last_oid}})
return collection.find(filter)
def filter_duplicates(cursor, ids):
for doc in cursor:
if doc["_id"] not in ids:
yield doc
def mongofollow(collection, filter=None, sleep_du
|
ration=0.1):
if filter is None:
filter = {}
last_oid_generation_time = None
last_oids = set()
while True:
cursor = fetch(collection, filter, last_oid_generation_time)
for doc in filter_duplicates(cursor, last_oids):
oid = doc["_id"]
last_oid_generation_time = doc["_id"].generation_time
last_oids.add(oid)
yield doc
last_oids = {oid for oid in last_oids if
oid.generation_time == last_oid_generation_time}
sleep(sleep_duration)
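# Usage sketch, assuming a pymongo collection db.events:
#
#   for doc in mongofollow(db.events, {"level": "error"}):
#       print(doc["_id"], doc)
#
# The generator polls the collection forever, yielding each new document once.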
|
Healdb/altcointip
|
src/ctb/ctb_coin.py
|
Python
|
gpl-2.0
| 8,785
| 0.006602
|
"""
This file is part of ALTcointip.
ALTcointip is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ALTcointip is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ALTcointip. If not, see <http://www.gnu.org/licenses/>.
"""
import logging, re, time
from pifkoin.bitcoind import Bitcoind, BitcoindException
from httplib import CannotSendRequest
lg = logging.getLogger('cointipbot')
class CtbCoin(object):
"""
Coin class for cointip bot
"""
conn = None
conf = None
def __init__(self, _conf = None):
"""
Initialize CtbCoin with given parameters. _conf is a coin config dictionary defined in conf/coins.yml
"""
# verify _conf is a config dictionary
if not _conf or not hasattr(_conf, 'name') or not hasattr(_conf, 'config_file') or not hasattr(_conf, 'txfee'):
raise Exception("CtbCoin::__init__(): _conf is empty or invalid")
self.conf = _conf
# connect to coin daemon
try:
lg.debug("CtbCoin::__init__(): connecting to %s...", self.conf.name)
self.conn = Bitcoind(self.conf.config_file, rpcserver=self.conf.config_rpcserver)
except BitcoindException as e:
lg.error("CtbCoin::__init__(): error connecting to %s using %s: %s", self.conf.name, self.conf.config_file, e)
raise
lg.info("CtbCoin::__init__():: connected to %s", self.conf.name)
time.sleep(0.5)
# set transaction fee
lg.info("Setting tx fee of %f", self.conf.txfee)
self.conn.settxfee(self.conf.txfee)
def getbalance(self, _user = None, _minconf = None):
"""
Get user's tip or withdraw balance. _minconf is number of confirmations to use.
Returns (float) balance
"""
lg.debug("CtbCoin::getbalance(%s, %s)", _user, _minconf)
user = self.verify_user(_user=_user)
minconf = self.verify_minconf(_minconf=_minconf)
balance = float(0)
try:
balance = self.conn.getbalance(user, minconf)
except BitcoindException as e:
lg.error("CtbCoin.getbalance(): error getting %s (minconf=%s) balance for %s: %s", self.conf.name, minconf, user, e)
raise
time.sleep(0.5)
return float(balance)
def sendtouser(self, _userfrom = None, _userto = None, _amount = None, _minconf = 1):
"""
Transfer (move) coins to user
Returns (bool)
"""
lg.debug("CtbCoin::sendtouser(%s, %s, %.9f)", _userfrom, _userto, _amount)
userfrom = self.verify_user(_user=_userfrom)
userto = self.verify_user(_user=_userto)
amount = self.verify_amount(_amount=_amount)
# send request to coin daemon
try:
lg.info("CtbCoin::sendtouser(): moving %.9f %s from %s to %s", amount, self.conf.name, userfrom, userto)
result = self.conn.move(userfrom, userto, amount)
time.sleep(0.5)
except Exception as e:
lg.error("CtbCoin::sendtouser(): error moving %.9f %s from %s to %s: %s", amount, self.conf.name, userfrom, userto, e)
return False
time.sleep(0.5)
return True
def sendtoaddr(self, _userfrom = None, _addrto = None, _amount = None):
"""
Send coins to address
Returns (string) txid
"""
lg.debug("CtbCoin::sendtoaddr(%s, %s, %.9f)", _userfrom, _addrto, _amount)
userfrom = self.verify_user(_user=_userfrom)
addrto = self.verify_addr(_addr=_addrto)
amount = self.verify_amount(_amount=_amount)
minconf = self.verify_minconf(_minconf=self.conf.minconf.withdraw)
txid = ""
# send request to coin daemon
try:
lg.info("CtbCoin::sendtoaddr(): sending %.9f %s from %s to %s", amount, self.conf.name, userfrom, addrto)
# Unlock wallet, if applicable
if hasattr(self.conf, 'walletpassphrase'):
lg.debug("CtbCoin::sendtoaddr(): unlocking wallet...")
self.conn.walletpassphrase(self.conf.walletpassphrase, 1)
# Perform transaction
lg.debug("CtbCoin::sendtoaddr(): calling sendfrom()...")
txid = self.conn.sendfrom(userfrom, addrto, amount, minconf)
# Lock wallet, if applicable
if hasattr(self.conf, 'walletpassphrase'):
lg.debug("CtbCoin::sendtoaddr(): locking wallet...")
self.conn.walletlock()
except Exception as e:
lg.error("CtbCoin::sendtoaddr(): error sending %.9f %s from %s to %s: %s", amount, self.conf.name, userfrom, addrto, e)
raise
time.sleep(0.5)
return str(txid)
def validateaddr(self, _addr = None):
"""
Verify that _addr is a valid coin address
Returns (bool)
"""
lg.debug("CtbCoin::validateaddr(%s)", _addr)
addr = self.verify_addr(_addr=_addr)
addr_valid = self.conn.validateaddress(addr)
time.sleep(0.5)
if not addr_valid.has_key('isvalid') or not addr_valid['isvalid']:
lg.debug("CtbCoin::validateaddr(%s): not valid", addr)
return False
else:
lg.debug("CtbCoin::validateaddr(%s): valid", addr)
return True
def getnewaddr(self, _user = None):
"""
Generate a new address for _user
Returns (string) address
"""
user = self.verify_user(_user=_user)
addr = ""
counter = 0
while True:
try:
# Unlock wallet for keypoolrefill
if hasattr(self.conf, 'walletpassphrase'):
self.conn.walletpassphrase(self.conf.walletpassphrase, 1)
# Generate new address
addr = self.conn.getnewaddress(user)
# Lock wallet
if hasattr(self.conf, 'walletpassphrase'):
self.conn.walletlock()
if not addr:
raise Exception("CtbCoin::getnewaddr(%s): empty addr", user)
time.sleep(0.1)
return str(addr)
except BitcoindException as e:
lg.error("CtbCoin::getnewaddr(%s): BitcoindException: %s", user, e)
raise
except CannotSendRequest as e:
if counter < 3:
lg.warning("CtbCoin::getnewaddr(%s): CannotSendRequest, retrying")
counter += 1
time.sleep(10)
continue
else:
raise
except Exception as e:
if str(e) == "timed out" and counter < 3:
lg.warning("CtbCoin::getnewaddr(%s): timed out, retrying")
counter += 1
time.sleep(10)
continue
else:
lg.error("CtbCoin::getnewaddr(%s): Exception: %s", user, e)
raise
def verify_user(self, _user = None):
"""
Verify and return a username
"""
if not _user or not type(_user) in [str, unicode
|
]:
raise Exception("CtbCoin::verify_user(): _user wrong type (%s) or empty (%s)", type(_user), _user)
return str(_user.lower())
def verify_addr(self, _addr = None):
"""
Verify and return coin address
"""
if not _addr or not type(_addr) in [str, unicode]:
raise Exception("CtbCoin::verify_addr(): _addr wrong type (%s) or empty (%s)", type(_ad
|
dr),_addr)
return re.escape(str(_addr))
def verify_amount(self, _amount = None):
"""
Verify and return amount
|
xxxIsaacPeralxxx/anim-studio-tools
|
grenade/tests/unit/test_translators/test_sequence.py
|
Python
|
gpl-3.0
| 3,110
| 0.018328
|
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios), its
# affiliates and/or its licensors.
#
from ..helpers.translators import verify_translate
from grenade.translators.sequence import SequenceTranslator
from probe.fixtures.mock_shotgun import MockShotgun
class TestSequenceTranslator(object):
"""
Nose unit test suite for Grenade SequenceTranslator.
.. versionadded:: v00_04_00
"""
def setup(self):
"""
Set up the unit test suite.
.. versionadded:: v00_04_00
"""
self.shotgun_data = [{'id':1, 'type':'Project', 'sg_short_name':'hf2'},
{'id':2, 'type':'Asset'},
{'id':3, 'type':'Note'},
{'id':4, 'type':'Scene'},
{'id':5, 'type':'Shot'},
{'id':6, 'type':'Task'}]
self.session = MockShotgun(schema=[], data=self.shotgun_data)
self.translator = SequenceTranslator(self.session)
def teardown(self):
"""
Tear down the unit test suite.
.. versionadded:: v00_04_00
"
|
""
pass
def test_translate(self):
"""
Test that the translator converts the supplied test data as expected.
.. versionadded:: v00_04_00
"""
verify_translate(self.translator, 'project', {'id':1, 'type':'Project'}, 'hf2', 'mm4')
verify_translate(self.translator, 'assets', [{'id':2, 'type':'Asset'}], [{'Asset':[['id', 'is', 2]]}], [{'Asset':[['id', 'is', 3]]}])
verify_translate(self.translator, 'notes', [{'id':3, 'type':'Note'}], [{'Note':[['id', 'is', 3]]}], [{'Note':[['id', 'is', 4]]}])
verify_translate(self.translator, 'open_notes', [{'id':3, 'type':'Note'}], [{'Note':[['id', 'is', 3]]}], [{'Note':[['id', 'is', 4]]}])
verify_translate(self.translator, 'scenes', [{'id':4, 'type':'Scene'}], [{'Scene':[['id', 'is', 4]]}], [{'Scene':[['id', 'is', 5]]}])
verify_translate(self.translator, 'shots', [{'id':5, 'type':'Shot'}], [{'Shot':[['id', 'is', 5]]}], [{'Shot':[['id', 'is', 6]]}])
verify_translate(self.translator, 'tasks', [{'id':6, 'type':'Task'}], [{'Task':[['id', 'is', 6]]}], [{'Task':[['id', 'is', 7]]}])
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
|
zadgroup/edx-platform
|
common/djangoapps/embargo/forms.py
|
Python
|
agpl-3.0
| 3,061
| 0.000327
|
"""
Defines forms for providing validation of embargo admin details.
"""
from django import forms
from django.utils.translation import ugettext as _
import ipaddr
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from embargo.models import IPFilter, RestrictedCourse
class RestrictedCourseForm(forms.ModelForm):
"""Validate course keys for the RestrictedCourse model.
The default behavior in Django admin is to:
* Save course keys for courses that do not exist.
* Return a 500 response if the course key format is invalid.
Using this form ensures that we display a user-friendly
error message instead.
"""
class Meta: # pylint: disable=missing-docstring
model = RestrictedCourse
def clean_course_key(self):
"""Validate the course key.
Checks that the key format is valid and that
the course exists. If not, displays an error message.
Arguments:
field_name (str): The name of the field to validate.
Returns:
CourseKey
"""
cleaned_id = self.cleaned_data['course_key']
error_msg = _('COURSE NOT FOUND. Please check that the course ID is valid.')
try:
course_key = CourseKey.from_string(cleaned_id)
except InvalidKeyError:
raise forms.ValidationError(error_msg)
if not modulestore().has_course(course_key):
raise forms.ValidationError(error_msg)
return course_key
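    # For reference (a hedged note, not from the original module): a well-formed
    # id such as 'course-v1:edX+DemoX+Demo_Course' parses via CourseKey.from_string,
    # while a malformed id raises InvalidKeyError, which clean_course_key surfaces
    # as a user-facing ValidationError.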
class IPFilterForm(forms.ModelForm):  # pylint: disable=incomplete-protocol
"""Form validating entry of IP addresses"""
class Meta: # pylint: disable=missing-docstring
model = IPFilter
def _is_valid_ip(self, address):
"""Whether or not address is a valid ipv4 address or ipv6 address"""
try:
            # Is this a valid ip address?
ipaddr.IPNetwork(address)
except ValueError:
return False
return True
def _valid_ip_addresses(self, addresses):
"""
Checks if a csv string of IP addresses contains valid values.
If not, raises a ValidationError.
"""
if addresses == '':
return ''
error_addresses = []
for addr in addresses.split(','):
address = addr.strip()
if not self._is_valid_ip(address):
error_addresses.append(address)
if error_addresses:
msg = 'Invalid IP Address(es): {0}'.format(error_addresses)
msg += ' Please fix the error(s) and try again.'
raise forms.ValidationError(msg)
return addresses
def clean_whitelist(self):
"""Validates the whitelist"""
whitelist = self.cleaned_data["whitelist"]
return self._valid_ip_addresses(whitelist)
def clean_blacklist(self):
"""Validates the blacklist"""
blacklist = self.cleaned_data["blacklist"]
return self._valid_ip_addresses(blacklist)
|
mrjmad/gnu_linux_mag_drf
|
hall_of_cards/cardsgame/migrations/0003_card_modified.py
|
Python
|
mit
| 465
| 0.002151
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
        ('cardsgame', '0002_card_mana_cost'),
]
operations = [
migrations.AddField(
model_name='card',
            name='modified',
field=models.PositiveIntegerField(null=True, verbose_name='Modified'),
preserve_default=True,
),
]
|
Buggaarde/youtube-dl
|
youtube_dl/extractor/yahoo.py
|
Python
|
unlicense
| 13,867
| 0.002323
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
clean_html,
unescapeHTML,
ExtractorError,
int_or_none,
mimetype2ext,
)
from .nbc import NBCSportsVPlayerIE
class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+)?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'info_dict': {
'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
'duration': 6863,
},
},
{
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
'duration': 151,
},
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
'title': "Yahoo Saves 'Community'",
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
'duration': 170,
}
},
{
'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/敢問市長-黃秀霜批賴清德-非常高傲-033009720.html',
'md5': '3a09cf59349cfaddae1797acc3c087fc',
'info_dict': {
'id': 'cac903b3-fcf4-3c14-b632-643ab541712f',
'ext': 'mp4',
'title': '敢問市長/黃秀霜批賴清德「非常高傲」',
'description': '直言台南沒捷運 交通居五都之末',
'duration': 396,
}
},
{
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
'md5': '0b51660361f0e27c9789e7037ef76f4b',
'info_dict': {
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
'ext': 'mp4',
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
'description': 'md5:f66c890e1490f4910a9953c941dee944',
'duration': 97,
}
},
{
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
'md5': '57e06440778b1828a6079d2f744212c4',
'info_dict': {
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
'ext': 'mp4',
'title': 'Program that makes hockey more affordable not offered in Manitoba',
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
'duration': 121,
}
}, {
        'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html',
'md5': '226a895aae7e21b0129e2a2006fe9690',
'info_dict': {
            'id': 'e624c4bc-3389-34de-9dfc-025f74943409',
'ext': 'mp4',
'title': '\'The Interview\' TV Spot: War',
'description': 'The Interview',
'duration': 30,
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
'duration': 128,
}
}, {
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
'md5': 'd9a083ccf1379127bf25699d67e4791b',
'info_dict': {
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
'ext': 'mp4',
'title': 'Connect the Dots: Dark Side of Virgo',
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
'duration': 201,
}
}, {
'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
'md5': '989396ae73d20c6f057746fb226aa215',
'info_dict': {
'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
'ext': 'mp4',
'title': '\'True Story\' Trailer',
'description': 'True Story',
'duration': 150,
},
}, {
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
'only_matching': True,
}, {
'note': 'NBC Sports embeds',
'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
'info_dict': {
'id': '9CsDKds0kvHI',
'ext': 'flv',
'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
}
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
}, {
# Query result is embedded in webpage, but explicit request to video API fails with geo restriction
'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
'info_dict': {
'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
'ext': 'mp4',
'title': 'Communitary - Community Episode 1: Ladders',
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id') or self._match_id(url)
page_id = mobj.group('id')
url = mobj.group('url')
host = mobj.group('host')
webpage = self._download_webpage(url, display_id)
# Look for iframed media first
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
if iframe_m:
iframepage = self._download_webpage(
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
items_json = self._search_regex(
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
if items_json:
items = json.loads(items_json)
video_id = items[0]['id']
return self._get_info(video_id, display_id, webpage)
# Look for NBCSports iframes
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Query result is often embedded in webpage as JSON. Sometimes explicit requests
# to video API results in a failure with geo restriction reason therefore using
# embedded query result when present sounds reasonable.
config_json = self._search_regex(
r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
|
junbochen/pylearn2
|
pylearn2/testing/datasets.py
|
Python
|
bsd-3-clause
| 2,822
| 0
|
""" Simple datasets to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from theano.compat.six.moves import xrange
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class ArangeDataset(DenseDesignMatrix):
"""
A dataset where example i is just the number i. Makes it easy to track
which sets of examples are visited.
Parameters
----------
num_examples : WRITEME
To see the other parameters, look at the DenseDesignMatrix class
documentation
"""
def __init__(self, num_examples, *args, **kwargs):
X = np.zeros((num_examples, 1))
X[:, 0] = np.arange(num_examples)
super(ArangeDataset, self).__init__(X, *args, **kwargs)
def random_dense_design_matrix(rng, num_examples, dim, num_classes):
"""
Creates a random dense design matrix that has class labels.
Parameters
----------
rng : numpy.random.RandomState
The random number generator used to generate the dataset.
num_examples : int
The number of examples to create.
dim : int
The number of features in each example.
num_classes : int
The number of classes to assign the examples to.
0 indicates that no class labels will be generated.
"""
X = rng.randn(num_examples, dim)
if num_classes:
Y = rng.randint(0, num_classes, (num_examples, 1))
y_labels = num_classes
else:
Y = None
y_labels = None
return DenseDesignMatrix(X=X, y=Y, y_labels=y_labels)
def random_one_hot_dense_design_matrix(rng, num_examples, dim, num_classes):
X = rng.randn(num_examples, dim)
idx = rng.randint(0, num_classes, (num_examples, ))
Y = np.zeros((num_examples, num_classes))
for i in xrange(num_examples):
Y[i, idx[i]] = 1
return DenseDesignMatrix(X=X, y=Y)
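# A hedged usage sketch (not part of the original module):
#
#   rng = np.random.RandomState(0)
#   ds = random_one_hot_dense_design_matrix(rng, num_examples=10, dim=5, num_classes=3)
#   # ds.X has shape (10, 5); ds.y has shape (10, 3) with exactly one 1 per row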
def random_one_hot_topological_dense_design_matrix(rng,
num_examples,
shape,
channels,
axes,
num_classes):
dims = {'b': num_examples,
'c': channels}
for i, dim in enumerate(shape):
dims[i] = dim
shape = [dims[axis] for axis in axes]
X = rng.randn(*shape)
idx = rng.randint(0, num_classes, (num_examples,))
Y = np.zeros((num_examples, num_classes))
for i in xrange(num_examples):
Y[i, idx[i]] = 1
return DenseDesignMatrix(topo_view=X, axes=axes, y=Y)
|
lemenkov/sippy
|
sippy/SipReplaces.py
|
Python
|
gpl-2.0
| 2,867
| 0.011161
|
# Copyright (c) 2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from SipGenericHF import SipGenericHF
class SipReplaces(SipGenericHF):
hf_names = ('replaces',)
call_id = None
from_tag = None
to_tag = None
early_only = False
params = None
def __init__(self, body = None, call_id = None, from_tag = None, to_tag = None, \
                 early_only = False, params = None):
SipGenericHF.__init__(self, body)
if body != None:
return
self.parsed = True
self.params = []
self.call_id = call_id
self.from_tag = from_tag
self.to_tag = to_tag
self.early_only = early_only
if params != None:
self.params = params[:]
def parse(self):
self.parsed = True
        self.params = []
params = self.body.split(';')
self.call_id = params.pop(0)
for param in params:
if param.startswith('from-tag='):
self.from_tag = param[len('from-tag='):]
elif param.startswith('to-tag='):
self.to_tag = param[len('to-tag='):]
elif param == 'early-only':
self.early_only = True
else:
self.params.append(param)
def __str__(self):
if not self.parsed:
return self.body
res = '%s;from-tag=%s;to-tag=%s' % (self.call_id, self.from_tag, self.to_tag)
if self.early_only:
res += ';early-only'
for param in self.params:
res += ';' + param
return res
def getCopy(self):
if not self.parsed:
return SipReplaces(self.body)
return SipReplaces(call_id = self.call_id, from_tag = self.from_tag, to_tag = self.to_tag, \
early_only = self.early_only, params = self.params)
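# A hedged round-trip sketch (the header value below is illustrative only):
#
#   r = SipReplaces('12345@host;from-tag=abc;to-tag=def;early-only')
#   r.parse()
#   str(r)   # -> '12345@host;from-tag=abc;to-tag=def;early-only'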
|
nathanielvarona/airflow
|
airflow/contrib/sensors/sagemaker_tuning_sensor.py
|
Python
|
apache-2.0
| 1,201
| 0.001665
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.amazon.aws.sensors.sagemaker_tuning`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.sensors.sagemaker_tuning import SageMakerTuningSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.pro
|
viders.amazon.aws.sensors.sagemaker_tuning`.",
DeprecationWarning,
stacklevel=2,
)
|
razor-x/scipy-data_fitting
|
examples/wave.py
|
Python
|
mit
| 1,252
| 0.002417
|
import os
import sympy
from example_helper import save_example_fit
from scipy_data_fitting import Data, Model, Fit
#
# Example of a fit to a sine wave with error bars.
#
name = 'wave'
# Load data from a csv file.
data = Data(name)
data.path = os.path.join('examples','data', 'wave.csv')
data.genfromtxt_args['skip_header'] = 1
data.error = (0.1, 0.05)
# Create a wave model.
model = Model(name)
model.add_symbols('t', 'A', 'ω', 'δ')
A, t, ω, δ = model.get_symbols('A', 't', 'ω', 'δ')
model.expressions['wave'] = A * sympy.functions.sin(ω * t + δ)
model.expressions['frequency'] = ω / (2 * sympy.pi)
# Create the fit using the data and model.
fit = Fit(name, data=data, model=model)
fit.expression = 'wave'
fit.independent = {'symbol': 't', 'name': 'Time', 'units': 's'}
fit.dependent = {'name': 'Voltage', 'prefix': 'kilo', 'units': 'kV'}
fit.parameters = [
{'symbol': 'A', 'value': 0.3, 'prefix': 'kilo', 'units': 'kV'},
    {'symbol': 'ω', 'guess': 1, 'units': 'Hz'},
{'symbol': 'δ', 'guess': 1},
]
fit.quantities = [
{'expression': 'frequency', 'name': 'Frequency', 'units': 'Hz'},
{'expression': 1 / model.expressions['frequency'] , 'name': 'Period', 'units': 's'},
]
# Save the fit to disk.
save_example_fit(fit)
|
guillemborrell/gtable
|
tests/test_records.py
|
Python
|
bsd-3-clause
| 1,282
| 0.00078
|
from gtable import Table
import numpy as np
def test_records():
t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
t.stack(t1)
records = [r for r in t.records()]
assert records == [
{'a': 1, 'b': 4},
{'a': 2, 'b': 5},
{'a': 3, 'b': 6},
{'a': 1, 'd': 4},
{'a': 2, 'd': 5},
{'a': 3, 'd': 6}]
records = [r for r in t.records(fill=True)]
assert records == [
{'a': 1, 'b': 4, 'd': np.nan},
        {'a': 2, 'b': 5, 'd': np.nan},
{'a': 3, 'b': 6, 'd': np.nan},
{'a': 1, 'b': np.nan, 'd': 4},
{'a': 2, 'b': np.nan, 'd': 5},
{'a': 3, 'b': np.nan, 'd': 6}]
def test_first_record():
t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
t.stack(t1)
assert t.first_record() == {'a': 1, 'b': 4}
assert t.first_record(fill=True) == {'a': 1, 'b': 4, 'd': np.nan}
def test_last_record():
t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
t.stack(t1)
assert t.last_record() == {'a': 1, 'd': 4}
assert t.last_record(fill=True) == {'a': 1, 'd': 4, 'b': np.nan}
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/distributions/python/ops/bijectors/affine_linear_operator.py
|
Python
|
bsd-2-clause
| 1,182
| 0.000846
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinear
|
Operator bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["AffineLinearOperator"]
remove_undocumented(__name__, _allowed_symbols)
|
botify-labs/moto
|
moto/iam/aws_managed_policies.py
|
Python
|
apache-2.0
| 495,649
| 0.000176
|
# Imported via `make aws_managed_policies`
aws_managed_policies_data = """
{
"AWSAccountActivityAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSAccountActivityAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-portal:ViewBilling"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQRYCWMFX5J3E333K",
"PolicyName": "AWSAccountActivityAccess",
"UpdateDate": "2015-02-06T18:41:18+00:00",
"VersionId": "v1"
},
"AWSAccountUsageReportAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSAccountUsageReportAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-portal:ViewUsage"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJLIB4VSBVO47ZSBB6",
"PolicyName": "AWSAccountUsageReportAccess",
"UpdateDate": "2015-02-06T18:41:19+00:00",
"VersionId": "v1"
},
"AWSAgentlessDiscoveryService": {
"Arn": "arn:aws:iam::aws:policy/AWSAgentlessDiscoveryService",
"AttachmentCount": 0,
"CreateDate": "2016-08-02T01:35:11+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"awsconnector:RegisterConnector",
"awsconnector:GetConnectorHealth"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:GetUser",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:GetObject",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::connector-platform-upgrade-info/*",
"arn:aws:s3:::connector-platform-upgrade-info",
"arn:aws:s3:::connector-platform-upgrade-bundles/*",
"arn:aws:s3:::connector-platform-upgrade-bundles",
"arn:aws:s3:::connector-platform-release-notes/*",
"arn:aws:s3:::connector-platform-release-notes",
"arn:aws:s3:::prod.agentless.discovery.connector.upgrade/*",
"arn:aws:s3:::prod.agentless.discovery.connector.upgrade"
]
},
{
"Action": [
"s3:PutObject",
"s3:PutObjectAcl"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::import-to-ec2-connector-debug-logs/*"
]
},
{
"Action": [
"SNS:Publish"
],
"Effect": "Allow",
"Resource": "arn:aws:sns:*:*:metrics-sns-topic-f
|
or-*"
},
{
"Action": [
"Discovery:*"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "Discovery"
},
{
"Action": [
"arsenal:RegisterOnPremisesAgent"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "arsenal"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIA3DIL7BYQ35ISM4K",
"PolicyName": "AWSAgentlessDiscoveryService",
"UpdateDate": "2016-08-02T01:35:11+00:00",
"VersionId": "v1"
},
"AWSApplicationDiscoveryAgentAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryAgentAccess",
"AttachmentCount": 0,
"CreateDate": "2016-05-11T21:38:47+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"arsenal:RegisterOnPremisesAgent"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAICZIOVAGC6JPF3WHC",
"PolicyName": "AWSApplicationDiscoveryAgentAccess",
"UpdateDate": "2016-05-11T21:38:47+00:00",
"VersionId": "v1"
},
"AWSApplicationDiscoveryServiceFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryServiceFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-05-11T21:30:50+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "discovery:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJBNJEA6ZXM2SBOPDU",
"PolicyName": "AWSApplicationDiscoveryServiceFullAccess",
"UpdateDate": "2016-05-11T21:30:50+00:00",
"VersionId": "v1"
},
"AWSBatchFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSBatchFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-13T00:38:59+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"batch:*",
"cloudwatch:GetMetricStatistics",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeKeyPairs",
"ecs:DescribeClusters",
"ecs:Describe*",
"ecs:List*",
"logs:Describe*",
"logs:Get*",
"logs:TestMetricFilter",
"logs:FilterLogEvents",
"iam:ListInstanceProfiles",
"iam:ListRoles"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/AWSBatchServiceRole",
"arn:aws:iam::*:role/ecsInstanceRole",
"arn:aws:iam::*:role/iaws-ec2-spot-fleet-role",
"arn:aws:iam::*:role/aws-ec2-spot-fleet-role",
"arn:aws:iam::*:role/AWSBatchJobRole*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ7K2KIWB3HZVK3CUO",
"PolicyNa
|
dr0pz0ne/sibble
|
lib/ansible/parsing/splitter.py
|
Python
|
gpl-3.0
| 10,657
| 0.002721
|
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansible.errors import AnsibleParserError
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
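# e.g. (illustrative, not from the original source): a token containing the two
# characters backslash+t becomes a real tab character:
#   _decode_escapes(u'col1\\tcol2') -> u'col1\tcol2'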
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
### FIXME: args should already be a unicode string
from ansible.utils.unicode import to_unicode
args = to_unicode(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleParsingError("error parsing argument string, try quoting the entire line.")
else:
raise
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
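# A hedged behavioral sketch (not part of the original source):
#
#   parse_kv(u'a=b c="foo bar"')
#   # -> {u'a': u'b', u'c': u'foo bar'}
#   parse_kv(u'echo hello creates=/tmp/done', check_raw=True)
#   # -> {u'creates': u'/tmp/done', u'_raw_params': u'echo hello'}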
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
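# e.g. (illustrative): _get_quote_state(u'say "hello', None) returns u'"' for the
# unterminated double quote, while _get_quote_state(u'say "hi"', None) returns None.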
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
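# e.g. (illustrative): _count_jinja2_blocks(u'{{ foo', 0, u'{{', u'}}') -> 1;
# a balanced token such as u'{{ foo }}' leaves the depth unchanged.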
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
    Basically this is a variation of shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx,item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx,token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside
|
wohllab/milkyway_proteomics
|
galaxy_milkyway_files/tools/wohl-proteomics/MSGFcrux/percolator_output_modifier_fractionated.py
|
Python
|
mit
| 10,590
| 0.011143
|
#!/usr/bin/python
import sys, copy, tarfile
""" - Splits Percolator output into decoy and target files.
- Extracts unique PSM/peptides/proteins out of a Percolator output file.
- Merges Percolator output files
Usage: python percolator_output_modifier.py command psm/peptides/proteins [score] infile outfile [outfile2]
"""
try:
from lxml import etree
except Exception:
sys.stderr.write('Failed to import lxml module.')
def readPercout(fname):
doc = None
try:
doc = etree.parse(fname)
except Exception:
sys.stderr.write('Could not parse XML provided in %s or error reading file. \n' % (fname))
return doc
def splitTargetDecoy(doc, args, ns):
""" Splits XML into target/decoy/notspecified elements.
then calls refill function to create a new XML ElementTree with results.
Usage: splitTargetDecoy('test.xml', ['psms', 'peptides', 'proteins'])
"""
for arg in args:
assert arg in ['psms', 'peptides', 'proteins'], Exception('Filtering argument must be one or more of: psms, peptides, proteins.')
output_elts = {
'psms' : doc.xpath('//xmlns:psms', namespaces=ns),
'peptides' : doc.xpath('//xmlns:peptides', namespaces=ns),
'proteins' : doc.xpath('//xmlns:proteins', namespaces=ns)
}
def do_split(name, tree, ns):
""" Does the actual target/decoy splitting. Returns lists of classified elements."""
# check if not passed an empty tree
if len(tree) == 0:
sys.stdout.write('Cannot output %s since it is not in the provided input XML. Continuing.\n' % name)
return [etree.Element('none')]*3
# check if decoy attribute is in tree at all
if tree[0].xpath('//@xmlns:decoy', namespaces=ns) == []:
sys.stderr.write('Percolator output has no specified target/decoy spectra, unable to split.\n')
return [etree.Element('none')]*3
# filter
ttree = []
dtree = []
discarded = []
# ttree = etree.Element(name, ns)
# dtree = etree.Element(name, ns)
# discarded = etree.Element(name, ns)
searchElt = name[:-1]
for element in tree[0].xpath('xmlns:%s' % searchElt, namespaces=ns):
if element.xpath('@xmlns:decoy', namespaces=ns) == ['true']:
dtree.append(element)
elif element.xpath('@xmlns:decoy', namespaces=ns) == ['false']:
ttree.append(element)
else:
discarded.append(element)
return ttree, dtree, discarded
# call splitfunction, then check output
target,decoy,discarded = {},{},{}
for arg in args:
target[arg], decoy[arg], discarded[arg] = do_split(arg, output_elts[arg], ns)
for arg in args:
if [target[arg], decoy[arg], discarded[arg]] != 3*['none']:
break
sys.stderr.write('No data matching %s has been found in Percolator output \
file.\n' % [x for x in args])
# clean parsed elementtree and make new ones
target = refillTree(doc, output_elts.keys(), target, ns)
decoy = refillTree(doc, output_elts.keys(), decoy, ns)
discarded = refillTree(doc, output_elts.keys(), discarded, ns)
# sys.stdout.write('%s\n' % etree.tostring(target))
return target, decoy #, discarded?
def refillTree(doc, oldelements, newelements, ns):
"""Takes an ElementTree, takes out specified oldelements (by tag). Replaces
with list of newelements. Returns a new ElementTree.
"""
# remove specified old elements
newdoc = copy.deepcopy(doc)
root = newdoc.getroot()
elements_toremove = []
for el in oldelements:
removes = root.xpath('xmlns:%s' % el, namespaces=ns) # element 'psms' or 'peptides'
for remove in removes:
children = remove.getchildren()
for el in children:
remove.remove(el)
# put in new ones
for node in root.getchildren():
try:
for child in newelements[node.tag[node.tag.index('}')+1:]]:
node.append(child)
except: # PSMs, peptides should be in this order and not in newelements-dict's arbitrary order.
pass
return etree.ElementTree(root)
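# A hedged sketch of the contract (not from the original script): given a parsed
# Percolator document whose root holds <psms> and <peptides> containers,
#   refillTree(doc, ['psms', 'peptides'], {'psms': kept_psms, 'peptides': []}, ns)
# returns a new ElementTree whose containers hold only the supplied elements.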
def filterUniques(tar, to_filter, score, ns):
""" Filters unique psms/peptides/proteins from (multiple) Percolator output XML files.
Takes a tarred set of XML files, a filtering query (e.g. psms), a score to
filter on and a namespace.
Outputs an ElementTree.
"""
for tf in to_filter:
assert tf in ['psms', 'peptides', 'proteins'], Exception('filterUnique function needs a specified to_filter list of psms, peptides, proteins.')
assert score in ['q','pep','p'], Exception('filterUnique function needs a specified score to filter on of q, pep or p.')
try:
with tarfile.open(tar, 'r') as f:
members = f.getmembers()
f.extractall()
except:
sys.stderr.write('Could not extract Percolator files from dataset: %s \n' % tar)
return 1
docs = []
for fn in members:
docs.append(etree.parse(fn.name))
# lookup dicts
scores = {'q':'q_value', 'pep':'pep', 'p':'p_value'}
filt_el_dict = {'psms':'xmlns:peptide_seq', 'peptides':'@xmlns:peptide_id' }
# result dict
filtered = {'psms':{}, 'peptides':{}, 'proteins':{} }
for doc in docs:
for filt_el in to_filter:
feattree = doc.xpath('//xmlns:%s' % filt_el, namespaces=ns)
if feattree == []:
sys.stdout.write('%s not found in (one of the) Percolator output documents. Continuing...\n' % filt_el)
continue
for feat in feattree[0]:
# It's actually faster to loop through the feat's children,
# but this is 2-line code and still readable.
featscore = float(feat.xpath('xmlns:%s' % scores[score], namespaces=ns)[0].text)
seq = feat.xpath('%s' % filt_el_dict[filt_el], namespaces=ns)
try: # psm seqs are parsed here
seq = seq[0].attrib['seq']
except Exception: ## caught when parsing peptide seqs (different format)
seq = str(seq[0])
if seq not in filtered[filt_el]:
filtered[filt_el][seq] = feat
elif featscore < filtered[filt_el][seq]:
#FIXME now it only works for LOWER than scores (eg q-vals, pep, but not for scores that are better when higher)
filtered[filt_el][seq] = feat
# make trees from filtered dicts
for filt_el in filtered:
outlist = []
for feat in filtered[filt_el].values():
outlist.append(feat)
filtered[filt_el] = outlist
# node = etree.Element(filt_el)
# node.extend(filtered[filt_el].values())
# filtered[filt_el] = node
outdoc = refillTree(docs[0], ['psms', 'peptides', 'proteins'], filtered, ns)
return outdoc
def mergeXML(datasets, ns):
tomerge = ['psms', 'peptides', 'proteins']
tomerge_el = ['psms', 'peptides', 'proteins']
outdoc = readPercout(datasets[0])
root = outdoc.getroot()
for el in tomerge:
tomerge_el.extend(root.xpath('//xmlns:%s' % el, namespaces=ns))
for fn in datasets[1:]:
doc = readPercout(fn)
for tm, tme in zip(tomerge, tomerge_el):
print tm,tme,"both"
for el in doc.xpath('//xmlns:%s' % tm, namespaces=ns):
print el,"el"
tme.append(el)
print tme,"tme"
print tomerge_el,"to merge el"
return outdoc
def untar(tar):
try:
with tarfile.open(tar, 'r') as f:
members = f.getmembers()
f.extractall()
return [x.name for x in members]
except:
sys.stderr.write('Could not extract Percolator files from dataset: %s \n' % tar)
return 1
def writeXML(*args):
"""Takes a filename and _ElementTree arguments in tuples ((fn, tree), (fn2,tree2),...)
|
onecloud/neutron
|
neutron/plugins/cisco/common/cisco_constants.py
|
Python
|
apache-2.0
| 2,838
| 0.000352
|
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# Attachment attributes
INSTANCE_ID = 'instance_id'
TENANT_ID = 'tenant_id'
TENANT_NAME = 'tenant_name'
HOST_NAME = 'host_name'
# Network attributes
NET_ID = 'id'
NET_NAME = 'name'
NET_VLAN_ID = 'vlan_id'
NET_VLAN_NAME = 'vlan_name'
NET_PORTS = 'ports'
CREDENTIAL_ID = 'credential_id'
CREDENTIAL_NAME = 'credential_name'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
CREDENTIAL_TYPE = 'type'
MASKED_PASSWORD = '********'
USERNAME = 'username'
PASSWORD = 'password'
LOGGER_COMPONENT_NAME = "cisco_plugin"
NEXUS_PLUGIN = 'nexus_plugin'
VSWITCH_PLUGIN = 'vswitch_plugin'
DEVICE_IP = 'device_ip'
NETWORK_ADMIN = 'network_admin'
NETWORK = 'network'
PORT = 'port'
BASE_PLUGIN_REF = 'base_plugin_ref'
CONTEXT = 'context'
SUBNET = 'subnet'
#### N1Kv CONSTANTS
# Special vlan_id value in n1kv_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Maximum VXLAN range configurable for one network profile.
MAX_VXLAN_RANGE = 1000000
# Values for network_type
NETWORK_TYPE_FLAT = 'flat'
NETWORK_TYPE_VLAN = 'vlan'
NETWORK_TYPE_VXLAN = 'vxlan'
NETWORK_TYPE_LOCAL = 'local'
NETWORK_TYPE_NONE = 'none'
NETWORK_TYPE_TRUNK = 'trunk'
NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment'
# Values for network sub_type
NETWORK_TYPE_OVERLAY = 'overlay'
NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan'
NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN
NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY
# Prefix for VM Network name
VM_NETWORK_NAME_PREFIX = 'vmn_'
DEFAULT_HTTP_TIMEOUT = 15
SET = 'set'
INSTANCE = 'instance'
PROPERTIES = 'properties'
NAME = 'name'
ID = 'id'
POLICY = 'policy'
TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET'
ENCAPSULATIONS = 'encapsulations'
STATE = 'state'
ONLINE = 'online'
MAPPINGS = 'mappings'
MAPPING = 'mapping'
SEGMENTS = 'segments'
SEGMENT = 'segment'
BRIDGE_DOMAIN_SUFFIX = '_bd'
LOGICAL_NETWORK_SUFFIX = '_log_net'
ENCAPSULATION_PROFILE_SUFFIX = '_profile'
UUID_LENGTH = 36
# Nexus vlan and vxlan segment range
NEXUS_VLAN_RESERVED_MIN = 3968
NEXUS_VLAN_RESERVED_MAX = 4047
NEXUS_VXLAN_MIN = 4096
NEXUS_VXLAN_MAX = 16000000
|
hasgeek/funnel
|
migrations/versions/e679554261b2_main_label_index.py
|
Python
|
agpl-3.0
| 452
| 0.004425
|
"""Main label index.
Revision ID: e679554261b2
Revises: e2be4ab896d3
Create Date: 2019-05-09 18:55:24.472216
"""
# revision identifiers, used by Alembic.
revision = 'e679554261b2'
down_revision = 'e2be4ab896d3'
from alembic import op
def upgrade():
op.create_index(
op.f('ix_label_main_label_id'), 'label', ['main_label_id'], unique=False
)
def downgrade():
    op.drop_index(op.f('ix_label_main_label_id'), table_name='label')
|
luizdepra/sketch_n_hit
|
app/assets.py
|
Python
|
mit
| 984
| 0.003049
|
from flask.ext.assets import Bundle
from . import wa
js_libs = Bundle('js/libs/jquery.min.js',
'js/libs/bootstrap.min.js',
'js/libs/lodash.min.js',
#filters='jsmin',
output='js/libs.js')
js_board = Bundle('js/libs/drawingboard.min.js',
#filters='jsmin',
output='js/board.js')
js_main = Bundle('js/main.js',
#filters='jsmin',
output='js/snh.js')
css_main = Bundle('css/bootstrap.min.css',
'css/font-awesome.min.css',
'css/main.css',
filters='cssmin',
output='css/snh.css')
css_board = Bundle('css/drawingboard.min.css',
filters='cssmin',
output='css/board.css')
wa.register('js_libs', js_libs)
wa.register('js_board', js_board)
wa.register('js_main', js_main)
wa.register('css_main', css_main)
wa.register('css_board', css_board)
|
fedallah/dsperf
|
pytest_2.py
|
Python
|
mit
| 1,084
| 0.057196
|
#!/usr/bin/env python3
import csv
import os
import sqlite3
# requires python3
# requires sqlite3
#
sqldb = sqlite3.connect(':memory:')
class inputfile:
    def __init__(self, location):
        self.location = location
        if not os.path.isfile(self.location):
            print("file not found")

    def rows(self):
        # Yield the rows of the CSV file backing this input.
        with open(self.location, newline='') as csvfile:
            for r in csv.reader(csvfile, delimiter=','):
                yield r

class perfitem_group(inputfile):
    # Grouping of performance items; left unimplemented in the original.
    pass

class perfitem(perfitem_group):
    # A single performance item; mkdict() was never written in the original.
    pass

class row(inputfile):
    def get(self):
        # Return all rows of the underlying CSV as a list.
        with open(self.location, newline='') as csvfile:
            return list(csv.reader(csvfile, delimiter=','))

def main():
    allinputs = []
    while True:
        input_location = input("Please provide the pathname of the file you wish to "
                               "extract data from. Enter a blank line when you are done.")
        if not input_location:
            break
        allinputs.append(inputfile(input_location))
    for ifile in allinputs:
        for r in ifile.rows():
            pass  # per-row processing was never implemented in the original

if __name__ == '__main__':
    main()
|
jrichte43/ProjectEuler
|
Problem-0442/solutions.py
|
Python
|
gpl-3.0
| 936
| 0.00641
|
__problem_title__ = "Eleven-free integers"
__problem_url___ = "http
|
s://projecteuler.net/problem=442"
__problem_description__ = "An integer is called if its decimal expansion does not contain any " \
"substring representing a power of 11 except 1. For example, 2404 and " \
"13431 are eleven-f
|
ree, while 911 and 4121331 are not. Let E( ) be the " \
"th positive eleven-free integer. For example, E(3) = 3, E(200) = 213 " \
"and E(500 000) = 531563. Find E(10 )."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
r-stein/sublime-text-caret-jump
|
caret_jump.py
|
Python
|
mit
| 2,134
| 0
|
import sublime
import sublime_plugin
OPTIONS_LAST_REGEX = "jump_caret_last_regex"
class CaretJumpCommand(sublime_plugin.TextCommand):
def run(self, edit, jump=True, jump_to=None, repeat_previous_jump=False):
view = self.view
def get_next_sels(user_input):
new_sels = []
for sel in view.sel():
next_sel = view.find(user_input, sel.end(), sublime.IGNORECASE)
if next_sel.begin() != -1:
new_sels.append(next_sel)
return new_sels
def jump_last_regex():
last_reg = self.view.settings().get(OPTIONS_LAST_REGEX)
if last_reg:
select_next_regex(last_reg)
def select_next_regex(user_input):
view.erase_regions("caret_jump_preview")
if not user_input:
# jump_last_regex()
return
self.view.settings().set(OPTIONS_LAST_REGEX, user_input)
new_sels = get_next_sels(user_input)
if jump and new_sels:
view.sel().clear()
view.sel().add_all(new_sels)
        def input_changed(user_input):
new_sels = get_next_sels(user_input)
view.add_regions("caret_jump_preview",
new_sels,
"source, text",
"dot",
sublime.DRAW_OUTLINED)
def input_canceled():
view.erase_regions("caret_jump_preview")
selection = view.substr(view.sel()[0]) if view.sel() else ""
if jump_to:
select_next_regex(jump_to)
elif repeat_previous_jump:
jump_last_regex()
else:
default = selection if selection \
else self.view.settings().get(OPTIONS_LAST_REGEX, "")
view.window().show_input_panel("Seach for",
default,
select_next_regex,
input_changed,
input_canceled)
|
dmitrystu/svd_editor
|
modules/tview.py
|
Python
|
apache-2.0
| 7,250
| 0.001517
|
import wx
import svd
import my
class View(wx.Panel):
def __init__(self, parent, data=None):
wx.Panel.__init__(self, parent)
self.data = {}
self.tree = wx.TreeCtrl(self)
self.tree.AddRoot('FROM_RUSSIA_WITH_LOVE')
self.Bind(wx.EVT_SIZE, self.onResize)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.Bind(wx.EVT_TREE_BEGIN_DRAG, self.onDrag)
self.Bind(wx.EVT_TREE_END_DRAG, self.onDrop)
msizer = wx.BoxSizer(wx.VERTICAL)
msizer.Add(self.tree, 1, wx.EXPAND | wx.ALL, 3)
self.SetSizerAndFit(msizer)
def OnSelChanged(self, event):
item = self.tree.GetFocusedItem()
obj = self.tree.GetPyData(item)
my.post_event(self.GetGrandParent(), my.EVT_SELECTED, obj)
def Reload(self, obj):
item = self.data.get(obj, 0)
if item:
self.tree.SetItemText(item, obj.name)
def Remove(self, obj):
item = self.data.pop(obj, 0)
if item:
if self.tree.IsSelected(item):
self.tree.SelectItem(self.tree.GetPrevVisible(item))
self.tree.Delete(item)
def Append(self, obj):
pi = self.data.get(obj.parent, 0)
ni = self.tree.AppendItem(pi, obj.name)
self.tree.SetPyData(ni, obj)
self.data[obj] = ni
if isinstance(obj, svd.peripheral):
for x in obj.registers:
self.Append(x)
def LoadDevice(self, device):
tree = self.tree
tree.Freeze()
tree.DeleteAllItems()
self.data.clear()
root = tree.AddRoot(device.name)
tree.SetPyData(root, device)
self.data[device] = root
for p in device.peripherals:
pi = tree.AppendItem(root, p.name)
tree.SetPyData(pi, p)
            self.data[p] = pi
for r in p.registers:
ri = tree.AppendItem(pi, r.name)
tree.SetPyData(ri, r)
self.data[r] = ri
tree.UnselectAll()
tree.Expand(root)
        tree.SelectItem(root)
tree.Thaw()
def AddItem(self, obj):
pass
def DelItem(self, obj):
if obj == self.tree:
item = self.tree.GetSelection()
if item.IsOk():
data = self.tree.GetPyData(item)
if isinstance(data, svd.device):
return
if wx.OK != wx.MessageBox('%s will be deleted' % (data.name),
'Confirm item deletion',
wx.OK | wx.CANCEL | wx.ICON_QUESTION):
return
if isinstance(data, svd.register):
data.parent.delRegister(data)
if isinstance(data, svd.peripheral):
# checking if item have a references
refs = [x for x in data.parent.peripherals if x.ref == data]
if refs:
if wx.OK != wx.MessageBox('%s has a references. Delete all ?' % (data.name),
'Confirm item deletion',
wx.OK | wx.CANCEL | wx.ICON_QUESTION):
return
for x in refs:
data.parent.delPeripheral(x)
self.Remove(x)
data.parent.delPeripheral(data)
self.Remove(data)
def CloneItem(self, obj):
if obj == self.tree:
item = self.tree.GetSelection()
if item.IsOk():
data = self.tree.GetPyData(item)
if isinstance(data, svd.device):
return
if wx.OK != wx.MessageBox('%s will be cloned' % (data.name),
'Confirm item clone',
wx.OK | wx.CANCEL | wx.ICON_QUESTION):
return
if isinstance(data, svd.peripheral):
xml = data.toXML()
p = data.parent
new = svd.peripheral(p, xml)
new.name = '%s_CLONE' % (data.name)
p.addPeripheral(new)
elif isinstance(data, svd.register):
xml = data.toXML()
p = data.parent
new = svd.register(p, xml)
new.name = '%s_CLONE' % (data.name)
p.addRegister(new)
self.Append(new)
def SelectItem(self, obj):
item = self.data.get(obj, None)
if item and item.IsOk():
self.tree.SelectItem(item, True)
def onResize(self, event):
self.Layout()
def onDrag(self, event):
item = event.GetItem()
data = self.tree.GetPyData(item)
if isinstance(data, svd.peripheral) or isinstance(data, svd.register):
self.dragitem = item
event.Allow()
else:
self.dragitem = None
event.Veto()
def onDrop(self, event):
dropitem = event.GetItem()
dropdata = self.tree.GetPyData(dropitem)
dragitem = self.dragitem
dragdata = self.tree.GetPyData(dragitem)
self.dragitem = None
if isinstance(dragdata, svd.peripheral) and isinstance(dropdata, svd.peripheral):
# move peripheral under peripheral
if dragdata == dropdata:
return # trivial. nothing to do
parent = dragdata.parent
if dragdata.ref and parent.peripherals.index(dropdata) < parent.peripherals.index(dragdata.ref):
return # can't put reference before original
for x in parent.peripherals:
if x.ref == dragdata and parent.peripherals.index(dropdata) >= parent.peripherals.index(x):
return # can't put original after reference
item = self.tree.InsertItem(self.data[parent], dropitem, dragdata.name)
parent.movePeripheral(dropdata, dragdata)
self.tree.SetPyData(item, dragdata)
self.data[dragdata] = item
for x in dragdata.registers:
nr = self.tree.AppendItem(item, x._name)
self.tree.SetPyData(nr, x)
self.data[x] = nr
self.tree.Delete(dragitem)
elif isinstance(dragdata, svd.register) and isinstance(dropdata, svd.peripheral):
# move register to other peripheral
item = None
for x in reversed(dropdata.registers):
if x._offset == dragdata._offset:
                    # destination offset is busy
return
if x._offset < dragdata._offset:
item = self.tree.InsertItem(dropitem, self.data[x], dragdata.name)
break
if item is None:
item = self.tree.PrependItem(dropitem, dragdata.name)
parent = dragdata.parent
parent.delRegister(dragdata)
dropdata.addRegister(dragdata)
self.tree.SetPyData(item, dragdata)
self.data[dragdata] = item
self.tree.Delete(dragitem)
|
t0mk/ansible
|
lib/ansible/modules/network/nxos/nxos_vlan.py
|
Python
|
gpl-3.0
| 13,902
| 0.001511
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_vlan
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
- Manage the vlan operational state of the VLAN
        (equivalent to state {active | suspend} command).
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
host: 68.170.147.165
username: cisco
password: cisco
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
admin_state: down
name: WEB
transport: nxapi
username: cisco
password: cisco
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
state: absent
transport: nxapi
username: cisco
password: cisco
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: when debug enabled
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_state": "suspend", "mapped_vni": "5000"}
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: when debug enabled
type: dict
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""}
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20",
"vlan_state": "suspend", "mapped_vni": "5000"}
updates:
  description: commands sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
commands:
  description: commands sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
def vlan_range_to_list(vlans):
result = []
if vlans:
for part in vlans.split(','):
if part == 'none':
break
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return numerical_sort(result)
return result
def numerical_sort(string_int_list):
"""Sort list of strings (VLAN IDs) that are digits in numerical order.
"""
as_int_list = []
as_str_list = []
for vlan in string_int_list:
as_int_list.append(int(vlan))
as_int_list.sort()
for vlan in as_int_list:
as_str_list.append(str(vlan))
return as_str_list
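# Hedged sanity check for the two helpers above (inputs are illustrative):
#   vlan_range_to_list('2-4,10')  -> ['2', '3', '4', '10']
#   numerical_sort(['10', '2'])   -> ['2', '10']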
def build_commands(vlans, state):
commands = []
for vlan in vlans:
if state == 'present':
command = 'vlan {0}'.format(vlan)
commands.append(command)
elif state == 'absent':
command = 'no vlan {0}'.format(vlan)
commands.append(command)
return commands
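# For example (illustrative input): build_commands(['2', '3'], 'absent')
# returns ['no vlan 2', 'no vlan 3'].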
def get_vlan_config_commands(vlan, vid):
"""Build command list required for VLAN configuration
"""
reverse_value_map = {
"admin_state": {
"down": "shutdown",
"up": "no shutdown"
}
}
if vlan.get('admin_state'):
# apply value map when making change to the admin state
# note: would need to be a loop or more in depth check if
# value map has more than 1 key
vlan = apply_value_map(reverse_value_map, vlan)
VLAN_ARGS = {
'name': 'name {0}',
'vlan_state': 'state {0}',
'admin_state': '{0}',
'mode': 'mode {0}',
'mapped_vni': 'vn-segment {0}'
}
commands = []
    for param, value in vlan.items():
        if param == 'mapped_vni' and value == 'default':
            command = 'no vn-segment'
        else:
            command = VLAN_ARGS.get(param).format(vlan.get(param))
if command:
commands.append(command)
commands.insert(0, 'vlan ' + vid)
commands.append('exit')
return commands
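# Hedged example (assumed input; command order follows dict iteration order):
#   get_vlan_config_commands({'name': 'WEB', 'admin_state': 'down'}, '50')
#   -> ['vlan 50', 'name WEB', 'shutdown', 'exit']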
def get_list_of_vlans(module):
body = run_commands(module, ['show vlan | json'])
vlan_list = []
vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief']
if isinstance(vlan_table, list):
for vlan in vlan_table:
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
else:
vlan_list.append('1')
return vlan_list
def get_vni(vlanid, module):
flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
body = get_config(module, flags=flags)
#command = 'show run all | section vlan.{0}'.format(vlanid)
#body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
value = ''
if body:
REGEX = re.compile(r'(?:vn-segment\s)(?P<value>.*)$', re.M)
        if 'vn-segment' in body:
            value = REGEX.search(body).group('value')
    return value
|
flavour/rgims_as_diff
|
private/templates/RGIMS/controllers.py
|
Python
|
mit
| 9,573
| 0.011804
|
# -*- coding: utf-8 -*-
from os import path
from gluon import current
from gluon.html import *
from s3 import s3_represent_facilities, s3_register_validation
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
T = current.T
auth = current.auth
db = current.db
s3db = current.s3db
request = current.request
appname = request.application
response = current.response
s3 = response.s3
settings = current.deployment_settings
view = path.join(request.folder, "private", "templates",
"RGIMS", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
title = settings.get_system_name()
response.title = title
# flag for the link change
# (condition, warehouse_id)
flag = (False, 0)
# change of link will happen
# if pe_id is part of the inv_warehouse
wh_table = s3db.table('inv_warehouse')
if wh_table:
auth_table = db((db.auth_membership.user_id == auth.user_id) &
(db.auth_membership.pe_id == wh_table.pe_id))
for entity in auth_table.select(wh_table.id):
if entity.id:
flag = (True, entity.id)
break
if flag[0]:
# Menu Boxes
menu_btns = [#div, label, app, function
["sit", T("Request"), "inv", "warehouse/%s/req" % flag[1]],
["dec", T("Send"), "inv", "warehouse/%s/send" % flag[1]],
["res", T("Receive"), "inv", "warehouse/%s/recv" % flag[1]]
]
else:
# Menu Boxes
menu_btns = [#div, label, app, function
["sit", T("Request"), "req", "req"],
["dec", T("Send"), "inv", "send"],
["res", T("Receive"), "inv", "recv"]
]
menu_divs = {"facility": DIV( H3("Map"),
_id = "facility_box", _class = "menu_box"),
"sit": DIV(
_id = "menu_div_sit", _class = "menu_div"),
"dec": DIV(
_id = "menu_div_dec", _class = "menu_div"),
"res": DIV(
_id = "menu_div_res", _class = "menu_div"),
}
for div, label, app, function in menu_btns:
if settings.has_module(app):
# @ToDo: Also check permissions (e.g. for anonymous users)
menu_divs[div].append(A(DIV(label,
_class = "menu-btn-r"),
_class = "menu-btn-l",
_href = URL(app,function)
)
)
div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
appname),
_class = "div_arrow")
sit_dec_res_box = DIV(menu_divs["sit"],
div_arrow,
menu_divs["dec"],
div_arrow,
menu_divs["res"],
_id = "sit_dec_res_box",
_class = "menu_box fleft swidth"
#div_additional,
)
facility_box = menu_divs["facility"]
facility_box.append(A(IMG(_src = "/%s/static/img/map_icon_128.png" % \
appname),
_href = URL(c="gis", f="index"),
_title = T("Map")
)
)
# Check logged in AND permissions
_s3 = current.session.s3
AUTHENTICATED = _s3.system_roles.AUTHENTICATED
roles = _s3.roles
if AUTHENTICATED in roles and \
auth.s3_has_permission("read", db.org_organisation):
auth.permission.controller = "org"
auth.permission.function = "site"
permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
manage_facility_box = ""
if permitted_facilities:
facility_list = s3_represent_facilities(db, permitted_facilities,
link=False)
facility_list = sorted(facility_list, key=lambda fac: fac[1])
facility_opts = [OPTION(opt[1], _value = opt[0])
for opt in facility_list]
facility_opts.insert(0, OPTION("Please Select a Warehouse"))
if facility_list:
manage_facility_box = DIV(H3(T("Manage Your Warehouse")),
SELECT(_id = "manage_facility_select",
_style = "max-width:400px;",
*facility_opts
),
A(T("Go"),
_href = URL(c="default", f="site",
args=[facility_list[0][0]]),
                                            #_disabled = "disabled",
                                            _id = "manage_facility_btn",
                                            _class = "action-btn"
),
_id = "manage_facility_box",
_class = "menu_box fleft")
s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
$('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))
})''')
else:
manage_facility_box = DIV()
else:
manage_facility_box = ""
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
request.args = ["register"]
if settings.get_terms_of_service():
auth.messages.submit_button = T("I accept. Create my account.")
else:
auth.messages.submit_button = T("Register")
register_form = auth()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
# Add client-side validation
s3_register_validation()
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % appname)
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
                post_script = ""
|
utkbansal/kuma
|
vendor/packages/translate/convert/test_po2dtd.py
|
Python
|
mpl-2.0
| 22,788
| 0.001668
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import pytest
from translate.convert import dtd2po, po2dtd, test_convert
from translate.misc import wStringIO
from translate.storage import dtd, po
class TestPO2DTD:
def setup_method(self, method):
warnings.resetwarnings()
def teardown_method(self, method):
warnings.resetwarnings()
def po2dtd(self, posource, remove_untranslated=False):
"""helper that converts po source to dtd source without requiring files"""
inputfile = wStringIO.StringIO(posource)
inputpo = po.pofile(inputfile)
convertor = po2dtd.po2dtd(remove_untranslated=remove_untranslated)
outputdtd = convertor.convertstore(inputpo)
return outputdtd
def merge2dtd(self, dtdsource, posource):
"""helper that merges po translations to dtd source without requiring files"""
inputfile = wStringIO.StringIO(posource)
inputpo = po.pofile(inputfile)
templatefile = wStringIO.StringIO(dtdsource)
templatedtd = dtd.dtdfile(templatefile)
convertor = po2dtd.redtd(templatedtd)
outputdtd = convertor.convertstore(inputpo)
return outputdtd
def convertdtd(self, posource, dtdtemplate, remove_untranslated=False):
"""helper to exercise the command line function"""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
templatefile = wStringIO.StringIO(dtdtemplate)
assert po2dtd.convertdtd(inputfile, outputfile, templatefile,
remove_untranslated=remove_untranslated)
return outputfile.getvalue()
def roundtripsource(self, dtdsource):
"""converts dtd source to po and back again, returning the resulting source"""
dtdinputfile = wStringIO.StringIO(dtdsource)
dtdinputfile2 = wStringIO.StringIO(dtdsource)
pooutputfile = wStringIO.StringIO()
dtd2po.convertdtd(dtdinputfile, pooutputfile, dtdinputfile2)
posource = pooutputfile.getvalue()
poinputfile = wStringIO.StringIO(posource)
dtdtemplatefile = wStringIO.StringIO(dtdsource)
dtdoutputfile = wStringIO.StringIO()
po2dtd.convertdtd(poinputfile, dtdoutputfile, dtdtemplatefile)
dtdresult = dtdoutputfile.getvalue()
print_string = "Original DTD:\n%s\n\nPO version:\n%s\n\n"
print_string = print_string + "Output DTD:\n%s\n################"
print(print_string % (dtdsource, posource, dtdresult))
return dtdresult
def roundtripstring(self, entitystring):
"""Just takes the contents of a ENTITY definition (with quotes) and does a roundtrip on that"""
dtdintro, dtdoutro = '<!ENTITY Test.RoundTrip ', '>\n'
dtdsource = dtdintro + entitystring + dtdoutro
dtdresult = self.roundtripsource(dtdsource)
assert dtdresult.startswith(dtdintro) and dtdresult.endswith(dtdoutro)
return dtdresult[len(dtdintro):-len(dtdoutro)]
def check_roundtrip(self, dtdsource, dtdcompare=None):
"""Checks that the round-tripped string is the same as dtdcompare.
If no dtdcompare string is provided then the round-tripped string is
compared with the original string.
The reason why sometimes another string is provided to compare with the
resulting string from the roundtrip is that if the original string
contains some characters, like " character, or escapes like ",
then when the roundtrip is performed those characters or escapes are
escaped, rendering a round-tripped string which differs from the
original one.
"""
if not dtdcompare:
dtdcompare = dtdsource
assert self.roundtripstring(dtdsource) == dtdcompare
def test_joinlines(self):
"""tests that po lines are joined seamlessly (bug 16)"""
multilinepo = '''#: pref.menuPath\nmsgid ""\n"<span>Tools > Options</"\n"span>"\nmsgstr ""\n'''
dtdfile = self.po2dtd(multilinepo)
dtdsource = str(dtdfile)
assert "</span>" in dtdsource
def test_escapedstr(self):
"""tests that \n in msgstr is escaped correctly in dtd"""
multilinepo = '''#: pref.menuPath\nmsgid "Hello\\nEveryone"\nmsgstr "Good day\\nAll"\n'''
dtdfile = self.po2dtd(multilinepo)
dtdsource = str(dtdfile)
assert "Good day\nAll" in dtdsource
def test_missingaccesskey(self):
"""tests that proper warnings are given if access key is missing"""
simplepo = '''#: simple.label
#: simple.accesskey
msgid "Simple &String"
msgstr "Dimpled Ring"
'''
simpledtd = '''<!ENTITY simple.label "Simple String">
<!ENTITY simple.accesskey "S">'''
warnings.simplefilter("error")
assert pytest.raises(Warning, self.merge2dtd, simpledtd, simplepo)
def test_accesskeycase(self):
"""tests that access keys come out with the same case as the original, regardless"""
simplepo_template = '''#: simple.label\n#: simple.accesskey\nmsgid "%s"\nmsgstr "%s"\n'''
simpledtd_template = '''<!ENTITY simple.label "Simple %s">\n<!ENTITY simple.accesskey "%s">'''
possibilities = [
#(en label, en akey, en po, af po, af label, expected af akey)
("Sis", "S", "&Sis", "&Sies", "Sies", "S"),
("Sis", "s", "Si&s", "&Sies", "Sies", "S"),
("Sis", "S", "&Sis", "Sie&s", "Sies", "s"),
("Sis", "s", "Si&s", "Sie&s", "Sies", "s"),
# untranslated strings should have the casing of the source
("Sis", "S", "&Sis", "", "Sis", "S"),
("Sis", "s", "Si&s", "", "Sis", "s"),
("Suck", "S", "&Suck", "", "Suck", "S"),
("Suck", "s", "&Suck", "", "Suck", "s"),
]
for (en_label, en_akey, po_source, po_target, target_label, target_akey) in possibilities:
simplepo = simplepo_template % (po_source, po_target)
simpledtd = simpledtd_template % (en_label, en_akey)
dtdfile = self.merge2dtd(simpledtd, simplepo)
dtdfile.makeindex()
accel = dtd.unquotefromdtd(dtdfile.id_index["simple.accesskey"].definition)
assert accel == target_akey
def test_accesskey_types(self):
"""tests that we can detect the various styles of accesskey"""
simplepo_template = '''#: simple.%s\n#: simple.%s\nmsgid "&File"\nmsgstr "F&aele"\n'''
simpledtd_template = '''<!ENTITY simple.%s "File">\n<!ENTITY simple.%s "a">'''
for label in ("label", "title"):
for accesskey in ("accesskey", "accessKey", "akey"):
                simplepo = simplepo_template % (label, accesskey)
                simpledtd = simpledtd_template % (label, accesskey)
                dtdfile = self.merge2dtd(simpledtd, simplepo)
                dtdfile.makeindex()
                assert dtd.unquotefromdtd(dtdfile.id_index["simple.%s" % accesskey].definition) == "a"
def test_ampersandfix(self):
"""tests that invalid ampersands are fixed in the dtd"""
simplestring = '''#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled &Ring"\n'''
dtdfile = self.po2dtd(simplestring)
dtdsource = str(dtdfile)
assert "Dimpled Ring" in dtdsource
po_snippet = r'''#: searchIntegration.label
#: searchIntegration.accesskey
msgid "Allow &searchIntegration.engineName; to &search messages"
msgstr "&searchIntegration.engineName; &ileti aramasına izin ver"
'''
dtd_snippet = r'''<!ENTITY searchIntegration.accesskey "s">
<!ENTITY searchIntegration.label "Allow &searchIntegration.engineName; to search messages">'''
dtdfile = self.merge2dtd(dtd_snippet, po_snippet)
dtdsource = str(dtdfile)
print(dtdsource)
assert '"&searchIntegration.engineName; ileti aramasına izin ver"' in dtdsource
def test_accesskey_missing(self):
"""tests that missing ampersands use the source accesskey"""
po_snippet = r'''#: key.label
#: key.accesskey
msgid "&Search"
msgstr "Ileti"
'''
dtd_snippet = r'''<!ENTI
|
efforia/django-shipping
|
shipping/providers/default.py
|
Python
|
lgpl-3.0
| 5,966
| 0.008213
|
#!/usr/bin/python
#
# This file is part of the django-shipping project.
#
# Copyright (C) 2011-2020 William Oliveira de Lagos <william.lagos@icloud.com>
#
# Shipping is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shipping is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Shipping. If not, see <http://www.gnu.org/licenses/>.
#
from django.utils.translation import ugettext as _
try:
from mezzanine.conf import settings
from cartridge.shop.utils import set_shipping
from cartridge.shop.models import Cart
from cartridge.shop.forms import OrderForm
except ImportError as e:
pass
from shipping.codes import CorreiosCode
from shipping.fretefacil import FreteFacilShippingService
from shipping.correios import CorreiosShippingService
from shipping.models import DeliverableProperty
def fretefacil_shipping_handler(request, form, order=None):
if request.session.get("free_shipping"): return
settings.use_editable()
    if form is not None: user_postcode = form.cleaned_data['shipping_detail_postcode']
    else: user_postcode = settings.STORE_POSTCODE
shippingservice = FreteFacilShippingService()
cart = Cart.objects.from_request(request)
delivery_value = 0.0
if cart.has_items():
for product in cart:
properties = DeliverableProperty.objects.filter(sku=product.sku)
if len(properties) > 0:
props = properties[0]
deliverable = shippingservice.create_deliverable(settings.STORE_POSTCODE,
user_postcode,
props.width,
props.height,
props.length,
props.weight)
delivery_value += float(shippingservice.delivery_value(deliverable))
set_shipping(request, _("Correios"),delivery_value)
def correios_create_deliverable(obj,service,store_postcode,user_postcode,width,height,length,weight):
obj.cep_origem = store_postcode
obj.altura = height
obj.largura = width
obj.comprimento = length
obj.peso = weight
obj.servico = service
return {
'postcode':user_postcode,
'service':service
}
def correios_delivery_value(shippingservice,deliverable):
shippingservice(deliverable['postcode'],deliverable['service'])
return '.'.join(shippingservice.results[deliverable['service']][1].split(','))
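# e.g. if results['SEDEX'][1] == '12,34', this returns '12.34' (comma to dot).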
def sedex_shipping_handler(request, form, order=None):
if request.session.get("free_shipping"): return
settings.use_editable()
if form is not None: user_postcode = form.cleaned_data['shipping_detail_postcode']
else: user_postcode = settings.STORE_POSTCODE
shippingservice = CorreiosShippingService()
cart = Cart.objects.from_request(request)
delivery_value = 0.0
if cart.has_items():
for product in cart:
properties = DeliverableProperty.objects.filter(sku=product.sku)
if len(properties) > 0:
props = properties[0]
deliverable = correios_create_deliverable(shippingservice,
'SEDEX',
settings.STORE_POSTCODE,
user_postcode,
props.width,
props.height,
props.length,
props.weight)
delivery_value += float(correios_delivery_value(shippingservice,deliverable))
set_shipping(request, _("Correios"),delivery_value)
def shipping_payment_handler(request, order_form, order):
data = order_form.cleaned_data
shipping = order.shipping_total
code = CorreiosCode()
shipping_data = code.consulta(order.billing_detail_postcode)
order.billing_detail_street = '%s %s %s' % (shipping_data['tipo_logradouro'],
shipping_data['logradouro'],
data['billing_detail_complement'])
order.billing_detail_city = shipping_data['cidade']
order.billing_detail_state = shipping_data['uf']
order.billing_detail_country = settings.STORE_COUNTRY
order.save()
currency = settings.SHOP_CURRENCY
cart = Cart.objects.from_request(request)
cart_items = []
has_shipping = False
for item in cart.items.all():
quantity = len(DeliverableProperty.objects.filter(sku=item.sku))
if quantity > 0: has_shipping = True
cart_items.append({
"name":item.description,
"sku":item.sku,
"price":'%.2f' % item.unit_price,
"currency":currency,
"quantity":item.quantity
})
if has_shipping:
cart_items.append({
"name": "Frete via SEDEX",
"sku":"1",
"price":'%.2f' % shipping,
"currency":currency,
"quantity":1
})
return shipping
|
jmaidens/Codility
|
MaxCounters.py
|
Python
|
mit
| 951
| 0.005258
|
# Solution to exercise MaxCounters
# http://www.codility.com/train/
def solution(N, A):
    counters = [0 for _ in range(N)]
last_max_counter = 0
current_max_counter = 0
# Iterate through A. At each step, the value of counter i is
# last_max_counter or counters[i], whichever is greater
for a in A:
if a == N+1:
last_max_counter = current_max_counter
elif counters[a-1] < last_max_counter:
            counters[a-1] = last_max_counter + 1
current_max_counter = max(current_max_counter, counters[a-1])
else:
counters[a-1] += 1
current_max_counter = max(current_max_counter, counters[a-1])
# Make a pass through counters to update the ones that
    # have not changed since the last max_counter operation
for i in range(N):
if counters[i] < last_max_counter:
counters[i] = last_max_counter
return counters
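# A quick hedged check of the lazy-max approach above, using the example
# input from the Codility problem statement:
if __name__ == '__main__':
    assert solution(5, [3, 4, 4, 6, 1, 4, 4]) == [3, 2, 2, 4, 2]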
|
ctenix/pytheway
|
imooc_requests_urllib.py
|
Python
|
gpl-3.0
| 978
| 0.011579
|
#__*__coding:utf-8__*__
import urllib
import urllib2
URL_IP = 'http://127.0.0.1:8000/ip'
URL_GET = 'http://127.0.0.1:8000/get'
def use_simple_urllib2():
response = urllib2.urlopen(URL_IP)
print '>>>>Response Headers:'
print response.info()
print '>>>>Response Body:'
print ''.join([line for line in response.readlines()])
def use_params_urllib2():
    # Build the request parameters
params = urllib.urlencode({'param1':'hello','param2':'world'})
print 'Request Params:'
print params
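    # e.g. params == 'param1=hello&param2=world' (dict key order may vary)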
    # Send the request
response = urllib2.urlopen('?'.join([URL_GET, '%s']) % params)
    # Handle the response
print '>>>>Response Headers:'
print response.info()
print '>>>>Status Code:'
print response.getcode()
print '>>>>Response Body:'
print ''.join([line for line in response.readlines()])
if __name__ == '__main__':
print '>>>Use simple urllib2:'
use_simple_urllib2()
print
print '>>>Use params urllib2:'
use_params_urllib2()
|
antoinecarme/sklearn2sql_heroku
|
tests/classification/FourClass_500/ws_FourClass_500_GaussianNB_sqlite_code_gen.py
|
Python
|
bsd-3-clause
| 139
| 0.014388
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("GaussianNB", "FourClass_500", "sqlite")
|
Lenusik/python
|
fixture/contact.py
|
Python
|
gpl-2.0
| 2,909
| 0.003438
|
__author__ = 'Lenusik'
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
driver = self.app.driver
self.app.open_home_page()
self.contact_cache = []
for row in driver.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
lastname = cells[1].text
id = cells[0].find_element_by_tag_name("input").get_attribute('value')
firstname = cells[2].text
all_phones = cells[5].text
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,
all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self, index):
driver = self.app.driver
self.app.open_home_page()
row = driver.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
driver = self.app.driver
self.app.open_home_page()
row = driver.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
self.open_contact_to_edit_by_index(index)
driver = self.app.driver
firstname = driver.find_element_by_name("firstname").get_attribute("value")
lastname = driver.find_element_by_name("lastname").get_attribute("value")
id = driver.find_element_by_name("id").get_attribute("value")
homephone = driver.find_element_by_name("home").get_attribute("value")
        workphone = driver.find_element_by_name("work").get_attribute("value")
mobilephone = driver.find_element_by_name("mobile").get_attribute("value")
secondaryphone = driver.find_element_by_name("phone2").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, id=id,
homephone=homephone, workphone=workphone, mobilephone=mobilephone, secondaryphone=secondaryphone)
def get_contact_from_view_page(self, index):
driver = self.app.driver
self.open_contact_view_by_index(index)
text = driver.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
return Contact(homephone=homephone, workphone=workphone, mobilephone=mobilephone, secondaryphone=secondaryphone)
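# Hedged example of the regex parsing above: for view text
# 'H: 111\nW: 222\nM: 333\nP: 444' the groups yield homephone='111',
# workphone='222', mobilephone='333' and secondaryphone='444'.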
|
asekretenko/yandex-tank
|
tests/Autostop_Test.py
|
Python
|
lgpl-2.1
| 3,041
| 0.007892
|
from yandextank.plugins.Aggregator import SecondAggregateData
from yandextank.plugins.Autostop import AutostopPlugin
from Tank_Test import TankTestCase
import tempfile
import unittest
class AutostopTestCase(TankTestCase):
def setUp(self):
core = self.get_core()
core.load_configs(['config/autostop.conf'])
core.load_plugins()
core.plugins_configure()
self.foo = AutostopPlugin(core)
def tearDown(self):
del self.foo
self.foo = None
def test_run(self):
data = SecondAggregateData()
data.overall.avg_response_time = 11
self.foo.core.set_option(self.foo.SECTION, "autostop", "time(1,10)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_http(self):
data = SecondAggregateData()
data.overall.http_codes = {'200':11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (200, 10, 5 )\nhttp (3xx, 1.5%, 10m)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_net(self):
data = SecondAggregateData()
data.overall.net_codes = {71:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "net (71, 1, 5)\nnet (xx, 1.5%, 10m )")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_quan(self):
data = SecondAggregateData()
data.overall.quantiles = {99.0:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "quantile(99,2,3)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
        self.foo.end_test(0)
def test_run_false_trigger_bug(self):
data = SecondAggregateData()
data.overall.http_codes = {}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (5xx, 100%, 1)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
            self.foo.aggregate_second(data)
if self.foo.is_test_finished() >= 0:
raise RuntimeError()
self.foo.end_test(0)
if __name__ == '__main__':
unittest.main()
|
Kotaimen/stonemason
|
stonemason/service/tileserver/themes/views.py
|
Python
|
mit
| 1,132
| 0.000883
|
# -*- encoding: utf-8 -*-
__author__ = 'ray'
__date__ = '2/27/15'
from flask import jsonify, abort
from flask.views import MethodView
from ..models import ThemeModel
class ThemeView(MethodView):
""" Theme View
Retrieve description of a list of available themes.
    :param theme_model: A theme model that manages themes.
    :type theme_model: :class:`~stonemason.service.models.ThemeModel`
"""
def __init__(self, theme_model):
assert isinstance(theme_model, ThemeModel)
self._theme_model = theme_model
def get(self, tag):
"""Return description of the theme. Raise :http:statuscode:`404` if
not found.
        :param tag: Name of a theme.
        :type tag: str
"""
if tag is None:
collection = list()
for theme in self._theme_model.iter_themes():
collection.append(theme.to_dict())
return jsonify(result=collection)
else:
theme = self._theme_model.get_theme(tag)
if theme is None:
abort(404)
return jsonify(result=theme.to_dict())
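# Hedged wiring sketch for the MethodView above (rule names and the `model`
# variable are assumptions, not part of this module):
#   view = ThemeView.as_view('themes', theme_model=model)
#   app.add_url_rule('/themes/', defaults={'tag': None}, view_func=view)
#   app.add_url_rule('/themes/<tag>', view_func=view)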
|
maxplanck-ie/HiCExplorer
|
hicexplorer/test/long_run/test_hicBuildMatrix.py
|
Python
|
gpl-2.0
| 9,095
| 0.002969
|
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
from hicexplorer import hicBuildMatrix, hicInfo
from hicmatrix import HiCMatrix as hm
from tempfile import NamedTemporaryFile, mkdtemp
import shutil
import os
import numpy.testing as nt
ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "test_data/")
sam_R1 = ROOT + "small_test_R1_unsorted.bam"
sam_R2 = ROOT + "small_test
|
_R2_unsorted.bam"
dpnii_file = ROOT + "DpnII.bed"
def are_files_equal(file1, file2, delta=None):
equal = True
if delta:
mismatches = 0
    with open(file1) as textfile1, open(file2) as textfile2:
for x, y in zip(textfile1, textfile2):
if x.startswith('File'):
continue
if x != y:
if delta:
mismatches += 1
if mismatches > delta:
equal = False
break
else:
equal = False
break
return equal
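# Hedged usage note for the helper above: are_files_equal(a, b, delta=7)
# tolerates up to 7 mismatching lines, and any line starting with 'File'
# is skipped outright (used below for logs with volatile fields).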
def test_build_matrix(capsys):
outfile = NamedTemporaryFile(suffix='.h5', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 -b /tmp/test.bam --QCfolder {} --threads 4".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_matrix_parallel.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
nt.assert_equal(test.cut_intervals, new.cut_intervals)
# print("MATRIX NAME:", outfile.name)
print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
# accept delta of 60 kb, file size is around 4.5 MB
assert abs(os.path.getsize(ROOT + "small_test_matrix_result.bam") - os.path.getsize("/tmp/test.bam")) < 64000
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
os.unlink("/tmp/test.bam")
def test_build_matrix_cooler():
outfile = NamedTemporaryFile(suffix='.cool', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 -b /tmp/test.bam --QCfolder {} --threads 4".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_matrix_parallel.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
# nt.assert_equal(test.cut_intervals, new.cut_intervals)
nt.assert_equal(len(new.cut_intervals), len(test.cut_intervals))
cut_interval_new_ = []
cut_interval_test_ = []
for x in new.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
# print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
def test_build_matrix_cooler_metadata():
outfile = NamedTemporaryFile(suffix='.cool', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 -b /tmp/test.bam --QCfolder {} --threads 4 --genomeAssembly dm3".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_matrix_parallel.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
# nt.assert_equal(test.cut_intervals, new.cut_intervals)
nt.assert_equal(len(new.cut_intervals), len(test.cut_intervals))
cut_interval_new_ = []
cut_interval_test_ = []
for x in new.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
# print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
outfile_metadata = NamedTemporaryFile(suffix='.txt', delete=False)
outfile_metadata.close()
args = "-m {} -o {}".format(outfile.name, outfile_metadata.name).split()
hicInfo.main(args)
assert are_files_equal(ROOT + "hicBuildMatrix/metadata.txt", outfile_metadata.name, delta=7)
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
def test_build_matrix_cooler_multiple():
outfile = NamedTemporaryFile(suffix='.cool', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 10000 20000 -b /tmp/test.bam --QCfolder {} --threads 4".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test_5000 = hm.hiCMatrix(ROOT + "hicBuildMatrix/multi_small_test_matrix.cool::/resolutions/5000")
test_10000 = hm.hiCMatrix(ROOT + "hicBuildMatrix/multi_small_test_matrix.cool::/resolutions/10000")
test_20000 = hm.hiCMatrix(ROOT + "hicBuildMatrix/multi_small_test_matrix.cool::/resolutions/20000")
new_5000 = hm.hiCMatrix(outfile.name + '::/resolutions/5000')
new_10000 = hm.hiCMatrix(outfile.name + '::/resolutions/10000')
new_20000 = hm.hiCMatrix(outfile.name + '::/resolutions/20000')
nt.assert_equal(test_5000.matrix.data, new_5000.matrix.data)
nt.assert_equal(test_10000.matrix.data, new_10000.matrix.data)
nt.assert_equal(test_20000.matrix.data, new_20000.matrix.data)
# nt.assert_equal(test.cut_intervals, new.cut_intervals)
nt.assert_equal(len(new_5000.cut_intervals), len(test_5000.cut_intervals))
nt.assert_equal(len(new_10000.cut_intervals), len(test_10000.cut_intervals))
nt.assert_equal(len(new_20000.cut_intervals), len(test_20000.cut_intervals))
cut_interval_new_ = []
cut_interval_test_ = []
for x in new_5000.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test_5000.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
cut_interval_new_ = []
cut_interval_test_ = []
for x in new_10000.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test_10000.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
cut_interval_new_ = []
cut_interval_test_ = []
for x in new_20000.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test_20000.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
# print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
def test_build_matrix_rf():
outfile = NamedTemporaryFile(suffix='.h5', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_"
|
sinotradition/meridian
|
meridian/tst/acupoints/test_zuwuli233.py
|
Python
|
apache-2.0
| 299
| 0.006689
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import zuwuli233
class TestZuwuli233Functions(unittest.TestCase):
def setUp(self):
pass
    def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
|
willingc/oh-mainline
|
vendor/packages/scrapy/scrapy/tests/test_cmdline/__init__.py
|
Python
|
agpl-3.0
| 1,166
| 0.008576
|
import sys
import os
from subprocess import Popen, PIPE
import unittest
class CmdlineTest(unittest.TestCase):
def setUp(self):
self.env = os.environ.copy()
if 'PYTHONPATH' in os.environ:
self.env['PYTHONPATH'] = os.environ['PYTHONPATH']
self.env['SCRAPY_SETTINGS_MODULE'] = 'scrapy.tests.test_cmdline.settings'
def _execute(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
proc = Popen(args, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs)
comm = proc.communicate()
return comm[0].strip()
def test_default_settings(self):
self.assertEqual(self._execute('settings', '--get', 'TEST1'), \
                         'default + loaded + started')
def test_override_settings_using_set_arg(self):
        self.assertEqual(self._execute('settings', '--get', 'TEST1', '-s', 'TEST1=override'), \
'override + loaded + started')
def test_override_settings_using_envvar(self):
self.env['SCRAPY_TEST1'] = 'override'
self.assertEqual(self._execute('settings', '--get', 'TEST1'), \
'override + loaded + started')
|
openstack/octavia
|
octavia/tests/unit/common/test_base_taskflow.py
|
Python
|
apache-2.0
| 5,974
| 0
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import concurrent.futures
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow import engines as tf_engines
from octavia.common import base_taskflow
import octavia.tests.unit.base as base
MAX_WORKERS = 1
ENGINE = 'parallel'
_engine_mock = mock.MagicMock()
class TestBaseTaskFlowEngine(base.TestCase):
def setUp(self):
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="task_flow", max_workers=MAX_WORKERS)
conf.config(group="task_flow", engine=ENGINE)
conf.config(group="task_flow", disable_revert=True)
super().setUp()
@mock.patch('concurrent.futures.ThreadPoolExecutor',
return_value='TESTEXECUTOR')
@mock.patch('taskflow.engines.load',
return_value=_engine_mock)
def test_taskflow_load(self,
mock_tf_engine_load,
mock_ThreadPoolExecutor):
# Test __init__
base_taskflow_engine = base_taskflow.BaseTaskFlowEngine()
concurrent.futures.ThreadPoolExecutor.assert_called_once_with(
max_workers=MAX_WORKERS)
# Test taskflow_load
base_taskflow_engine.taskflow_load('TEST')
tf_engines.load.assert_called_once_with(
'TEST',
engine=ENGINE,
executor='TESTEXECUTOR',
never_resolve=True)
_engine_mock.compile.assert_called_once_with()
_engine_mock.prepare.assert_called_once_with()
class TestTaskFlowServiceController(base.TestCase):
_mock_uuid = '9a2ebc48-cd3e-429e-aa04-e32f5fc5442a'
def setUp(self):
self.conf = oslo_fixture.Config(cfg.CONF)
self.conf.config(group="task_flow", engine='parallel')
self.conf.config(group="task_flow", max_workers=MAX_WORKERS)
self.driver_mock = mock.MagicMock()
self.persistence_mock = mock.MagicMock()
self.jobboard_mock = mock.MagicMock()
self.driver_mock.job_board.return_value = self.jobboard_mock
self.driver_mock.persistence_driver.get_persistence.return_value = (
self.persistence_mock)
self.service_controller = base_taskflow.TaskFlowServiceController(
self.driver_mock)
super().setUp()
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=_mock_uuid)
@mock.patch('taskflow.engines.save_factory_details')
def test_run_poster(self, mock_engines, mockuuid):
flow_factory = mock.MagicMock()
flow_factory.__name__ = 'testname'
job_name = 'testname-%s' % self._mock_uuid
job_details = {'store': 'test'}
with mock.patch.object(self.service_controller, '_wait_for_job'
) as wait:
uuid = self.service_controller.run_poster(flow_factory,
**job_details)
save_logbook = self.persistence_mock.__enter__().get_connection(
).save_logbook
save_logbook.assert_called()
self.assertEqual(job_name, save_logbook.call_args[0][0].name)
mock_engines.assert_called()
save_args = mock_engines.call_args
self.assertEqual(job_name, save_args[0][0].name)
self.assertEqual(self._mock_uuid, save_args[0][0].uuid)
self.assertEqual(flow_factory, save_args[0][1])
self.assertEqual(self.persistence_mock.__enter__(),
save_args[1]['backend'])
self.jobboard_mock.__enter__().post.assert_called()
post_args = self.jobboard_mock.__enter__().post.call_args
self.assertEqual(job_name, post_args[0][0])
self.assertEqual(job_details, post_args[1]['details'])
wait.assert_called()
self.assertEqual(self._mock_uuid, uuid)
def test__wait_for_job(self):
        job1 = mock.MagicMock()
job1.wait.side_effect = [False, True]
job2 = mock.MagicMock()
job2.wait.side_effect = [False, True]
job3 = mock.MagicMock()
job3.wait.return_value = True
job_board = mock.MagicMock()
job_board.iterjobs.side_effect = [
            [job1, job2, job3],
[job1, job2]
]
self.service_controller._wait_for_job(job_board)
job1.extend_expiry.assert_called_once()
job2.extend_expiry.assert_called_once()
job3.extend_expiry.assert_not_called()
@mock.patch('octavia.common.base_taskflow.RedisDynamicLoggingConductor')
@mock.patch('octavia.common.base_taskflow.DynamicLoggingConductor')
def test_run_conductor(self, dynamiccond, rediscond):
self.service_controller.run_conductor("test")
rediscond.assert_called_once_with(
"test", self.jobboard_mock.__enter__(),
persistence=self.persistence_mock.__enter__(),
engine='parallel',
engine_options={
'max_workers': MAX_WORKERS,
})
self.conf.config(group="task_flow",
jobboard_backend_driver='zookeeper_taskflow_driver')
self.service_controller.run_conductor("test2")
dynamiccond.assert_called_once_with(
"test2", self.jobboard_mock.__enter__(),
persistence=self.persistence_mock.__enter__(),
engine='parallel')
|
hiidef/hiispider
|
legacy/evaluateboolean.py
|
Python
|
mit
| 650
| 0.004615
|
from unicodeconverter import convertToUnicode
def evaluateBoolean(b):
if isinstance(b, bool):
return b
if isinstance(b, str):
b = convertToUnicode(b)
if isinstance(b, unicode):
if b.lower() == u"false":
return False
        elif b.lower() == u"no":
return True
elif b.lower() == u"no":
return False
elif b.lower() == u"yes":
return True
else:
try:
return bool(int(b))
            except (ValueError, TypeError):
                return True
else:
try:
return bool(int(b))
        except (ValueError, TypeError):
return True
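# Hedged behaviour sketch (Python 2 semantics assumed):
#   evaluateBoolean(u"Yes")   -> True
#   evaluateBoolean("0")      -> False  (falls through to bool(int("0")))
#   evaluateBoolean("maybe")  -> True   (int() fails, so it defaults to True)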
|
CptSpaceToaster/memegen
|
memegen/stores/image.py
|
Python
|
mit
| 275
| 0
|
import os
class ImageStore:
def __init__(self, root):
self.root = root
def exists(self, image):
image.root = self.root
return os.path.isfile(image.path)
def create(self, image):
image.root = self.root
image.generate()
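# Hedged usage sketch (an image object exposing .path and .generate() is
# assumed, mirroring the attributes used above):
#   store = ImageStore('/var/memes')
#   if not store.exists(image):
#       store.create(image)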
|
unioslo/cerebrum
|
Cerebrum/modules/no/hia/OrgLDIF.py
|
Python
|
gpl-2.0
| 4,715
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2004-2020 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import logging
from collections import defaultdict
from six import text_type
from Cerebrum import Entity
from Cerebrum.modules.no.OrgLDIF import OrgLdifEntitlementsMixin
from Cerebrum.modules.LDIFutils import (
attr_unique,
normalize_string,
)
from Cerebrum.Utils import make_timer
logger = logging.getLogger(__name__)
class OrgLDIFHiAMixin(OrgLdifEntitlementsMixin):
"""Mixin class for norEduLDIFMixin(OrgLDIF) with HiA modifications."""
def __init__(self, *args, **kwargs):
super(OrgLDIFHiAMixin, self).__init__(*args, **kwargs)
self.attr2syntax['mobile'] = self.attr2syntax['telephoneNumber']
self.attr2syntax['roomNumber'] = (None, None, normalize_string)
def init_attr2id2contacts(self):
# Changes from the original:
# - Get phone and fax from system_manual, others from system_sap.
# - Add mobile and roomNumber.
sap, manual = self.const.system_sap, self.const.system_manual
contacts = [
            (attr, self.get_contacts(contact_type=contact_type,
                                     source_system=source_system,
                                     convert=self.attr2syntax[attr][0],
verify=self.attr2syntax[attr][1],
normalize=self.attr2syntax[attr][2]))
for attr, source_system, contact_type in (
('telephoneNumber', manual, self.const.contact_phone),
('facsimileTelephoneNumber', manual, self.const.contact_fax),
('mobile', sap, self.const.contact_mobile_phone),
('labeledURI', None, self.const.contact_url))]
self.id2labeledURI = contacts[-1][1]
self.attr2id2contacts = [v for v in contacts if v[1]]
# roomNumber
# Some employees have registered their office addresses in SAP.
# We store this as co.contact_office. The roomNumber is the alias.
attr = 'roomNumber'
syntax = self.attr2syntax[attr]
contacts = self.get_contact_aliases(
contact_type=self.const.contact_office,
source_system=self.const.system_sap,
convert=syntax[0],
verify=syntax[1],
normalize=syntax[2])
if contacts:
self.attr2id2contacts.append((attr, contacts))
def get_contact_aliases(self, contact_type=None, source_system=None,
convert=None, verify=None, normalize=None):
"""Return a dict {entity_id: [list of contact aliases]}."""
# The code mimics a reduced modules/OrgLDIF.py:get_contacts().
entity = Entity.EntityContactInfo(self.db)
cont_tab = defaultdict(list)
if not convert:
convert = text_type
if not verify:
verify = bool
for row in entity.list_contact_info(source_system=source_system,
contact_type=contact_type):
alias = convert(text_type(row['contact_alias']))
if alias and verify(alias):
cont_tab[int(row['entity_id'])].append(alias)
return dict((key, attr_unique(values, normalize=normalize))
for key, values in cont_tab.iteritems())
def init_person_titles(self):
"""Extends the person_titles dict with employment titles available via
the PersonEmployment module."""
super(OrgLDIFHiAMixin, self).init_person_titles()
timer = make_timer(logger,
'Fetching personal employment titles...')
employments = self.person.search_employment(main_employment=True)
for emp in employments:
if emp['person_id'] not in self.person_titles:
title = [(self.const.language_nb, emp['description'])]
self.person_titles[emp['person_id']] = title
timer("...personal employment titles done.")
|
smmribeiro/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/thirdparty/selectors2.py
|
Python
|
apache-2.0
| 27,478
| 0.000619
|
""" Back-ported, durable, and portable selectors """
# MIT License
#
# Copyright (c) 2017 Seth Michael Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import collections
import errno
import math
import select
import socket
import sys
import time
from .. import pycompat
namedtuple = collections.namedtuple
Mapping = collections.Mapping
try:
monotonic = time.monotonic
except AttributeError:
monotonic = time.time
__author__ = 'Seth Michael Larson'
__email__ = 'sethmichaellarson@protonmail.com'
__version__ = '2.0.0'
__license__ = 'MIT'
__url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
__all__ = ['EVENT_READ',
'EVENT_WRITE',
'SelectorKey',
'DefaultSelector',
'BaseSelector']
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
_DEFAULT_SELECTOR = None
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
_ERROR_TYPES = (OSError, IOError, socket.error)
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
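# e.g. _fileobj_to_fd(7) -> 7 and _fileobj_to_fd(sock) -> sock.fileno();
# anything without a usable fileno(), or a negative fd, raises ValueError.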
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as err:
if err.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
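# Hedged usage sketch of the selector API above (socket setup omitted; the
# callback stored in `data` is an assumption -- any object may be attached):
#   sel = DefaultSelector()
#   sel.register(sock, EVENT_READ, data=on_readable)
#   for key, events in sel.select(timeout=1.0):
#       key.data(key.fileobj)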
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
        def register(self, fileobj, events, data=None):
            key = super(SelectSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                self._readers.add(key.fd)
            if events & EVENT_WRITE:
                self._writers.add(key.fd)
            return key
|
tombs/Water-Billing-System
|
waterbilling/core/models.py
|
Python
|
agpl-3.0
| 53,833
| 0.009529
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import timedelta
from decimal import Decimal
from django.db import models
from django.db import transaction
from django.utils.timezone import now
from django.db.models import Sum, Max, F
from audit_log.models.managers import AuditLog
class CoreModel(models.Model):
created = models.DateTimeField(default=now, editable=False)
created_by = models.TextField(db_column='created_by', blank=False, editable=False)
last_updated = models.DateTimeField(default=now, db_column='last_updated', editable=False)
last_updated_by = models.TextField(db_column='last_updated_by', blank=False, editable=False)
class Meta:
abstract = True
class Config(CoreModel):
class Meta:
db_table = 'config'
verbose_name = "configuration"
verbose_name_plural = "configurations"
name = models.TextField(blank=False)
value = models.TextField(blank=False)
audit_log = AuditLog()
class AccountException(Exception):
pass
class Account(CoreModel):
"""
Account is defined by a customer and address.
Customers can have multiple accounts with different addresses.
An Account is mapped to a Meter.
Should the Meter be destroyed, close the account and create a new one with a new meter, but same customer and address
"""
customer = models.ForeignKey('Customer', db_column='customer_id')
address = models.ForeignKey('Address', db_column='address_id')
account_type = models.ForeignKey('AccountType', db_column='account_type_id')
#meter = models.ForeignKey('Meter', db_column='meter_id', unique=True)
status = models.TextField(blank=True)
remarks = models.TextField(blank=True)
class Meta:
db_table = 'account'
ordering = ['customer']
unique_together = ('customer', 'address')
def __unicode__(self):
return u'-'.join([unicode(self.customer),unicode(self.address)])
    @property
    def bills(self):
return self.bill_set.all()
@property
def notices(self):
return self.notice_set.all()
@property
def meterreads(self):
return self.meterread_set.all()
@property
def adjustments(self):
return self.adjustment_set.all()
@property
def meters(self):
meters = [meter for meter in self.accountmeter_set.all()]
return meters
@property
def accountmeters(self):
return self.accountmeter_set.all()
@property
def notes(self):
return self.accountnote_set.all()
@property
def balance(self):
return self.financialtransaction_set.latest('id').balance
@property
def latest_balance(self):
return self.financialtransaction_set.latest('id').balance
# @property
# def latest_balance_(self):
# posted_payment = self.payment_set.filter(status="posted",
# payment_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
# if not posted_payment:
# posted_payment = Decimal('0.0')
# if self.latest_bill:
# return self.latest_bill.amount_due - posted_payment
# return None
@property
def is_for_disconnection(self):
'''
Returns true if the account is for disconnection
'''
if self.status == 'for disconnection':
return True
return False
def for_disconnection(self):
self.status = 'for disconnection'
self.update()
def is_disconnected(self):
'''
Returns true if the account is for disconnection
'''
if self.status == 'disconnected':
return True
return False
def disconnect(self):
''' Set status of account to disconnected'''
self.status = 'disconnected'
self.update()
@property
def bill(self):
'''
Returns the bill of the current active period, None of none.
'''
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
bill = Bill.objects.filter(account=self, billing_schedule=bs)
if self.has_bill():
return bill.get()
return None
def has_bill(self, period=None):
'''
Determines if the account has a bill for a particular period (default is active period)
'''
has_bill = False
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
bill = Bill.objects.filter(account=self, billing_schedule=bs)
if bill.count() > 0 :
has_bill = True
return has_bill
@property
def latest_bill(self):
if self.bill_set.exists():
return self.bill_set.latest('id')
return None
@property
def latest_notice(self):
if self.notice_set.exists():
return self.notice_set.latest('id')
return None
@property
def reconnection_fees(self):
if self.latest_bill:
return self.financialtransaction_set.filter(type='reconnection_fee',
transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
return None
@property
def total_posted_payment(self):
if self.latest_bill:
return self.financialtransaction_set.filter(type='posted_payment',
transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
return None
@property
def total_adjustment(self):
if self.latest_bill:
credit = self.financialtransaction_set.filter(adjustment__type='credit', transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
debit = self.financialtransaction_set.filter(adjustment__type='debit', transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
reconnection_fee = self.financialtransaction_set.filter(adjustment__type='reconnection_fee', transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
#if credit is None and debit is None:
if credit is None and debit is None and reconnection_fee is None:
return Decimal('0.00')
#return credit - debit
return credit - debit - reconnection_fee
return Decimal('0.00')
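# Illustrative arithmetic (hypothetical figures, not from this codebase):
# credit=100.00, debit=30.00 and reconnection_fee=50.00 give
# total_adjustment = 100 - 30 - 50 = 20.00.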
def regenerate_bill(self,user=''):
"""
This function executes delete_current_bill and generate_bill in succession.
This regenerates an account's current bill (or creates a new one if none currently exists)
for the given active period.
"""
deleted = self.delete_current_bill(user=user)
created = False
if deleted:
print "-- original bill deleted"
bill, created = self.generate_bill()
else:
print "-- no bill to delete. generating anyway.."
bill, created = self.generate_bill()
return bill, created
def delete_current_bill(self, business_date=None, period=None, user=''):
"""
delete the bill for the current period. This feature is used for corrections in bill generation.
The related FinancialTransaction must also be deleted (currently unable to delete due to foreign key constraint).
"""
deleted = False
if period is None:
period = Config.objects.get(name='active_period').value
business_date = Config.objects.get(name='business_date').value
billing_schedule = BillingSchedule.objects.get(pk=period)
bill = self.bill
if bill:
try:
penalties = billing_schedule.penalty_set.filter(
account_id=self.id,
type='overdue')
penalty_amount = penalties.aggregate(Sum('amount'))['amount__sum']
print "penalty amount: ", penalt
|
kronenthaler/mod-pbxproj
|
pbxproj/pbxsections/XCConfigurationList.py
|
Python
|
mit
| 821
| 0.002436
|
from pbxproj import PBXGenericObject
class XCConfigurationList(PBXGenericObject):
def _get_comment(self):
info = self._get_section()
return f'Build configuration list for {info[0]} "{info[1]}"'
def _get_section(self):
objects = self.get_parent()
target_id = self.get_id()
for obj in objects.get_objects_in_section('PBXNativeTarget', 'PBXAggregateTarget'):
if target_id in obj.buildConfigurationList:
return obj.isa, obj.name
projects = filter(lambda o: target_id in o.buildConfigurationList, objects.get_objects_in_section('PBXProject'))
project = projects.__next__()
target = objects[project.targets[0]]
name = target.name if hasattr(target, 'name') else target.productName
return project.isa, name
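# Illustrative result (assumed target name "MyApp", not from this file): for a
# configuration list owned by a native target, _get_comment() returns
# 'Build configuration list for PBXNativeTarget "MyApp"'.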
|
sander76/home-assistant
|
homeassistant/components/climacell/__init__.py
|
Python
|
apache-2.0
| 11,985
| 0.000834
|
"""The ClimaCell integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from math import ceil
from typing import Any
from pyclimacell import ClimaCellV3, ClimaCellV4
from pyclimacell.const import CURRENT, DAILY, FORECASTS, HOURLY, NOWCAST
from pyclimacell.exceptions import (
CantConnectException,
InvalidAPIKeyException,
RateLimitedException,
UnknownException,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.weather import DOMAIN as WEATHER_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_API_VERSION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTRIBUTION,
CC_ATTR_CLOUD_COVER,
CC_ATTR_CONDITION,
CC_ATTR_HUMIDITY,
CC_ATTR_OZONE,
CC_ATTR_PRECIPITATION,
CC_ATTR_PRECIPITATION_PROBABILITY,
CC_ATTR_PRECIPITATION_TYPE,
CC_ATTR_PRESSURE,
CC_ATTR_TEMPERATURE,
CC_ATTR_TEMPERATURE_HIGH,
CC_ATTR_TEMPERATURE_LOW,
CC_ATTR_VISIBILITY,
CC_ATTR_WIND_DIRECTION,
CC_ATTR_WIND_GUST,
CC_ATTR_WIND_SPEED,
CC_SENSOR_TYPES,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
CC_V3_ATTR_PRECIPITATION_TYPE,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_WIND_SPEED,
CC_V3_SENSOR_TYPES,
CONF_TIMESTEP,
DEFAULT_TIMESTEP,
DOMAIN,
MAX_REQUESTS_PER_DAY,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [SENSOR_DOMAIN, WEATHER_DOMAIN]
def _set_update_interval(hass: HomeAssistant, current_entry: ConfigEntry) -> timedelta:
"""Recalculate update_interval based on existing ClimaCell instances and update them."""
api_calls = 4 if current_entry.data[CONF_API_VERSION] == 3 else 2
# Check how many configured ClimaCell instances share the same API key and
# pick an update interval that stays within the allowed request quota. Only
# 90% of MAX_REQUESTS_PER_DAY is budgeted, and each update costs `api_calls`
# requests (4 on the v3 API, 2 on v4), leaving a buffer of API calls at the
# end of the day.
other_instance_entry_ids = [
entry.entry_id
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.entry_id != current_entry.entry_id
and entry.data[CONF_API_KEY] == current_entry.data[CONF_API_KEY]
]
interval = timedelta(
minutes=(
ceil(
(24 * 60 * (len(other_instance_entry_ids) + 1) * api_calls)
/ (MAX_REQUESTS_PER_DAY * 0.9)
)
)
)
for entry_id in other_instance_entry_ids:
if entry_id in hass.data[DOMAIN]:
hass.data[DOMAIN][entry_id].update_interval = interval
return interval
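# Worked example (illustrative; assumes MAX_REQUESTS_PER_DAY == 500): for a
# single v3 entry the numerator is 24 * 60 * 1 * 4 = 5760, so the interval is
# ceil(5760 / (500 * 0.9)) = ceil(12.8) = 13 minutes between updates.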
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up ClimaCell API from a config entry."""
hass.data.setdefault(DOMAIN, {})
params = {}
# If config entry options not set up, set them up
if not entry.options:
params["options"] = {
CONF_TIMESTEP: DEFAULT_TIMESTEP,
}
else:
# Snap an invalid stored timestep to the nearest valid value
timestep = entry.options[CONF_TIMESTEP]
if timestep not in (1, 5, 15, 30):
if timestep <= 2:
timestep = 1
elif timestep <= 7:
timestep = 5
elif timestep <= 20:
timestep = 15
else:
timestep = 30
new_options = entry.options.copy()
new_options[CONF_TIMESTEP] = timestep
params["options"] = new_options
# Add API version if not found
if CONF_API_VERSION not in entry.data:
new_data = entry.data.copy()
new_data[CONF_API_VERSION] = 3
params["data"] = new_data
if params:
hass.config_entries.async_update_entry(entry, **params)
api_class = ClimaCellV3 if entry.data[CONF_API_VERSION] == 3 else ClimaCellV4
api = api_class(
entry.data[CONF_API_KEY],
entry.data.get(CONF_LATITUDE, hass.config.latitude),
entry.data.get(CONF_LONGITUDE, hass.config.longitude),
session=async_get_clientsession(hass),
)
coordinator = ClimaCellDataUpdateCoordinator(
hass,
entry,
api,
_set_update_interval(hass, entry),
)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN].pop(config_entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
class ClimaCellDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold ClimaCell data."""
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
api: ClimaCellV3 | ClimaCellV4,
update_interval: timedelta,
) -> None:
"""Initialize."""
self._config_entry = config_entry
self._api_version = config_entry.data[CONF_API_VERSION]
self._api = api
self.name = config_entry.data[CONF_NAME]
self.data = {CURRENT: {}, FORECASTS: {}}
super().__init__(
hass,
_LOGGER,
name=config_entry.data[CONF_NAME],
update_interval=update_interval,
)
async def _async_update_data(self) -> dict[str, Any]:
"""Update data via library."""
data = {FORECASTS: {}}
try:
if self._api_version == 3:
data[CURRENT] = await self._api.realtime(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_PRECIPITATION_TYPE,
*(sensor_type.key for sensor_type in CC_V3_SENSOR_TYPES),
]
)
data[FORECASTS][HOURLY] = await self._api.forecast_hourly(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
],
None,
timedelta(hours=24),
)
data[FORECASTS][DAILY] = await self._api.forecast_daily(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
],
None,
timedelta(days=14),
)
data[FORECASTS][NOWCAST] = await self._api.forecast_nowc
|
xingyepei/edx-platform
|
common/test/acceptance/pages/studio/settings_advanced.py
|
Python
|
agpl-3.0
| 7,464
| 0.001742
|
"""
Course Advanced Settings page
"""
from bok_choy.promise import EmptyPromise
from .course_page import CoursePage
from .utils import press_the_notification_button, type_in_codemirror, get_codemirror_value
KEY_CSS = '.key h3.title'
UNDO_BUTTON_SELECTOR = ".action-item .action-undo"
MANUAL_BUTTON_SELECTOR = ".action-item .action-cancel"
MODAL_SELECTOR = ".validation-error-modal-content"
ERROR_ITEM_NAME_SELECTOR = ".error-item-title strong"
ERROR_ITEM_CONTENT_SELECTOR = ".error-item-message"
SETTINGS_NAME_SELECTOR = ".is-not-editable"
class AdvancedSettingsPage(CoursePage):
"""
Course Advanced Settings page.
"""
url_path = "settings/advanced"
def is_browser_on_page(self):
def _is_finished_loading():
return len(self.q(css='.course-advanced-policy-list-item')) > 0
EmptyPromise(_is_finished_loading, 'Finished rendering the advanced policy items.').fulfill()
return self.q(css='body.advanced').present
def wait_for_modal_load(self):
"""
Wait for validation response from the server, and make sure that
the validation error modal pops up.
This method should only be called when it is guaranteed that there are
validation errors in the settings changes.
"""
self.wait_for_ajax()
self.wait_for_element_presence(MODAL_SELECTOR, 'Validation Modal is present')
def refresh_and_wait_for_load(self):
"""
Refresh the page and wait for all resources to load.
"""
self.browser.refresh()
self.wait_for_page()
def coordinates_for_scrolling(self, coordinates_for):
"""
Get the x and y coordinates of the element matching the given CSS selector
"""
element = self.browser.find_element_by_css_selector(coordinates_for)
location = element.location
return location['x'], location['y']
def undo_changes_via_modal(self):
"""
Trigger clicking event of the undo changes button in the modal.
Wait for the undoing process to load via ajax call.
Before that Scroll so the button is clickable on all browsers
"""
self.browser.execute_script("window.scrollTo" + str(self.coordinates_for_scrolling(UNDO_BUTTON_SELECTOR)))
self.q(css=UNDO_BUTTON_SELECTOR).click()
self.wait_for_ajax()
def trigger_manual_changes(self):
"""
Trigger click event of the manual changes button in the modal.
No need to wait for any ajax.
Before that Scroll so the button is clickable on all browsers
"""
self.browser.execute_script("window.scrollTo" + str(self.coordinates_for_scrolling(MANUAL_BUTTON_SELECTOR)))
self.q(css=MANUAL_BUTTON_SELECTOR).click()
def is_validation_modal_present(self):
"""
Checks if the validation modal is present.
"""
return self.q(css=MODAL_SELECTOR).present
def get_error_item_names(self):
"""
Returns a list of display names of all invalid settings.
"""
return self.q(css=ERROR_ITEM_NAME_SELECTOR).text
def get_error_item_messages(self):
"""
Returns a list of error messages of all invalid settings.
"""
return self.q(css=ERROR_ITEM_CONTENT_SELECTOR).text
def _get_index_of(self, expected_key):
for i, element in enumerate(self.q(css=KEY_CSS)):
# Sometimes get stale reference if I hold on to the array of elements
key = self.q(css=KEY_CSS).nth(i).text[0]
if key == expected_key:
return i
return -1
def save(self):
press_the_notification_button(self, "Save")
def cancel(self):
press_the_notification_button(self, "Cancel")
def set(self, key, new_value):
index = self._get_index_of(key)
type_in_codemirror(self, index, new_value)
self.save()
def get(self, key):
index = self._get_index_of(key)
return get_codemirror_value(self, index)
def set_values(self, key_value_map):
"""
Make multiple settings changes and save them.
"""
for key, value in key_value_map.iteritems():
index = self._get_index_of(key)
type_in_codemirror(self, index, value)
self.save()
def get_values(self, key_list):
"""
Get a key-value dictionary of all keys in the given list.
"""
result_map = {}
for key in key_list:
index = self._get_index_of(key)
val = get_codemirror_value(self, index)
result_map[key] = val
return result_map
@property
def displayed_settings_names(self):
"""
Returns all settings displayed on the advanced settings page/screen/modal/whatever
We call it 'name', but it's really whatever is embedded in the 'id' element for each field
"""
query = self.q(css=SETTINGS_NAME_SELECTOR)
return query.attrs('id')
@property
def expected_settings_names(self):
"""
Returns a list of settings expected to be displayed on the Advanced Settings screen
Should match the list of settings found in cms/djangoapps/models/settings/course_metadata.py
If a new setting is added to the metadata list, this test will fail and you must update it.
Basically this guards against accidental exposure of a field on the Advanced Settings screen
"""
return [
'advanced_modules',
'allow_anonymous',
'allow_anonymous_to_peers',
'allow_public_wiki_access',
'cert_html_view_overrides',
'cert_name_long',
'cert_name_short',
'certificates_display_behavior',
'course_image',
'cosmetic_display_price',
'advertised_start',
'announcement',
'display_name',
'info_sidebar_name',
'is_new',
'ispublic',
'issue_badges',
'max_student_enrollments_allowed',
'no_grade',
'display_coursenumber',
'display_organization',
'catalog_visibility',
'chrome',
'days_early_for_beta',
'default_tab',
'disable_progress_graph',
'discussion_blackouts',
'discussion_sort_alpha',
'discussion_topics',
'due',
'due_date_display_format',
'edxnotes',
'use_latex_compiler',
'video_speed_optimizations',
'enrollment_domain',
'html_textbooks',
'invitation_only',
'lti_passports',
'matlab_api_key',
'max_attempts',
'mobile_available',
'rerandomize',
'remote_gradebook',
'annotation_token_secret',
'showanswer',
'show_calculator',
'show_chat',
'show_reset_button',
'static_asset_path',
'text_customization',
'annotation_storage_url',
'social_sharing_url',
'video_bumper',
'cert_html_view_enabled',
'enable_proctored_exams',
'enable_timed_exams',
]
|
alex-Symbroson/BotScript
|
BotScript/res/calibrate.py
|
Python
|
mit
| 4,128
| 0.004845
|
from RaspiBot import Methods, sleep
# represents btn color and action on press in a state
class Btn:
def __init__(self, red, green, nextid = None):
self.red = red
self.green = green
self.next = nextid
# represents one menu state with message, button and own action
class State:
def __init__(self, title, param, btns, func):
self.title = title
self.param = param
self.func = func
self.btns = btns
def run(self):
# write messages
Methods.clearLCD()
Methods.writeLCD(self.title, 0, 0)
Methods.writeLCD(self.param, 0, 1)
# set button colors
for i in range(len(self.btns)):
Methods.setRedLED(i + 1, self.btns[i].red)
Methods.setGreenLED(i + 1, self.btns[i].green)
# run action
return self.func(self)
# represents whole menu
class StateMachine:
state = {}
states = 1
# returns ids to create new states
def getStates(self, num): return range(self.states, self.states + num)
# define state of specific id
def setState(self, id, *StateArgs):
self.state[id] = State(*StateArgs)
self.states += 1
return self.states
# run machine
def run(self, id):
while id != None: id = self.state[id].run()
# navigate through menu
def select(state):
while True:
if Methods.isBtnPressed(1):
Methods.waitForBtnRelease(1)
return state.btns[0].next
if Methods.isBtnPressed(2):
Methods.waitForBtnRelease(2)
return state.btns[1].next
if Methods.isBtnPressed(3):
Methods.waitForBtnRelease(3)
return state.btns[2].next
sleep(0.1)
# measure sharp values
def cal_sharps(state):
i = 0
sharp = 1
while True:
# measure left sharp
if Methods.isBtnPressed(1):
sharp = 1
Methods.writeLCD("left ", 5, 0)
Methods.waitForBtnRelease(1)
# measure right sharp
if Methods.isBtnPressed(2):
sharp = 2
Methods.writeLCD("right", 5, 0)
Methods.waitForBtnRelease(2)
# exit
if Methods.isBtnPressed(3):
Methods.waitForBtnRelease(3)
return state.btns[2].next
if i % 8 == 0:
Methods.writeLCD("%i" % Methods.getSharp(sharp, "raw"), 12, 0)
sleep(0.1)
i += 1
# measure radencoders after driven for specific time
def cal_radenc(state):
time = 1
while True:
# increase time
if Methods.isBtnPressed(1):
time = (time + 1) % 10
Methods.writeLCD("%i" % time, 5, 1)
Methods.waitForBtnRelease(1)
# start driving
if Methods.isBtnPressed(2):
# reset display
Methods.writeLCD("l: ---- r: ---- ", 0, 0)
Methods.waitForBtnRelease(2)
# drive
Methods.resetEncoders()
Methods.setMotors(50, 50)
sleep(time)
# get encoder values
Methods.stopMotors()
Methods.writeLCD("l:%5i r:%5i" % tuple(Methods.getEncoders("raw")), 0, 0)
# exit
if Methods.isBtnPressed(3):
Methods.waitForBtnRelease(3)
return state.btns[2].next
sleep(0.1)
# create new state machine
Calibrate = StateMachine()
# S:status C:calibrate
(S_EXIT, S_START, S_C_IRS, S_C_RNC
) = Calibrate.getStates(4)
# start menu
Calibrate.setState(S_START, "calibration tool", "sharp radenc x",
[Btn(0, 100, S_C_IRS), Btn(0, 100, S_C_RNC), Btn(100, 0, S_EXIT)], select
)
# calibrate sharps
Calibrate.setState(S_C_IRS, " sharp", "left right x",
[Btn(0, 100), Btn(0, 100), Btn(100, 0, S_START)], cal_sharps
)
# calibrate encoders
Calibrate.setState(S_C_RNC, "l: r: ", "time 1s start x",
[Btn(100, 100), Btn(0, 100), Btn(100, 0, S_START)], cal_radenc
)
# exit menu
Calibrate.setState(S_EXIT, "", "",
[Btn(0, 0), Btn(0, 0), Btn(0, 0)], lambda _: Methods.cleanup()
)
# run machine at start
Calibrate.run(S_START)
|
wenxiaomao1023/wenxiaomao
|
article/migrations/0002_auto_20160921_1518.py
|
Python
|
mit
| 587
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-21 15:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='article',
name='desc',
field=models.TextField(blank=True),
),
]
|
nikkitricky/nikbuzz
|
score.py
|
Python
|
mit
| 5,706
| 0.006484
|
"""
@author: Nikhith !!
"""
from pycricbuzz import Cricbuzz
import json
import sys
""" Writing a CLI for Live score """
try:
cric_obj = Cricbuzz() # cric_obj contains object instance of Cricbuzz Class
matches = cric_obj.matches()
except:
print "Connection dobhindhi bey!"
sys.exit(0)
# matches func is returning List of dictionaries
""" Key items in match dict : 1) status -- ex) Starts on Jun 15 at 09:30 GMT
2) mnum -- ex) 2nd Semi-Final (A2 VS B1)
3) mchdesc-- ex) BAN vs IND
4) srs -- ex) ICC Champions Trophy, 2017
5) mchstate- ex) preview / abandon / Result / complete
6) type -- ex) ODI
7) id -- ex) 4 / 6 (anything random given)
"""
"""CLI must contain commands for
-- current matches
-- selecting match by match id
-- getCommentary
"""
def upcomingmatches():
"""Prints upcoming matches list
"""
count = 1
for match in matches:
if match['mchstate'] == "preview":
print str(count)+". "+str(match['mchdesc'])+ " - "+ str(match['srs'])+"- - "+str(match['status'])
count = count + 1
def currentlive():
"""Prints Current LIVE MATCHES"""
count = 1
for match in matches:
#print str(match['mchdesc']) + " match id: " + str(match['mchstate'])
if (match['mchstate'] == "innings break" ) :
print str(match['mchdesc'])+" match id: "+str(match['id'])
count = count + 1
if (match['mchstate'] == "inprogress" ) :
print str(match['mchdesc'])+" match id: "+str(match['id'])
count = count + 1
if match['mchstate'] == "delay":
print str(match['mchdesc'])+" -> match has been delayed due to rain..! Enjoy the drizzle..!!"
if count == 1:
print "\nNO LIVE MATCHES RIGHT NOW!\n"
print "UPCOMING MATCHES TODAY!"
upcomingmatches()
else:
id = input("Enter corresponding match id : ")
gotolive(id)
return id
def calculate_runrate(runs, overs):
balls = str(overs)
arr = balls.split('.')
if len(arr) == 2:
rr = float(int(arr[0])*6)+int(arr[1])
else:
rr = float(int(arr[0])*6)
return (float(runs)/rr)*6
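# Illustrative check: 120 runs in "20.3" overs -> 20*6 + 3 = 123 balls, so
# calculate_runrate(120, 20.3) == (120/123.0)*6 ~= 5.85 runs per over.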
def gotolive(matchid):
batobj = cric_obj.livescore(matchid)['batting']
bowlobj = cric_obj.livescore(matchid)['bowling']
print "\n "+str(batobj['team'])+" vs "+str(bowlobj['team'])+"\n"
print " "+str(cric_obj.livescore(matchid)['matchinfo']['status'])+"\n"
if (bowlobj['score'] == []):
print "1st INNINGS: "+str(batobj['team'])+" => "+str(batobj['score'][0]['runs'])+"/"+str(batobj['score'][0]['wickets'])+" ("+str(batobj['score'][0]['overs'])+" Overs)"
print "Batting:"
try:
print " " + str(batobj['batsman'][0]['name']) + " : " + str(batobj['batsman'][0]['runs']) + " (" + str(batobj['batsman'][0]['balls']) + ")"
print " " + str(batobj['batsman'][1]['name']) + " : " +
|
str(batobj['batsman'][1]['runs']) + " (" + str(batobj['batsman'][1]['balls']) + ")"
except:
print "Wicket!!!!"
print "Bowling:"
print " " + str(bowlobj['bowler'][0]['name']) + " : " + str(bowlobj['bowler'][0]['runs']) + " /" + str(bowlobj['bowler'][0]['wickets']) + " (" + str(bowlobj['bowler'][0]['overs']) + ")"
print " " + str(bowlobj['bowler'][1]['name']) + " : " + str(bowlobj['bowler'][1]['runs']) + " /" + str(bowlobj['bowler'][1]['wickets']) + " (" + str(bowlobj['bowler'][1]['overs']) + ")"
print "Runrate:"
print ' {:1.2f}'.format(calculate_runrate(str(batobj['score'][0]['runs']),str(batobj['score'][0]['overs'])))
else:
print "1st INNINGS: "+str(bowlobj['team'])+" => "+str(bowlobj['score'][0]['runs'])+"/"+str(bowlobj['score'][0]['wickets'])+" ("+str(bowlobj['score'][0]['overs'])+" Overs)"
print "2nd INNINGS: "+str(batobj['team'])+" => "+str(batobj['score'][0]['runs'])+"/"+str(batobj['score'][0]['wickets'])+" ("+str(batobj['score'][0]['overs'])+" Overs)"
print "Batting:"
try:
print " "+str(batobj['batsman'][0]['name'])+" : "+str(batobj['batsman'][0]['runs'])+" ("+str(batobj['batsman'][0]['balls'])+")"
print " " + str(batobj['batsman'][1]['name']) + " : " + str(batobj['batsman'][1]['runs']) + " (" + str(batobj['batsman'][1]['balls']) + ")"
except:
print "Wicket!!"
print "Bowling:"
print " " + str(bowlobj['bowler'][0]['name']) + " : " + str(bowlobj['bowler'][0]['runs'])+" /"+str(bowlobj['bowler'][0]['wickets']) + " (" + str(bowlobj['bowler'][0]['overs']) + ")"
print " " + str(bowlobj['bowler'][1]['name']) + " : " + str(bowlobj['bowler'][1]['runs']) + " /" + str(bowlobj['bowler'][1]['wickets']) + " (" + str(bowlobj['bowler'][1]['overs']) + ")"
print "Summary:"
print " " + str(cric_obj.livescore(matchid)['matchinfo']['status'])
def last12Balls():
pass
def commentary(matchid):
print "\nCommentary: "
try:
for i in range(6):
print " "+str(cric_obj.commentary(matchid)['commentary'][i])
print "************************************************************************************************"
except:
print "No running commentary.. now..!!"
if __name__ == '__main__':
matchid=currentlive()
commentary(matchid)
|
cliffton/localsecrets
|
offers/views.py
|
Python
|
mit
| 3,993
| 0.001002
|
# Create your views here.
from rest_framework import viewsets
from offers.models import Offer, OfferHistory, OfferReview
from offers.serializers import (
OfferSerializer,
OfferHistorySerializer,
OfferReviewSerializer
)
from shops.serializers import ShopSerializer
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from math import radians, cos, sin, asin, sqrt
from accounts.models import User
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
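# Illustrative check (coordinates are assumptions, not from this module):
# haversine(2.35, 48.86, -0.13, 51.51) is roughly 343 km, the great-circle
# distance between Paris and London.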
class OfferViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Offer.objects.all()
serializer_class = OfferSerializer
def get_queryset(self):
queryset = Offer.objects.all()
email = self.request.query_params.get('email', None)
# user = User.objects.get(email=email)
if email:
queryset = queryset.filter(shop__user__email=email)
return queryset
class OfferHistoryViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = OfferHistory.objects.all()
serializer_class = OfferHistorySerializer
class OfferReviewViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = OfferReview.objects.all()
serializer_class = OfferReviewSerializer
def get_queryset(self):
queryset = OfferReview.objects.all()
offer_id = self.request.query_params.get('offer', None)
if offer_id:
queryset = queryset.filter(offer__id=offer_id)
return queryset
class NearestOffer(viewsets.ViewSet):
queryset = Offer.objects.all()
# serializer_class = OfferSerializer
def list(self, request):
params = request.query_params
offers = []
if params:
ulat = float(params['lat'])
ulon = float(params['lon'])
for offer in Offer.objects.select_related('shop').all():
olat = float(offer.shop.latitude)
olon = float(offer.shop.longitude)
distance = haversine(ulon, ulat, olon, olat)  # haversine() expects lon/lat order
offer_data = OfferSerializer(offer).data
offer_data['distance'] = float(distance)
offer_data['shop'] = ShopSerializer(offer.shop).data
offers.append(offer_data)
return Response(
offers,
status=status.HTTP_200_OK)
class OfferData(viewsets.ViewSet):
queryset = Offer.objects.all()
# serializer_class = OfferSerializer
def retrieve(self, request, pk=None):
if pk:
offer = Offer.objects.get(pk=pk)
total = offer.offer_reviews.all().count()
if not total == 0:
positive = offer.offer_reviews.filter(sentiment="positive").count()
negative = offer.offer_reviews.filter(sentiment="negative").count()
neutral = offer.offer_reviews.filter(sentiment="neutral").count()
response = {
"positive": (float(positive) / total) * 100,
"negative": (float(negative) / total) * 100,
"neutral": (float(neutral) / total) * 100,
}
else:
response = {
"positive": float(0),
"negative": float(0),
"neutral": float(0),
}
return Response(
response,
status=status.HTTP_200_OK)
|
skomendera/PyMyTools
|
providers/value.py
|
Python
|
mit
| 359
| 0
|
def represents_int(value):
try:
int(value)
return True
except ValueError:
return False
def bytes_to_gib(byte_value, round_digits=2):
return round(byte_value / 1024 / 1024 / float(1024), round_digits)
def count_to_millions(count_value, round_digits=3):
return round(count_value / float(1000000), round_digits)
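# Illustrative conversions: bytes_to_gib(1073741824) == 1.0 and
# count_to_millions(1234567) == 1.235.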
|
betterlife/psi
|
psi/app/models/__init__.py
|
Python
|
mit
| 875
| 0.001143
|
from .image import Image
from .product_category import ProductCategory
from .supplier import Supplier, PaymentMethod
from .product import Product
from .product import ProductImage
from .enum_values import EnumValues
from .related_values import RelatedValues
from .customer import Customer
from .expense import Expense
from .incoming import Incoming
from .shipping import Shipping, ShippingLine
from .receiving import Receiving, ReceivingLine
from .inventory_transaction import InventoryTransaction, InventoryTransactionLine
from .purchase_order import PurchaseOrder, PurchaseOrderLine
from .sales_order import SalesOrder, SalesOrderLine
from .user import User
from .role import Role, roles_users
from .organization import Organization
from .inventory_in_out_link import InventoryInOutLink
from .aspects import update_menemonic
from .product_inventory import ProductInventory
|
deepmind/dm_control
|
dm_control/locomotion/arenas/bowl.py
|
Python
|
apache-2.0
| 4,927
| 0.008119
|
# Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bowl arena with bumps."""
from dm_control import composer
from dm_control.locomotion.arenas import assets as locomotion_arenas_assets
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
from scipy import ndimage
mjlib = mjbindings.mjlib
_TOP_CAMERA_DISTANCE = 100
_TOP_CAMERA_Y_PADDING_FACTOR = 1.1
# Constants related to terrain generation.
_TERRAIN_SMOOTHNESS = .5 # 0.0: maximally bumpy; 1.0: completely smooth.
_TERRAIN_BUMP_SCALE = .2 # Spatial scale of terrain bumps (in meters).
class Bowl(composer.Arena):
"""A bowl arena with sinusoidal bumps."""
def _build(self, size=(10, 10), aesthetic='default', name='bowl'):
super()._build(name=name)
self._hfield = self._mjcf_root.asset.add(
'hfield',
name='terrain',
nrow=201,
ncol=201,
size=(6, 6, 0.5, 0.1))
if aesthetic != 'default':
ground_info = locomotion_arenas_assets.get_ground_texture_info(aesthetic)
sky_info = locomotion_arenas_assets.get_sky_texture_info(aesthetic)
texturedir = locomotion_arenas_assets.get_texturedir(aesthetic)
self._mjcf_root.compiler.texturedir = texturedir
self._texture = self._mjcf_root.asset.add(
'texture', name='aesthetic_texture', file=ground_info.file,
type=ground_info.type)
self._material = self._mjcf_root.asset.add(
'material', name='aesthetic_material', texture=self._texture,
texuniform='true')
self._skybox = self._mjcf_root.asset.add(
'texture', name='aesthetic_skybox', file=sky_info.file,
type='skybox', gridsize=sky_info.gridsize,
gridlayout=sky_info.gridlayout)
self._terrain_geom = self._mjcf_root.worldbody.add(
'geom',
name='terrain',
type='hfield',
pos=(0, 0, -0.01),
hfield='terrain',
material=self._material)
self._ground_geom = self._mjcf_root.worldbody.add(
'geom',
type='plane',
name='groundplane',
size=list(size) + [0.5],
material=self._material)
else:
self._terrain_geom = self._mjcf_root.worldbody.add(
'geom',
name='terrain',
type='hfield',
rgba=(0.2, 0.3, 0.4, 1),
pos=(0, 0, -0.01),
hfield='terrain')
self._ground_geom = self._mjcf_root.worldbody.add(
'geom',
type='plane',
name='groundplane',
rgba=(0.2, 0.3, 0.4, 1),
size=list(size) + [0.5])
self._mjcf_root.visual.headlight.set_attributes(
ambient=[.4, .4, .4], diffuse=[.8, .8, .8], specular=[.1, .1, .1])
self._regenerate = True
def regenerate(self, random_state):
# regeneration of the bowl requires physics, so postponed to initialization.
self._regenerate = True
def initialize_episode(self, physics, random_state):
if self._regenerate:
self._regenerate = False
# Get heightfield resolution, assert that it is square.
res = physics.bind(self._hfield).nrow
assert res == physics.bind(self._hfield).ncol
# Sinusoidal bowl shape.
row_grid, col_grid = np.ogrid[-1:1:res*1j, -1:1:res*1j]
radius = np.clip(np.sqrt(col_grid**2 + row_grid**2), .1, 1)
bowl_shape = .5 - np.cos(2*np.pi*radius)/2
# Random smooth bumps.
terrain_size = 2 * physics.bind(self._hfield).size[0]
bump_res = int(terrain_size / _TERRAIN_BUMP_SCALE)
bumps = random_state.uniform(_TERRAIN_SMOOTHNESS, 1, (bump_res, bump_res))
smooth_bumps = ndimage.zoom(bumps, res / float(bump_res))
# Terrain is elementwise product.
terrain = bowl_shape * smooth_bumps
start_idx = physics.bind(self._hfield).adr
physics.model.hfield_data[start_idx:start_idx+res**2] = terrain.ravel()
# If we have a rendering context, we need to re-upload the modified
# heightfield data.
if physics.contexts:
with physics.contexts.gl.make_current() as ctx:
ctx.call(mjlib.mjr_uploadHField,
physics.model.ptr,
physics.contexts.mujoco.ptr,
physics.bind(self._hfield).element_id)
@property
def ground_geoms(self):
return (self._terrain_geom, self._ground_geom)
|
Danielhiversen/home-assistant
|
tests/components/template/test_binary_sensor.py
|
Python
|
apache-2.0
| 27,103
| 0.001107
|
"""The tests for the Template Binary sensor platform."""
from datetime import timedelta
import logging
from unittest.mock import patch
import pytest
from homeassistant import setup
from homeassistant.components import binary_sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import Context, CoreState
from homeassistant.helpers import entity_registry
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
ON = "on"
OFF = "off"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ True }}",
"device_class": "motion",
}
},
},
},
],
)
async def test_setup_legacy(hass, start_ha):
"""Test the setup."""
state = hass.states.get("binary_sensor.test")
assert state is not None
assert state.name == "virtual thingy"
assert state.state == ON
assert state.attributes["device_class"] == "motion"
@pytest.mark.parametrize("count,domain", [(0, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{"binary_sensor": {"platform": "template"}},
{"binary_sensor": {"platform": "template", "sensors": {"foo bar": {}}}},
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"value_template": "{{ foo }}",
"device_class": "foobarnotreal",
}
},
}
},
{
"binary_sensor": {
"platform": "template",
"sensors": {"test": {"device_class": "motion"}},
}
},
],
)
async def test_setup_invalid_sensors(hass, count, start_ha):
"""Test setup with no sensors."""
assert len(hass.states.async_entity_ids()) == count
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"icon_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"mdi:check"
"{% endif %}",
},
},
},
},
],
)
async def test_icon_template(hass, start_ha):
"""Test icon template."""
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("icon") == ""
hass.states.async_set("binary_sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["icon"] == "mdi:check"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"bina
|
ry_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"entity_picture_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"/local/sensor.png"
"{% endif %}",
},
},
},
},
],
)
async def test_entity_picture_template(hass, start_ha):
"""Test entity_picture template."""
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("entity_picture") == ""
hass.states.async_set("binary_sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["entity_picture"] == "/local/sensor.png"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"attribute_templates": {
"test_attribute": "It {{ states.sensor.test_state.state }}."
},
},
},
},
},
],
)
async def test_attribute_templates(hass, start_ha):
"""Test attribute_templates template."""
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("test_attribute") == "It ."
hass.states.async_set("sensor.test_state", "Works2")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["test_attribute"] == "It Works."
@pytest.fixture
async def setup_mock():
"""Do setup of sensor mock."""
with patch(
"homeassistant.components.template.binary_sensor."
"BinarySensorTemplate._update_state"
) as _update_state:
yield _update_state
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"match_all_template_sensor": {
"value_template": (
"{% for state in states %}"
"{% if state.entity_id == 'sensor.humidity' %}"
"{{ state.entity_id }}={{ state.state }}"
"{% endif %}"
"{% endfor %}"
),
},
},
}
},
],
)
async def test_match_all(hass, setup_mock, start_ha):
"""Test template that is rerendered on any state lifecycle."""
init_calls = len(setup_mock.mock_calls)
hass.states.async_set("sensor.any_state", "update")
await hass.async_block_till_done()
assert len(setup_mock.mock_calls) == init_calls
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
},
},
},
},
],
)
async def test_event(hass, start_ha):
"""Test the event."""
state = hass.states.get("binary_sensor.test")
assert state.state == OFF
hass.states.async_set("sensor.test_state", ON)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == ON
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_on": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
|
rokuz/omim
|
kml/pykmlib/bindings_test.py
|
Python
|
apache-2.0
| 4,094
| 0.004272
|
import unittest
import datetime
import pykmlib
class PyKmlibAdsTest(unittest.TestCase):
def test_smoke(self):
classificator_file_str = ''
with open('./data/classificator.txt', 'r') as classificator_file:
classificator_file_str = classificator_file.read()
types_file_str = ''
with open('./data/types.txt', 'r') as types_file:
types_file_str = types_file.read()
pykmlib.load_classificator_types(classificator_file_str, types_file_str)
category = pykmlib.CategoryData()
category.name['default'] = 'Test category'
category.name['ru'] = 'Тестовая категория'
category.description['default'] = 'Test description'
category.description['ru'] = 'Тестовое описание'
category.annotation['default'] = 'Test annotation'
category.annotation['en'] = 'Test annotation'
category.image_url = 'https://localhost/123.png'
category.visible = True
category.author_name = 'Maps.Me'
category.author_id = '12345'
category.rating = 8.9
category.reviews_number = 567
category.last_modified = int(datetime.datetime.now().timestamp())
category.access_rules = pykmlib.AccessRules.PUBLIC
category.tags.set_list(['mountains', 'ski', 'snowboard'])
category.toponyms.set_list(['12345', '54321'])
category.languages.set_list(['en', 'ru', 'de'])
category.properties.set_dict({'property1':'value1', 'property2':'value2'})
bookmark = pykmlib.BookmarkData()
bookmark.name['default'] = 'Test bookmark'
bookmark.name['ru'] = 'Тестовая метка'
bookmark.description['default'] = 'Test bookmark description'
bookmark.description['ru'] = 'Тестовое описание метки'
bookmark.feature_types.set_list([
pykmlib.classificator_type_to_index('historic-castle'),
pykmlib.classificator_type_to_index('historic-memorial')])
bookmark.custom_name['default'] = 'Мое любимое место'
bookmark.custom_name['en'] = 'My favorite place'
bookmark.color.predefined_color = pykmlib.PredefinedColor.BLUE
bookmark.color.rgba = 0
bookmark.icon = pykmlib.BookmarkIcon.HOTEL
bookmark.viewport_scale = 15
bookmark.timestamp = int(datetime.datetime.now().timestamp())
bookmark.point = pykmlib.LatLon(45.9242, 56.8679)
bookmark.visible = True
bookmark.nearest_toponym = '12345'
bookmark.properties.set_dict({'bm_property1':'value1', 'bm_property2':'value2'})
bookmark.bound_tracks.set_list([0])
layer1 = pykmlib.TrackLayer()
layer1.line_width = 6.0
layer1.color.rgba = 0xff0000ff
layer2 = pykmlib.TrackLayer()
layer2.line_width = 7.0
layer2.color.rgba = 0x00ff00ff
track = pykmlib.TrackData()
track.local_id = 1
track.name['default'] = 'Test track'
track.name['ru'] = 'Тестовый трек'
track.description['default'] = 'Test track description'
track.description['ru'] = 'Тестовое описание трека'
track.timestamp = int(datetime.datetime.now().timestamp())
track.layers.set_list([layer1, layer2])
track.points.set_list([
pykmlib.LatLon(45.9242, 56.8679),
pykmlib.LatLon(45.2244, 56.2786),
pykmlib.LatLon(45.1964, 56.9832)])
track.visible = True
track.nearest_toponyms.set_list(['12345', '54321', '98765'])
track.properties.set_dict({'tr_property1':'value1', 'tr_property2':'value2'})
file_data = pykmlib.FileData()
file_data.server_id = 'AAAA-BBBB-CCCC-DDDD'
file_data.category = category
file_data.bookmarks.append(bookmark)
file_data.tracks.append(track)
s = pykmlib.export_kml(file_data)
imported_file_data = pykmlib.import_kml(s)
self.assertEqual(file_data, imported_file_data)
if __name__ == "__main__":
unittest.main()
|
vipmunot/HackerRank
|
Algorithms/Viral Advertising.py
|
Python
|
mit
| 81
| 0.012346
|
m = [2]
for i in range(int(input())-1):
m.append(int(3*m[i]/2))
print(sum(m))
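# Trace for input 3 (illustrative): m == [2, 3, 4], so the cumulative number
# of likes printed is 9, matching HackerRank's "Viral Advertising" sample.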
|
lizardsystem/lizard-fewsapi
|
lizard_fewsapi/collect.py
|
Python
|
gpl-3.0
| 1,482
| 0
|
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import logging
import requests
logger = logging.getLogger(__name__)
def collect_filters(url):
"""Return filters from FEWS, cleaned and ready for storing as json."""
from_fews = _download(url)
result = []
for filter_dict in from_fews:
result.append(_process_filter_dict(filter_dict))
return result
def collect_parameters(url):
from_fews = _download(url)
# TODO
return from_fews
def collect_locations(url):
from_fews = _download(url)
# TODO
return from_fews
def _download(url):
r = requests.get(url)
r.raise_for_status() # Only raises an error when not succesful.
return r.json()
def _process_filter_dict(filter_dict):
# {'filter': {name, childfilters, etc}
content = filter_dict['filter']
name = content['name']
description = content['description']
if name == description:
# Description is only interesting if it is different from the name.
# Often it is the same, so we've got to filter it out.
description = ''
children = [_process_filter_dict(child_filter_dict)
for child_filter_dict in content.get('childFilters', [])]
result = {'id': content['id'],
'name': name,
'description': description,
'children': children}
return result
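# Illustrative example (hypothetical input, not from the original source):
# _process_filter_dict({'filter': {'id': 'f1', 'name': 'Rivers',
# 'description': 'Rivers'}}) returns
# {'id': 'f1', 'name': 'Rivers', 'description': '', 'children': []}
# because a description identical to the name is dropped.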
|
epmatsw/FootballBot
|
fxns/8ball.py
|
Python
|
cc0-1.0
| 986
| 0.037525
|
if dest.lower()=='footballbot': dest=origin
par=' '.join(params).lower()
if len(par) < 10 and par.count('is') == 0 and par.count('?') == 0 and par.count('will') == 0 and par.count('should') == 0 and par.count('could') == 0 and par.count('do') == 0 and par.count('has') == 0 and par.count('does') == 0 and par.count('when') == 0 and par.count('why') == 0 and par.count('who') == 0: db['msgqueue'].append([origin+': That\'s not a question!',dest])
else:
if par.count(' or ') == 1:
opt1=par[par.find(' or ')+4:].strip()
if opt1.count(' ') != 0: opt1=opt1[:opt1.find(' ')].strip()
# par is reversed so that finding ' ro ' (' or ' backwards) isolates the word
# just before ' or '; opt2 is reversed back to normal a few lines below.
opt2=par[::-1]
opt2=opt2[opt2.find(' ro ')+4:].strip()
if opt2.count(' ') != 0: opt2=opt2[:opt2.find(' ')].strip()
opt1=opt1.replace('?','')
opt2=opt2.replace('?','')
opt2=opt2[::-1]
db['msgqueue'].append([origin+': '+random.choice(db['language']['verbs'])+'ing '+random.choice([opt1,opt2]),dest])
else: db['msgqueue'].append([origin+': '+random.choice(db['language']['eightball']),dest])
|
junmin-zhu/chromium-rivertrail
|
tools/chrome_remote_control/chrome_remote_control/page_runner.py
|
Python
|
bsd-3-clause
| 6,385
| 0.01112
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import time
import traceback
import urlparse
import random
import csv
from chrome_remote_control import page_test
from chrome_remote_control import util
from chrome_remote_control import wpr_modes
class PageState(object):
def __init__(self):
self.did_login = False
class PageRunner(object):
"""Runs a given test against a given test."""
def __init__(self, page_set):
self.page_set = page_set
def __enter__(self):
return self
def __exit__(self, *args):
self.Close()
def _ReorderPageSet(self, test_shuffle_order_file):
page_set_dict = {}
for page in self.page_set:
page_set_dict[page.url] = page
self.page_set.pages = []
with open(test_shuffle_order_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
csv_header = csv_reader.next()
if 'url' not in csv_header:
raise Exception('Unusable test_shuffle_order_file.')
url_index = csv_header.index('url')
for csv_row in csv_reader:
if csv_row[url_index] in page_set_dict:
self.page_set.pages.append(page_set_dict[csv_row[url_index]])
else:
raise Exception('Unusable test_shuffle_order_file.')
def Run(self, options, possible_browser, test, results):
archive_path = os.path.abspath(os.path.join(self.page_set.base_dir,
self.page_set.archive_path))
if options.wpr_mode == wpr_modes.WPR_OFF:
if os.path.isfile(archive_path):
possible_browser.options.wpr_mode = wpr_modes.WPR_REPLAY
else:
possible_browser.options.wpr_mode = wpr_modes.WPR_OFF
logging.warning("""
The page set archive %s does not exist, benchmarking against live sites!
Results won't be repeatable or comparable.
To fix this, either add svn-internal to your .gclient using
http://goto/read-src-internal, or create a new archive using --record.
""", os.path.relpath(archive_path))
credentials_path = None
if self.page_set.credentials_path:
credentials_path = os.path.join(self.page_set.base_dir,
self.page_set.credentials_path)
if not os.path.exists(credentials_path):
credentials_path = None
with possible_browser.Create() as b:
b.credentials.credentials_path = credentials_path
test.SetUpBrowser(b)
b.credentials.WarnIfMissingCredentials(self.page_set)
if not options.test_shuffle and options.test_shuffle_order_file is not\
None:
raise Exception('--test-shuffle-order-file requires --test-shuffle.')
# Set up a random generator for shuffling the page running order.
test_random = random.Random()
b.SetReplayArchivePath(archive_path)
with b.ConnectToNthTab(0) as tab:
if options.test_shuffle_order_file is None:
for _ in range(int(options.pageset_repeat)):
if options.test_shuffle:
test_random.shuffle(self.page_set)
for page in self.page_set:
for _ in range(int(options.page_repeat)):
self._RunPage(options, page, tab, test, results)
else:
self._ReorderPageSet(options.test_shuffle_order_file)
for page in self.page_set:
self._RunPage(options, page, tab, test, results)
def _RunPage(self, options, page, tab, test, results):
logging.info('Running %s' % page.url)
page_state = PageState()
try:
did_prepare = self.PreparePage(page, tab, page_state, results)
except Exception, ex:
logging.error('Unexpected failure while running %s: %s',
page.url, traceback.format_exc())
self.CleanUpPage(page, tab, page_state)
raise
if not did_prepare:
self.CleanUpPage(page, tab, page_state)
return
try:
test.Run(options, page, tab, results)
except page_test.Failure, ex:
logging.info('%s: %s', ex, page.url)
results.AddFailure(page, ex, traceback.format_exc())
return
except util.TimeoutException, ex:
logging.warning('Timed out while running %s', page.url)
results.AddFailure(page, ex, traceback.format_exc())
return
except Exception, ex:
logging.error('Unexpected failure while running %s: %s',
page.url, traceback.format_exc())
raise
finally:
self.CleanUpPage(page, tab, page_state)
def Close(self):
pass
@staticmethod
def WaitForPageToLoad(expression, tab):
def IsPageLoaded():
return tab.runtime.Evaluate(expression)
# Wait until the form is submitted and the page completes loading.
util.WaitFor(lambda: IsPageLoaded(), 60) # pylint: disable=W0108
def PreparePage(self, page, tab, page_state, results):
parsed_url = urlparse.urlparse(page.url)
if parsed_url[0] == 'file':
path = os.path.join(self.page_set.base_dir,
parsed_url.netloc) # pylint: disable=E1101
dirname, filename = os.path.split(path)
tab.browser.SetHTTPServerDirectory(dirname)
target_side_url = tab.browser.http_server.UrlOf(filename)
else:
target_side_url = page.url
if page.credentials:
page_state.did_login = tab.browser.credentials.LoginNeeded(
tab, page.credentials)
if not page_state.did_login:
msg = 'Could not login to %s on %s' % (page.credentials,
target_side_url)
logging.info(msg)
results.AddFailure(page, msg, "")
return False
tab.page.Navigate(target_side_url)
# Wait for unpredictable redirects.
if page.wait_time_after_navigate:
time.sleep(page.wait_time_after_navigate)
if page.wait_for_javascript_expression is not None:
self.WaitForPageToLoad(page.wait_for_javascript_expression, tab)
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
return True
def CleanUpPage(self, page, tab, page_state): # pylint: disable=R0201
if page.credentials and page_state.did_login:
tab.browser.credentials.LoginNoLongerNeeded(tab, page.credentials)
tab.runtime.Evaluate("""chrome && chrome.benchmarking &&
chrome.benchmarking.closeConnections()""")
|
start-jsk/jsk_apc
|
demos/instance_occlsegm/tests/image_tests/test_resize.py
|
Python
|
bsd-3-clause
| 1,334
| 0
|
import cv2
import skimage.data
import instance_occlsegm_lib
def test_resize():
for interpolation in [cv2.INTER_NEAREST, cv2.INTER_LINEAR]:
_test_resize(interpolation)
def _test_resize(interpolation):
img = skimage.data.astronaut()
H_dst, W_dst = 480, 640
ret = instance_occlsegm_lib.image.resize(img, height=H_dst, width=W_dst,
interpolation=interpolation)
assert ret.dtype == img.dtype
assert ret.shape == (H_dst, W_dst, 3)
ret = instance_occlsegm_lib.image.resize(
img, height=H_dst, interpolation=interpolation)
hw_ratio = 1. * img.shape[1] / img.shape[0]
W_expected = int(round(1 / hw_ratio * H_dst))
assert ret.dtype == img.dtype
assert ret.shape == (H_dst, W_expected, 3)
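# For the 512x512 astronaut test image hw_ratio == 1.0, so W_expected == 480
# when only height=480 is given and the aspect ratio is preserved.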
scale = 0.3
ret = instance_occlsegm_lib.image.resize(img, fy=scale, fx=scale,
interpolation=interpolation)
assert ret.dtype == img.dtype
H_expected = int(round(img.shape[0] * 0.3))
W_expected = int(round(img.shape[1] * 0.3))
assert ret.shape == (H_expected, W_expected, 3)
scale = 0.3
ret = instance_occlsegm_lib.image.resize(
img, fy=scale, interpolation=interpolation)
assert ret.dtype == img.dtype
assert ret.shape == (H_expected, W_expected, 3)
|
denz/swarm-crawler
|
swarm_crawler/text.py
|
Python
|
bsd-3-clause
| 2,172
| 0.005525
|
BREADABILITY_AVAILABLE = True
try:
from breadability.readable import Article, prep_article, check_siblings
except ImportError:
BREADABILITY_AVAILABLE = False
Article = object
from operator import attrgetter
from werkzeug.utils import cached_property
import re
from lxml.etree import tounicode, tostring
class PageText(Article):
WHITESPACE = {' ':re.compile(r"[\s\r\n]+"),
'':re.compile(r"\.{3,}"),}
CANDIDATE_SEPARATOR = u'\r\n'
def __init__(self, *args, **kwargs):
if not BREADABILITY_AVAILABLE:
raise ImportError('breadability is not available')
super(PageText, self).__init__(*args, **kwargs)
def __unicode__(self):
return self.winner()
def stripped(self, text):
for replacement, whitespace in self.WHITESPACE.items():
text = re.sub(whitespace, replacement, text)
return text
def slice(self, before=1, reverse=True):
if self.candidates:
# cleanup by removing the should_drop we spotted.
[n.drop_tree() for n in self._should_drop
if n.getparent() is not None]
# right now we return the highest scoring candidate content
by_score = sorted([c for c in self.candidates.values()],
key=attrgetter('content_score'), reverse=reverse)
# since we have several candidates, check the winner's siblings
# for extra content
for winner in by_score[:before]:
winner = check_siblings(winner, self.candidates)
# updated_winner.node = prep_article(updated_winner.node)
if winner.node is not None:
yield winner.node
def winner(self, greed=1):
if not self.candidates:
return u''
if isinstance(greed, float):
if not 0 < greed < 1.0:
raise ValueError('greed coefficient should be an integer or 0<x<1.0')
greed = int(round(len(self.candidates)*greed))
return self.CANDIDATE_SEPARATOR.join((self.stripped(tounicode(node,
method='text')) for node in self.slice(before=greed)))
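# Illustrative: with 10 scored candidates, winner(greed=0.4) keeps
# int(round(10 * 0.4)) == 4 top candidates and joins their stripped text with
# CANDIDATE_SEPARATOR.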
|
gorgias/apispec
|
tests/test_core.py
|
Python
|
mit
| 13,849
| 0.000939
|
# -*- coding: utf-8 -*-
import pytest
import mock
from apispec import APISpec, Path
from apispec.exceptions import PluginError, APISpecError
description = ('This is a sample Petstore server. You can find out more '
'about Swagger at <a href=\"http://swagger.wordnik.com\">http://swagger.wordnik.com</a> '
'or on irc.freenode.net, #swagger. For this sample, you can use the api '
'key \"special-key\" to test the authorization filters')
@pytest.fixture()
def spec():
return APISpec(
title='Swagger Petstore',
version='1.0.0',
info={'description': description},
security=[{'apiKey': []}],
)
class TestMetadata:
def test_swagger_version(self, spec):
assert spec.to_dict()['swagger'] == '2.0'
def test_swagger_metadata(self, spec):
metadata = spec.to_dict()
assert metadata['security'] == [{'apiKey': []}]
assert metadata['info']['title'] == 'Swagger Petstore'
assert metadata['info']['version'] == '1.0.0'
assert metadata['info']['description'] == description
class TestTags:
tag = {
'name': 'MyTag',
'description': 'This tag gathers all API endpoints which are mine.'
}
def test_tag(self, spec):
spec.add_tag(self.tag)
tags_json = spec.to_dict()['tags']
assert self.tag in tags_json
class TestDefinitions:
properties = {
'id': {'type': 'integer', 'format': 'int64'},
'name': {'type': 'string', 'example': 'doggie'},
}
def test_definition(self, spec):
spec.definition('Pet', properties=self.properties)
defs_json = spec.to_dict()['definitions']
assert 'Pet' in defs_json
assert defs_json['Pet']['properties'] == self.properties
def test_definition_description(self, spec):
model_description = 'An animal which lives with humans.'
spec.definition('Pet', properties=self.properties, description=model_description)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['description'] == model_description
def test_definition_stores_enum(self, spec):
enum = ['name', 'photoUrls']
spec.definition(
'Pet',
properties=self.properties,
enum=enum
)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['enum'] == enum
def test_definition_extra_fields(self, spec):
extra_fields = {'discriminator': 'name'}
spec.definition('Pet', properties=self.properties, extra_fields=extra_fields)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['discriminator'] == 'name'
def test_pass_definition_to_plugins(self, spec):
def def_helper(spec, name, **kwargs):
if kwargs.get('definition') is not None:
return {'available': True}
return {'available': False}
spec.register_definition_helper(def_helper)
spec.definition('Pet', properties=self.properties)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['available']
class TestPath:
paths = {
'/pet/{petId}': {
'get': {
'parameters': [
{
'required': True,
'format': 'int64',
'name': 'petId',
'in': 'path',
'type': 'integer',
'description': 'ID of pet that needs to be fetched'
}
],
'responses': {
"200": {
"schema": {'$ref': '#/definitions/Pet'},
'description': 'successful operation'
},
"400": {
"description": "Invalid ID supplied"
},
"404": {
"description": "Pet not found"
}
},
"produces": [
"application/json",
"application/xml"
],
"operationId": "getPetById",
"summary": "Find pet by ID",
'description': ('Returns a pet when ID < 10. '
'ID > 10 or nonintegers will simulate API error conditions'),
'tags': ['pet']
}
}
}
def test_add_path(self, spec):
route_spec = self.paths['/pet/{petId}']['get']
spec.add_path(
path='/pet/{petId}',
operations=dict(
get=dict(
parameters=route_spec['parameters'],
responses=route_spec['responses'],
produces=route_spec['produces'],
operationId=route_spec['operationId'],
summary=route_spec['summary'],
description=route_spec['description'],
tags=route_spec['tags']
)
)
)
p = spec._paths['/pet/{petId}']['get']
assert p['parameters'] == route_spec['parameters']
assert p['responses'] == route_spec['responses']
assert p['operationId'] == route_spec['operationId']
assert p['summary'] == route_spec['summary']
assert p['description'] == route_spec['description']
assert p['tags'] == route_spec['tags']
def test_paths_maintain_order(self, spec):
spec.add_path(path='/path1')
spec.add_path(path='/path2')
spec.add_path(path='/path3')
spec.add_path(path='/path4')
assert list(spec.to_dict()['paths'].keys()) == ['/path1', '/path2', '/path3', '/path4']
def test_add_path_merges_paths(self, spec):
"""Test that adding a second HTTP method to an existing path performs
a merge operation instead of an overwrite"""
path = '/pet/{petId}'
route_spec = self.paths[path]['get']
spec.add_path(
path=path,
operations=dict(
get=route_spec
)
)
spec.add_path(
path=path,
operations=dict(
put=dict(
parameters=route_spec['parameters'],
responses=route_spec['responses'],
produces=route_spec['produces'],
operationId='updatePet',
summary='Updates an existing Pet',
description='Use this method to make changes to Pet `petId`',
tags=route_spec['tags']
)
)
)
p = spec._paths[path]
assert 'get' in p
assert 'put' in p
def test_add_path_ensures_path_parameters_required(self, spec):
path = '/pet/{petId}'
spec.add_path(
path=path,
operations=dict(
put=dict(
parameters=[{
'name': 'petId',
'in': 'path',
}]
)
)
)
assert spec._paths[path]['put']['parameters'][0]['required'] is True
    def test_add_path_with_no_path_raises_error(self, spec):
with pytest.raises(APISpecError) as excinfo:
spec.add_path()
assert 'Path template is not specified' in str(excinfo)
def test_add_path_strips_base_path(self, spec):
spec.options['basePath'] = '/v1'
spec.add_path('/v1/pets')
assert '/pets' in spec._paths
assert '/v1/pets' not in spec._paths
def test_add_path_accepts_path(self, spec):
route = '/pet/{petId}'
route_spec = self.paths[route]
path = Path(path=route, operations={'get': route_spec['get']})
spec.add_path(path)
p = spec._paths[path.path]
assert path.path == p.path
assert 'get' in p
def test_add_path_strips_path_base_path(self, spec):
spec.options['basePath'] = '/v1'
path = Path(path='/v1/pets')
spec.add_path(path)
assert '/pets' in spec._paths
        assert '/v1/pets' not in spec._paths
|
csunny/blog_project
|
source/libs/spider/common.py
|
Python
|
mit
| 694
| 0.001441
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author magic
"""
import urllib2
def download(url, user_agent='wswp', num_retries=2):
print 'Downloading:', url
headers = {'User-Agent': user_agent}
request = urllib2.Request(url, headers=headers)
try:
html = urllib2.urlopen(request).read()
except urllib2.URLError, e:
print 'Download error:', e.reason
html = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
html = download(url, user_agent, num_retries-1)
return html
if __name__ == '__main__':
pass
# download('http://blog.csdn.net/column/details/datamining.html')
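
# Illustrative call: download() retries up to num_retries times, but only for
# HTTP 5xx responses; 4xx errors and unreachable hosts simply return None.
#
#     html = download('http://example.com', user_agent='my-bot', num_retries=2)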
|
ceph/calamari-clients
|
utils/urls.py
|
Python
|
mit
| 3,135
| 0.00319
|
from django.conf.urls import patterns, include, url
from settings import STATIC_ROOT, GRAPHITE_API_PREFIX, CONTENT_DIR
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns(
'',
    # These views are needed for the django-rest-framework debug interface
# to be able to log in and out. The URL path doesn't matter, rest_framework
# finds the views by name.
url(r'^api/rest_framework/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^$', 'calamari_web.views.home'),
url(r'^api/v1/', include('calamari_rest.urls.v1')),
url(r'^api/v2/', include('calamari_rest.urls.v2')),
url(r'^admin/(?P<path>.*)$', 'calamari_web.views.serve_dir_or_index',
        {'document_root': '%s/admin/' % STATIC_ROOT}),
url(r'^manage/(?P<path>.*)$', 'calamari_web.views.serve_dir_or_index',
{'document_root': '%s/manage/' % STATIC_ROOT}),
url(r'^login/$', 'django.views.static.serve',
{'document_root': '%s/login/' % STATIC_ROOT, 'path': "index.html"}),
url(r'^login/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': '%s/login/' % STATIC_ROOT}),
url(r'^bootstrap$', 'calamari_web.views.bootstrap', name='bootstrap'),
url(r'^dashboard/(?P<path>.*)$', 'calamari_web.views.dashboard',
{'document_root': '%s/dashboard/' % STATIC_ROOT},
name='dashboard'),
url(r'^render/?', include('graphite.render.urls')),
url(r'^metrics/?', include('graphite.metrics.urls')),
url(r'^%s/dashboard/?' % GRAPHITE_API_PREFIX.lstrip('/'), include('graphite.dashboard.urls')),
# XXX this is a hack to make graphite visible where the 1.x GUI expects it,
url(r'^graphite/render/?', include('graphite.render.urls')),
url(r'^graphite/metrics/?', include('graphite.metrics.urls')),
# XXX this is a hack to make graphite dashboard work in dev mode (full installation
# serves this part with apache)
url('^content/(?P<path>.*)$', 'django.views.static.serve', {'document_root': CONTENT_DIR}),
# XXX this is a hack to serve apt repo in dev mode (Full installation serves this with apache)
url(r'^static/ubuntu/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': '%s/ubuntu/' % STATIC_ROOT}),
)
# Graphite dashboard client code is not CSRF enabled, but we have
# global CSRF protection enabled. Make exceptions for the views
# that the graphite dashboard wants to POST to.
from django.views.decorators.csrf import csrf_exempt
# By default graphite views are visible to anyone who asks:
# we only want to allow logged in users to access graphite
# API.
from django.contrib.auth.decorators import login_required
def patch_views(mod):
for url_pattern in mod.urlpatterns:
cb = url_pattern.callback
url_pattern._callback = csrf_exempt(login_required(cb))
import graphite.metrics.urls
import graphite.dashboard.urls
patch_views(graphite.metrics.urls)
patch_views(graphite.dashboard.urls)
# Explicitly reset to default or graphite hijacks it
handler500 = 'django.views.defaults.bad_request'
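
# Illustrative, hypothetical view: patch_views() above gives every graphite
# view the same wrapping as this hand-decorated equivalent.
#
#     @csrf_exempt
#     @login_required
#     def example_graphite_view(request):
#         ...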
|
makinacorpus/django
|
django/test/testcases.py
|
Python
|
bsd-3-clause
| 49,061
| 0.002242
|
from __future__ import unicode_literals
from copy import copy
import difflib
import errno
from functools import wraps
import json
import os
import re
import sys
import select
import socket
import threading
import unittest
from unittest import skipIf # Imported here for backward compatibility
from unittest.util import safe_repr
try:
from urllib.parse import urlsplit, urlunsplit
except ImportError: # Python 2
from urlparse import urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core import mail
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.commands import flush
from django.core.servers.basehttp import (WSGIRequestHandler, WSGIServer,
WSGIServerException)
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import connection, connections, DEFAULT_DB_ALIAS, transaction
from django.db.models.loading import cache
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (CaptureQueriesContext, ContextList,
override_settings, compare_xml)
from django.utils import six
from django.utils.encoding import force_text
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
    Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
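
# e.g. to_list(None) -> [], to_list('x') -> ['x'], to_list([1, 2]) -> [1, 2]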
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_abort = transaction.abort
def nop(*args, **kwargs):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
transaction.abort = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
transaction.abort = real_abort
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected" % (
executed, self.num
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
self._urlconf_setup()
mail.outbox = []
def _urlconf_setup(self):
set_urlconf(None)
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
self._urlconf_teardown()
def _urlconf_teardown(self):
set_urlconf(None)
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts
back to the original value when exiting the context.
"""
return override_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
|
hgiemza/DIRAC
|
ConfigurationSystem/Client/Helpers/Resources.py
|
Python
|
gpl-3.0
| 10,846
| 0.043426
|
""" Helper for the CS Resources section
"""
import re
from distutils.version import LooseVersion #pylint: disable=no-name-in-module,import-error
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.Core.Utilities.List import uniqueElements, fromChar
__RCSID__ = "$Id$"
gBaseResourcesSection = "/Resources"
def getSites():
""" Get the list of all the sites defined in the CS
"""
result = gConfig.getSections( cfgPath( gBaseResourcesSection, 'Sites' ) )
if not result['OK']:
return result
grids = result['Value']
sites = []
for grid in grids:
result = gConfig.getSections( cfgPath( gBaseResourcesSection, 'Sites', grid ) )
if not result['OK']:
return result
sites += result['Value']
return S_OK( sites )
def getStorageElementSiteMapping( siteList = None ):
""" Get Storage Element belonging to the given sites
"""
if not siteList:
result = getSites()
if not result['OK']:
return result
siteList = result['Value']
siteDict = {}
for site in siteList:
grid = site.split( '.' )[0]
ses = gConfig.getValue( cfgPath( gBaseResourcesSection, 'Sites', grid, site, 'SE' ), [] )
if ses:
siteDict[site] = ses
return S_OK( siteDict )
def getFTS2ServersForSites( siteList = None ):
""" get FTSServers for sites
:param siteList: list of sites
:type siteList: python:list
"""
siteList = siteList if siteList else None
if not siteList:
siteList = getSites()
if not siteList["OK"]:
return siteList
siteList = siteList["Value"]
ftsServers = dict()
defaultServ = gConfig.getValue( cfgPath( gBaseResourcesSection, 'FTSEndpoints/Default', 'FTSEndpoint' ), '' )
for site in siteList:
serv = gConfig.getValue( cfgPath( gBaseResourcesSection, "FTSEndpoints/FTS2", site ), defaultServ )
if serv:
ftsServers[site] = serv
return S_OK( ftsServers )
def getFTS3Servers():
""" get FTSServers for sites
"""
csPath = cfgPath( gBaseResourcesSection, "FTSEndpoints/FTS3" )
# We do it in two times to keep the order
ftsServerNames = gConfig.getOptions( csPath ).get( 'Value', [] )
ftsServers = []
for name in ftsServerNames:
ftsServers.append( gConfig.getValue( cfgPath( csPath, name ) ) )
return S_OK( ftsServers )
def getSiteTier( site ):
"""
Return Tier level of the given Site
"""
result = getSitePath( site )
if not result['OK']:
return result
sitePath = result['Value']
return S_OK( gConfig.getValue( cfgPath( sitePath, 'MoUTierLevel' ), 2 ) )
def getSitePath( site ):
"""
Return path to the Site section on CS
"""
result = getSiteGrid( site )
if not result['OK']:
return result
grid = result['Value']
return S_OK( cfgPath( gBaseResourcesSection, 'Sites', grid, site ) )
def getSiteGrid( site ):
"""
Return Grid component from Site Name
"""
sitetuple = site.split( "." )
if len( sitetuple ) != 3:
return S_ERROR( 'Wrong Site Name format' )
return S_OK( sitetuple[0] )
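
# e.g. getSiteGrid( 'LCG.CERN.ch' ) returns S_OK( 'LCG' ); any name that does
# not split into exactly three dot-separated parts is rejected.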
def getStorageElementOptions( seName ):
""" Get the CS StorageElementOptions
"""
storageConfigPath = '/Resources/StorageElements/%s' % seName
result = gConfig.getOptionsDict( storageConfigPath )
if not result['OK']:
return result
options = result['Value']
  # If the SE is a baseSE or an alias, dereference it
if 'BaseSE' in options or 'Alias' in options:
storageConfigPath = '/Resources/StorageElements/%s' % options.get( 'BaseSE', options.get( 'Alias' ) )
result = gConfig.getOptionsDict( storageConfigPath )
if not result['OK']:
return result
result['Value'].update( options )
options = result['Value']
# Help distinguishing storage type
diskSE = True
tapeSE = False
if 'SEType' in options:
# Type should follow the convention TXDY
seType = options['SEType']
    diskSE = re.search( 'D[1-9]', seType ) is not None
    tapeSE = re.search( 'T[1-9]', seType ) is not None
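  # e.g. SEType 'T0D1' gives diskSE=True, tapeSE=False (disk-only),
  # while 'T1D0' gives diskSE=False, tapeSE=True (tape-only).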
options['DiskSE'] = diskSE
options['TapeSE'] = tapeSE
return S_OK( options )
def getQueue( site, ce, queue ):
""" Get parameters of the specified queue
"""
grid = site.split( '.' )[0]
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s' % ( grid, site, ce ) )
if not result['OK']:
return result
resultDict = result['Value']
for tagFieldName in ( 'Tag', 'RequiredTag' ):
Tags = []
ceTags = resultDict.get( tagFieldName )
if ceTags:
Tags = fromChar( ceTags )
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s' % ( grid, site, ce, queue ) )
if not result['OK']:
return result
resultDict.update( result['Value'] )
queueTags = resultDict.get( tagFieldName )
if queueTags:
queueTags = fromChar( queueTags )
Tags = list( set( Tags + queueTags ) )
if Tags:
resultDict[tagFieldName] = Tags
resultDict['Queue'] = queue
return S_OK( resultDict )
def getQueues( siteList = None, ceList = None, ceTypeList = None, community = None, mode = None ):
""" Get CE/queue options according to the specified selection
"""
result = gConfig.getSections( '/Resources/Sites' )
if not result['OK']:
return result
resultDict = {}
grids = result['Value']
for grid in grids:
result = gConfig.getSections( '/Resources/Sites/%s' % grid )
if not result['OK']:
continue
sites = result['Value']
for site in sites:
if siteList is not None and not site in siteList:
continue
if community:
comList = gConfig.getValue( '/Resources/Sites/%s/%s/VO' % ( grid, site ), [] )
if comList and not community in comList:
continue
siteCEParameters = {}
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs' % ( grid, site ) )
if result['OK']:
siteCEParameters = result['Value']
result = gConfig.getSections( '/Resources/Sites/%s/%s/CEs' % ( grid, site ) )
if not result['OK']:
continue
ces = result['Value']
for ce in ces:
if mode:
ceMode = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/SubmissionMode' % ( grid, site, ce ), 'Direct' )
if not ceMode or ceMode != mode:
continue
if ceTypeList:
ceType = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/CEType' % ( grid, site, ce ), '' )
if not ceType or not ceType in ceTypeList:
continue
if ceList is not None and not ce in ceList:
continue
if community:
comList = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/VO' % ( grid, site, ce ), [] )
if comList and not community in comList:
continue
ceOptionsDict = dict( siteCEParameters )
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s' % ( grid, site, ce ) )
if not result['OK']:
continue
ceOptionsDict.update( result['Value'] )
        result = gConfig.getSections( '/Resources/Sites/%s/%s/CEs/%s/Queues' % ( grid, site, ce ) )
if not result['OK']:
continue
queues = result['Value']
for queue in queues:
if community:
comList = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s/VO' % ( grid, site, ce, queue ), [] )
if comList and not community in comList:
continue
resultDict.setdefault( site, {} )
resultDict[site].setdefault( ce, ceOptionsDict )
resultDict[site][ce].setdefault( 'Queues', {} )
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s' % ( grid, site, ce, queue ) )
if not result['OK']:
continue
queueOptionsDict = result['Value']
resultDict[site][ce]['Queues'][queue] = queueOptionsDict
return S_OK( resultDict )
def getCompatiblePlatforms( originalPlatforms ):
""" Get a list of platforms compatible with the given list
"""
if isinstance( originalPlatforms, basestring ):
platforms = [originalPlatforms]
else:
platforms = list( originalPlatforms )
platforms = list( p
|
staceytay/workabroad-scraper
|
workabroad/items.py
|
Python
|
mit
| 490
| 0.004082
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class WorkabroadItem(Item):
# define the fields for your item here like:
# name = Field()
pass
class PostItem(Item):
href = Field()
id = Field()
title = Field()
location = Field()
expiry = Field()
agency = Field()
qualifications = Field()
info = Field()
    requirements = Field()
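
# Illustrative spider callback populating a PostItem; the selector expression
# is a hypothetical placeholder:
#
#     def parse_post(self, response):
#         item = PostItem()
#         item['href'] = response.url
#         item['title'] = response.xpath('//h1/text()').extract_first()
#         yield item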
|
Simocracy/simocraPy
|
simocracy/ldhost.py
|
Python
|
gpl-2.0
| 2,714
| 0.006275
|
#!/usr/bin/env python3.4
# -*- coding: UTF-8 -*-
import simocracy.wiki as wiki
import re
## config ##
# Option to simulate the process
simulation = False
# Log level: print only changed lines ("line"), whole changed
# articles ("article"), or nothing at all ("none") to stdout
loglevel = "line"
# Replacement for LD-Host links
replacement = r"{{LD-Host-Replacer}}"
# Prepended to every article where something was replaced
notif = r"{{LD-Host}}"
############
def main():
opener = wiki.login(wiki.username, wiki.password)
for p in wiki.all_pages(opener, resume="speed"):
doIt(p, opener)
# Replaces all occurrences of sub in s with repl.
def replaceAll(sub, repl, s):
while True:
testagainst = s
s = re.sub(sub, repl, s)
if s == testagainst:
return s
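
# Note: re-applying the substitution until a fixed point also collapses
# overlapping matches: replaceAll(re.compile('aa'), 'a', 'aaaa') returns 'a',
# whereas a single re.sub call would return 'aa'.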
def doIt(article, opener):
ldhost = re.compile(r'(Thumb=)?\[?\[?\s*(?P<link>(http://)?(www\.)?ld-host\.de/[/\w]*?\.[a-z][a-z][a-z])\s*[^\]]*?\]?\]?')
doubleRepl = re.compile(r'\[?\s*' + re.escape(replacement) + r'\s*' + re.escape(replacement) + r'\s*\]?')
found = False
text = ""
logs = ""
    # Intercept special pages
site = None
try:
site = wiki.openArticle(article, opener, redirect=False)
except Exception as e:
if str(e) == "Spezialseite":
return
for line in site:
newLine = line.decode('utf-8')
foundList = []
for el in ldhost.finditer(newLine):
foundList.append(el)
        # nothing found
if foundList == []:
text = text + newLine
continue
else:
found = True
        # replace
for el in foundList:
            # handle image boxes
if 'Thumb=' in el.groups():
newLine = replaceAll(el.groupdict()['link'], "", newLine)
else:
newLine = replaceAll(el.groupdict()['link'], replacement, newLine)
newLine = replaceAll(doubleRepl, replacement, newLine)
text = text + newLine
#logging
if simulation and loglevel == "line":
logs = logs + "\n- " + line.decode('utf-8') + "+ " + newLine + "\n"
if found:
text = notif + text
print("[[" + article + "]]")
if loglevel == "line":
print(logs)
elif loglevel == "article":
            print(text)
else:
raise Exception("config kaputt")
    # Write the changes
if not simulation:
wiki.edit_article(article, text, opener)
print("Done: "+article)
print("========================================================\n")
if __name__ == "__main__":
main()
|
dsoprea/PyInotify
|
tests/test_inotify.py
|
Python
|
gpl-2.0
| 17,960
| 0.003905
|
# -*- coding: utf-8 -*-
import os
import unittest
import inotify.constants
import inotify.calls
import inotify.adapters
import inotify.test_support
try:
unicode
except NameError:
_HAS_PYTHON2_UNICODE_SUPPORT = False
else:
_HAS_PYTHON2_UNICODE_SUPPORT = True
class TestInotify(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotify, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
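
    # With timeout_s=1 and yield_nones=False, event_gen() drains the queued
    # events (waiting up to one second) and omits the None placeholder yields,
    # so the tests can assert on a complete, ordered event list.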
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is True, "Not in Python 3")
def test__international_naming_python3(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(path, '新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, 'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, 'filename'),
]
self.assertEquals(events, expected)
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is False, "Not in Python 2")
def test__international_naming_python2(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(unicode(path), u'新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, u'filename料夾'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
                (inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, u'filename料夾'),
]
self.assertEquals(events, expected)
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.Inotify()
i.add_watch(path1)
with open('ignored_new_file', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file'), 'w'):
pass
with open(os.path.join(path2, 'ignored_new_file'), 'w'):
pass
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16),
['IN_CREATE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16),
['IN_OPEN'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16),
['IN_CLOSE_WRITE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16),
['IN_DELETE'],
path1,
'seen_new_file'
)
]
self.assertEquals(events, expected)
# This can't be removed until *after* we've read the events because
# they'll be flushed the moment we remove the watch.
i.remove_watch(path1)
with open(os.path.join(path1, 'ignored_after_removal'), 'w'):
pass
events = self.__read_all_events(i)
self.assertEquals(events, [])
@staticmethod
def _open_write_close(*args):
with open(os.path.join(*args), 'w'):
pass
@staticmethod
def _make_temp_path(*args):
path = os.path.join(*args)
os.mkdir(path)
return path
@staticmethod
def _event_general(wd, mask, type_name, path, filename):
return ((inotify.adapters._INOTIFY_EVENT(wd=wd, mask=mask, cookie=0, len=16)),
[type_name],
path,
filename)
@staticmethod
def _event_create(wd, path, filename):
return TestInotify._event_general(wd, 256, 'IN_CREATE', path, filename)
@staticmethod
def _event_open(wd, path, filename):
return TestInotify._event_general(wd, 32, 'IN_OPEN', path, filename)
@staticmethod
def _event_close_write(wd, path, filename):
return TestInotify._event_general(wd, 8, 'IN_CLOSE_WRITE', path, filename)
def test__watch_list_of_paths(self):
with inotify.test_support.temp_path() as path:
path1 = TestInotify._make_temp_path(path, 'aa')
path2 = TestInotify._make_temp_path(path, 'bb')
i = inotify.adapters.Inotify([path1, path2])
TestInotify._open_write_close('ignored_new_file')
TestInotify._open_write_close(path1, 'seen_new_file')
TestInotify._open_write_close(path2, 'seen_new_file2')
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
TestInotify._event_create(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_open(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_close_write(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_create(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_open(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_close_write(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_general(wd=1, mask=512, type_name='IN_DELETE',
path=path1, filename='seen_new_file')
]
self.assertEquals(events, expected)
def test__error_on_watch_nonexistent_folder(self):
i = inotify.adapters.Inotify()
with self.assertRaises(inotify.calls.InotifyError):
i.add_watch('/dev/null/foo')
def test__get_event_names(self):
all_mask = 0
for bit in inotify.constants.MASK_LOOKUP.keys():
all_mask |= bit
all_names = inotify.constants.MASK_LOOKUP.values()
all_names = list(all_names)
i = inotify.adapters.Inotify()
names = i._get_event_names(all_mask)
self.assertEquals(names, all_names)
class TestInotifyTree(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTree, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open('seen_new_file1', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file2'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file3'), 'w'):
pass
os.rem
|