code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
from django.contrib import admin
from .models import Lesson, Series
@admin.register(Lesson)
class LessonAdmin(admin.ModelAdmin):
    """Stock admin configuration for the Lesson model."""
    pass


@admin.register(Series)
class SeriesAdmin(admin.ModelAdmin):
    """Stock admin configuration for the Series model."""
    pass
|
python-krasnodar/python-krasnodar.ru
|
src/lessons/admin.py
|
Python
|
mit
| 248
|
# -*- coding: utf-8 -*-
#
# exercise 5: more variables and printing
#
# String formatting with the %-operator.
# BUG FIX: the original used Python 2 print statements, which are syntax
# errors under Python 3; converted to print() calls.  The printed strings
# themselves (including the original's "inched" typo) are unchanged.
name = 'Zed A. Shaw'
ages = 35  # not a lie
height = 74  # inches
weight = 180  # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print("Let's talk about %s." % name)
print("He's %d inched tall." % height)
print("He's %d pounds heavy." % weight)
print("Actually that's not too heavy.")
print("He's got %s eyes and %s hair." % (eyes, hair))
print("His teeth are usually %s depending on the coffee." % teeth)
# this line is tricky, try to get it exactly right
print("If I add %d, %d, and %d I get %d." % (
    ages, height, weight, ages + height + weight))
|
zstang/learning-python-the-hard-way
|
ex5.py
|
Python
|
mit
| 635
|
#!/env/python3
import sys
import argparse
import os
import csv
import tabix
import gzip
import io
from collections import Counter
def chromosom_sizes(hg19_size_file):
    """Read a tab-separated chromosome size file into a dict.

    Example: sizes["chr13"] = 234324
    """
    with open(hg19_size_file) as handle:
        rows = csv.reader(handle, delimiter="\t")
        return {row[0]: int(row[1]) for row in rows}
def kart_racer(sample, genom, base_speed=0, deceleration=1, acceleration=1, allow_negative=False):
    """Print a per-base "speed" track for chr17 of a tabix-indexed variant file.

    The speed rises by ``count * acceleration`` at positions carrying variants
    and decays by ``deceleration`` over empty positions.  One BED-like line
    (chrom, start, end, speed) is printed per base.

    BUG FIX: ``allow_negative`` was accepted but never used, so a large
    ``deceleration`` could drive the speed below zero for one step.  The flag
    is now honored: unless it is True, the speed is clamped at 0.0.

    :param sample: path to a bgzipped, tabix-indexed variant file
    :param genom: path to a chromosome-size file (see chromosom_sizes)
    :param base_speed: starting speed
    :param deceleration: speed lost per variant-free base
    :param acceleration: speed gained per variant
    :param allow_negative: when True, the speed may go below zero
    """
    # chromosome -> length mapping
    sizes = chromosom_sizes(genom)
    # tabix handle over the variant file
    tabix_file = tabix.open(sample)
    # current speed (kept as float)
    speed = float(base_speed)
    # test on chromosome 17 only
    chromosom = "chr17"
    size = sizes[chromosom]
    # loop over every base of the chromosome
    for pos in range(size):
        # number of variants overlapping the 2-base window starting at pos
        # NOTE(review): the window is [pos, pos + 2) — confirm whether a
        # single-base query (pos + 1) was intended.
        count = sum(1 for _ in tabix_file.query(chromosom, pos, pos + 2))
        if count > 0:
            speed += count * acceleration
        elif speed > 0:
            speed -= deceleration
            if not allow_negative and speed < 0:
                speed = 0.0
        else:
            speed = 0.0
        print(chromosom, pos, pos + 1, speed, sep="\t")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Compute speed of mutation ",
        usage="kart_racer.py file.bed.gz -g hg19.sizes -a 1 -b 0.1 "
    )
    parser.add_argument("sample", type=str, help="tabix file")
    parser.add_argument("-g", "--genom", type=str, help="genom size ")
    # BUG FIX: was type=int, but the usage string advertises "-b 0.1";
    # float accepts both fractional and whole values (backward compatible).
    parser.add_argument("-b", "--base_speed", type=float, default=0)
    parser.add_argument("-d", "--deceleration", type=float, default=0.01,
                        help="decrease speed by x each empty base")
    parser.add_argument("-a", "--acceleration", type=float, default=1,
                        help="accelerate by 1 each variant")
    args = parser.parse_args()
    kart_racer(args.sample, args.genom, args.base_speed, args.deceleration,
               args.acceleration, False)
|
gustaveroussy/98drivers
|
scripts/kart_racer.py
|
Python
|
mit
| 1,836
|
class Campus(object):
    """Plain record for a campus row: a primary key (`id`) and a name (`nome`).

    ROBUSTNESS FIX: the original left ``id`` and ``nome`` undefined when
    constructed without ``dados``, so the getters raised AttributeError.
    Both attributes now default to None.
    """

    def __init__(self, dados=None):
        """Initialize from *dados*, a mapping with 'id' and 'nome' keys.

        :param dados: dict-like database row, or None for an empty record.
        :raises KeyError: if *dados* is given but lacks 'id' or 'nome'.
        """
        self.id = None
        self.nome = None
        if dados is not None:
            self.id = dados['id']
            self.nome = dados['nome']

    def getId(self):
        """Return the campus primary key (None if not set)."""
        return self.id

    def setNome(self, nome):
        """Replace the campus name."""
        self.nome = nome

    def getNome(self):
        """Return the campus name (None if not set)."""
        return self.nome
|
AEDA-Solutions/matweb
|
backend/Database/Models/Campus.py
|
Python
|
mit
| 267
|
# Compact-YAML dump fixtures, keyed by sample name.  Each value is a YAML
# document in the project's compact dump format: a "- <table>: [<columns>]"
# line introduces a table, and the "- [<values>]" lines that follow it are
# that table's rows.
# NOTE(review): the row lines were presumably nested (indented) under their
# table line in the original file; the leading whitespace appears to have
# been lost in this copy — confirm against a pristine checkout before
# relying on these strings verbatim.
SAMPLES_YAML = {
'a': """\
- a1-a: [pk]
- [A1]
- [A2]
- [B1]
- [C1]
- [C2]
- [D1]
- [E1]
- [P1]
""",
'b': """\
- a1-a: [pk]
- [B1]
- [C1]
- [C2]
- [D1]
- [E1]
- a1-b: [pk]
- [B1]
- [C1]
- [C2]
- [D1]
- [E1]
""",
'c': """\
- a1-a: [pk]
- [C1]
- [C2]
- [D1]
- a1-b: [pk]
- [C1]
- [C2]
- [D1]
- a1-c: [pk]
- [C1]
- [C2]
- [D1]
""",
'd': """\
- a1-a: [pk]
- [D1]
- a1-b: [pk]
- [D1]
- a1-c: [pk]
- [D1]
- a1-d: [pk]
- [D1]
""",
'e': """\
- a1-a: [pk]
- [E1]
- a1-b: [pk]
- [E1]
- a1-e: [pk]
- [E1]
""",
'f': """\
- a1-a: [pk]
- [A1]
- [D1]
- a1-f: [pk, a]
- [F1, null]
- [F2, A1]
- [F3, A1]
- [F4, D1]
""",
'g': """\
- a1-a: [pk]
- [D1]
- a1-b: [pk]
- [D1]
- a1-c: [pk]
- [D1]
- a1-d: [pk]
- [D1]
- a1-g: [pk, d]
- [G1, D1]
""",
'p': """\
- a1-a: [pk]
- [A1]
- [A2]
- [B1]
- [C1]
- [C2]
- [D1]
- [E1]
- [P1]
""",
'o': """\
- a1-a: [pk]
- [C1]
- [C2]
- [D1]
- a1-b: [pk]
- [C1]
- [C2]
- [D1]
- a1-c: [pk]
- [C1]
- [C2]
- [D1]
- a1-o: [pk, c, s]
- [O1, null, null]
- [O2, C1, null]
- [O3, C2, O1]
- [O4, D1, O3]
- [O5, null, O5]
""",
'm': """\
- a1-m: [pk]
- [M1]
- [M2]
- [M3]
- [M4]
- [M5]
""",
'm_d': """\
- a1-a: [pk]
- [D1]
- a1-b: [pk]
- [D1]
- a1-c: [pk]
- [D1]
- a1-d: [pk]
- [D1]
- a1-m: [pk]
- [M3]
- [M4]
- a1-m_d: [pk, d, m]
- [1, D1, M3]
- [2, D1, M4]
""",
'm_s': """\
- a1-m: [pk]
- [M1]
- [M2]
- [M3]
- [M4]
- [M5]
- a1-m_s: [pk, from_m, to_m]
- [1, M2, M1]
- [2, M1, M2]
- [3, M4, M3]
- [4, M3, M4]
- [5, M5, M3]
- [6, M3, M5]
- [7, M5, M4]
- [8, M4, M5]
- [9, M5, M5]
""",
'a_some': """\
- a1-a: [pk]
- [A2]
- [D1]
""",
'a_some_a_b': """\
- a1-a: [pk]
- [A2]
- [D1]
- a1-b: [pk]
- [D1]
""",
'f_a_d': """\
- a1-a: [pk]
- [A1]
- [D1]
- a1-f: [pk, a]
- [F1, null]
- [F2, A1]
- [F3, A1]
- [F4, D1]
""",
'd_a_f': """\
- a1-a: [pk]
- [D1]
- a1-b: [pk]
- [D1]
- a1-c: [pk]
- [D1]
- a1-d: [pk]
- [D1]
- a1-f: [pk, a]
- [F4, D1]
""",
'o_one_o_s': """\
- a1-a: [pk]
- [C2]
- [D1]
- a1-b: [pk]
- [C2]
- [D1]
- a1-c: [pk]
- [C2]
- [D1]
- a1-o: [pk, c, s]
- [O1, null, null]
- [O3, C2, O1]
- [O4, D1, O3]
""",
'm_one_m_s': """\
- a1-m: [pk]
- [M3]
- [M4]
- [M5]
- a1-m_s: [pk, from_m, to_m]
- [3, M4, M3]
- [4, M3, M4]
- [5, M5, M3]
- [6, M3, M5]
- [7, M5, M4]
- [8, M4, M5]
- [9, M5, M5]
""",
'd_d_m': """\
- a1-a: [pk]
- [D1]
- a1-b: [pk]
- [D1]
- a1-c: [pk]
- [D1]
- a1-d: [pk]
- [D1]
- a1-m: [pk]
- [M3]
- [M4]
- a1-m_d: [pk, d, m]
- [1, D1, M3]
- [2, D1, M4]
""",
'd_d_m_m_s': """\
- a1-a: [pk]
- [D1]
- a1-b: [pk]
- [D1]
- a1-c: [pk]
- [D1]
- a1-d: [pk]
- [D1]
- a1-m: [pk]
- [M3]
- [M4]
- [M5]
- a1-m_d: [pk, d, m]
- [1, D1, M3]
- [2, D1, M4]
- a1-m_s: [pk, from_m, to_m]
- [3, M4, M3]
- [4, M3, M4]
- [5, M5, M3]
- [6, M3, M5]
- [7, M5, M4]
- [8, M4, M5]
- [9, M5, M5]
""",
# Multi-table inheritance samples (a2/a3 apps).
'a2': """\
- a2-article: [pk, headline]
- [1, The only Review for The explicit AutoField]
- a2-book: [pk, title]
- [1, The explicit AutoField]
- a2-bookreview: [pk, article_ptr]
- [1, 1]
""",
'a3': """\
- a3-piece: [pk]
- [1]
- a3-article: [pk, headline]
- [1, The only Review for The common ancestor]
- a3-book: [pk, title]
- [1, The common ancestor]
- a3-bookreview: [pk, article_ptr]
- [1, 1]
""",
# Question/response samples with explicit YAML timestamps (a4 app).
'q': """\
- a4-question: [pk, pub_date, question_text]
- [Q1, !!timestamp '2014-01-01 00:00:00', 'what question 1?']
- [Q2, !!timestamp '2014-01-02 00:00:00', 'what question 2?']
- [Q3, !!timestamp '2014-01-03 00:00:00', 'what question 3?']
""",
'q_r': """\
- a4-question: [pk, pub_date, question_text]
- [Q1, !!timestamp '2014-01-01 00:00:00', 'what question 1?']
- [Q2, !!timestamp '2014-01-02 00:00:00', 'what question 2?']
- [Q3, !!timestamp '2014-01-03 00:00:00', 'what question 3?']
- a4-response: [pk, question, response_text, votes]
- [R1, Q1, 'NULL', 0]
- [R2, Q1, None, 111]
- [R3, Q2, foo, 222]
- [R4, Q2, bar, 333]
- [R5, Q3, foobar, 444]
""",
# One row per exotic field type (bool, date, decimal, float, slug, time...).
's': """\
- a4-sample: [pk, bool, comma, date, dec, float, nullbool, slug, text, time]
- [S1, true, '[1, 2, 3]', 2014-02-01, '12.34', 1.23, false, abc, foobar, '01:01:00']
- [S2, false, '[4, 5, 6]', 2014-02-02, '56.78', 3.45, true, def, "\\xC2", '01:02:00']
- [S3, true, '[]', 2014-02-03, '-9.12', 6.78, null, '', '', '01:03:00']
""",
}
|
katakumpo/nicedjango
|
tests/samples_compact_yaml.py
|
Python
|
mit
| 6,357
|
# Notice:
# If you are running this in production environment, generate
# these for your app at https://dev.twitter.com/apps/new
# OAuth credentials used to post status updates to Twitter.
TWITTER = {
    'AUTH': {
        'consumer_key': 'XXXX',
        'consumer_secret': 'XXXX',
        'token': 'XXXX',
        'token_secret': 'XXXX',
    }
}
# We're pulling data from graphite to calculate the uptime. Each service has a
# list of counters that it uses to help calculate the % of successful / failed
# requests.
UPTIME = {
    'root_uri': 'http://graphite.balancedpayments.com/render/?',
    'username': 'username',
    'password': 'password',
    'services': {
        'DASH': {
            'OK_TARGETS': [
                'stats_counts.status.dashboard.2xx',
                'stats_counts.status.dashboard.3xx',
                'stats_counts.status.dashboard.4xx',
            ],
            'ERROR_TARGETS': [
                'stats_counts.status.dashboard.5xx',
                'stats_counts.status.dashboard.timeout',
            ]
        },
        'JS': {
            'OK_TARGETS': [
                'stats_counts.status.balanced-js.2xx',
                'stats_counts.status.balanced-js.3xx',
                'stats_counts.status.balanced-js.4xx',
            ],
            'ERROR_TARGETS': [
                'stats_counts.status.balanced-js.5xx',
                'stats_counts.status.balanced-js.timeout',
            ]
        },
        'API': {
            'OK_TARGETS': [
                'stats_counts.status.balanced-api.2xx',
                'stats_counts.status.balanced-api.3xx',
                'stats_counts.status.balanced-api.4xx',
            ],
            'ERROR_TARGETS': [
                'stats_counts.status.balanced-api.5xx',
                'stats_counts.status.balanced-api.timeout',
            ]
        }
    }
}
# The e-mail address to send notifications from
EMAIL = {
    'sender': 'Balanced Status <noreply@balancedpayments.com>'
}
# Librato-backed uptime source: total vs. 5xx AWS ELB request counters.
LIBRATO_UPTIME = {
    'root_uri': 'https://metrics-api.librato.com/v1/metrics/',
    'username': 'FIXME',
    'password': 'FIXME',
    'services': {
        'API': {
            'SOURCE': '*bapi-live*',
            'TOTAL_TARGETS': [
                'AWS.ELB.RequestCount',
            ],
            'ERROR_TARGETS': [
                'AWS.ELB.HTTPCode_Backend_5XX',
                'AWS.ELB.HTTPCode_ELB_5XX',
            ]
        },
    }
}
# TWILIO API credentials
TWILIO = {
    'account_sid': 'XXXX',
    'auth_token': 'XXXX',
    'from_number': 'XXXX'
}
# Development flag; should be False in production deployments.
DEBUG = True
# Currently DASHBOARD does not send out notifications
NOTIFY_SERVICES = ['API', 'JS']
|
balanced/status.balancedpayments.com
|
situation/settings.py
|
Python
|
mit
| 2,611
|
# time series prediction of stock data
# using recurrent neural network with LSTM layer
from pybrain.datasets import SequentialDataSet
from itertools import cycle
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
from pybrain.supervised import RPropMinusTrainer
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader
import matplotlib.pyplot as plt
import os.path
import sys
#sys.path.insert(0, '../../smap_nepse')
from smap_nepse.prediction import prepareInput as pi
from sklearn import preprocessing
import numpy as np
# Module authorship metadata.
__author__ = "Semanta Bhandari"
__copyright__ = ""
__credits__ = ["Sameer Rai","Sumit Shrestha","Sankalpa Timilsina"]
__license__ = ""
__version__ = "0.1"
__email__ = "semantabhandari@gmail.com"
def rnn():
    """Train (or resume training) an LSTM recurrent network on NABIL closing
    prices, plot actual vs. predicted series, and persist the network.

    Side effects: reads '../../data/NABIL.csv', reads/writes 'network.xml',
    writes 'error.png', and shows a matplotlib window.
    """
    # load dataframe from csv file
    df = pi.load_data_frame('../../data/NABIL.csv')
    # column name to match with indicator calculating modules
    # TODO: resolve issue with column name
    df.columns = [
        'Transactions',
        'Traded_Shares',
        'Traded_Amount',
        'High',
        'Low',
        'Close']
    data = df.Close.values
    # min-max normalization of closing prices to [0, 1]
    temp = np.array(data).reshape(len(data), 1)
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(temp)
    # prepare sequential dataset for pyBrain rnn network:
    # each sample's target is the next sample (cycle wraps the last one)
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
    # resume from a previously saved network if one exists.
    # BUG FIX: the original tested os.path.isfile('random.xml') but loaded
    # 'network.xml', so a saved network was never actually resumed; both
    # now refer to the same file that NetworkWriter writes below.
    if os.path.isfile('network.xml'):
        net = NetworkReader.readFrom('network.xml')
    else:
        net = buildNetwork(1, 20, 1,
                           hiddenclass=LSTMLayer, outputbias=False,
                           recurrent=True)
    # build trainer
    trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        sys.stdout.flush()
    # save the network
    NetworkWriter.writeToFile(net, 'network.xml')
    print()
    print("final error =", train_errors[-1])
    # run the trained net over the whole (normalized) series
    predicted = [net.activate(dat)[0] for dat in data]
    # map predictions back to the original price scale
    predicted_array = min_max_scaler.inverse_transform(
        np.array(predicted).reshape(-1, 1))
    print(predicted_array[-1])
    # plot actual vs predicted and save to disk
    plt.figure()
    legend_actual, = plt.plot(range(0, len(data)), temp, label='actual',
                              linestyle='--', linewidth=2, c='blue')
    legend_predicted, = plt.plot(range(0, len(data)), predicted_array,
                                 label='predicted', linewidth=1.5, c='red')
    plt.legend(handles=[legend_actual, legend_predicted])
    plt.savefig('error.png')
    plt.show()
rnn()
|
samshara/Stock-Market-Analysis-and-Prediction
|
smap_nepse/prediction/recurrent.py
|
Python
|
mit
| 3,734
|
# Italian / Italiano - Translations - Python 3 Only!
from seleniumbase import BaseCase
from seleniumbase import MasterQA
class CasoDiProva(BaseCase):
    """Italian-language wrapper around SeleniumBase's BaseCase.

    Each method is a thin delegating alias whose Italian name maps onto the
    English BaseCase API named in the comment above its return statement.
    No behavior is added beyond setting ``self._language``.
    """

    def __init__(self, *args, **kwargs):
        super(CasoDiProva, self).__init__(*args, **kwargs)
        self._language = "Italian"

    def apri(self, *args, **kwargs):
        # open(url)
        return self.open(*args, **kwargs)

    def apri_url(self, *args, **kwargs):
        # open_url(url)
        return self.open_url(*args, **kwargs)

    def fare_clic(self, *args, **kwargs):
        # click(selector)
        return self.click(*args, **kwargs)

    def doppio_clic(self, *args, **kwargs):
        # double_click(selector)
        return self.double_click(*args, **kwargs)

    def clic_lentamente(self, *args, **kwargs):
        # slow_click(selector)
        return self.slow_click(*args, **kwargs)

    def clic_se_visto(self, *args, **kwargs):
        # click_if_visible(selector, by=By.CSS_SELECTOR)
        return self.click_if_visible(*args, **kwargs)

    def clic_testo_del_collegamento(self, *args, **kwargs):
        # click_link_text(link_text)
        return self.click_link_text(*args, **kwargs)

    def aggiornare_testo(self, *args, **kwargs):
        # update_text(selector, text)
        return self.update_text(*args, **kwargs)

    def digitare(self, *args, **kwargs):
        # type(selector, text) # Same as update_text()
        return self.type(*args, **kwargs)

    def aggiungi_testo(self, *args, **kwargs):
        # add_text(selector, text)
        return self.add_text(*args, **kwargs)

    def ottenere_testo(self, *args, **kwargs):
        # get_text(selector, text)
        return self.get_text(*args, **kwargs)

    def verificare_testo(self, *args, **kwargs):
        # assert_text(text, selector)
        return self.assert_text(*args, **kwargs)

    def verificare_testo_esatto(self, *args, **kwargs):
        # assert_exact_text(text, selector)
        return self.assert_exact_text(*args, **kwargs)

    def verificare_testo_del_collegamento(self, *args, **kwargs):
        # assert_link_text(link_text)
        return self.assert_link_text(*args, **kwargs)

    def verificare_elemento(self, *args, **kwargs):
        # assert_element(selector)
        return self.assert_element(*args, **kwargs)

    def verificare_elemento_visto(self, *args, **kwargs):
        # assert_element_visible(selector) # Same as self.assert_element()
        return self.assert_element_visible(*args, **kwargs)

    def verificare_elemento_non_visto(self, *args, **kwargs):
        # assert_element_not_visible(selector)
        return self.assert_element_not_visible(*args, **kwargs)

    def verificare_elemento_presente(self, *args, **kwargs):
        # assert_element_present(selector)
        return self.assert_element_present(*args, **kwargs)

    def verificare_elemento_assente(self, *args, **kwargs):
        # assert_element_absent(selector)
        return self.assert_element_absent(*args, **kwargs)

    def verificare_titolo(self, *args, **kwargs):
        # assert_title(title)
        return self.assert_title(*args, **kwargs)

    def ottenere_titolo(self, *args, **kwargs):
        # get_title()
        return self.get_title(*args, **kwargs)

    def verificare_vero(self, *args, **kwargs):
        # assert_true(expr)
        return self.assert_true(*args, **kwargs)

    def verificare_falso(self, *args, **kwargs):
        # assert_false(expr)
        return self.assert_false(*args, **kwargs)

    def verificare_uguale(self, *args, **kwargs):
        # assert_equal(first, second)
        return self.assert_equal(*args, **kwargs)

    def verificare_non_uguale(self, *args, **kwargs):
        # assert_not_equal(first, second)
        return self.assert_not_equal(*args, **kwargs)

    def aggiorna_la_pagina(self, *args, **kwargs):
        # refresh_page()
        return self.refresh_page(*args, **kwargs)

    def ottenere_url_corrente(self, *args, **kwargs):
        # get_current_url()
        return self.get_current_url(*args, **kwargs)

    def ottenere_la_pagina_html(self, *args, **kwargs):
        # get_page_source()
        return self.get_page_source(*args, **kwargs)

    def indietro(self, *args, **kwargs):
        # go_back()
        return self.go_back(*args, **kwargs)

    def avanti(self, *args, **kwargs):
        # go_forward()
        return self.go_forward(*args, **kwargs)

    def è_testo_visto(self, *args, **kwargs):  # noqa
        # is_text_visible(text, selector="html")
        return self.is_text_visible(*args, **kwargs)

    def è_elemento_visto(self, *args, **kwargs):
        # is_element_visible(selector)
        return self.is_element_visible(*args, **kwargs)

    def è_elemento_presente(self, *args, **kwargs):
        # is_element_present(selector)
        return self.is_element_present(*args, **kwargs)

    def attendere_il_testo(self, *args, **kwargs):
        # wait_for_text(text, selector)
        return self.wait_for_text(*args, **kwargs)

    def attendere_un_elemento(self, *args, **kwargs):
        # wait_for_element(selector)
        return self.wait_for_element(*args, **kwargs)

    def attendere_un_elemento_visto(self, *args, **kwargs):
        # wait_for_element_visible(selector) # Same as wait_for_element()
        return self.wait_for_element_visible(*args, **kwargs)

    def attendere_un_elemento_non_visto(self, *args, **kwargs):
        # wait_for_element_not_visible(selector)
        return self.wait_for_element_not_visible(*args, **kwargs)

    def attendere_un_elemento_presente(self, *args, **kwargs):
        # wait_for_element_present(selector)
        return self.wait_for_element_present(*args, **kwargs)

    def attendere_un_elemento_assente(self, *args, **kwargs):
        # wait_for_element_absent(selector)
        return self.wait_for_element_absent(*args, **kwargs)

    def dormire(self, *args, **kwargs):
        # sleep(seconds)
        return self.sleep(*args, **kwargs)

    def attendere(self, *args, **kwargs):
        # wait(seconds) # Same as sleep(seconds)
        return self.wait(*args, **kwargs)

    def inviare(self, *args, **kwargs):
        # submit(selector)
        return self.submit(*args, **kwargs)

    def cancellare(self, *args, **kwargs):
        # clear(selector)
        return self.clear(*args, **kwargs)

    def js_fare_clic(self, *args, **kwargs):
        # js_click(selector)
        return self.js_click(*args, **kwargs)

    def js_aggiornare_testo(self, *args, **kwargs):
        # js_update_text(selector, text)
        return self.js_update_text(*args, **kwargs)

    def js_digitare(self, *args, **kwargs):
        # js_type(selector, text)
        return self.js_type(*args, **kwargs)

    def controlla_html(self, *args, **kwargs):
        # inspect_html()
        return self.inspect_html(*args, **kwargs)

    def salva_screenshot(self, *args, **kwargs):
        # save_screenshot(name)
        return self.save_screenshot(*args, **kwargs)

    def seleziona_file(self, *args, **kwargs):
        # choose_file(selector, file_path)
        return self.choose_file(*args, **kwargs)

    def eseguire_script(self, *args, **kwargs):
        # execute_script(script)
        return self.execute_script(*args, **kwargs)

    def eseguire_script_sicuro(self, *args, **kwargs):
        # safe_execute_script(script)
        return self.safe_execute_script(*args, **kwargs)

    def attiva_jquery(self, *args, **kwargs):
        # activate_jquery()
        return self.activate_jquery(*args, **kwargs)

    def bloccare_gli_annunci(self, *args, **kwargs):
        # ad_block()
        return self.ad_block(*args, **kwargs)

    def saltare(self, *args, **kwargs):
        # skip(reason="")
        return self.skip(*args, **kwargs)

    def verificare_i_collegamenti(self, *args, **kwargs):
        # assert_no_404_errors()
        return self.assert_no_404_errors(*args, **kwargs)

    def controlla_errori_js(self, *args, **kwargs):
        # assert_no_js_errors()
        return self.assert_no_js_errors(*args, **kwargs)

    def passa_al_frame(self, *args, **kwargs):
        # switch_to_frame(frame)
        return self.switch_to_frame(*args, **kwargs)

    def passa_al_contenuto_predefinito(self, *args, **kwargs):
        # switch_to_default_content()
        return self.switch_to_default_content(*args, **kwargs)

    def apri_una_nuova_finestra(self, *args, **kwargs):
        # open_new_window()
        return self.open_new_window(*args, **kwargs)

    def passa_alla_finestra(self, *args, **kwargs):
        # switch_to_window(window)
        return self.switch_to_window(*args, **kwargs)

    def passa_alla_finestra_predefinita(self, *args, **kwargs):
        # switch_to_default_window()
        return self.switch_to_default_window(*args, **kwargs)

    def ingrandisci_finestra(self, *args, **kwargs):
        # maximize_window()
        return self.maximize_window(*args, **kwargs)

    def illuminare(self, *args, **kwargs):
        # highlight(selector)
        return self.highlight(*args, **kwargs)

    def illuminare_clic(self, *args, **kwargs):
        # highlight_click(selector)
        return self.highlight_click(*args, **kwargs)

    def scorrere_fino_a(self, *args, **kwargs):
        # scroll_to(selector)
        return self.scroll_to(*args, **kwargs)

    def scorri_verso_alto(self, *args, **kwargs):
        # scroll_to_top()
        return self.scroll_to_top(*args, **kwargs)

    def scorri_verso_il_basso(self, *args, **kwargs):
        # scroll_to_bottom()
        return self.scroll_to_bottom(*args, **kwargs)

    def passa_il_mouse_sopra_e_fai_clic(self, *args, **kwargs):
        # hover_and_click(hover_selector, click_selector)
        return self.hover_and_click(*args, **kwargs)

    def è_selezionato(self, *args, **kwargs):
        # is_selected(selector)
        return self.is_selected(*args, **kwargs)

    def premere_la_freccia_su(self, *args, **kwargs):
        # press_up_arrow(selector="html", times=1)
        return self.press_up_arrow(*args, **kwargs)

    def premere_la_freccia_giù(self, *args, **kwargs):
        # press_down_arrow(selector="html", times=1)
        return self.press_down_arrow(*args, **kwargs)

    def premere_la_freccia_sinistra(self, *args, **kwargs):
        # press_left_arrow(selector="html", times=1)
        return self.press_left_arrow(*args, **kwargs)

    def premere_la_freccia_destra(self, *args, **kwargs):
        # press_right_arrow(selector="html", times=1)
        return self.press_right_arrow(*args, **kwargs)

    def clic_sugli_elementi_visibili(self, *args, **kwargs):
        # click_visible_elements(selector)
        return self.click_visible_elements(*args, **kwargs)

    def selezionare_opzione_per_testo(self, *args, **kwargs):
        # select_option_by_text(dropdown_selector, option)
        return self.select_option_by_text(*args, **kwargs)

    def selezionare_opzione_per_indice(self, *args, **kwargs):
        # select_option_by_index(dropdown_selector, option)
        return self.select_option_by_index(*args, **kwargs)

    def selezionare_opzione_per_valore(self, *args, **kwargs):
        # select_option_by_value(dropdown_selector, option)
        return self.select_option_by_value(*args, **kwargs)

    def creare_una_presentazione(self, *args, **kwargs):
        # create_presentation(name=None, theme="default", transition="default")
        return self.create_presentation(*args, **kwargs)

    def aggiungere_una_diapositiva(self, *args, **kwargs):
        # add_slide(content=None, image=None, code=None, iframe=None,
        #           content2=None, notes=None, transition=None, name=None)
        return self.add_slide(*args, **kwargs)

    def salva_la_presentazione(self, *args, **kwargs):
        # save_presentation(name=None, filename=None,
        #                   show_notes=False, interval=0)
        return self.save_presentation(*args, **kwargs)

    def avviare_la_presentazione(self, *args, **kwargs):
        # begin_presentation(name=None, filename=None,
        #                    show_notes=False, interval=0)
        return self.begin_presentation(*args, **kwargs)

    def creare_un_grafico_a_torta(self, *args, **kwargs):
        # create_pie_chart(chart_name=None, title=None, subtitle=None,
        #                  data_name=None, unit=None, libs=True)
        return self.create_pie_chart(*args, **kwargs)

    def creare_un_grafico_a_barre(self, *args, **kwargs):
        # create_bar_chart(chart_name=None, title=None, subtitle=None,
        #                  data_name=None, unit=None, libs=True)
        return self.create_bar_chart(*args, **kwargs)

    def creare_un_grafico_a_colonne(self, *args, **kwargs):
        # create_column_chart(chart_name=None, title=None, subtitle=None,
        #                     data_name=None, unit=None, libs=True)
        return self.create_column_chart(*args, **kwargs)

    def creare_un_grafico_a_linee(self, *args, **kwargs):
        # create_line_chart(chart_name=None, title=None, subtitle=None,
        #                   data_name=None, unit=None, zero=False, libs=True)
        return self.create_line_chart(*args, **kwargs)

    def creare_un_grafico_ad_area(self, *args, **kwargs):
        # create_area_chart(chart_name=None, title=None, subtitle=None,
        #                   data_name=None, unit=None, zero=False, libs=True)
        return self.create_area_chart(*args, **kwargs)

    def aggiungere_serie_al_grafico(self, *args, **kwargs):
        # add_series_to_chart(data_name=None, chart_name=None)
        return self.add_series_to_chart(*args, **kwargs)

    def aggiungi_punto_dati(self, *args, **kwargs):
        # add_data_point(label, value, color=None, chart_name=None)
        return self.add_data_point(*args, **kwargs)

    def salva_il_grafico(self, *args, **kwargs):
        # save_chart(chart_name=None, filename=None)
        return self.save_chart(*args, **kwargs)

    def mostra_il_grafico(self, *args, **kwargs):
        # display_chart(chart_name=None, filename=None, interval=0)
        return self.display_chart(*args, **kwargs)

    def estrarre_il_grafico(self, *args, **kwargs):
        # extract_chart(chart_name=None)
        return self.extract_chart(*args, **kwargs)

    def creare_un_tour(self, *args, **kwargs):
        # create_tour(name=None, theme=None)
        return self.create_tour(*args, **kwargs)

    def creare_un_tour_shepherd(self, *args, **kwargs):
        # create_shepherd_tour(name=None, theme=None)
        return self.create_shepherd_tour(*args, **kwargs)

    def creare_un_tour_bootstrap(self, *args, **kwargs):
        # create_bootstrap_tour(name=None, theme=None)
        return self.create_bootstrap_tour(*args, **kwargs)

    def creare_un_tour_driverjs(self, *args, **kwargs):
        # create_driverjs_tour(name=None, theme=None)
        return self.create_driverjs_tour(*args, **kwargs)

    def creare_un_tour_hopscotch(self, *args, **kwargs):
        # create_hopscotch_tour(name=None, theme=None)
        return self.create_hopscotch_tour(*args, **kwargs)

    def creare_un_tour_introjs(self, *args, **kwargs):
        # create_introjs_tour(name=None, theme=None)
        return self.create_introjs_tour(*args, **kwargs)

    def aggiungere_passo_al_tour(self, *args, **kwargs):
        # add_tour_step(message, selector=None, name=None,
        #               title=None, theme=None, alignment=None)
        return self.add_tour_step(*args, **kwargs)

    def riprodurre_il_tour(self, *args, **kwargs):
        # play_tour(name=None)
        return self.play_tour(*args, **kwargs)

    def esportare_il_tour(self, *args, **kwargs):
        # export_tour(name=None, filename="my_tour.js", url=None)
        return self.export_tour(*args, **kwargs)

    def ottenere_testo_pdf(self, *args, **kwargs):
        # get_pdf_text(pdf, page=None, maxpages=None, password=None,
        #              codec='utf-8', wrap=False, nav=False, override=False)
        return self.get_pdf_text(*args, **kwargs)

    def verificare_testo_pdf(self, *args, **kwargs):
        # assert_pdf_text(pdf, text, page=None, maxpages=None, password=None,
        #                 codec='utf-8', wrap=True, nav=False, override=False)
        return self.assert_pdf_text(*args, **kwargs)

    def verificare_file_scaricato(self, *args, **kwargs):
        # assert_downloaded_file(file)
        return self.assert_downloaded_file(*args, **kwargs)

    def fallire(self, *args, **kwargs):
        # fail(msg=None) # Inherited from "unittest"
        return self.fail(*args, **kwargs)

    def ottenere(self, *args, **kwargs):
        # get(url) # Same as open(url)
        return self.get(*args, **kwargs)

    def visita(self, *args, **kwargs):
        # visit(url) # Same as open(url)
        return self.visit(*args, **kwargs)

    def visita_url(self, *args, **kwargs):
        # visit_url(url) # Same as open(url)
        return self.visit_url(*args, **kwargs)

    def ottenere_elemento(self, *args, **kwargs):
        # get_element(selector) # Element can be hidden
        return self.get_element(*args, **kwargs)

    def trovare_elemento(self, *args, **kwargs):
        # find_element(selector) # Element must be visible
        return self.find_element(*args, **kwargs)

    def rimuovere_elemento(self, *args, **kwargs):
        # remove_element(selector)
        return self.remove_element(*args, **kwargs)

    def rimuovere_elementi(self, *args, **kwargs):
        # remove_elements(selector)
        return self.remove_elements(*args, **kwargs)

    def trovare_testo(self, *args, **kwargs):
        # find_text(text, selector="html") # Same as wait_for_text
        return self.find_text(*args, **kwargs)

    def impostare_testo(self, *args, **kwargs):
        # set_text(selector, text)
        return self.set_text(*args, **kwargs)

    def ottenere_attributo(self, *args, **kwargs):
        # get_attribute(selector, attribute)
        return self.get_attribute(*args, **kwargs)

    def imposta_attributo(self, *args, **kwargs):
        # set_attribute(selector, attribute, value)
        return self.set_attribute(*args, **kwargs)

    def impostare_gli_attributi(self, *args, **kwargs):
        # set_attributes(selector, attribute, value)
        return self.set_attributes(*args, **kwargs)

    def scrivere(self, *args, **kwargs):
        # write(selector, text) # Same as update_text()
        return self.write(*args, **kwargs)

    def impostare_tema_del_messaggio(self, *args, **kwargs):
        # set_messenger_theme(theme="default", location="default")
        return self.set_messenger_theme(*args, **kwargs)

    def visualizza_messaggio(self, *args, **kwargs):
        # post_message(message, duration=None, pause=True, style="info")
        return self.post_message(*args, **kwargs)

    def stampare(self, *args, **kwargs):
        # _print(msg) # Same as Python print()
        return self._print(*args, **kwargs)

    def differita_verificare_elemento(self, *args, **kwargs):
        # deferred_assert_element(selector)
        return self.deferred_assert_element(*args, **kwargs)

    def differita_verificare_testo(self, *args, **kwargs):
        # deferred_assert_text(text, selector="html")
        return self.deferred_assert_text(*args, **kwargs)

    def elaborare_differita_verificari(self, *args, **kwargs):
        # process_deferred_asserts(print_only=False)
        return self.process_deferred_asserts(*args, **kwargs)

    def accetta_avviso(self, *args, **kwargs):
        # accept_alert(timeout=None)
        return self.accept_alert(*args, **kwargs)

    def elimina_avviso(self, *args, **kwargs):
        # dismiss_alert(timeout=None)
        return self.dismiss_alert(*args, **kwargs)

    def passa_al_avviso(self, *args, **kwargs):
        # switch_to_alert(timeout=None)
        return self.switch_to_alert(*args, **kwargs)

    def trascinare_e_rilasciare(self, *args, **kwargs):
        # drag_and_drop(drag_selector, drop_selector)
        return self.drag_and_drop(*args, **kwargs)

    def caricare_html_file(self, *args, **kwargs):
        # load_html_file(html_file, new_page=True)
        return self.load_html_file(*args, **kwargs)

    def apri_html_file(self, *args, **kwargs):
        # open_html_file(html_file)
        return self.open_html_file(*args, **kwargs)

    def elimina_tutti_i_cookie(self, *args, **kwargs):
        # delete_all_cookies()
        return self.delete_all_cookies(*args, **kwargs)

    def ottenere_agente_utente(self, *args, **kwargs):
        # get_user_agent()
        return self.get_user_agent(*args, **kwargs)

    def ottenere_codice_lingua(self, *args, **kwargs):
        # get_locale_code()
        return self.get_locale_code(*args, **kwargs)
class MasterQA_Italiano(MasterQA, CasoDiProva):
    """MasterQA variant whose manual-check prompts are displayed in Italian."""

    def verificare(self, *args, **kwargs):
        """Run a MasterQA manual verification with Italian prompt text."""
        # "Manual Check"
        self.DEFAULT_VALIDATION_TITLE = "Controllo manuale"
        # "Does the page look good?"
        self.DEFAULT_VALIDATION_MESSAGE = "La pagina ha un bell'aspetto?"
        # verify(QUESTION)
        return self.verify(*args, **kwargs)
|
mdmintz/SeleniumBase
|
seleniumbase/translate/italian.py
|
Python
|
mit
| 21,139
|
import os
import inspect
import logging
# Configure the root logger once at import time and emit everything.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)

# Callable used to capture the current stack frame for caller inspection.
TRACEBACK_INSPECTOR = inspect.currentframe

# Severity aliases re-exported so callers need not import logging directly.
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
WARNING = logging.WARN  # logging.WARN is a legacy alias of logging.WARNING
CRITICAL = logging.CRITICAL
def debugCaller(traceback=None):
    """Return [module name, function name] identifying the caller of the
    frame passed in as *traceback*."""
    outer_frames = inspect.getouterframes(traceback)
    caller = outer_frames[1]
    # caller[1] is the filename, caller[3] the function name
    module = os.path.basename(caller[1])
    module = module.replace(".py", "").replace('<Script Block >', 'stdin')
    return [module, caller[3]]
class Logger(object):
"""
"""
level = INFO
criticalFunc = None
infoFunc = None
warningFunc = None
debugFunc = None
errorFunc = None
tracebackFunc = None
separatorFunc = None
spaceFunc = None
    @classmethod
    def onDebug(cls, func):
        """Register *func* as the handler invoked for debug messages."""
        cls.debugFunc = func

    @classmethod
    def onWarning(cls, func):
        """Register *func* as the handler invoked for warning messages."""
        cls.warningFunc = func

    @classmethod
    def onCritical(cls, func):
        """Register *func* as the handler invoked for critical messages."""
        cls.criticalFunc = func

    @classmethod
    def onError(cls, func):
        """Register *func* as the handler invoked for error messages."""
        cls.errorFunc = func

    @classmethod
    def onInfo(cls, func):
        """Register *func* as the handler invoked for info messages."""
        cls.infoFunc = func

    @classmethod
    def onTraceback(cls, func):
        """Register *func* as the handler invoked for traceback reports."""
        cls.tracebackFunc = func

    @classmethod
    def onSeparator(cls, func):
        """Register *func* as the handler used to draw separators."""
        cls.separatorFunc = func

    @classmethod
    def onSpace(cls, func):
        """Register *func* as the handler used to emit blank lines."""
        cls.spaceFunc = func
@classmethod
def warning(cls, msg):
"""
"""
if cls.level <= WARNING:
msgComplete = cls._buildString(
inspect.currentframe(), msg, WARNING)
if cls.warningFunc:
cls().warningFunc(msgComplete)
else:
logging.warning(msgComplete)
@classmethod
def info(cls, msg):
"""
"""
if cls.level <= INFO:
msgComplete = cls._buildString(inspect.currentframe(), msg, INFO)
if cls.infoFunc:
cls().infoFunc(msgComplete)
else:
logging.info(msgComplete)
@classmethod
def debug(cls, msg):
"""
"""
if cls.level <= DEBUG:
msgComplete = cls._buildString(inspect.currentframe(), msg, DEBUG)
if cls.debugFunc:
cls().debugFunc(msgComplete)
else:
logging.debug(msgComplete)
@classmethod
def error(cls, msg):
"""
"""
if cls.level <= ERROR:
msgComplete = cls._buildString(inspect.currentframe(), msg, ERROR)
if cls.errorFunc:
cls().errorFunc(msgComplete)
else:
logging.error(msgComplete)
@classmethod
def critical(cls, msg):
"""
"""
if cls.level <= CRITICAL:
msgComplete = cls._buildString(
inspect.currentframe(), msg, CRITICAL)
if cls.criticalFunc:
cls().criticalFunc(msgComplete)
else:
logging.critical(msgComplete)
def traceback(cls, msg):
"""
"""
if cls.tracebackFunc:
cls.tracebackFunc(msg)
else:
TracebackError(msg)
@classmethod
def _buildString(cls, input, msg, typeErr):
""" Build the display error string by the type of error """
debugAsString = debugCaller(input)
if typeErr in [INFO, WARNING]:
return "[%s] %s" % (debugAsString[0], msg)
return "[%s::%s] %s" % (debugAsString[0], debugAsString[1], msg)
@classmethod
def getLogger(cls, loggerName):
""" Return the given name of the logger """
logging.getLogger(loggerName)
@classmethod
def setLevel(cls, level):
""" set the level of debugging """
cls.level = level
@classmethod
def getLevel(cls):
""""""
return cls.level
@classmethod
def addSeparator(cls, separator="-", length=75):
"""
Create a line of separator to help viewable
displaying of an error
"""
if cls.separatorFunc:
cls().separatorFunc(separator * length)
else:
logging.info(separator * length)
@classmethod
def addSpace(cls):
if cls.spaceFunc:
cls().spaceFunc()
else:
logging.info("")
class TracebackError(object):
    """Capture the whole active traceback (instead of only the last message)
    as a string and log it as critical via ``Logger``."""

    def __init__(self, e):
        """TracebackError Constructor. *e* is accepted for API compatibility;
        the traceback is taken from the currently-handled exception."""
        super(TracebackError, self).__init__()
        import traceback
        # Fix: ``import StringIO`` is Python-2 only; fall back to io.StringIO
        # so the module also loads on Python 3.
        try:
            from StringIO import StringIO  # Python 2
        except ImportError:
            from io import StringIO  # Python 3
        fileHandler = StringIO()
        traceback.print_exc(file=fileHandler)
        self.trace = fileHandler.getvalue()
        Logger.critical(self.trace)

    def asString(self):
        """Return the captured traceback text."""
        return self.trace
if __name__ == "__main__":

    class _SmokeTest(object):
        """Tiny manual smoke test: one message per level."""

        def runTest(self):
            Logger.setLevel(DEBUG)
            Logger.info("info")
            Logger.critical("critical")
            Logger.debug("debug")
            Logger.warning("warning")
            Logger.error("error")

    _SmokeTest().runTest()
|
alok1974/compage
|
src/compage/logger.py
|
Python
|
mit
| 5,536
|
# coding: utf-8
import pygame
import sys
from pygame.locals import *
from gui import *
from conexao import *
from jogador import *
from Queue import Queue
from threading import Thread
"""
Cliente
Tp de Redes - Truco
UFSJ
Carlos Magno
Lucas Geraldo
Requisitos:
*python 2.7
*pygame
Modulo Principal.
"""
class Principal(Gui):
"""
Classe Principal
"""
def __init__(self):
#---HABILITAR BOTAO TRUCO---
# Ative para ativar a opção de pedir truco..
self.truco_habilitado = 1
#--------------------
self.mensagem_servidor = ""
self.carta_selecionada = -1
self.sua_vez = 0
self.conexao = Conexao()
self.conexao.conectar()
self.gui = Gui()
self.jogador = Jogador()
self.recebe_cartas()
self.gui.carrega_cartas()
#--------------------
self.pede_truco = "0"
self.rodada = 1
self.gui.valor_rodada = "0"
self.flag_truco = 0
self.gui.pontos = "0000"
self.gui.partidas = "000"
self.question_truco = "0"
self.proposta_truco_equipe = "0"
self.resposta_proposta_truco = "0"
self.mesa_jogo = "000000"
self.gui.mensagem_vez = "Aguarde..."
self.gui.cont_cartas = 3
#-----------------
self.quee = Queue()
self.verifica = Thread(target=self.verifica_resposta_servidor, args=(
self.quee, self.conexao))
self.verifica.daemon = True
self.verifica.start()
def atualiza_mensagem(self):
"Atualiza o campo de mensagens.."
if(self.sua_vez is 0):
self.gui.mensagem_vez = "Aguarde..."
self.gui.escrever(self.gui.mensagem_vez, (40, 430), (255, 0, 0))
if(self.sua_vez is 1):
self.gui.mensagem_vez = "Sua Vez..."
self.gui.escrever(self.gui.mensagem_vez, (40, 430), (0, 255, 0))
def agrupa_cartas(self, lista):
"""Agrupa as cartas recebidas do servidor"""
final = ""
c1 = ""
for i in lista:
c1 = c1 + i
if(len(c1) == 2):
final = final + c1 + ","
c1 = ""
lista = final.split(',')
lista.pop()
return lista
def recebe_cartas(self):
"""
Carrega as cartas recebidas do servidor.
Extrai os dados iniciais da primeira conexão.
"""
self.mensagem_servidor = self.conexao.ler_socket()
#--Extrai os dados iniciais...
self.jogador.id = self.mensagem_servidor[0:1]
self.jogador.equipe = self.mensagem_servidor[1:2]
self.sua_vez = int(self.mensagem_servidor[2:3])
cartas = self.mensagem_servidor[4:10]
print "ID ", self.jogador.id, "Equipe ", self.jogador.equipe, "Sua Vez ", self.sua_vez
self.jogador.cartas_mao = cartas
cartas = self.agrupa_cartas(cartas)
for i in cartas:
self.gui.cartas_recebidas.append(i)
def verifica_resposta_servidor(self, fila, conexao):
"""Verifica a conexao.."""
while (True):
palavra = conexao.ler_socket()
if(palavra is not None):
self.quee.put(palavra)
def verifica_erro_mensagem(self,lista):
"""Verifica e corrige erro na mensagem recebida"""
tamanho=len(lista)
if(tamanho<30):
lista = lista[
:0] + "00" + lista[1:]
print "Mensagem corrigida ",lista
return lista
def processa_resposta(self, lista):
"""Vai processar a mensagem recebida"""
self.mensagem_servidor = lista
if(lista is not None):
print "resposta vinda do servidor ", lista
#lista = self.verifica_erro_mensagem(lista)
self.sua_vez = int(lista[2:3])
self.atualiza_mensagem()
self.finaliza_rodada(int(lista[3:4]))
self.rodada = int(lista[3:4])
cartas = lista[4:10]
if(cartas is not "000000"):
pass
else:
# Considerando que nos decorrer das partida o servidor não envia as
# cartas. Redefine a mão do jogador.
self.gui.cartas_recebidas = []
self.jogador.cartas_mao = cartas
cartas = self.agrupa_cartas(cartas)
for i in cartas:
self.gui.cartas_recebidas.append(i)
self.gui.pontos = lista[10:14]
self.gui.partidas = lista[14:17]
self.gui.valor_rodada = lista[17:19]
self.question_truco = lista[19:20]
self.proposta_truco_equipe = lista[20:21]
self.mesa_jogo = lista[22:30]
self.renderiza_mesa()
print self.sua_vez
if(self.gui.cont_cartas > 1):
self.gui.cont_cartas = self.gui.cont_cartas - 1
def renderiza_mesa(self):
"""Função que renderiza_mesa"""
# 00 00 00 00
self.gui.caminho_cartas
print self.mensagem_servidor
cartas = self.agrupa_cartas(self.mesa_jogo)
print "Cartas Mesa ", cartas
cont = 0
for i in cartas:
if not (i == "00" or i == "0"):
i = self.gui.caminho_cartas + i + ".png"
if(self.jogador.id == "0"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
if cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
if cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
2, self.gui.cont_cartas)
if cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif(self.jogador.id == "1"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
elif cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
elif cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif(self.jogador.id == "2"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
2, self.gui.cont_cartas)
elif cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
elif cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
elif (self.jogador.id == "3"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
elif cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
2, self.gui.cont_cartas)
elif cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
cont = cont + 1
def finaliza_rodada(self, valor):
"""Verifica se a rodada terminou e limpa a tela"""
if(int(self.rodada) is not valor):
self.gui.tela_padrao(self.jogador.equipe)
print "Limpando a rodada"
def prepara_mensagem(self, carta_jogada):
"""Prepara uma mensagem da carta jogada para o envio"""
# Acerta a posicao da carta na mesa
if(int(self.jogador.id) is 0):
self.mensagem_servidor = self.mensagem_servidor[
:22] + carta_jogada + self.mensagem_servidor[24:]
if(int(self.jogador.id) is 1):
self.mensagem_servidor = self.mensagem_servidor[
:24] + carta_jogada + self.mensagem_servidor[26:]
if(int(self.jogador.id) is 2):
self.mensagem_servidor = self.mensagem_servidor[
:26] + carta_jogada + self.mensagem_servidor[28:]
if(int(self.jogador.id) is 3):
self.mensagem_servidor = self.mensagem_servidor[
:28] + carta_jogada + self.mensagem_servidor[30:]
def verifica_proposta_truco(self):
"""Exibe a tela de Truco"""
if(self.question_truco == "1") and self.sua_vez is 1:
self.gui.tela_truco()
self.flag_truco = 1
def solicita_truco(self):
"""Solicitar Truco"""
if(self.sua_vez is 1):
print "Solicitando Truco.."
self.mensagem_servidor = self.mensagem_servidor[
:19] + self.pede_truco + self.mensagem_servidor[20:]
print "Mensagem enviada na solicitação de Truco..", self.mensagem_servidor
self.conexao.envia_mensagem(self.mensagem_servidor)
self.pede_truco = "0"
def responde_truco(self):
"""Envia uma mensagem para o servidor com a resposta do truco"""
self.mensagem_servidor = self.mensagem_servidor[
:21] + self.resposta_proposta_truco + self.mensagem_servidor[22:]
print "Enviando a Seguinte resposta de Truco ", self.mensagem_servidor
self.conexao.envia_mensagem(self.mensagem_servidor)
def envia_carta_servidor(self, carta_jogada):
"""Dispara cartas para o servidor e altera os campos necessarios.."""
if carta_jogada is not None:
carta_jogada = carta_jogada.split("/")[1].split(".")[0]
# 1(ID)|a(Equipe)|0(vez)|0(rodada)|4p7c7o(mao)|0000(placar_jogo)|000(placar_rodada)|00(valor
# rodada)|0(question)|0(equipe question)|0(resposta
# truco)|00000000(mesa)|0(virada)
self.prepara_mensagem(carta_jogada)
# envia a mensagem para o servidor..
print "mensagem para o envio ", self.mensagem_servidor
self.conexao.envia_mensagem(self.mensagem_servidor)
def main(self):
"""Realiza a renderização.."""
pygame.init()
pygame.display.set_caption("Truco")
pygame.DOUBLEBUF
self.gui.iniciar()
self.carta_selecionada = -1
select = 0
# print "Mensagem das Cartas ",self.mensagem_servidor
while True:
for event in pygame.event.get():
self.gui.mostra_pontuacao(self.jogador.equipe)
self.gui.rodadas(self.jogador.equipe)
self.atualiza_mensagem()
self.verifica_proposta_truco()
self.gui.desenha_botao_truco(
self.gui.valor_rodada, self.proposta_truco_equipe)
if event.type == QUIT:
print "Encerrando conexão...."
pygame.quit()
sys.exit()
self.verifica.exit()
self.quee.join()
if event.type == KEYDOWN and self.sua_vez == 1:
op = event.unicode
print op
op = str(op)
if op is "":
op = str(event.key)
print op
if op == "1":
self.gui.update_card(
self.gui.mao[0], self.gui.pos_cartas_jog)
self.carta_selecionada = 0
if op == "2":
self.gui.update_card(
self.gui.mao[1], self.gui.pos_cartas_jog)
self.carta_selecionada = 1
if op == "3":
self.gui.update_card(
self.gui.mao[2], self.gui.pos_cartas_jog)
self.carta_selecionada = 2
if (op == "275" or op == "276") and self.rodada is not 1:
"""Teclas de Seta esq e dir
carta oculta
"""
self.gui.update_card(
self.gui.mao[3], self.gui.pos_cartas_jog)
self.carta_selecionada = 3
else:
print "Jogada não permitida."
if op == "273":
print "carta jogada", self.gui.mao[self.carta_selecionada]
if (self.carta_selecionada != -1):
self.sua_vez = 1 # Bloqueia a mão ..
self.envia_carta_servidor(
self.gui.mao[self.carta_selecionada])
if self.carta_selecionada is not 3:
self.gui.mao[self.carta_selecionada] = None
self.gui.verifica_mao(self.gui.mao, self.conexao)
if event.type == MOUSEBUTTONDOWN and select == 0:
"""Define a mudança da tela"""
print event.button, event.pos
fundo = pygame.image.load(
self.gui.caminho_background + "fundo.jpg")
self.gui.novo_tamanho_janela()
self.gui.tela.blit(fundo, [0, 0])
self.gui.update_card_adversario(0, 3)
self.gui.escrever(
"Para selecionar cartas escolha [1,2,3]", (30, 30),
self.gui.branco)
self.gui.escrever(
"Para Jogar a carta utilize seta para frente", (
30, 50),
self.gui.branco)
self.gui.escrever(
"Utilize as setas direcionais para ocultar", (30, 70),
self.gui.branco)
select = 1
if event.type == MOUSEBUTTONDOWN and self.sua_vez == 1:
pos = event.pos
print "Posicao ", pos
if (pos[0] > 670 and pos[0] < 780):
if(pos[1] > 471 and pos[1] < 471 + 20):
# self.gui.desenha_botao_truco(self.gui.valor_rodada)
if (self.truco_habilitado is 1):
print "entrouuu"
print "Variaveis do truco Sua Vez ", self.sua_vez, type(self.sua_vez), "Minha equipe ", self.jogador.equipe, type(self.jogador.equipe), "Proposta truco equipe ", self.proposta_truco_equipe, type(self.proposta_truco_equipe)
if(self.sua_vez is 1 and (self.jogador.equipe == self.proposta_truco_equipe or self.proposta_truco_equipe == "0")):
print "pedindo truco"
self.pede_truco = "1"
self.solicita_truco()
self.flag_truco = 1
else:
print self.gui.mao
print "Não é permitido pedir truco na mão de 12"
else:
print "A opção de truco não está Habilitada."
if (pos[0] > 363 and pos[0] < 392) and self.flag_truco is 1:
if (pos[1] > 236 and pos[1] < 276):
print "Truco Aceito"
self.resposta_proposta_truco = "1"
self.responde_truco()
self.gui.tela_padrao(self.jogador.equipe)
self.flag_truco = 0
if (pos[0] > 410 and pos[0] < 441) and self.flag_truco is 1:
if (pos[1] > 237 and pos[1] < 266):
print "Truco Não Foi aceito"
self.gui.tela_padrao(self.jogador.equipe)
self.resposta_proposta_truco = "0"
se.responde_truco()
self.flag_truco = 0
# self.cartas_jogadas()
pygame.display.update()
for i in range(0, 1):
# Percorre a fila lendo as mensagens recebidas do servidor
if not self.quee.empty():
retorno = self.quee.get(i)
self.verifica_erro_mensagem(retorno)
self.processa_resposta(retorno)
# Adiciona um evento na pilha de eventos para atualizar a
# tela.
evento = pygame.event.Event(USEREVENT)
pygame.event.post(evento)
if __name__ == '__main__':
    # Build the client (connects to the server) and enter the render loop.
    Principal().main()
|
Exterminus/Redes
|
Cliente/Cliente_Interface/cliente_gui.py
|
Python
|
mit
| 18,741
|
import os
import dj_database_url
import re
from django.conf import settings
from cabot.celeryconfig import *
from cabot.cabot_config import *
# Django settings for the Cabot project; most values come from env vars.
settings_dir = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(settings_dir)
# NOTE(review): env vars are strings, so DEBUG="false" or "0" is still
# truthy here — deployments must unset DEBUG to disable it; confirm.
TEMPLATE_DEBUG = DEBUG = os.environ.get("DEBUG", False)
ADMINS = (
    ('Admin', os.environ.get('ADMIN_EMAIL', 'name@example.com')),
)
MANAGERS = ADMINS
# DATABASE_URL is required; dj_database_url parses it into Django settings.
DATABASES = {'default': dj_database_url.parse(os.environ["DATABASE_URL"])}
if not DEBUG:
    DATABASES['default']['OPTIONS'] = {'autocommit': True}
USE_TZ = True
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '*').split(',')
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = os.environ.get('TIME_ZONE', 'Etc/UTC')
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, os.path.pardir, 'static/')
COMPRESS_ROOT = STATIC_ROOT
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = [os.path.join(PROJECT_ROOT, 'static')]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded fallback SECRET_KEY is committed here — any
# deployment not setting DJANGO_SECRET_KEY shares a public key; confirm
# every environment overrides it.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY', '2FL6ORhHwr5eX34pP9mMugnIOd3jzVuT45f7w430Mt5PnEwbcJgma0q8zUXNZ68A')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'cabot.urls'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'south',
    'compressor',
    'polymorphic',
    'djcelery',
    'mptt',
    'jsonify',
    'cabot.cabotapp',
    'rest_framework',
)
# Load additional apps from configuration file
# (CABOT_PLUGINS_ENABLED comes from cabot.cabot_config, imported above.)
CABOT_PLUGINS_ENABLED_PARSED = []
for plugin in CABOT_PLUGINS_ENABLED.split(","):
    # Hack to clean up if versions of plugins specified
    exploded = re.split(r'[<>=]+', plugin)
    CABOT_PLUGINS_ENABLED_PARSED.append(exploded[0])
INSTALLED_APPS += tuple(CABOT_PLUGINS_ENABLED_PARSED)
COMPRESS_PRECOMPILERS = (
    ('text/coffeescript', 'coffee --compile --stdio'),
    ('text/eco',
     'eco -i TEMPLATES {infile} && cat "$(echo "{infile}" | sed -e "s/\.eco$/.js/g")"'),
    ('text/less', 'lessc {infile} > {outfile}'),
)
EMAIL_HOST = os.environ.get('SES_HOST', 'localhost')
EMAIL_PORT = int(os.environ.get('SES_PORT', 25))
EMAIL_HOST_USER = os.environ.get('SES_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('SES_PASS', '')
EMAIL_BACKEND = os.environ.get('SES_BACKEND', 'django_smtp_ssl.SSLEmailBackend')
# NOTE(review): env value is a string, so SES_USE_TLS="0" is truthy — confirm.
EMAIL_USE_TLS = os.environ.get('SES_USE_TLS', 0)
COMPRESS_OFFLINE = not DEBUG
COMPRESS_URL = '/static/'
RECOVERY_SNIPPETS_WHITELIST = (
    r'https?://[^.]+\.hackpad\.com/[^./]+\.js',
    r'https?://gist\.github\.com/[^.]+\.js',
    r'https?://www\.refheap\.com/[^.]+\.js',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'log_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'verbose',
            # LOG_FILE is a required environment variable.
            'filename': os.environ['LOG_FILE'],
            'maxBytes': 1024 * 1024 * 25,  # 25 MB
            'backupCount': 5,
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'log_file', 'mail_admins'],
            'level': 'INFO',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['console', 'log_file', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.db.backends': {
            'handlers': ['console', 'log_file', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
        # Catch All Logger -- Captures any other logging
        '': {
            'handlers': ['console', 'log_file', 'mail_admins'],
            'level': 'INFO',
            'propagate': True,
        }
    }
}
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissions',
    ],
    'DEFAULT_FILTER_BACKENDS': [
        'rest_framework.filters.DjangoFilterBackend',
        'rest_framework.filters.OrderingFilter',
    ]
}
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
# Optional LDAP auth, enabled by AUTH_LDAP=true in the environment.
AUTH_LDAP = os.environ.get('AUTH_LDAP', 'false')
if AUTH_LDAP.lower() == "true":
    from settings_ldap import *
    AUTHENTICATION_BACKENDS += tuple(['django_auth_ldap.backend.LDAPBackend'])
|
lolotux/cabot-docker
|
cabot/settings.py
|
Python
|
mit
| 7,851
|
from m2core.m2core import M2Core, logger
from m2core import bases
from m2core import data_schemes
from m2core import db
from m2core import utils
from m2core import common
|
mdutkin/m2core
|
m2core/__init__.py
|
Python
|
mit
| 171
|
class UndirectedGraphNode:
    """Graph node: a ``label`` value plus a mutable ``neighbors`` list."""

    def __init__(self, x):
        # Fresh list per instance — adjacency starts empty.
        self.neighbors = []
        self.label = x
#using DFS
class Solution:
    # @param node, a undirected graph node
    # @return a undirected graph node
    def cloneGraph(self, node):
        """Deep-copy the undirected graph reachable from ``node``.

        Iterative traversal with an explicit stack; ``seen`` maps each
        original node to its clone so shared neighbors and cycles are
        cloned exactly once.  Returns the clone of ``node``.
        """
        # Fix: the original crashed with AttributeError on a None input;
        # return None to honor the usual contract.
        if node is None:
            return None
        # Generalization: build clones with the input's own class instead of
        # the module-global UndirectedGraphNode (any class with a compatible
        # (label) constructor and a ``neighbors`` list works).
        node_cls = type(node)
        seen = {None: None}
        head = node_cls(node.label)
        seen[node] = head
        stack = [node]
        while stack:
            current = stack.pop()
            for neighbor in current.neighbors:
                if neighbor not in seen:
                    clone = node_cls(neighbor.label)
                    seen[current].neighbors.append(clone)
                    seen[neighbor] = clone
                    stack.append(neighbor)
                else:
                    # Already cloned: just wire up the edge.
                    seen[current].neighbors.append(seen[neighbor])
        return head
# Tiny manual demo: clone a 3-node graph and print the labels of the
# clone's direct neighbors.
A = UndirectedGraphNode(2)
B = UndirectedGraphNode(3)
C = UndirectedGraphNode(4)
A.neighbors.append(B)
A.neighbors.append(C)
B.neighbors.append(C)
N = Solution()
for i in N.cloneGraph(A).neighbors:
    # Fix: ``print i.label`` is Python-2-only syntax; the parenthesized
    # form behaves identically on Python 2 and 3 for a single argument.
    print(i.label)
|
bourneagain/pythonBytes
|
cloneGraph_BFS.py
|
Python
|
mit
| 1,080
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))

# The long description comes straight from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(
    # Distribution name on PyPI.
    name='bugherd',
    # PEP 440 development version.
    version='0.1.dev1',
    description='Access bugherd.com API',
    long_description=long_description,
    # Project home page (the GitHub repository).
    url='https://github.com/brooksc/bugherd',
    author='Brooks Cutter',
    author_email='brooksc@brooksc.com',
    license='MIT',
    # Trove classifiers: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Bug Tracking',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='bugherd',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # Run-time dependencies installed by pip.
    install_requires=['requests'],
    # Optional extras: pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    # No bundled package data, data files or console scripts (yet).
    package_data={},
    data_files=[],
    entry_points={},
)
|
brooksc/bugherd
|
setup.py
|
Python
|
mit
| 4,028
|
from jsonrpc import ServiceProxy
from getpass import getpass

# Connect to the local wallet's JSON-RPC service.
access = ServiceProxy("http://127.0.0.1:52398")
# Fix: read the passphrase with getpass so it is not echoed to the
# terminal (raw_input displayed the secret as it was typed; getpass is
# also Python 2/3 portable).
pwd = getpass("Enter wallet passphrase: ")
# Unlock the wallet for 60 seconds.
access.walletpassphrase(pwd, 60)
|
harambe-dev/harambecoin
|
contrib/wallettools/walletunlock.py
|
Python
|
mit
| 159
|
"""Get a `re.Pattern` instance (as given by re.compile()) with control over defaults of it's methods.
Useful to reduce if/else boilerplate when handling the output of search functions (match, search, etc.)
See [regex_search_hack.md](https://gist.github.com/thorwhalen/6c913e9be35873cea6efaf6b962fde07) for more explanatoins of the
use case.
Example;
>>> dflt_result = type('dflt_search_result', (), {'groupdict': lambda x: {}})()
>>> p = re_compile('.*(?P<president>obama|bush|clinton)', search=dflt_result, match=dflt_result)
>>>
>>> p.search('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
'bush'
>>> p.match('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
'bush'
>>>
>>> # if not match is found, will return 'Not found', as requested
>>> p.search('This does not contain a president').groupdict().get('president', 'Not found')
'Not found'
>>>
>>> # see that other non-wrapped re.Pattern methods still work
>>> p.findall('I am beating around the bush, am I?')
['bush']
"""
import re
from functools import wraps
def add_dflt(func, dflt_if_none):
    """Wrap *func* so that a ``None`` result is replaced by a default.

    If *dflt_if_none* is callable it is invoked lazily to produce the
    default; otherwise it is returned as-is.  Non-``None`` results pass
    through untouched.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        outcome = func(*args, **kwargs)
        if outcome is None:
            return dflt_if_none() if callable(dflt_if_none) else dflt_if_none
        return outcome
    return wrapped_func
def re_compile(pattern, flags=0, **dflt_if_none):
    """Compile *pattern* into an object that behaves like a compiled
    ``re`` pattern, but whose named search methods return a default value
    instead of ``None`` on a failed match.

    Keyword arguments select the methods to intercept (``search=...``,
    ``match=...``, etc.); each value is the default returned when that
    method would yield ``None`` (called first if it is callable).  Every
    other attribute of the compiled pattern is passed through unchanged,
    so the result can be used wherever a compiled pattern is expected.
    """
    compiled = re.compile(pattern, flags=flags)
    proxy = type('MyRegexCompilation', (object,), {})()
    intercepted = set(dflt_if_none)

    def _defaulted(method, fallback):
        # Wrap one pattern method so None results become the fallback.
        @wraps(method)
        def call(*args, **kwargs):
            hit = method(*args, **kwargs)
            if hit is not None:
                return hit
            return fallback() if callable(fallback) else fallback
        return call

    for attr, fallback in dflt_if_none.items():
        setattr(proxy, attr, _defaulted(getattr(compiled, attr), fallback))
    for attr in dir(compiled):
        if not attr.startswith('__') and attr not in intercepted:
            setattr(proxy, attr, getattr(compiled, attr))
    return proxy
|
thorwhalen/ut
|
pstr/regex_with_defaults.py
|
Python
|
mit
| 3,075
|
#coding=utf-8
#author='Shichao-Dong'
import requests
def client_getProductTypes():
    """Authenticate against the test server, then fetch the product types
    using the session cookie from the auth response.  Manual-test helper;
    endpoints and credentials are hard-coded for the 172.31.3.73 test host."""
    url = "http://172.31.3.73:8888/client/v1/clientAuth.action"
    # NOTE(review): the hard-coded Content-Length "475" may not match the
    # actual encoded body; requests normally computes it — confirm intent.
    headers = {"Connection": "keep-alive",
               # "Referer": "http://172.31.3.73:6020/layout_new/login.jsp?url=http://172.31.3.73:6020/layout_new/login.html",
               #"Accept-Language": "zh-CN",
               #"x-requested-with": "XMLHttpRequest",
               "Content-Type": "application/x-www-form-urlencoded",
               "Accept-Encoding": "gzip",
               #"Pragm": "=no-cache",
               #"Accept": "application/json, text/javascript, */*; q=0.01",
               "User-Agent": "waiqin_android_5216010622618132075",
               "Content-Length": "475",
               "Host": "172.31.3.73:8888",
               #"clientid": "gaeaclient-android-000004-001002",
               #"clientver": "5.7.5",
               #"cmd": "STATUSREPORT",
               "wq-lang": "zh_CN",
               #"zt": "gzip"
               }
    data = {"info.versioncode": "201017",
            "info.clientVer": "5.7.5",
            "info.os": "Android 6.0",
            "info.clientId": "gaeaclient-android-000004-001002",
            "info.md5": "e95d7df6d46d167ce6984b5ca348eb22",
            "info.esn": "352591070002482",
            "info.tenantCode": "dongshichao",
            "info.userCode": "dong001",
            "info.phoneModel": "infocus m535",
            "info.appVer": "1.2.18.0",
            "info.screenheight": "1184",
            "info.simCardNum": "2",
            "info.password": "048b62076a732069",
            "info.appId": "waiqin365@zhangkong",
            "info.imsi": "X0000000000100X",
            "info.type": "1",
            "info.screenwidth": "720"
            }
    r = requests.post(url=url, headers=headers, data=data)
    # Carry the auth session over to the next request.
    cookie = r.cookies.get_dict()
    cookie1 = "WQSESSIONID=" + "".join(cookie["WQSESSIONID"])
    url2 = "http://172.31.3.73:8888/app/bas_pd/client/v1/getProductTypes.action"
    headers1 = {"Connection": "keep-alive",
                #"Referer": "http://172.31.3.73:6020/layout_new/login.html",
                "Accept-Language": "zh-CN,zh;q=0.8",
                #"x-requested-with": "XMLHttpRequest",
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept-Encoding": "gzip,deflate,sdch",
                #"Pragm": "=no-cache",
                #"Accept": "application/json, text/javascript, */*; q=0.01",
                "User-Agent": "waiqin_android_5216010622618132075",
                #"Content-Length": "475",
                "Host": "172.31.3.73:8888",
                #"clientid": "gaeaclient-android-000004-001002",
                #"clientver": "5.7.5",
                #"cmd": "STATUSREPORT",
                #"wq-lang":"zh_CN",
                #"zt": "gzip"
                # Fix: the header name was misspelled "Coolie", so the
                # session cookie was never actually sent to the server.
                "Cookie": cookie1
                }
    data1 = {"condition.parent_id": "1"
             }
    r1 = requests.post(url=url2, headers=headers1, data=data1)
    # Single-argument parenthesized print works on both Python 2 and 3.
    print(r1.content)
if __name__=="__main__":
client_getProductTypes()
|
NJ-zero/Android
|
Request/Client/client_getProductTypes.py
|
Python
|
mit
| 2,677
|
from resumable import split, rebuild
import requests
def get(s):
    """Identity helper: return *s* unchanged (used as a resumable pipeline step)."""
    return s
@rebuild
def example(_):
    # Demo pipeline: each split(...) call marks a resumable checkpoint boundary.
    # NOTE(review): `rebuild` appears to carve this function into per-checkpoint
    # callables (see main() below) -- confirm against the resumable package
    # before restructuring this body.
    print('this is a good start')
    # Checkpointed HTTP fetch -- hits an external host at run time.
    value = split(requests.get, 'first')('http://ms.mause.me')
    print(value.text)
    value = split(lambda: 'hello', 'second')()
    print('hello', value)
    split(print, 'derp')()
    a, b, c = split(get, 'multiple')('abc')
    print(a, b, c)
    return split(get)('otherworldly')
def main():
    """Drive the resumable pipeline stage by stage.

    Iterates the stages produced by the @rebuild decorator on ``example``,
    feeding the previous stage's return value into any stage that declares a
    positional parameter.

    NOTE(review): assumes ``example`` is an ordered mapping of
    stage-name -> function after decoration -- confirm with the
    ``resumable`` package.
    """
    arg = None
    for name, func in example.items():
        # Zero-argument stages are called bare; others receive the prior result.
        if func.__code__.co_argcount == 0:
            arg = func()
        else:
            arg = func(arg)
    print(arg)
if __name__ == '__main__':
    main()
|
Mause/resumable
|
main.py
|
Python
|
mit
| 673
|
# -*- coding: utf-8 -*-
from itertools import product
import requests
import shutil
def api_list(apiargs):
    """Google Street View Image API results.
    Constructs a list of `Google Street View Image API queries <https://developers.google.com/maps/documentation/streetview/>`_
    from a dictionary.
    Args:
      apiargs (dict):
        Dict containing `street view URL parameters <https://developers.google.com/maps/documentation/streetview/intro>`_.
        Each parameter can have multiple values if separated by ``;``.
        The dict is not modified.
    Returns:
      A ``listof dict`` containing single query requests per dictionary for Google Street View Image API,
      one dict per combination of the multi-valued parameters.
    Examples:
      ::
        # Import google_streetview for the api and helper module
        import google_streetview.api
        import google_streetview.helpers
        # Create a dictionary with multiple parameters separated by ;
        apiargs = {
          'location': '46.414382,10.013988;40.720032,-73.988354',
          'size': '640x300;640x640',
          'heading': '0;90;180;270',
          'fov': '0;90;120',
          'pitch': '-90;0;90'
        }
        # Get a list of all possible queries from multiple parameters
        api_list = google_streetview.helpers.api_list(apiargs)
        # Create a results object for all possible queries
        results = google_streetview.api.results(api_list)
        # Preview results
        results.preview()
        # Download images to directory 'downloads'
        results.download_links('downloads')
        # Save metadata
        results.save_metadata('metadata.json')
    """
    # Split each parameter's ';'-separated values. Working on a fresh dict
    # fixes the original behaviour of popping every key out of the caller's
    # apiargs (the input was silently emptied as a side effect).
    api_queries = {k: v.split(';') for k, v in apiargs.items()}
    keys = list(api_queries)
    # Cartesian product over all value lists yields one dict per query.
    out = []
    for combo in product(*api_queries.values()):
        out.append(dict(zip(keys, combo)))
    return out
def download(url, file_path):
    """Stream *url* to *file_path* on disk.

    Silently does nothing when the HTTP status is not 200 (no error is
    raised and no file is written).
    """
    r = requests.get(url, stream=True)
    if r.status_code == 200: # if request is successful
        with open(file_path, 'wb') as f:
            # Decompress gzip/deflate transfer-encoding before copying raw bytes.
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
|
rrwen/google_streetview
|
google_streetview/helpers.py
|
Python
|
mit
| 2,418
|
from polyphony import testbench
from polyphony import pipelined
def pipe10(xs):
    # In-place pairwise average: each element becomes (xs[i] + xs[i+1]) >> 1
    # (arithmetic shift == floor division by 2, so negatives round toward -inf).
    # ii=-1 lets the Polyphony HLS compiler pick the pipeline initiation interval.
    # NOTE(review): this body is compiled to hardware -- keep statement order.
    for i in pipelined(range(len(xs) - 1), ii=-1):
        v = xs[i] + xs[i + 1]
        v >>= 1
        xs[i] = v
@testbench
def test():
    # Testbench: checks the in-place pairwise averages; the last element is
    # untouched because the loop runs over len(xs) - 1 indices.
    data = [0, 16, 32, -16, -64]
    pipe10(data)
    assert 8 == data[0]
    assert 24 == data[1]
    assert 8 == data[2]
    assert -40 == data[3]
    assert -64 == data[4]
test()
|
ktok07b6/polyphony
|
tests/pipeline/for10.py
|
Python
|
mit
| 406
|
# -*- coding: utf-8 -*-
__author__ = 'hmizumoto'
from flask import Blueprint, request, render_template, abort
from app.utils import jwt_decode
from app.views.auth import check_login, authorized_user, login
from app.models import DOMAIN
from app import app
from app.decoretor import login_required
from bson import ObjectId
import re
from logging import getLogger, StreamHandler, DEBUG
# Module-level logger wired to stderr at DEBUG verbosity.
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
# All view routes below are mounted under the configured application root.
module = Blueprint('view', __name__, url_prefix=app.config["APPLICATION_ROOT"])
@module.route("/")
def index():
"""
ログイン,アカウント作成
"""
user = check_login()
if not user:
return render_template("index.html", prefix=app.config["APPLICATION_ROOT"])
return render_template("home.html", user=user, prefix=app.config["APPLICATION_ROOT"])
@module.route("/drafts/")
@login_required
def drafts(oid=None):
"""
下書き一覧
"""
user = authorized_user()
return render_template("drafts.html", oid=oid, user=user, prefix=app.config["APPLICATION_ROOT"])
@module.route("/drafts/new")
@module.route("/drafts/<oid>/edit")
@login_required
def edit_drafts(oid=None):
"""
下書き作成
"""
user = authorized_user()
model = DOMAIN["items"]
draft = dict()
if oid:
draft = model.get_by_id(oid)
draft['markdown'] = draft['markdown'].replace('\\', '\\\\').replace('\n', '\\n')
return render_template("edit_drafts.html", oid=oid, user=user, draft=draft, prefix=app.config["APPLICATION_ROOT"])
@module.route("/<user_name>/items/<oid>")
@login_required
def item_page(user_name, oid):
"""
記事閲覧
"""
user = authorized_user()
author = DOMAIN['users'].get_by_identify(user_name)
model = DOMAIN['items']
query = {'status': 'published', 'user_id': author['_id']}
item = model.get_by_id(oid, query)
comments = DOMAIN["comments"].get_index({'item_id': oid}, sort=('created', 1))
stocks = DOMAIN["users"].get_index({'stocks': oid})
del author['password']
if item:
return render_template('item.html', user=user, item=item, author=author, comments=comments, stocks=stocks,
prefix=app.config["APPLICATION_ROOT"])
else:
abort(404)
@module.route("/home/<user_name>/")
@module.route("/home/<user_name>/<target>")
@login_required
def user_page(user_name, target=None):
"""
ユーザページ
"""
user = authorized_user()
author = DOMAIN['users'].get_by_identify(user_name, password=False)
if author:
model = DOMAIN['items']
query = {'status': 'published', 'user_id': author['_id']}
item = model.get_index(query, sort=("created", -1))
comments = DOMAIN["comments"].get_index({'user_id': author['_id']})
stock_ids = [ObjectId(x) for x in author['stocks']]
stocks = model.get_index({'_id': {'$in': stock_ids}})
followers = DOMAIN['users'].get_index({'following_users': author['user_name']}, password=False)
return render_template('users.html', user=user, item=item, author=author, stocks=stocks, comments=comments,
followers=followers, target=target, prefix=app.config["APPLICATION_ROOT"])
else:
abort(404)
@module.route("/tags")
@login_required
def tags_index():
"""
タグ一覧
"""
user = authorized_user()
model = DOMAIN['items']
tags = model.get_all_tags()
return render_template('tags_index.html', user=user, tags=tags, prefix=app.config["APPLICATION_ROOT"])
@module.route("/tags/<tag_name>")
@login_required
def tags_page(tag_name):
"""
タグ詳細
"""
user = authorized_user()
model = DOMAIN['items']
items = model.get_index({'tags': tag_name})
follower = DOMAIN['users'].get_index({'following_tags': tag_name})
return render_template('tags.html', user=user, items=items, tag_name=tag_name, follower_count=follower['count'],
prefix=app.config["APPLICATION_ROOT"])
@module.route("/settings")
@login_required
def setting():
"""
設定ページ
"""
user = authorized_user()
return render_template('settings.html', user=user, prefix=app.config["APPLICATION_ROOT"])
@module.route("/search")
@login_required
def search():
"""
検索
"""
user = authorized_user()
model = DOMAIN["items"]
q = request.args.get("query")
terms = q.split()
title = map(lambda x: {"title": re.compile(".*"+x+".*")}, terms)
tags = map(lambda x: {"tags": re.compile(".*"+x+".*")}, terms)
query = {
"$or": [{"$and": list(title)}, {"$and": list(tags)}]
}
result = model.get_index(query)
return render_template("search_result.html", user=user, items=result,
query=q, prefix=app.config["APPLICATION_ROOT"])
@module.route("/session/activate")
def activate_page():
"""
アカウントアクティベーション
"""
token = request.args.get("token")
if token:
model = DOMAIN["users"]
data = jwt_decode(token)
user = model.get_by_id(data["_id"])
if user["password"] == data["password"] and user["user_email"] == data["user_email"]:
# activate user account
model.patch(user["_id"], {"status": "active"})
# login
login(user["user_email"], user["password"])
return render_template("session.html", message="アカウントを認証しました。", user=user, prefix=app.config["APPLICATION_ROOT"])
else:
return render_template("session.html", message="不正なトークンです。", user=None, prefix=app.config["APPLICATION_ROOT"])
@module.route("/session/account_created")
def created():
return render_template("session.html", message="アカウントを作成しました。<br>メールに届いたURLをクリックし、<br>アカウントを認証してください。",
user=None, prefix=app.config["APPLICATION_ROOT"])
|
motomizuki/Qlone
|
app/views/view.py
|
Python
|
mit
| 6,101
|
import re
import time
from astropy import units as u
from astropy.coordinates import SkyCoord
from panoptes.utils.time import current_time
from panoptes.utils import error as error
from panoptes.pocs.mount.serial import AbstractSerialMount
class Mount(AbstractSerialMount):
    """
    Mount class for iOptron mounts. Overrides the base `initialize` method
    and provides some helper methods to convert coordinates.
    """
    def __init__(self, *args, **kwargs):
        super(Mount, self).__init__(*args, **kwargs)
        self.logger.info('Creating iOptron mount')
        # Regexp to match the iOptron RA/Dec format
        self._ra_format = r'(?P<ra_millisecond>\d{8})'
        self._dec_format = r'(?P<dec_sign>[\+\-])(?P<dec_arcsec>\d{8})'
        self._coords_format = re.compile(self._dec_format + self._ra_format)
        self._raw_status = None
        # The mount reports status as a fixed-width digit string; one field each.
        self._status_format = re.compile(
            '(?P<gps>[0-2]{1})' +
            '(?P<state>[0-7]{1})' +
            '(?P<tracking>[0-4]{1})' +
            '(?P<movement_speed>[1-9]{1})' +
            '(?P<time_source>[1-3]{1})' +
            '(?P<hemisphere>[01]{1})'
        )
        # Human-readable meanings for each status digit parsed above.
        self._status_lookup = {
            'gps': {
                '0': 'Off',
                '1': 'On',
                '2': 'Data Extracted'
            },
            'state': {
                '0': 'Stopped - Not at Zero Position',
                '1': 'Tracking (PEC disabled)',
                '2': 'Slewing',
                '3': 'Guiding',
                '4': 'Meridian Flipping',
                '5': 'Tracking (PEC enabled)',
                '6': 'Parked',
                '7': 'Stopped - Zero Position'
            },
            'tracking': {
                '0': 'Sidereal',
                '1': 'Lunar',
                '2': 'Solar',
                '3': 'King',
                '4': 'Custom'
            },
            'movement_speed': {
                '1': '1x sidereal',
                '2': '2x sidereal',
                '3': '8x sidereal',
                '4': '16x sidereal',
                '5': '64x sidereal',
                '6': '128x sidereal',
                '7': '256x sidereal',
                '8': '512x sidereal',
                '9': 'Max sidereal',
            },
            'time_source': {
                '1': 'RS-232',
                '2': 'Hand Controller',
                '3': 'GPS'
            },
            'hemisphere': {
                '0': 'Southern',
                '1': 'Northern'
            }
        }
        self.logger.info('Mount created')
################################################################################################
# Properties
################################################################################################
    @property
    def is_home(self):
        """ bool: Mount home status (derived from the 'state' status field). """
        self._is_home = 'Stopped - Zero Position' in self.status.get('state', '')
        return self._is_home
    @property
    def is_tracking(self):
        """ bool: Mount tracking status (matches both PEC-on and PEC-off states). """
        self._is_tracking = 'Tracking' in self.status.get('state', '')
        return self._is_tracking
    @property
    def is_slewing(self):
        """ bool: Mount slewing status. """
        self._is_slewing = 'Slewing' in self.status.get('state', '')
        return self._is_slewing
################################################################################################
# Public Methods
################################################################################################
    def initialize(self, set_rates=True, unpark=False, *arg, **kwargs):
        """ Initialize the connection with the mount and setup for location.
        iOptron mounts are initialized by sending the following two commands
        to the mount:
        * Version
        * MountInfo
        If the mount is successfully initialized, the `_setup_location_for_mount` method
        is also called.
        Returns:
            bool: Returns the value from `self.is_initialized`.
        """
        if not self.is_connected:
            self.logger.info(f'Connecting to mount {__name__}')
            self.connect()
        if self.is_connected and not self.is_initialized:
            self.logger.info(f'Initializing {__name__} mount')
            # We trick the mount into thinking it's initialized while we
            # initialize otherwise the `query` method will test
            # to see if initialized and be put into loop.
            self._is_initialized = True
            actual_version = self.query('version')
            actual_mount_info = self.query('mount_info')
            expected_version = self.commands.get('version').get('response')
            expected_mount_info = self.commands.get('mount_info').get('response')
            self._is_initialized = False
            # Test our init procedure for iOptron
            if actual_version != expected_version or actual_mount_info != expected_mount_info:
                self.logger.debug(f'{actual_version} != {expected_version}')
                self.logger.debug(f'{actual_mount_info} != {expected_mount_info}')
                raise error.MountNotFound('Problem initializing mount')
            else:
                self._is_initialized = True
                self._setup_location_for_mount()
                if set_rates:
                    self._set_initial_rates()
        self.logger.info(f'Mount initialized: {self.is_initialized}')
        return self.is_initialized
    def park(self,
             ra_direction='west',
             ra_seconds=11.,
             dec_direction='south',
             dec_seconds=15.,
             *args, **kwargs):
        """Slews to the park position and parks the mount.
        This will first move the mount to the home position, then move the RA axis
        in the direction specified at 0.9x sidereal rate (the fastest) for the number
        of seconds requested. Then move the Dec axis in a similar manner. This should
        be adjusted for the particular parking position desired.
        Note:
            When mount is parked no movement commands will be accepted.
        Args:
            ra_direction (str, optional): The direction to move the RA axis from
                the home position. Defaults to 'west' for northern hemisphere.
            ra_seconds (float, optional): The number of seconds at fastest move
                speed to move the RA axis from the home position.
            dec_direction (str, optional): The direction to move the Dec axis
                from the home position. Defaults to 'south' for northern hemisphere.
            dec_seconds (float, optional): The number of seconds at the fastest
                move speed to move the Dec axis from the home position.
        Returns:
            bool: indicating success
        """
        if self.is_parked:
            self.logger.info('Mount is parked')
            return self._is_parked
        if self.slew_to_home(blocking=True):
            # The mount is currently not parking in correct position so we manually move it there.
            self.query('set_button_moving_rate', 9)
            self.move_direction(direction=ra_direction, seconds=ra_seconds)
            # Busy-wait (3 s polls) until each axis finishes its move.
            while self.is_slewing:
                self.logger.debug('Slewing RA axis to park position...')
                time.sleep(3)
            self.move_direction(direction=dec_direction, seconds=dec_seconds)
            while self.is_slewing:
                self.logger.debug('Slewing Dec axis to park position...')
                time.sleep(3)
            self._is_parked = True
        self.logger.debug(f'Mount parked: {self.is_parked}')
        return self._is_parked
################################################################################################
# Private Methods
################################################################################################
    def _set_initial_rates(self):
        # Make sure we start at sidereal
        self.set_tracking_rate()
        self.logger.debug('Setting manual moving rate to max')
        self.query('set_button_moving_rate', 9)
        self.logger.debug(f'Mount guide rate: {self.query("get_guide_rate")}')
        # '9090' = 0.90x sidereal guide rate on both axes (two 2-digit fields).
        self.query('set_guide_rate', '9090')
        guide_rate = self.query('get_guide_rate')
        self.ra_guide_rate = int(guide_rate[0:2]) / 100
        self.dec_guide_rate = int(guide_rate[2:]) / 100
        self.logger.debug(f'Mount guide rate: {self.ra_guide_rate} {self.dec_guide_rate}')
    def _setup_location_for_mount(self):
        """
        Sets the mount up to the current location. Mount must be initialized first.
        This uses mount.location (an astropy.coords.EarthLocation) to set
        most of the params and the rest is read from a config file. Users
        should not call this directly.
        Includes:
        * Latitude set_long
        * Longitude set_lat
        * Daylight Savings disable_daylight_savings
        * Universal Time Offset set_gmt_offset
        * Current Date set_local_date
        * Current Time set_local_time
        """
        assert self.is_initialized, self.logger.warning('Mount has not been initialized')
        assert self.location is not None, self.logger.warning(
            'Please set a location before attempting setup')
        self.logger.info('Setting up mount for location')
        # Location
        # Adjust the lat/long for format expected by iOptron: signed arcseconds.
        lat = '{:+07.0f}'.format(self.location.lat.to(u.arcsecond).value)
        lon = '{:+07.0f}'.format(self.location.lon.to(u.arcsecond).value)
        self.query('set_long', lon)
        self.query('set_lat', lat)
        # Time
        self.query('disable_daylight_savings')
        gmt_offset = self.get_config('location.gmt_offset', default=0)
        self.query('set_gmt_offset', gmt_offset)
        # NOTE(review): gmt_offset is applied in minutes here -- confirm the
        # config stores minutes, not hours.
        now = current_time() + gmt_offset * u.minute
        self.query('set_local_time', now.datetime.strftime("%H%M%S"))
        self.query('set_local_date', now.datetime.strftime("%y%m%d"))
    def _mount_coord_to_skycoord(self, mount_coords):
        """
        Converts between iOptron RA/Dec format and a SkyCoord
        Args:
            mount_coords (str): Coordinates as returned by mount
        Returns:
            astropy.SkyCoord: Mount coordinates as astropy SkyCoord with
                EarthLocation included.
        """
        coords_match = self._coords_format.fullmatch(mount_coords)
        coords = None
        if coords_match is not None:
            # NOTE(review): re.Match.group() returns a *str*; multiplying a str
            # by an astropy unit looks wrong -- likely needs int(...) first.
            # Confirm against a live mount / unit tests.
            ra = (coords_match.group('ra_millisecond') * u.millisecond).to(u.hour)
            dec = (coords_match.group('dec_arcsec') * u.centiarcsecond).to(u.arcsec)
            dec_sign = coords_match.group('dec_sign')
            if dec_sign == '-':
                dec = dec * -1
            coords = SkyCoord(ra=ra, dec=dec, frame='icrs', unit=(u.hour, u.arcsecond))
        else:
            self.logger.warning('Cannot create SkyCoord from mount coordinates')
        return coords
    def _skycoord_to_mount_coord(self, coords):
        """
        Converts between SkyCoord and a iOptron RA/Dec format.
        `
        TTTTTTTT(T) 0.01 arc-seconds
        XXXXX(XXX) milliseconds
        Command: “:SrXXXXXXXX#”
        Defines the commanded right ascension, RA. Slew, calibrate and
        park commands operate on the most recently defined right ascension.
        Command: “:SdsTTTTTTTT#”
        Defines the commanded declination, Dec. Slew, calibrate and
        park commands operate on the most recently defined declination.
        `
        @param  coords  astropy.coordinates.SkyCoord
        @retval         A tuple of RA/Dec coordinates
        """
        # RA in milliseconds, zero-padded to 8 digits.
        ra_ms = (coords.ra.hour * u.hour).to(u.millisecond)
        mount_ra = f'{ra_ms.value:08.0f}'
        self.logger.debug(f'RA (ms): {ra_ms}')
        # Dec in centiarcseconds, sign-prefixed and zero-padded.
        dec_dms = (coords.dec.degree * u.degree).to(u.centiarcsecond)
        self.logger.debug(f'Dec (centiarcsec): {dec_dms}')
        mount_dec = f'{dec_dms.value:=+08.0f}'
        mount_coords = (mount_ra, mount_dec)
        return mount_coords
    @property
    def _set_zero_position(self):
        """ Sets the current position as the zero position.
        The iOptron allows you to set the current position directly, so
        we simply call the iOptron command.
        """
        # NOTE(review): this is a @property with a side effect -- merely
        # *reading* the attribute sends a command to the mount. Confirm this
        # is intentional before converting to a regular method.
        self.logger.info('Setting zero position')
        return self.query('set_zero_position')
|
panoptes/POCS
|
src/panoptes/pocs/mount/ioptron/ieq30pro.py
|
Python
|
mit
| 12,613
|
class Solution(object):
    def generate(self, numRows):
        """Return the first *numRows* rows of Pascal's triangle.

        :type numRows: int
        :rtype: List[List[int]]
        """
        rows = []
        for depth in range(numRows):
            # Every row starts as all ones; interior cells are then the sum
            # of the two cells above them in the previous row.
            row = [1] * (depth + 1)
            for col in range(1, depth):
                row[col] = rows[depth - 1][col - 1] + rows[depth - 1][col]
            rows.append(row)
        return rows
|
FeiZhan/Algo-Collection
|
answers/other/in sap/Pascal's Triangle.py
|
Python
|
mit
| 470
|
from django import template
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from ..models import TaggedItem, Tag
register = template.Library()
@register.assignment_tag
def get_tagged_items_for(object):
    '''Retrieve the TaggedItem records related to the given object.
    :syntax: {% get_tagged_items_for <object> as <variable> %}
    NOTE(review): `assignment_tag` was deprecated in Django 1.9 and removed
    in 2.0 (use `simple_tag`) -- confirm the supported Django versions.
    '''
    return TaggedItem.objects.get_for_object(object)
@register.assignment_tag
def get_tags_for(object):
    '''Retrieve the Tag records related to the given object.
    :syntax: {% get_tags_for <object> as <variable> %}
    NOTE(review): `assignment_tag` was deprecated in Django 1.9 and removed
    in 2.0 (use `simple_tag`) -- confirm the supported Django versions.
    '''
    return Tag.objects.get_for_object(object)
@register.assignment_tag
def get_content_type_for(object):
    '''Retrieve the ContentType for the given model instance.
    :syntax: {% get_content_type_for <object> as <variable> %}
    NOTE(review): `assignment_tag` was deprecated in Django 1.9 and removed
    in 2.0 (use `simple_tag`) -- confirm the supported Django versions.
    '''
    return ContentType.objects.get_for_model(object)
@register.simple_tag
def render_generic_tagging_head_tag():
    """Render the <head> includes (assets) for the tagging widget."""
    return render_to_string('generic_tagging/head.html')
@register.simple_tag
def render_generic_tagging_component_tag_for(object):
    """Render the tagging widget component for the given object."""
    return render_to_string('generic_tagging/component.html', {'object': object})
|
giginet/django-generic-tagging
|
generic_tagging/templatetags/tagging.py
|
Python
|
mit
| 1,210
|
"""WSGI application."""
import os
from sys import argv
from werkzeug.serving import run_simple
from werkzeug.wsgi import DispatcherMiddleware
from tweetTrack.app import app
# Top-level WSGI callable; DispatcherMiddleware with no extra mounts simply wraps `app`.
application = DispatcherMiddleware(app)
if __name__ == '__main__':
    # Select the Flask config via the first CLI argument (default "Dev"),
    # then serve the app locally on port 5000.
    if len(argv) < 2 or argv[1] == 'Dev':
        os.environ['FLASK_CONFIG'] = 'Dev'
        run_simple(
            'localhost',
            5000,
            application,
            __debug__  # NOTE(review): 4th positional arg of run_simple is use_reloader -- confirm this is intended
        )
    else:
        os.environ['FLASK_CONFIG'] = argv[1].title()
        print(os.environ['FLASK_CONFIG'])
        run_simple(
            'localhost',
            5000,
            application,
        )
|
lnhubbell/tweetTrack
|
tweetTrack/wsgi.py
|
Python
|
mit
| 651
|
import unittest
import ast
import sys
import logging
from datetime import datetime
import ast
# Include src in the Python search path.
sys.path.insert(0, '../src')
from ast_extensions import TypeDecASTModule
from check import (check_expr, check_mod, expr_template, call_function)
from parse_file import parse_type_decs
from ptype import PType
from errors import TypeUnspecifiedError, TypeIncorrectlySpecifiedError
from settings import (TEST_CODE_SUBDIR, DEBUG_SUBJECT_FILE, DEBUG_UNTYPED_AST,
DEBUG_TYPED_AST, DEBUG_TYPEDECS)
from logger import Logger, announce_file
from util import log_center
# these should be redundant, but they're necessary to refer to the specific log
# objects.
import ast_extensions
import parse_file
import check
import infer
"""
This is just the core of the unit testing file. generate_tests.py must be run
to fill this file with the several unit tests (each of which tests one source
code file in the test_files directory).
"""
# Register this file with the shared logging machinery, then point every
# module at one common Logger instance so all debug output interleaves.
announce_file("unit_tests_core.py")
log = check.log = parse_file.log = infer.log = Logger()
class PytyTests(unittest.TestCase):
    def _check_expr(self, s, expr_kind, typ, expected):
        """Typechecks the string C{s} as an C{expr_kind} expression against
        type C{typ}; C{expected} is "pass", "fail", or an error class name."""
        a = ast.parse(s).body[0].value
        f = expr_template % expr_kind
        if expected == "pass" or expected == "fail":
            t = PType.from_str(typ)
            if expected == "pass":
                self.assertEqual(True, call_function(f, a, t, {}),
                                 "%s should typecheck as %s but does not." % (s,t))
            elif expected == "fail":
                self.assertEqual(False, call_function(f, a, t, {}),
                                 "%s shouldn't typecheck as %s but does." % (s, t))
        elif issubclass(eval(expected), Exception):
            # if the expected value is an error, then make sure it
            # raises the right error.
            try:
                t = PType.from_str(typ)
                call_function(f, a, t, {})
            except eval(expected):
                pass
            else:
                self.fail("Should have raised error %s, but does not. (%s)."
                          % (expected, s))
        else:
            # NOTE(review): TestFileFormatError is not imported in this module's
            # visible imports -- reaching this branch would raise NameError;
            # confirm the import.
            raise TestFileFormatError("Expression tests can only be" + \
                " specified as passing, failing, or raising an error " + \
                " specified in errors.py, but this test was specified " + \
                " as expecting: " + expected)
    def _parse_and_check_mod(self, filename):
        """Parse C{filename}, attach its type declarations, and typecheck it
        as a module, emitting debug dumps along the way."""
        with open(filename, 'r') as f:
            text = f.read()
        debug_file = TEST_CODE_SUBDIR + DEBUG_SUBJECT_FILE
        # Only the designated debug subject file gets verbose logging.
        if filename == debug_file:
            log.enter_debug_file()
        else:
            log.exit_debug_file()
        log.debug("--- v File : " + filename + " v ---\n" + text + "--- ^ File text ^ ---")
        untyped_ast = ast.parse(text)
        log.debug((log_center("v Untyped AST v") + str(untyped_ast) +
                   log_center("^ Untyped AST ^")), DEBUG_UNTYPED_AST)
        typedecs = parse_type_decs(filename)
        log.debug((log_center("v TypeDecs v") + str(typedecs) +
                   log_center("^ TypeDecs ^")), DEBUG_TYPEDECS)
        typed_ast = TypeDecASTModule(untyped_ast, typedecs)
        log.debug((log_center("v TypedAST v") + str(typed_ast) +
                   log_center("^ TypedAST ^")), DEBUG_TYPED_AST)
        return check_mod(typed_ast.tree)
    def _check_mod(self, filename):
        """Typechecks the contents of file C{filename} as a
        module. The file will contain a header of the form '### Pass'
        to indicate whether the module is expected to pass or fail
        typechecking or throw a specified error.
        """
        with open(filename, 'r') as f:
            expected = f.readline().strip('###').strip()
            text = f.read()
        if expected == "pass":
            # the third parameter is a message displayed if assertion fails.
            self.assertEqual(True, self._parse_and_check_mod(filename),
                             "Should typecheck, but does not:\n%s" % text)
        elif expected == "fail":
            # the third parameter is a message displayed if assertion fails.
            self.assertEqual(False, self._parse_and_check_mod(filename),
                             "Shouldn't typecheck, but does:\n%s" % text)
        else:
            # in generate_tests.py, we should have already ensured that the
            # expected string is "pass", "fail", or a valid error name, so we
            # should be able to parse the error name at this point, and if not
            # then we have other issues.
            try:
                err = eval(expected)
            except NameError:
                # at this point, expected better be a valid error name.
                assert(False)
            # at this point, the error better actually be a subclass of
            # Exception, since generate.py tests will already throw errors if
            # improper errors are specified.
            assert(issubclass(err, Exception))
            try:
                result = self._parse_and_check_mod(filename)
                self.fail("Should raise error %s, but instead returned %s:\n%s"
                          % (expected, result, text.strip('\n')))
            except err:
                pass
            except AssertionError as e:
                self.fail(e)
            except Exception as e:
                self.fail("Should have raised %s, but instead raised %s (%s):\n%s" %
                          (expected, e.__class__.__name__, e, text.strip('\n')))
##### Generated unit tests will go below here
##### Generated unit tests will go above here
if __name__ == '__main__':
    # Run the generated unit tests spliced in above by generate_tests.py.
    unittest.main()
|
jruberg/Pyty
|
test/unit_tests_core.py
|
Python
|
mit
| 5,812
|
'''Brainfuck interpreter'''
VERSION = '0.1.2.1103'
def __static_vars():
    '''Decorator factory: attach a shared, mutable static attribute to a function.'''
    def decorate(func):
        '''Give *func* an empty ``stdin_buffer`` list and return it unchanged.'''
        func.stdin_buffer = []
        return func
    return decorate
@__static_vars()
def __getchar() -> int:
    '''Return one ASCII code point from stdin, buffering a full line at a time.'''
    buffer_len = len(__getchar.stdin_buffer)
    if buffer_len == 0:
        __getchar.stdin_buffer = list(input().encode('ascii'))
        __getchar.stdin_buffer.append(10)  # We need this enter to compact getchar from libc.
    # Pop the first buffered byte; the rest waits for the next call.
    ret_c, __getchar.stdin_buffer = __getchar.stdin_buffer[0], __getchar.stdin_buffer[1:]
    return ret_c
def __pre_execute(raw_code: str) -> list:
    '''Annotate each instruction with its matching-bracket index.

    Returns a list of [instruction, partner] pairs where partner is the
    index of the matching bracket (or '' for non-bracket instructions).
    An unclosed '[' yields an empty list; a stray ']' raises IndexError.
    '''
    code = [[c, ''] for c in raw_code]
    open_brackets = []
    for idx, cell in enumerate(code):
        if cell[0] == '[':
            open_brackets.append(idx)
        elif cell[0] == ']':
            partner = open_brackets.pop()
            code[partner][1] = idx
            cell[1] = partner
    if open_brackets:
        code = []
    return code
def __execute(code: list, stack_size: int) -> list:
    '''Run preprocessed bf code (from __pre_execute) and return the cell array.'''
    iptr = 0
    sptr = 0
    stack = list(0 for _ in range(stack_size))
    code_len = len(code)
    while iptr < code_len:
        instruction = code[iptr][0]
        if instruction == '>':
            sptr += 1
        elif instruction == '<':
            sptr -= 1
        elif instruction == '+':
            # Cells are unsigned bytes: wrap 255 -> 0.
            stack[sptr] += 1
            if stack[sptr] == 256:
                stack[sptr] = 0
        elif instruction == '-':
            # Wrap 0 -> 255.
            stack[sptr] -= 1
            if stack[sptr] == -1:
                stack[sptr] = 255
        elif instruction == '.':
            print(chr(stack[sptr]), end='')
        elif instruction == ',':
            stack[sptr] = __getchar()
        elif instruction == '[' and stack[sptr] == 0:
            # Jump forward to the matching ']' (precomputed partner index).
            iptr = code[iptr][1]
        elif instruction == ']' and stack[sptr] != 0:
            # Jump back to the matching '['.
            iptr = code[iptr][1]
        iptr += 1
    # Clean the buffer, otherwise it will affect next round result.
    __getchar.stdin_buffer = []
    return stack
def run(raw_code: str = '', stack_size: int = 128) -> list:
    '''Interpreter the raw_code.
    Input:
        - raw_code: the string of brainfuck code.
          if this is empty, program will wait for user input.
        - stack_size: the size of stack, default is 128Bytes.
    Return value:
        - The whole stack.
    Note: if the code has an unclosed '[', __pre_execute returns an empty
    program and the untouched initial stack is returned.
    '''
    if raw_code == '':
        raw_code = input('% ')
    code = __pre_execute(raw_code)
    return __execute(code, stack_size)
|
Bestoa/py-brainfuck
|
nbfi/__init__.py
|
Python
|
mit
| 2,726
|
"""Useful utilities for handling CWL inputs and outputs.
This is shared functionality abstracted across multiple approaches, currently
mostly handling CWL records. This needs some generalization to apply across
non-variant calling workflows.
"""
import collections
import pprint
import toolz as tz
def _get_all_cwlkeys(items):
    """Retrieve cwlkeys from inputs, handling defaults which can be null.
    When inputs are null in some and present in others, this creates unequal
    keys in each sample, confusing decision making about which are primary and extras.
    """
    # Always include the validation-related keys so key sets stay comparable
    # even when those inputs are null for some samples.
    keys = {"metadata__batch", "config__algorithm__validate",
            "config__algorithm__validate_regions", "validate__summary",
            "validate__tp", "validate__fp", "validate__fn"}
    for data in items:
        keys.update(data["cwl_keys"])
    return keys
def split_data_cwl_items(items):
    """Split a set of CWL output dictionaries into data samples and CWL items.
    Handles cases where we're arrayed on multiple things, like a set of regional
    VCF calls and data objects.
    """
    # Records with the *smallest* key set are treated as the extra CWL records;
    # this only kicks in when the inputs have unequal key counts.
    key_lens = set([])
    for data in items:
        key_lens.add(len(_get_all_cwlkeys([data])))
    extra_key_len = min(list(key_lens)) if len(key_lens) > 1 else None
    data_out = []
    extra_out = []
    for data in items:
        if extra_key_len and len(_get_all_cwlkeys([data])) == extra_key_len:
            extra_out.append(data)
        else:
            data_out.append(data)
    if len(extra_out) == 0:
        return data_out, {}
    else:
        # All extra records must agree on their CWL key list.
        cwl_keys = extra_out[0]["cwl_keys"]
        for extra in extra_out[1:]:
            cur_cwl_keys = extra["cwl_keys"]
            assert cur_cwl_keys == cwl_keys, pprint.pformat(extra_out)
        # Collect each CWL key's values across every input record ...
        cwl_extras = collections.defaultdict(list)
        for data in items:
            for key in cwl_keys:
                cwl_extras[key].append(data[key])
        # ... and strip those keys from the primary data records.
        data_final = []
        for data in data_out:
            for key in cwl_keys:
                data.pop(key)
            data_final.append(data)
        return data_final, dict(cwl_extras)
def samples_to_records(samples):
    """Convert samples into output CWL records.

    Ensures every sample carries the full union of CWL keys (filling missing
    ones with None) and normalizes selected keys to lists.
    """
    from bcbio.pipeline import run_info
    RECORD_CONVERT_TO_LIST = set(["config__algorithm__tools_on", "config__algorithm__tools_off",
                                  "reference__genome_context"])
    all_keys = _get_all_cwlkeys(samples)
    out = []
    for data in samples:
        for raw_key in sorted(list(all_keys)):
            # Keys use '__' as a nesting separator into the data dict.
            key = raw_key.split("__")
            if tz.get_in(key, data) is None:
                data = tz.update_in(data, key, lambda x: None)
                data["cwl_keys"].append(raw_key)
            if raw_key in RECORD_CONVERT_TO_LIST:
                val = tz.get_in(key, data)
                if not val: val = []
                elif not isinstance(val, (list, tuple)): val = [val]
                # update_in applies the lambda immediately, so capturing `val`
                # from the loop here is safe (no late-binding issue).
                data = tz.update_in(data, key, lambda x: val)
        data["metadata"] = run_info.add_metadata_defaults(data.get("metadata", {}))
        out.append(data)
    return out
|
brainstorm/bcbio-nextgen
|
bcbio/cwl/cwlutils.py
|
Python
|
mit
| 3,196
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, os
PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the package version straight from the package's __init__.py.
with open(os.path.join(PACKAGE_PATH, 'pythonvideoannotator','__init__.py'), 'r') as fd:
    content = fd.read()
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
# The long description lives in the repository-root README, two levels up.
with open(os.path.join(PACKAGE_PATH, '..','..','README.md'), 'r') as fd:
    long_description = fd.read()
# REQUIREMENTS BEGIN
REQUIREMENTS = [
    "geometry_designer==0.4.38",
    "modular-computer-vision-api-gui==0.3.31",
    "pyforms-gui==4.904.152",
    "modular-computer-vision-api==0.3.29",
    "python-video-annotator-models-gui==0.7.63",
    "python-video-annotator-models==0.8.82",
    "python-video-annotator-module-timeline==0.6.26",
    "python-video-annotator-module-eventstats==0.5.15",
    "python-video-annotator-module-virtual-object-generator==0.6.26",
    "python-video-annotator-module-deeplab==0.902.21",
    "python-video-annotator-module-contours-images==0.5.28",
    "python-video-annotator-module-tracking==0.6.38",
    "python-video-annotator-module-smooth-paths==0.5.19",
    "python-video-annotator-module-distances==0.5.18",
    "python-video-annotator-module-path-map==0.6.16",
    "python-video-annotator-module-motion-counter==0.5.26",
    "python-video-annotator-module-create-paths==0.5.15",
    "python-video-annotator-module-regions-filter==0.5.18",
    "python-video-annotator-module-import-export==0.5.23",
    "python-video-annotator-module-background-finder==0.5.21",
    "python-video-annotator-module-find-orientation==0.5.18",
    "python-video-annotator-module-path-editor==0.5.28"
]
# REQUIREMENTS END
setup(
    name='Python video annotator',
    version=version,
    description="""""",
    # Fix: setuptools expects `author` as a string, not a list.
    author='Ricardo Ribeiro',
    author_email='ricardojvr@gmail.com',
    url='https://bitbucket.org/fchampalimaud/pythonvideoannotator-models',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    packages=find_packages(),
    install_requires=[
        'simplejson',
        'pypi-xmlrpc',
        'send2trash',
        'scipy',
        # Fix: the 'sklearn' PyPI name is a deprecated alias of scikit-learn
        # and installation of it now fails; the canonical name must be used.
        'scikit-learn',
        'confapp',
    ] + REQUIREMENTS,
    entry_points={
        'console_scripts': [
            'start-video-annotator=pythonvideoannotator.__main__:start',
        ],
    },
    package_data={'pythonvideoannotator': [
        'resources/icons/*.png',
        'resources/themes/default/*.css',
        ]
    },
)
|
UmSenhorQualquer/pythonVideoAnnotator
|
base/pythonvideoannotator/setup.py
|
Python
|
mit
| 2,475
|
"""Suite WorldWideWeb suite, as defined in Spyglass spec.:
Level 1, version 1
Generated from /Volumes/Sap/Applications (Mac OS 9)/Netscape Communicator\xe2\x84\xa2 Folder/Netscape Communicator\xe2\x84\xa2
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'WWW!'
class WorldWideWeb_suite_Events:
    """AppleEvent senders for Netscape's 'WWW!' (Spyglass WorldWideWeb) suite.

    Each method packs its keyword arguments via ``aetools.keysubst`` using the
    matching ``_argmap_*`` table (Python name -> 4-char AppleEvent keyword),
    sends the event with ``self.send`` (provided by the aetools mixin), and
    returns the '----' (direct object) of the reply when present.

    NOTE: legacy Python 2 code (``raise E, v`` and ``dict.has_key``); kept
    verbatim for the Mac OS 9-era toolchain it targets.
    """

    # Maps Python keyword-argument names to AppleEvent parameter codes.
    _argmap_OpenURL = {
        'to' : 'INTO',
        'toWindow' : 'WIND',
        'flags' : 'FLGS',
        'post_data' : 'POST',
        'post_type' : 'MIME',
        'progressApp' : 'PROG',
    }

    def OpenURL(self, _object, _attributes={}, **_arguments):
        """OpenURL: Opens a URL. Allows for more options than GetURL event
        Required argument: URL
        Keyword argument to: file destination
        Keyword argument toWindow: window iD
        Keyword argument flags: Binary: any combination of 1, 2 and 4 is allowed: 1 and 2 mean force reload the document. 4 is ignored
        Keyword argument post_data: Form posting data
        Keyword argument post_type: MIME type of the posting data. Defaults to application/x-www-form-urlencoded
        Keyword argument progressApp: Application that will display progress
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: ID of the loading window
        """
        _code = 'WWW!'
        _subcode = 'OURL'

        aetools.keysubst(_arguments, self._argmap_OpenURL)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_ShowFile = {
        'MIME_type' : 'MIME',
        'Window_ID' : 'WIND',
        'URL' : 'URL ',
    }

    def ShowFile(self, _object, _attributes={}, **_arguments):
        """ShowFile: Similar to OpenDocuments, except that it specifies the parent URL, and MIME type of the file
        Required argument: File to open
        Keyword argument MIME_type: MIME type
        Keyword argument Window_ID: Window to open the file in
        Keyword argument URL: Use this as a base URL
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: Window ID of the loaded window. 0 means ShowFile failed, FFFFFFF means that data was not appropriate type to display in the browser.
        """
        _code = 'WWW!'
        _subcode = 'SHWF'

        aetools.keysubst(_arguments, self._argmap_ShowFile)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_cancel_progress = {
        'in_window' : 'WIND',
    }

    def cancel_progress(self, _object=None, _attributes={}, **_arguments):
        """cancel progress: Interrupts the download of the document in the given window
        Required argument: progress ID, obtained from the progress app
        Keyword argument in_window: window ID of the progress to cancel
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'WWW!'
        _subcode = 'CNCL'

        aetools.keysubst(_arguments, self._argmap_cancel_progress)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def find_URL(self, _object, _attributes={}, **_arguments):
        """find URL: If the file was downloaded by Netscape, you can call FindURL to find out the URL used to download the file.
        Required argument: File spec
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: The URL
        """
        _code = 'WWW!'
        _subcode = 'FURL'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def get_window_info(self, _object=None, _attributes={}, **_arguments):
        """get window info: Returns the information about the window as a list. Currently the list contains the window title and the URL. You can get the same information using standard Apple Event GetProperty.
        Required argument: window ID
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: undocumented, typecode 'list'
        """
        _code = 'WWW!'
        _subcode = 'WNFO'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def list_windows(self, _no_object=None, _attributes={}, **_arguments):
        """list windows: Lists the IDs of all the hypertext windows
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: List of unique IDs of all the hypertext windows
        """
        _code = 'WWW!'
        _subcode = 'LSTW'

        if _arguments: raise TypeError, 'No optional args expected'
        if _no_object != None: raise TypeError, 'No direct arg expected'

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_parse_anchor = {
        'relative_to' : 'RELA',
    }

    def parse_anchor(self, _object, _attributes={}, **_arguments):
        """parse anchor: Resolves the relative URL
        Required argument: Main URL
        Keyword argument relative_to: Relative URL
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: Parsed URL
        """
        _code = 'WWW!'
        _subcode = 'PRSA'

        aetools.keysubst(_arguments, self._argmap_parse_anchor)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def register_URL_echo(self, _object=None, _attributes={}, **_arguments):
        """register URL echo: Registers the \xd2echo\xd3 application. Each download from now on will be echoed to this application.
        Required argument: Application signature
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'WWW!'
        _subcode = 'RGUE'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_register_protocol = {
        'for_protocol' : 'PROT',
    }

    def register_protocol(self, _object=None, _attributes={}, **_arguments):
        """register protocol: Registers application as a \xd2handler\xd3 for this protocol with a given prefix. The handler will receive \xd2OpenURL\xd3, or if that fails, \xd2GetURL\xd3 event.
        Required argument: Application sig
        Keyword argument for_protocol: protocol prefix: \xd2finger:\xd3, \xd2file\xd3,
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: TRUE if registration has been successful
        """
        _code = 'WWW!'
        _subcode = 'RGPR'

        aetools.keysubst(_arguments, self._argmap_register_protocol)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_register_viewer = {
        'MIME_type' : 'MIME',
        'with_file_type' : 'FTYP',
    }

    def register_viewer(self, _object, _attributes={}, **_arguments):
        """register viewer: Registers an application as a \xd4special\xd5 viewer for this MIME type. The application will be launched with ViewDoc events
        Required argument: Application sig
        Keyword argument MIME_type: MIME type viewer is registering for
        Keyword argument with_file_type: Mac file type for the downloaded files
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: TRUE if registration has been successful
        """
        _code = 'WWW!'
        _subcode = 'RGVW'

        aetools.keysubst(_arguments, self._argmap_register_viewer)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_register_window_close = {
        'for_window' : 'WIND',
    }

    def register_window_close(self, _object=None, _attributes={}, **_arguments):
        """register window close: Netscape will notify registered application when this window closes
        Required argument: Application signature
        Keyword argument for_window: window ID
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: true if successful
        """
        _code = 'WWW!'
        _subcode = 'RGWC'

        aetools.keysubst(_arguments, self._argmap_register_window_close)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def unregister_URL_echo(self, _object, _attributes={}, **_arguments):
        """unregister URL echo: cancels URL echo
        Required argument: application signature
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'WWW!'
        _subcode = 'UNRU'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_unregister_protocol = {
        'for_protocol' : 'PROT',
    }

    def unregister_protocol(self, _object=None, _attributes={}, **_arguments):
        """unregister protocol: reverses the effects of \xd2register protocol\xd3
        Required argument: Application sig.
        Keyword argument for_protocol: protocol prefix. If none, unregister for all protocols
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: TRUE if successful
        """
        _code = 'WWW!'
        _subcode = 'UNRP'

        aetools.keysubst(_arguments, self._argmap_unregister_protocol)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_unregister_viewer = {
        'MIME_type' : 'MIME',
    }

    def unregister_viewer(self, _object, _attributes={}, **_arguments):
        """unregister viewer: Revert to the old way of handling this MIME type
        Required argument: Application sig
        Keyword argument MIME_type: MIME type to be unregistered
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: TRUE if the event was successful
        """
        _code = 'WWW!'
        _subcode = 'UNRV'

        aetools.keysubst(_arguments, self._argmap_unregister_viewer)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    _argmap_unregister_window_close = {
        'for_window' : 'WIND',
    }

    def unregister_window_close(self, _object=None, _attributes={}, **_arguments):
        """unregister window close: Undo for register window close
        Required argument: Application signature
        Keyword argument for_window: window ID
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: true if successful
        """
        _code = 'WWW!'
        _subcode = 'UNRC'

        aetools.keysubst(_arguments, self._argmap_unregister_window_close)
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']

    def webActivate(self, _object=None, _attributes={}, **_arguments):
        """webActivate: Makes Netscape the frontmost application, and selects a given window. This event is here for suite completeness/ cross-platform compatibility only, you should use standard AppleEvents instead.
        Required argument: window to bring to front
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'WWW!'
        _subcode = 'ACTV'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
#
# Indices of types declared in this module
#
# This suite declares no classes, properties, comparisons or enumerations of
# its own, so the aetools lookup tables below are intentionally empty.
_classdeclarations = {
}

_propdeclarations = {
}

_compdeclarations = {
}

_enumdeclarations = {
}
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/lib-scriptpackages/Netscape/WorldWideWeb_suite.py
|
Python
|
mit
| 16,104
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-13 09:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the model to the singular form (auto-generated by Django)."""

    dependencies = [
        ('questionnaires', '0001_initial'),
    ]

    operations = [
        # Pluralized model name was a typo in 0001_initial; rename keeps the
        # underlying table data intact.
        migrations.RenameModel(
            old_name='AttentionRelatedCognitiveErrors',
            new_name='AttentionRelatedCognitiveError',
        ),
    ]
|
warrenatmindset/DjangoFlowApp
|
questionnaires/migrations/0002_auto_20171113_0933.py
|
Python
|
mit
| 440
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a ``score`` float column to IngredientFlavorCompound."""

    dependencies = [
        ('chef_buddy', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='ingredientflavorcompound',
            name='score',
            # default=1.0 back-fills existing rows; preserve_default=False
            # removes the default from the schema after the migration runs.
            field=models.FloatField(default=1.0),
            preserve_default=False,
        ),
    ]
|
chef-buddy/chef-buddy-django
|
chef_buddy/migrations/0002_ingredientflavorcompound_score.py
|
Python
|
mit
| 444
|
from office365.runtime.client_object_collection import ClientObjectCollection
from office365.sharepoint.content_type import ContentType
class ContentTypeCollection(ClientObjectCollection):
    """Content Type resource collection"""

    def __init__(self, context, resource_path=None):
        # Bind the collection to the ContentType item type; ``context`` is the
        # client runtime context, ``resource_path`` the optional REST path.
        super(ContentTypeCollection, self).__init__(context, ContentType, resource_path)
|
vgrem/SharePointOnline-REST-Python-Client
|
office365/sharepoint/content_type_collection.py
|
Python
|
mit
| 376
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Production overrides applied on top of pelicanconf (imported above).
SITEURL = 'http://rcarneva.github.io'
RELATIVE_URLS = False          # absolute URLs for the published site

# Feeds are only generated for the production build.
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'

DELETE_OUTPUT_DIRECTORY = True  # clean output/ before each publish

# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
rcarneva/rcarneva.github.io
|
publishconf.py
|
Python
|
mit
| 533
|
import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will result in feature size growing at faster than a linear rate
"""
AGGREGATORS = [
to_aggregator("max"),
# to_aggregator("min"),
# to_aggregator("median"),
# to_aggregator("mode"),
# to_aggregator("mean"),
# to_aggregator("sum"),
]
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = True
CONVERT_TO_CATEGORICAL = True
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
lambda x: len(set(x)), # number of unique
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function
converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=lambda x, *args: x, # identity function
B=lambda x, *args: x, # identity function
C=lambda x, *args: LabelBinarizer().fit_transform(x),
)
CATEGORICAL_CONVERTERS = dict(
N=lambda x, *args: Discretizer().fit_transform(x).flatten(),
B=lambda x, *args: x, # identity function
C=lambda x, *args: x, # identity function
)
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converts can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = True
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
Ridge(),
LinearRegression(),
DecisionTreeRegressor(random_state=0),
RandomForestRegressor(random_state=0),
GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
LogisticRegression(random_state=0),
DecisionTreeClassifier(random_state=0),
RandomForestClassifier(random_state=0),
GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsClassifier(),
GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
|
diogo149/CauseEffectPairsPaper
|
configs/max_aggregate_only.py
|
Python
|
mit
| 7,294
|
import os
import logging
from urllib.parse import urlencode, unquote
from flask import request, current_app
from flask_api import FlaskAPI
from flask_api.exceptions import APIException, NotFound
from . import services
from . import stores
from . import routes
log = logging.getLogger('api')
class TemplateNotFound(NotFound):
    # 404 raised when a requested meme template does not exist.
    detail = "Template not found."
class InvalidMaskedCode(NotFound):
    # 404 raised when a masked (obfuscated) URL cannot be decoded to an image.
    detail = "Masked URL does not match any image."
class FilenameTooLong(APIException):
    # HTTP 414 (URI Too Long) mapped onto over-long meme filenames.
    status_code = 414
    detail = "Filename too long."
def create_app(config):
    """Build and wire up the Flask-API application from a config object."""
    application = FlaskAPI(__name__)
    application.config.from_object(config)

    # Order matters: logging first so service/blueprint setup is observable.
    configure_logging(application)
    register_services(application)
    register_blueprints(application)

    return application
def configure_logging(app):
    """Initialize root logging; verbosity follows the app's DEBUG flag."""
    level = logging.DEBUG if app.config['DEBUG'] else logging.INFO
    logging.basicConfig(level=level, format="%(levelname)s: %(message)s")

    # These third-party libraries are noisy below WARNING.
    for noisy in ('yorm', 'requests'):
        logging.getLogger(noisy).setLevel(logging.WARNING)
def register_services(app):
    """Construct the stores and services and attach them to ``app``.

    Also installs an after-request hook that logs each request (method,
    path with query string, status code) when the app is in debug mode.
    """
    # Shared exception bundle handed to every service so they raise the
    # HTTP-mapped errors defined at module level.
    exceptions = services.Exceptions(
        TemplateNotFound=TemplateNotFound,
        InvalidMaskedCode=InvalidMaskedCode,
        FilenameTooLong=FilenameTooLong,
    )

    # File-backed stores rooted under <ROOT>/data/.
    templates_root = os.path.join(app.config['ROOT'], 'data', 'templates')
    template_store = stores.template.TemplateStore(templates_root)
    images_root = os.path.join(app.config['ROOT'], 'data', 'images')
    image_store = stores.image.ImageStore(images_root)

    app.link_service = services.link.LinkService(
        exceptions=exceptions,
        template_store=template_store,
    )
    app.template_service = services.template.TemplateService(
        exceptions=exceptions,
        template_store=template_store,
    )
    app.image_service = services.image.ImageService(
        exceptions=exceptions,
        template_store=template_store,
        image_store=image_store,
        debug=app.config['DEBUG']
    )

    def log_request(response):
        # Debug-only request log; query string is urldecoded for readability.
        if current_app.debug:
            path = request.path
            if request.args:
                path += "?%s" % unquote(urlencode(request.args))
            log.info("%s: %s - %i", request.method, path,
                     response.status_code)
        return response

    app.after_request(log_request)
def register_blueprints(app):
    """Register every route module's blueprint on ``app`` (order preserved)."""
    route_modules = (
        routes.static,
        routes.root,
        routes.templates,
        routes.links,
        routes.image,
        routes.overview,
        routes.generator,
        routes.latest,
        routes.aliases,
    )
    for module in route_modules:
        app.register_blueprint(module.blueprint)
|
joshfriend/memegen
|
memegen/app.py
|
Python
|
mit
| 2,841
|
import scipy.sparse as ss
import warnings
warnings.simplefilter('ignore', ss.SparseEfficiencyWarning)
from sparray import FlatSparray
class Operations(object):
    """Read-only benchmarks comparing FlatSparray with scipy's csr_matrix.

    NOTE(review): `params`/`param_names`/`setup`/`time_*` follow the
    airspeed-velocity (asv) benchmark convention — confirm against the
    project's benchmark runner.
    """
    params = [['FlatSparray', 'csr_matrix']]
    param_names = ['arr_type']

    def setup(self, arr_type):
        # 3000x4000 random sparse matrix at 10% density, built once per param.
        mat = ss.rand(3000, 4000, density=0.1, format='csr')
        if arr_type == 'FlatSparray':
            self.arr = FlatSparray.from_spmatrix(mat)
        else:
            self.arr = mat

    def time_scalar_multiplication(self, arr_type):
        self.arr * 3

    def time_sum(self, arr_type):
        self.arr.sum()

    def time_getitem_scalar(self, arr_type):
        self.arr[154, 145]

    def time_getitem_subarray(self, arr_type):
        self.arr[:5, :5]

    def time_getitem_row(self, arr_type):
        self.arr[876]

    def time_getitem_col(self, arr_type):
        self.arr[:,273]

    def time_diagonal(self, arr_type):
        self.arr.diagonal()
class ImpureOperations(object):
    """Benchmarks for operations that mutate the array in place."""
    params = [['FlatSparray', 'csr_matrix']]
    param_names = ['arr_type']
    number = 1  # make sure we re-run setup() before each timing

    def setup(self, arr_type):
        mat = ss.rand(3000, 4000, density=0.1, format='csr')
        if arr_type == 'FlatSparray':
            self.arr = FlatSparray.from_spmatrix(mat)
        else:
            self.arr = mat

    def time_setdiag(self, arr_type):
        # Mutates self.arr; number=1 above keeps timings independent.
        self.arr.setdiag(99)
|
perimosocordiae/sparray
|
bench/benchmarks/ops.py
|
Python
|
mit
| 1,302
|
# -*- coding: utf-8 -*-
""" Methods for estimating structural breaks in time series regressions
TODO: extract and move Chow test from "commission test" over to here
"""
from __future__ import division
from collections import namedtuple
import logging
import numpy as np
import pandas as pd
from scipy.optimize import brentq
from scipy import stats
from scipy.stats import norm
import xarray as xr
from ._core import pandas_like, StructuralBreakResult
from ..accel import try_jit
from ..regression._recresid import _recresid
logger = logging.getLogger(__name__)
# Shorthand for the standard normal CDF, used by the Brownian-motion p-value.
pnorm = norm.cdf

# OLS-CUSUM

# dict: CUSUM OLS critical values
# Asymptotic critical values of sup|W| for the OLS-CUSUM test, keyed by
# significance level (Ploberger & Kraemer, 1992).
CUSUM_OLS_CRIT = {
    0.01: 1.63,
    0.05: 1.36,
    0.10: 1.22
}
@try_jit(nopython=True, nogil=True)
def _cusum(resid, ddof):
    """Cumulative sum of residuals scaled by sigma_hat * sqrt(n).

    ``sigma_hat ** 2`` is the residual sum of squares over ``n - ddof``.
    Kept numba-nopython compatible.
    """
    n = resid.size
    # scale = sigma_hat * sqrt(n) = sqrt(RSS / (n - ddof) * n)
    scale = (np.sum(resid ** 2) / (n - ddof) * n) ** 0.5
    return np.cumsum(resid) / scale
@try_jit(nopython=True, nogil=True)
def _cusum_OLS(X, y):
    """Fit OLS and return (CUSUM process, sup|process| score, argmax index)."""
    n, p = X.shape
    beta = np.linalg.lstsq(X, y)[0]
    # NOTE(review): resid is fitted-minus-observed (X @ beta - y), the
    # negative of the usual convention; this only flips the sign of the
    # process — the |.| based score/index are unaffected. Confirm if the
    # signed process is consumed downstream.
    resid = np.dot(X, beta) - y

    process = _cusum(resid, p)
    _process = np.abs(process)
    idx = _process.argmax()
    score = _process[idx]

    return process, score, idx
def cusum_OLS(X, y, alpha=0.05):
    ur""" OLS-CUSUM test for structural breaks

    Tested against R's ``strucchange`` package and is faster than
    the equivalent function in the ``statsmodels`` Python package when
    Numba is installed.

    The OLS-CUSUM test statistic, based on a single OLS regression, is defined
    as:

    .. math::
        W_n^0(t) = \frac{1}{\hat{\sigma}\sqrt{n}}
                   \sum_{i=1}^{n}{\hat{\mu_i}}

    Args:
        X (array like): 2D (n_obs x n_features) design matrix
        y (array like): 1D (n_obs) indepdent variable
        alpha (float): Test threshold (either 0.01, 0.05, or 0.10) from
            Ploberger and Krämer (1992)

    Returns:
        StructuralBreakResult: A named tuple include the the test name,
            change point (index of ``y``), the test ``score`` and ``pvalue``,
            and a boolean testing if the CUSUM score is
            significant at the given ``alpha``

    """
    # Unwrap pandas/xarray containers to plain ndarrays for the jitted core.
    _X = X.values if isinstance(X, pandas_like) else X
    _y = y.values.ravel() if isinstance(y, pandas_like) else y.ravel()

    process, score, idx = _cusum_OLS(_X, _y)

    # Re-wrap results with the original index so the break location is
    # reported in the caller's coordinates (e.g. timestamps).
    if isinstance(y, pandas_like):
        if isinstance(y, (pd.Series, pd.DataFrame)):
            index = y.index
            idx = index[idx]
        elif isinstance(y, xr.DataArray):
            index = y.to_series().index
            idx = index[idx]
        process = pd.Series(data=process, index=index, name='OLS-CUSUM')

    # crit = stats.kstwobign.isf(alpha) ~70usec -- table lookup is cheaper
    crit = CUSUM_OLS_CRIT[alpha]
    pval = stats.kstwobign.sf(score)

    return StructuralBreakResult(method='OLS-CUSUM',
                                 index=idx,
                                 score=score,
                                 process=process,
                                 boundary=crit,
                                 pvalue=pval,
                                 signif=score > crit)
# REC-CUSUM
def _brownian_motion_pvalue(x, k):
""" Return pvalue for some given test statistic """
# TODO: Make generic, add "type='Brownian Motion'"?
if x < 0.3:
p = 1 - 0.1464 * x
else:
p = 2 * (1 -
pnorm(3 * x) +
np.exp(-4 * x ** 2) * (pnorm(x) + pnorm(5 * x) - 1) -
np.exp(-16 * x ** 2) * (1 - pnorm(x)))
return 1 - (1 - p) ** k
def _cusum_rec_test_crit(alpha):
    """ Return critical test statistic value for some alpha """
    # Invert the Brownian-motion p-value numerically: find x in (0, 20)
    # where pvalue(x) == alpha.
    return brentq(lambda _x: _brownian_motion_pvalue(_x, 1) - alpha, 0, 20)
@try_jit(nopython=True, nogil=True)
def _cusum_rec_boundary(x, alpha=0.05):
    """ Equivalent to ``strucchange::boundary.efp``` for Rec-CUSUM """
    # NOTE(review): this calls _cusum_rec_test_crit (pure Python + brentq),
    # which cannot compile under nopython mode — presumably try_jit falls
    # back to the plain Python function; confirm in yatsm.accel.
    n = x.ravel().size
    bound = _cusum_rec_test_crit(alpha)
    # Linear boundary growing from `bound` at t=0 to 3*bound at t=1.
    boundary = (bound + (2 * bound * np.arange(0, n) / (n - 1)))

    return boundary
@try_jit()
def _cusum_rec_efp(X, y):
    """ Equivalent to ``strucchange::efp`` for Rec-CUSUM """
    # Run "efp"
    n, k = X.shape
    # Recursive residuals, dropping the first k (undefined while the
    # recursive fit warms up).
    w = _recresid(X, y, k)[k:]
    sigma = w.var(ddof=1) ** 0.5  # can't jit because of ddof
    # Prepend 0 so the process starts at the origin.
    w = np.concatenate((np.array([0]), w))
    return np.cumsum(w) / (sigma * (n - k) ** 0.5)
@try_jit(nopython=True, nogil=True)
def _cusum_rec_sctest(x):
    """ Equivalent to ``strucchange::sctest`` for Rec-CUSUM """
    # Drop the leading 0, rescale each point by 1/(1 + 2t) for t in (0, 1],
    # and take the supremum of the absolute rescaled process.
    x = x[1:]
    j = np.linspace(0, 1, x.size + 1)[1:]
    x = x * 1 / (1 + 2 * j)
    stat = np.abs(x).max()

    return stat
def cusum_recursive(X, y, alpha=0.05):
ur""" Rec-CUSUM test for structural breaks
Tested against R's ``strucchange`` package.
The REC-CUSUM test, based on the recursive residuals, is defined as:
.. math::
W_n(t) = \frac{1}{\tilde{\sigma}\sqrt{n}}
\sum_{i=k+1}^{k+(n-k)}{\tilde{\mu_i}}
Critical values for this test statistic are taken from::
A. Zeileis. p values and alternative boundaries for CUSUM tests.
Working Paper 78, SFB "Adaptive Information Systems and Modelling
in Economics and Management Science", December 2000b.
Args:
X (array like): 2D (n_obs x n_features) design matrix
y (array like): 1D (n_obs) indepdent variable
alpha (float): Test threshold
Returns:
StructuralBreakResult: A named tuple include the the test name,
change point (index of ``y``), the test ``score`` and ``pvalue``,
and a boolean testing if the CUSUM score is
significant at the given ``alpha``
"""
_X = X.values if isinstance(X, pandas_like) else X
_y = y.values.ravel() if isinstance(y, pandas_like) else y.ravel()
process = _cusum_rec_efp(_X, _y)
stat = _cusum_rec_sctest(process)
stat_pvalue = _brownian_motion_pvalue(stat, 1)
pvalue_crit = _cusum_rec_test_crit(alpha)
if stat_pvalue < alpha:
boundary = _cusum_rec_boundary(process, alpha)
idx = np.where(np.abs(process) > boundary)[0].min()
else:
idx = np.abs(process).max()
if isinstance(y, pandas_like):
if isinstance(y, (pd.Series, pd.DataFrame)):
index = y.index
idx = index[idx]
elif isinstance(y, xr.DataArray):
index = y.to_series().index
idx = index[idx]
process = pd.Series(data=process, index=index, name='REC-CUSUM')
boundary = pd.Series(data=boundary, index=index, name='Boundary')
return StructuralBreakResult(method='REC-CUSUM',
process=process,
boundary=boundary,
index=idx,
pvalue=stat_pvalue,
score=stat,
signif=stat_pvalue < pvalue_crit)
|
c11/yatsm
|
yatsm/structural_break/_cusum.py
|
Python
|
mit
| 6,887
|
from __future__ import unicode_literals, print_function
import os
import sys
import subprocess
import json
from ape import feaquencer
from ape import tasks
from .exceptions import ContainerError, ContainerNotFound, ProductNotFound
class Config(object):
    """Process-wide ape configuration, read from the environment at import time."""
    # Root directory containing all containers; must be exported by the shell.
    APE_ROOT = os.environ['APE_ROOT_DIR']
    # Header printed before shell snippets the user is expected to source.
    SOURCE_HEADER = '#please execute the following in your shell:\n'


# Singleton configuration instance, accessed as ``tasks.conf`` below.
introduce_conf = Config()
@tasks.register_helper
def get_container_dir(container_name):
    """Return the absolute path of the container named *container_name*."""
    return '{0}/{1}'.format(tasks.conf.APE_ROOT, container_name)
@tasks.register_helper
def get_product_dir(container_name, product_name):
    """Return the absolute path of *product_name* inside *container_name*."""
    return '{0}/products/{1}'.format(tasks.get_container_dir(container_name), product_name)
@tasks.register_helper
def get_containers():
    """Return the names of all APE_ROOT entries that contain a ``products`` dir."""
    return [
        entry for entry in os.listdir(tasks.conf.APE_ROOT)
        if os.path.isdir(tasks.get_container_dir(entry) + '/products')
    ]
@tasks.register_helper
def get_products(container_name):
    """Return the product names inside *container_name*.

    Entries starting with ``.`` or ``_`` are not products and are skipped;
    a missing products directory yields an empty list.
    """
    products_dir = tasks.get_container_dir(container_name) + '/products'
    if not os.path.isdir(products_dir):
        return []
    return [
        entry for entry in os.listdir(products_dir)
        if not entry.startswith('.') and not entry.startswith('_')
    ]
@tasks.register
def info():
    """
    List information about this productive environment
    :return:
    """
    env = os.environ.get
    print()
    print('root directory :', tasks.conf.APE_ROOT)
    print()
    print('active container :', env('CONTAINER_NAME', ''))
    print()
    print('active product :', env('PRODUCT_NAME', ''))
    print()
    print('ape feature selection :', tasks.FEATURE_SELECTION)
    print()
    print('containers and products:')
    print('-' * 30)
    print()
    # list every container followed by its indented products
    for container in tasks.get_containers():
        print(container)
        for product in tasks.get_products(container):
            print('    ' + product)
        print()
@tasks.register
def cd(doi):
    """
    Print a shell ``cd`` command for the directory of interest (doi).
    A doi can be:
    herbert - the container named "herbert"
    sdox:dev - product "dev" located in container "sdox"
    :param doi: <container_name>:<product_name> or <product_name>
    :return:
    """
    parts = doi.split(':')
    if len(parts) == 2:
        container_name, product_name = parts
    elif len(parts) == 1 and os.environ.get('CONTAINER_NAME'):
        # A bare name switches products inside the already-active container.
        container_name = os.environ.get('CONTAINER_NAME')
        product_name = parts[0]
    else:
        print('unable to parse context - format: <container_name>:<product_name>')
        sys.exit(1)
    if container_name not in tasks.get_containers():
        print('No such container')
        return
    if not product_name:
        # no product part: cd to the container itself
        print(tasks.conf.SOURCE_HEADER)
        print('cd ' + tasks.get_container_dir(container_name))
    elif product_name not in tasks.get_products(container_name):
        print('No such product')
    else:
        print(tasks.conf.SOURCE_HEADER)
        print('cd ' + tasks.get_product_dir(container_name, product_name))
# Shell snippet emitted by ``switch``; the user's shell sources it to update
# CONTAINER_NAME / PRODUCT_NAME and refresh the ape environment.
SWITCH_TEMPLATE = '''{source_header}
export CONTAINER_NAME={container_name}
export PRODUCT_NAME={product_name}
update_ape_env
'''
@tasks.register
def switch(poi):
    """
    Switches the active context to the product of interest (poi).
    A poi is:
    sdox:dev - for product "dev" located in container "sdox"
    Without a ":" the argument is treated as a product name inside the
    currently active container, so switching products is just ``ape zap prod``.
    After the switch, additional commands relevant to that product may be available.
    :param poi: product of interest, string: <container_name>:<product_name> or <product_name>.
    """
    parts = poi.split(':')
    if len(parts) == 2:
        container_name, product_name = parts
    elif len(parts) == 1 and os.environ.get('CONTAINER_NAME'):
        # bare product name: stay in the currently active container
        container_name = os.environ.get('CONTAINER_NAME')
        product_name = parts[0]
    else:
        print('unable to find poi: ', poi)
        sys.exit(1)
    if container_name not in tasks.get_containers():
        raise ContainerNotFound('No such container %s' % container_name)
    if product_name not in tasks.get_products(container_name):
        raise ProductNotFound('No such product %s' % product_name)
    # emit the shell snippet that actually performs the environment switch
    print(SWITCH_TEMPLATE.format(
        source_header=tasks.conf.SOURCE_HEADER,
        container_name=container_name,
        product_name=product_name
    ))
@tasks.register
def teleport(poi):
    """
    Switch the environment to *poi* and print the matching cd command in one go.
    :param poi: product of interest, <container_name>:<product_name>
    :return:
    """
    # Run the two existing tasks in order: environment switch first, then cd.
    for task in (tasks.switch, tasks.cd):
        task(poi)
@tasks.register
def zap(poi):
    '''alias for "teleport"'''
    # Thin alias kept for backwards compatibility with existing workflows.
    tasks.teleport(poi)
@tasks.register
def install_container(container_name):
    """
    Installs the container specified by container_name
    :param container_name: string, name of the container
    """
    container_dir = os.path.join(os.environ['APE_ROOT_DIR'], container_name)
    # guard clauses: fail fast if the container or its install script is missing
    if not os.path.exists(container_dir):
        raise ContainerNotFound('ERROR: container directory not found: %s' % container_dir)
    os.environ['CONTAINER_DIR'] = container_dir
    install_script = os.path.join(container_dir, 'install.py')
    if not os.path.exists(install_script):
        raise ContainerError('ERROR: this container does not provide an install.py!')
    print('... running install.py for %s' % container_name)
    subprocess.check_call(['python', install_script])
@tasks.register_helper
def get_extra_pypath(container_name=None):
    """Return the extra python path entries provided by ape's install tools.

    ``container_name`` is accepted for API compatibility but currently unused.
    """
    from ape.installtools import pypath
    return pypath.get_extra_pypath()
@tasks.register_helper
def get_poi_tuple(poi=None):
    """
    Resolve *poi* ("<container_name>:<product_name>") -- or, when omitted, the
    current environment -- into a (container_dir, product_name) pair.
    :param poi: optional; <container_name>:<product_name>
    :return: tuple of the container directory and the product name
    """
    if not poi:
        # no explicit poi: fall back to the environment set by ``switch``
        return os.environ.get('CONTAINER_DIR'), os.environ.get('PRODUCT_NAME')
    parts = poi.split(':')
    if len(parts) != 2:
        print('Please check your arguments: --poi <container>:<product>')
        sys.exit(1)
    container_name, product_name = parts
    if container_name not in tasks.get_containers():
        print('No such container')
        sys.exit(1)
    if product_name not in tasks.get_products(container_name):
        print('No such product')
        sys.exit(1)
    return tasks.get_container_dir(container_name), product_name
@tasks.register
def validate_product_equation(poi=None):
    """
    Validates the product equation.
    * Validates the feature order
    * Validates the product spec (mandatory functional features)
    Exits with status 1 if any validation fails.
    :param poi: optional product of interest
    """
    from . import utils
    from . import validators
    container_dir, product_name = tasks.get_poi_tuple(poi=poi)
    feature_list = utils.get_features_from_equation(container_dir, product_name)
    ordering_constraints = utils.get_feature_order_constraints(container_dir)
    spec_path = utils.get_feature_ide_paths(container_dir, product_name).product_spec_path
    print('*** Starting product.equation validation')
    # --------------------------------------------------------
    # Validate the feature order
    print('\tChecking feature order')
    feature_order_validator = validators.FeatureOrderValidator(feature_list, ordering_constraints)
    feature_order_validator.check_order()
    if feature_order_validator.has_errors():
        print('\t\txxx ERROR in your product.equation feature order xxx')
        for error in feature_order_validator.get_violations():
            print('\t\t\t', error[1])
    else:
        print('\t\tOK')
    # --------------------------------------------------------
    # Validate the functional product specification
    print('\tChecking functional product spec')
    if not os.path.exists(spec_path):
        print(
            '\t\tSkipped - No product spec exists.\n'
            '\t\tYou may create a product spec if you want to ensure that\n'
            '\t\trequired functional features are represented in the product equation\n'
            '\t\t=> Create spec file featuremodel/productline/<container>/product_spec.json'
        )
        # BUGFIX: previously this early return skipped the exit-code check below,
        # so feature-order violations did not fail the task when no spec existed.
        if feature_order_validator.has_errors():
            sys.exit(1)
        return
    spec_validator = validators.ProductSpecValidator(spec_path, product_name, feature_list)
    if not spec_validator.is_valid():
        if spec_validator.get_errors_mandatory():
            print('\t\tERROR: The following feature are missing', spec_validator.get_errors_mandatory())
        if spec_validator.get_errors_never():
            print('\t\tERROR: The following feature are not allowed', spec_validator.get_errors_never())
    else:
        print('\t\tOK')
    # fail the task if either validation step reported problems
    if feature_order_validator.has_errors() or spec_validator.has_errors():
        sys.exit(1)
@tasks.register_helper
def get_ordered_feature_list(info_object, feature_list):
    """
    Orders the passed feature list by the given, json-formatted feature
    dependency file using feaquencer's topsort algorithm.
    Blank lines and lines starting with '_' or '#' are dropped.
    :param feature_list: iterable of raw feature-equation lines
    :param info_object: object exposing ``feature_order_json`` (path to the dependency file)
    :return: list of ordered feature names, each terminated by a newline
    """
    # BUGFIX: use a context manager so the dependency file handle is closed
    # deterministically (json.load(open(...)) leaked it).
    with open(info_object.feature_order_json) as dependency_file:
        feature_dependencies = json.load(dependency_file)
    cleaned = [feature.strip().replace('\n', '') for feature in feature_list]
    # keep only real feature names: non-empty and not private/comment lines
    feature_selection = [
        feature for feature in cleaned
        if feature and not feature.startswith('_') and not feature.startswith('#')
    ]
    return [feature + '\n' for feature in feaquencer.get_total_order(feature_selection, feature_dependencies)]
@tasks.register
def config_to_equation(poi=None):
    """
    Generates a product.equation file for the given product name.
    It generates it from the <product_name>.config file in the products folder.
    For that you need to have your project imported to featureIDE and set the correct settings.
    :param poi: optional product of interest
    """
    from . import utils
    container_dir, product_name = tasks.get_poi_tuple(poi=poi)
    info_object = utils.get_feature_ide_paths(container_dir, product_name)
    feature_list = []
    try:
        print('*** Processing ', info_object.config_file_path)
        with open(info_object.config_file_path, 'r') as config_file:
            for line in config_file:
                # FeatureIDE cannot use '.' in sub-feature paths, so '__' is used
                # there (e.g. django_productline__features__development) and is
                # translated back here. Only translate real sub-feature paths.
                # (The original had a no-op ``line = line`` branch for the other case.)
                if len(line.split('__')) > 2:
                    line = line.replace('__', '.')
                if line.startswith('abstract_'):
                    # Skip abstract features: featureIDE cannot express abstract
                    # sub trees / leafs, so they must not reach the equation.
                    # (The original appended '' here, which the ordering helper
                    # filtered out again -- skipping is equivalent and clearer.)
                    continue
                feature_list.append(line)
    except IOError:
        print('{} does not exist. Make sure your config file exists.'.format(info_object.config_file_path))
    feature_list = tasks.get_ordered_feature_list(info_object, feature_list)
    try:
        with open(info_object.equation_file_path, 'w') as eq_file:
            eq_file.writelines(feature_list)
        print('*** Successfully generated product.equation')
    except IOError:
        print('product.equation file not found. Please make sure you have a valid product.equation in your chosen product')
    # finally performing the validation of the product equation
    tasks.validate_product_equation()
|
henzk/ape
|
ape/container_mode/tasks.py
|
Python
|
mit
| 12,347
|
#!/usr/bin/env python2.7
'''
RSD: The reciprocal smallest distance algorithm.
Wall, D.P., Fraser, H.B. and Hirsh, A.E. (2003) Detecting putative orthologs, Bioinformatics, 19, 1710-1711.
Original author: Dennis P. Wall, Department of Biological Sciences, Stanford University.
Contributors: I-Hsien Wu, Computational Biology Initiative, Harvard Medical School
Maintainer: Todd F. DeLuca, Center for Biomedical Informatics, Harvard Medical School
This program is written to run on linux. It has not been tested on Windows.
To run this program you need to have installed on your system:
Python 2.7
NCBI BLAST 2.2.24
paml 4.4
Kalign 2.04 (recommended) or clustalw 2.0.9 (deprecated)
See README for full details.
'''
# python package version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '1.1.7'
import cStringIO
import glob
import logging
import os
import re
import shutil
import subprocess
import time
import fasta
import nested
import util
# Marker placed in exceptions raised when codeml/paml produces no output.
PAML_ERROR_MSG = 'paml_error'
# Direction flags for blast comparisons (query->subject vs subject->query).
FORWARD_DIRECTION = 0
REVERSE_DIRECTION = 1
# Splits an aligned sequence into (leading dashes, core, trailing dashes).
DASHLEN_RE = re.compile('^(-*)(.*?)(-*)$')
# Maximum number of blast hits considered per query sequence.
MAX_HITS = 3
# Data files shipped next to this module; both are required by codeml.
MATRIX_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'jones.dat')
CODEML_CONTROL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'codeml.ctl')
# Constants used when aligning seqs with clustalw. Kalign does not need these.
USE_CLUSTALW = util.getBoolFromEnv('RSD_USE_CLUSTALW', False)
CLUSTAL_INPUT_FILENAME = 'clustal_fasta.faa'
CLUSTAL_ALIGNMENT_FILENAME = 'clustal_fasta.aln'
#################
# BLAST FUNCTIONS
#################
#
# Used to compute blast hits between two genomes, parse the results, and save the best hits to a file
#
def formatForBlast(fastaPath):
    '''
    fastaPath: location of a protein fasta file.
    Builds BLAST-formatted index files next to fastaPath via makeblastdb.
    '''
    command = ['makeblastdb', '-in', fastaPath, '-dbtype', 'prot', '-parse_seqids']
    # send makeblastdb's stdout to /dev/null to keep the command quiet
    with open(os.devnull, 'w') as devnull:
        subprocess.check_call(command, stdout=devnull)
def getHitId(hit):
    '''Return the subject sequence id stored in a (hitId, hitEvalue) hit tuple.'''
    hit_id = hit[0]
    return hit_id
def getHitEvalue(hit):
    '''
    Return the evalue (a float) stored in a (hitId, hitEvalue) hit tuple.
    '''
    evalue = hit[1]
    return evalue
def loadBlastHits(path):
    '''
    path: location of stored blast hits computed by computeBlastHits()
    returns: mapping object from query id to hits. used to be a bsddb, now is a dict.
    '''
    # util.loadObject deserializes the hits map persisted by computeBlastHits().
    return util.loadObject(path)
def getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
    '''
    queryFastaPath: location of fasta file of query sequences
    subjectIndexPath: location and name of blast-formatted indexes.
    evalue: a string or float representing the maximum evalue threshold of hits to get.
    limitHits: keep at most this many hits per query sequence (falsy disables the limit).
    workingDir: creates, uses, and removes a directory under workingDir.
    copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
    can improve performance if the working directory is on local disk and the files are on a slow network.
    blasts every sequence in query against subject, adding hits that are better than evalue to a list stored in a dict keyed on the query id.
    returns: dict mapping query seq id to a list of (hit seq id, evalue) tuples.
    '''
    # work in a nested tmp dir to avoid junking up the working dir.
    with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
        if copyToWorking:
            localFastaPath = os.path.join(tmpDir, 'query.fa')
            shutil.copyfile(queryFastaPath, localFastaPath)
            localIndexDir = os.path.join(tmpDir, 'local_blast')
            # 0o770 is the py2.6+/py3-compatible spelling of the octal mode
            os.makedirs(localIndexDir, 0o770)
            localIndexPath = os.path.join(localIndexDir, os.path.basename(subjectIndexPath))
            for path in glob.glob(subjectIndexPath + '*'):
                # BUGFIX: the original tested the function object ``os.path.isfile``
                # (always truthy) instead of calling it on ``path``.
                if os.path.isfile(path):
                    shutil.copy(path, localIndexDir)
            queryFastaPath = localFastaPath
            subjectIndexPath = localIndexPath
        blastResultsPath = os.path.join(tmpDir, 'blast_results')
        # blast query vs subject with NCBI blastp, tabular output (-outfmt 6)
        cmd = ['blastp', '-outfmt', '6', '-evalue', str(evalue),
               '-query', queryFastaPath, '-db', subjectIndexPath,
               '-out', blastResultsPath]
        subprocess.check_call(cmd)
        # parse results
        hitsMap = parseResults(blastResultsPath, limitHits)
    return hitsMap
def computeBlastHits(queryFastaPath, subjectIndexPath, outPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
    '''
    queryFastaPath: location of fasta file of query sequences
    subjectIndexPath: location and name of blast-formatted indexes.
    evalue: a string or float representing the maximum evalue threshold of hits to get.
    outPath: location of file where blast hits are saved.
    workingDir: creates, uses, and removes a directory under workingDir.
    copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
    can improve performance if the working directory is on local disk and the files are on a slow network.
    Runs getBlastHits() and persists the hits to outPath.
    '''
    hitsMap = getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits, workingDir, copyToWorking)
    # serialize the hits map; loadBlastHits() is the matching reader.
    util.dumpObject(hitsMap, outPath)
def parseResults(blastResultsPath, limitHits=MAX_HITS):
    '''
    blastResultsPath: tabular (-outfmt 6) blast output file.
    limitHits: keep at most this many distinct hits per query (falsy = unlimited).
    returns: a map from query seq id to a list of tuples of (subject seq id, evalue) for the top hits of the query sequence in the subject genome
    '''
    # parse tabular results into hits. thank you, ncbi, for creating results this easy to parse.
    hitsMap = {}
    hitsCountMap = {}
    prevSeqId = None
    prevHitId = None
    with open(blastResultsPath) as fh:
        for line in fh:
            splits = line.split()
            try:
                seqId = fasta.idFromName(splits[0])  # remove namespace prefix, e.g. 'gi|'
                hitId = fasta.idFromName(splits[1])
                hitEvalue = float(splits[10])
            except Exception:
                logging.exception('parseResults(): prevSeqId: {}, prevHitId: {}, line: {}'.format(prevSeqId, prevHitId, line))
                # BUGFIX: skip unparseable lines. The original fell through and
                # then used stale (or, on the first line, unbound) values of
                # seqId/hitId/hitEvalue.
                continue
            # results table reports multiple "alignments" per "hit" in ascending order by evalue
            # we only store the top hits.
            if prevSeqId != seqId or prevHitId != hitId:
                prevSeqId = seqId
                prevHitId = hitId
                if seqId not in hitsCountMap:
                    hitsCountMap[seqId] = 0
                    hitsMap[seqId] = []
                if not limitHits or hitsCountMap[seqId] < limitHits:
                    hitsCountMap[seqId] += 1
                    hitsMap[seqId].append((hitId, hitEvalue))
    return hitsMap
###############
# RSD FUNCTIONS
###############
def pamlGetDistance(path):
    '''
    path: directory where codeml wrote its 2AA.t output file.
    Reads and deletes path/2AA.t and returns the pairwise distance as a float.
    Raises an Exception tagged with PAML_ERROR_MSG if the file is empty.
    '''
    filename = '%s/2AA.t' % path
    # adding a pause on the off-chance that the filesystem might be lagging a bit, causing the open() to fail below.
    # I think it is more likely that codeml in runPaml_all() is failing before writing the file.
    if not os.path.isfile(filename):
        time.sleep(0.5)
    with open(filename) as rst:
        get_rst = rst.readlines()
    os.unlink(filename)
    if not get_rst:
        raise Exception(PAML_ERROR_MSG, path)
    # Rebuild a token string from every line after the header: lines with at
    # most one token contribute that token plus a space; others contribute
    # their first two tokens. The distance is the third token overall.
    # (The original accumulated into a variable named ``str``, shadowing the
    # builtin, via quadratic string concatenation.)
    pieces = []
    for line in get_rst[1:]:
        fields = line.split()
        if len(fields) > 1:
            pieces.append('%s %s' % (fields[0], fields[1]))
        else:
            pieces.append('%s ' % line.split('\n')[0])
    dist = float(''.join(pieces).split()[2])
    return dist
def alignFastaKalign(input):
    '''
    input: string containing fasta formatted sequences to be aligned.
    runs alignment program kalign
    Returns: fasta-formatted aligned sequences
    '''
    # NOTE: ``input`` shadows the builtin of the same name; kept for API compatibility.
    alignedFasta = util.run(['kalign', '-f', 'fasta'], input) # '-f fasta' requests fasta-formatted output
    return alignedFasta.replace('\n\n', '\n') # replace fixes a bug in Kalign version 2.04, where if a seq is exactly 60 chars long, an extra newline is output.
def alignFastaClustalw(input, path):
    '''
    input: string containing fasta formatted sequences to be aligned.
    path: working directory where fasta will be written and clustal will write output files.
    runs alignment program clustalw
    Returns: fasta-formatted aligned sequences
    '''
    clustalFastaPath = os.path.join(path, CLUSTAL_INPUT_FILENAME)
    clustalAlignmentPath = os.path.join(path, CLUSTAL_ALIGNMENT_FILENAME)
    # write the input to disk because clustalw only reads from files
    util.writeToFile(input, clustalFastaPath)
    try:
        cmd = ['clustalw', '-output', 'fasta', '-infile', clustalFastaPath, '-outfile', clustalAlignmentPath]
        # silence clustalw's stdout/stderr chatter
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
    except Exception:
        # log the offending input before re-raising for the caller
        logging.exception('runClustal Error: clustalFastaPath data = %s'%open(clustalFastaPath).read())
        raise
    alignedFasta = util.readFromFile(clustalAlignmentPath)
    return alignedFasta
def dashlen_check(seq):
    '''
    Objective: calculate the density of gaps in a sequence at 5' and 3' ends -- caused by poor alignment or by diff length seqs
    Arguments: seq: an aligned sequence, possibly padded with '-' gap characters.
    Result: 3-tuple (frontTrim, endTrim, trimDivergence): the number of bases to
    be cut from the 5' and 3' ends (trims shorter than 10 are ignored and
    reported as 0) and the gap divergence of the trimmed sequence.
    '''
    seq = seq.strip()
    # split into leading dashes, core sequence, trailing dashes
    frontDashes, trimmedSeq, endDashes = re.search('^(-*)(.*?)(-*)$', seq).groups()
    if not trimmedSeq:
        # all dashes: nothing sensible to trim or measure.
        # BUGFIX: the original returned a 2-tuple here while the normal path
        # returns a 3-tuple, making callers that unpack three values crash;
        # return a consistent 3-tuple instead.
        return (0, 0, 0.0)
    # ignore trims < 10.
    frontTrim = len(frontDashes)
    if frontTrim < 10:
        frontTrim = 0
    endTrim = len(endDashes)
    if endTrim < 10:
        endTrim = 0
    trimmedSeqDivergence = trimmedSeq.count('-') / float(len(trimmedSeq))
    return (frontTrim, endTrim, trimmedSeqDivergence)
def makeGetSeqForId(genomeFastaPath):
    '''
    genomeFastaPath: location of fasta file. also location/name of blast formatted indexes of the fasta file.
    Returns: a function mapping a sequence id to its sequence.
    '''
    # Load the whole fasta file into a dict up front: in-memory lookup performs
    # much better than on-disk retrieval with xdget or fastacmd, and genome
    # fasta files do not take much space on a modern computer.
    fastaMap = dict(
        (fasta.idFromName(nameline), sequence)
        for nameline, sequence in fasta.readFasta(genomeFastaPath)
    )
    def lookup(seqId):
        return fastaMap[seqId]
    return lookup
def makeGetHitsOnTheFly(genomeIndexPath, evalue, workingDir='.'):
    '''
    genomeIndexPath: location of blast formatted indexes. usually same directory/name as genome fasta path
    evalue: float or string. Hits with evalues >= evalue will not be included in the returned blast hits.
    workingDir: a directory in which to create, use, and delete temporary files and dirs.
    returns: a function that takes a sequence id and sequence and returns the blast hits for that sequence (or None if it has none)
    '''
    def getHitsOnTheFly(seqid, seq):
        # blast the single sequence against the genome in a scratch directory
        with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
            queryFastaPath = os.path.join(tmpDir, 'query.faa')
            # add 'lcl|' to make ncbi blast happy.
            util.writeToFile('{0}\n{1}\n'.format('>lcl|'+seqid, seq), queryFastaPath)
            hitsDb = getBlastHits(queryFastaPath, genomeIndexPath, evalue, workingDir=workingDir)
        return hitsDb.get(seqid)
    return getHitsOnTheFly
def makeGetSavedHits(filename):
    '''
    filename: file containing pre-computed blast results (written by computeBlastHits()).
    returns a function which can be used to get the hits
    from a file containing pre-computed blast results
    '''
    # in memory retrieval is faster than on-disk retrieval with bsddb, but this has a minor impact on overall roundup performance.
    hitsDb = loadBlastHits(filename)
    def getHitsInMemory(seqid, seq):
        # ``seq`` is unused; kept so the signature matches getHitsOnTheFly.
        return hitsDb.get(seqid)
    return getHitsInMemory
def getGoodEvalueHits(seqId, seq, getHitsFunc, getSeqFunc, evalue):
    '''
    seqId, seq: the query sequence id and sequence.
    getHitsFunc: maps (seqId, seq) to a list of blast hits (or None).
    getSeqFunc: maps a hit sequence id to its sequence.
    evalue: a float.
    returns: a list of (hitSeqId, hitSequence, hitEvalue) tuples whose hitEvalue
    is below evalue; at most MAX_HITS tuples are returned. hitEvalue is a float.
    '''
    goodhits = []
    hits = getHitsFunc(seqId, seq) or []
    for hit in hits:
        # stop once we have collected the allowed number of qualifying hits
        if len(goodhits) >= MAX_HITS:
            break
        hitEvalue = getHitEvalue(hit)
        if hitEvalue < evalue:
            hitSeqId = getHitId(hit)
            goodhits.append((hitSeqId, getSeqFunc(hitSeqId), hitEvalue))
    return goodhits
def getDistanceForAlignedSeqPair(seqId, alignedSeq, hitSeqId, alignedHitSeq, workPath):
    '''
    seqId, alignedSeq: id and aligned (trimmed) sequence of the query.
    hitSeqId, alignedHitSeq: id and aligned sequence of the hit; assumed the same length as alignedSeq.
    workPath: directory containing codeml.ctl and jones.dat; codeml is run here.
    Writes a two-sequence data file, runs codeml on it, and returns the
    distance (a float) parsed from codeml's 2AA.t output via pamlGetDistance().
    '''
    # paranoid check: aligned and trimmed seqs need to be the same length.
    # if len(alignedSeq) != len(alignedHitSeq):
    #     raise Exception('getDistanceForAlignedSeqPairs: different lengths for seqs: '+str(((seqId, alignedSeq), (hitSeqId, alignedHitSeq))))
    dataFileName = 'datafile.seq'
    treeFileName = 'treefile.seq'
    outFileName = 'outfile.seq'
    dataFilePath = os.path.join(workPath, dataFileName)
    treeFilePath = os.path.join(workPath, treeFileName)
    outFilePath = os.path.join(workPath, outFileName)
    # heading is number of seqs and length of each seq (which all need to be the same len).
    heading = '2 %s\n'%len(alignedSeq)
    pamlData = heading + '%s\n%s\n'%(seqId, alignedSeq) + '%s\n%s\n'%(hitSeqId, alignedHitSeq)
    util.writeToFile(pamlData, dataFilePath)
    # workPath is simply your folder that will contain codeml (Yang 2000), codeml.ctl (the codeml control file), and the jones.dat (Jones et. al, 1998)
    # run codeml silently; it reads codeml.ctl from the cwd and writes its
    # scratch files (data/tree/out) plus 2AA.t into workPath.
    try:
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call(['codeml'], cwd=workPath, stdout=devnull)
        distance = pamlGetDistance(workPath)
        return distance
    finally:
        # always remove codeml's scratch files, even if codeml or parsing fails.
        for filePath in [dataFilePath, treeFilePath, outFilePath]:
            if os.path.exists(filePath):
                os.remove(filePath)
def getGoodDivergenceAlignedTrimmedSeqPair(seqId, seq, hitSeqId, hitSeq, workPath):
    '''
    aligns seq to hit. trims aligned seq and hit seq.
    workPath: scratch directory, used only when clustalw is the configured aligner.
    returns: pairs of pairs of id and aligned trimmed sequences for sequences in hits,
    and a predicate function that, given a divergence threshold, says if the divergence of the sequences exceeds the threshold.
    e.g. ((seqId, alignedTrimmedSeq), (hitSeqId, alignedTrimmedHitSeq), divergencePredicateFunc)
    '''
    # ALIGN SEQ and HIT
    # need to align the sequences so we'z can study the rate of evolution per site
    inputFasta = '>%s\n%s\n>%s\n%s\n'%(seqId, seq, hitSeqId, hitSeq)
    if USE_CLUSTALW:
        alignedFasta = alignFastaClustalw(inputFasta, workPath)
    else:
        alignedFasta = alignFastaKalign(inputFasta)
    # try to recover from rare, intermittent failure of fasta alignment
    # NOTE(review): the retry always uses kalign, even when USE_CLUSTALW is set -- confirm intentional.
    if not alignedFasta:
        logging.error('fasta alignment failed.\ninputFasta=%s\n' +
                      'alignedFasta=%s\nSleep and retry alignment.',
                      inputFasta, alignedFasta)
        time.sleep(0.1)
        alignedFasta = alignFastaKalign(inputFasta)
    try:
        # parse the aligned fasta into sequence ids and sequences
        namelinesAndSeqs = list(fasta.readFasta(cStringIO.StringIO(alignedFasta)))
        idAndSeqs = [(fasta.idFromName(seqNameline), seq) for seqNameline, seq in namelinesAndSeqs]
        alignedIdAndSeq, alignedHitIdAndSeq = idAndSeqs
    except Exception as e:
        # attach the offending alignment data to the exception for debugging
        e.args += (inputFasta, alignedFasta)
        raise
    # CHECK FOR EXCESSIVE DIVERGENCE AND TRIMMING
    # find most diverged sequence
    # sort sequences by dash count; index 0 is the least gapped, index 1 the most.
    divIdSeqs = []
    for id, seq in (alignedIdAndSeq, alignedHitIdAndSeq):
        dashCount = seq.count('-')
        div = dashCount / float(len(seq))
        g = (dashCount, div, id, seq)
        divIdSeqs.append(g)
    divIdSeqs.sort()
    # check for excessive divergence
    leastDivergedDashCount, leastDivergedDiv, leastDivergedId, leastDivergedSeq = divIdSeqs[0]
    # check for excessive divergence and generate dashtrim.
    mostDivergedDashCount, mostDivergedDiv, mostDivergedId, mostDivergedSeq = divIdSeqs[1]
    # dashtrim = dashlen_check(mostDivergedSeq, divergence)
    startTrim, endTrim, trimDivergence = dashlen_check(mostDivergedSeq)
    # trim and add seqs to output
    def divergencePredicate(divergenceThreshold):
        '''Why this logic? Ask Dennis. Function closed over local variables that returns whether or not the alignment of the sequences is too diverged.'''
        if leastDivergedSeq and leastDivergedDiv > divergenceThreshold:
            return True
        if (startTrim or endTrim) and trimDivergence >= divergenceThreshold:
            return True
        return False
    # cut the same leading/trailing window from both aligned sequences
    alignedTrimmedIdAndSeq, alignedTrimmedHitIdAndSeq = [(id, seq[startTrim:(len(seq)-endTrim)]) for id, seq in (alignedIdAndSeq, alignedHitIdAndSeq)]
    return alignedTrimmedIdAndSeq, alignedTrimmedHitIdAndSeq, divergencePredicate
def minimumDicts(dicts, key):
    '''
    dicts: list of dictionaries.
    key: a key present in every dict in dicts.
    returns: list of d in dicts, s.t. d[key] <= e[key] for every d, e in dicts.
    e.g.: [{'a':4, 'b':1}, {'a':5, 'b':0}, {'b': 0, 'a': 3}], 'b' -> [{'a':5, 'b':0} and {'b': 0, 'a': 3}] (not necessarily in that order)
    '''
    if not dicts:
        return []
    # single pass for the minimum, then keep every dict matching it
    smallest = min(d[key] for d in dicts)
    return [d for d in dicts if d[key] == smallest]
def computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds=None, workingDir='.'):
    '''
    queryFastaPath: fasta file path for query genome.
    subjectFastaPath: fasta file path for subject genome.
    divEvalues: list of (div, evalue) tuples. orthologs are computed using the given div and evalue thresholds. div and evalue can be a float or string.
    getForwardHits: a function mapping a query seq id to a list of subject genome blast hits. see makeGetSavedHits() and makeGetHitsOnTheFly().
    getReverseHits: a function mapping a subject seq id to a list of query genome blast hits. see makeGetSavedHits() and makeGetHitsOnTheFly().
    querySeqIds: a list of sequence ids for the query genome. orthologs are only computed for those sequences.
    If False, orthologs are computed for every sequence in the query genome.
    workingDir: under workingDir, a temp directory is created, worked in (files and dirs created and deleted), and removed.
    returns: a mapping from (div, evalue) tuples to lists of orthologs.
    '''
    # optimization: internally swap query and subject if subject has fewer sequences than query and no querySeqIds were given.
    # compute orthologs and unswap results.
    # roundup time complexity is roughly linear in the number of sequences in the query genome.
    genomeSwapOptimization = True
    if not querySeqIds and genomeSwapOptimization and fasta.numSeqsInFastaDb(subjectFastaPath) < fasta.numSeqsInFastaDb(queryFastaPath):
        # subject genome has fewer sequences than query genome: swap to improve speed.
        isSwapped = True
        # swap query and subject, forward and reverse
        queryFastaPath, subjectFastaPath = subjectFastaPath, queryFastaPath
        getForwardHits, getReverseHits = getReverseHits, getForwardHits
    else:
        isSwapped = False
    # make functions to look up a sequence from a sequence id.
    getQuerySeqFunc = makeGetSeqForId(queryFastaPath)
    getSubjectSeqFunc = makeGetSeqForId(subjectFastaPath)
    # if no querySeqIds were specified, get orthologs for every query sequence
    if not querySeqIds:
        querySeqIds = list(fasta.readIds(queryFastaPath))
    # get orthologs for every (div, evalue) combination
    # NOTE(review): tmpDir is created here but workingDir (not tmpDir) is passed
    # down to _computeOrthologsSub -- confirm the nested temp dir is intentional.
    with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
        divEvalueToOrthologs = _computeOrthologsSub(querySeqIds, getQuerySeqFunc, getSubjectSeqFunc, divEvalues, getForwardHits, getReverseHits, workingDir)
    # if swapped query and subject genome, need to swap back the ids in orthologs before returning them.
    if isSwapped:
        swappedDivEvalueToOrthologs = divEvalueToOrthologs
        divEvalueToOrthologs = {}
        for divEvalue, swappedOrthologs in swappedDivEvalueToOrthologs.items():
            orthologs = [(query, subject, distance) for subject, query, distance in swappedOrthologs]
            divEvalueToOrthologs[divEvalue] = orthologs
    return divEvalueToOrthologs
def _computeOrthologsSub(querySeqIds, getQuerySeqFunc, getSubjectSeqFunc, divEvalues, getForwardHits, getReverseHits, workingDir):
    '''
    querySeqIds: a list of sequence ids from query genome.  Only orthologs for these ids are searched for.
    getQuerySeqFunc: a function that takes a seq id and returns the matching sequence from the query genome.
    getSubjectSeqFunc: a function that takes a seq id and returns the matching sequence from the subject genome.
    divEvalues: a list of (div, evalue) pairs which are thresholds for finding orthologs.  All pairs are searched simultaneously.
    div can be a float or string.  So can evalue.
    getForwardHits: a function that takes a query seq id and a query seq and returns the blast hits in the subject genome.
    getReverseHits: a function that takes a subject seq id and a subject seq and returns the blast hits in the query genome.
    find orthologs for every sequence in querySeqIds and every (div, evalue) combination.
    return: a mapping from (div, evalue) pairs to lists of orthologs.
    '''
    # Implements the reciprocal-smallest-distance algorithm: for each query
    # sequence, find its minimum-distance forward hit(s), then confirm the
    # query is itself a minimum-distance reverse hit of that subject sequence.
    # Note: the divs and evalues in divEvalues are strings which need to be converted to floats at the appropriate times below.
    # copy config files to working dir
    shutil.copy(MATRIX_PATH, workingDir)
    shutil.copy(CODEML_CONTROL_PATH, workingDir)
    divEvalueToOrthologs = dict(((div, evalue), list()) for div, evalue in divEvalues)
    # Loosest thresholds across all requested (div, evalue) pairs: hits are
    # collected once at these bounds, then filtered per pair below.
    maxEvalue = max(float(evalue) for div, evalue in divEvalues)
    maxDiv = max(float(div) for div, evalue in divEvalues)
    # get ortholog(s) for each query sequence
    for queryId in querySeqIds:
        querySeq = getQuerySeqFunc(queryId)
        # --- forward phase ---
        # get forward hits, evalues, alignments, divergences, and distances that meet the loosest standards of all the divs and evalues.
        # get forward hits and evalues, filtered by max evalue
        idSeqEvalueOfForwardHits = getGoodEvalueHits(queryId, querySeq, getForwardHits, getSubjectSeqFunc, maxEvalue)
        hitDataList = [{'hitId': hitId, 'hitSeq': hitSeq, 'hitEvalue': hitEvalue} for hitId, hitSeq, hitEvalue in idSeqEvalueOfForwardHits]
        # get alignments and divergences
        for hitData in hitDataList:
            (queryId, alignedQuerySeq), (hitId, alignedHitSeq), tooDivergedPred = getGoodDivergenceAlignedTrimmedSeqPair(queryId, querySeq, hitData['hitId'], hitData['hitSeq'], workingDir)
            hitData['alignedQuerySeq'] = alignedQuerySeq
            hitData['alignedHitSeq'] = alignedHitSeq
            # tooDivergedPred is a predicate: given a div threshold, is this
            # alignment too diverged to count?
            hitData['tooDivergedPred'] = tooDivergedPred
        # filter by max divergence.
        hitDataList = [hitData for hitData in hitDataList if not hitData['tooDivergedPred'](maxDiv)]
        # get distances of remaining hits, discarding hits for which paml generates no rst data.
        distancesHitDataList = []
        for hitData in hitDataList:
            try:
                hitData['distance'] = getDistanceForAlignedSeqPair(queryId, hitData['alignedQuerySeq'], hitData['hitId'], hitData['alignedHitSeq'], workingDir)
                distancesHitDataList.append(hitData)
            except Exception as e:
                # Only swallow the known paml "no rst data" failure; anything
                # else is a real error and propagates.
                if e.args and e.args[0] == PAML_ERROR_MSG:
                    continue
                else:
                    raise
        # filter hits by specific div and evalue combinations.
        divEvalueToMinimumDistanceHitDatas = {}
        minimumHitIdToDivEvalues = {}
        minimumHitIdToHitData = {}
        for divEvalue in divEvalues:
            div, evalue = divEvalue
            # collect hit datas that pass thresholds.
            goodHitDatas = []
            for hitData in distancesHitDataList:
                if hitData['hitEvalue'] < float(evalue) and not hitData['tooDivergedPred'](float(div)):
                    goodHitDatas.append(hitData)
            # get the minimum hit or hits.
            minimumHitDatas = minimumDicts(goodHitDatas, 'distance')
            divEvalueToMinimumDistanceHitDatas[divEvalue] = minimumHitDatas
            for hitData in minimumHitDatas:
                minimumHitIdToDivEvalues.setdefault(hitData['hitId'], []).append(divEvalue)
                minimumHitIdToHitData[hitData['hitId']] = hitData # possibly redundant, since if two divEvalues have same minimum hit, it gets inserted into dict twice.
        # --- reverse phase ---
        # get reverese hits that meet the loosest standards of the divs and evalues associated with that minimum distance hit.
        # performance note: wasteful or necessary to realign and compute distance between minimum hit and query seq?
        for hitId in minimumHitIdToHitData:
            hitData = minimumHitIdToHitData[hitId]
            hitSeq = hitData['hitSeq']
            # since minimum hit might not be associated with all divs and evalues, need to find the loosest div and evalue associated with this minimum hit.
            maxHitEvalue = max(float(evalue) for div, evalue in minimumHitIdToDivEvalues[hitId])
            maxHitDiv = max(float(div) for div, evalue in minimumHitIdToDivEvalues[hitId])
            # get reverse hits and evalues, filtered by max evalue
            idSeqEvalueOfReverseHits = getGoodEvalueHits(hitId, hitSeq, getReverseHits, getQuerySeqFunc, maxHitEvalue)
            revHitDataList = [{'revHitId': revHitId, 'revHitSeq': revHitSeq, 'revHitEvalue': revHitEvalue} for revHitId, revHitSeq, revHitEvalue in idSeqEvalueOfReverseHits]
            # if the query is not in the reverese hits, there is no way we can find an ortholog
            if queryId not in [revHitData['revHitId'] for revHitData in revHitDataList]:
                continue
            for revHitData in revHitDataList:
                values = getGoodDivergenceAlignedTrimmedSeqPair(hitId, hitSeq, revHitData['revHitId'], revHitData['revHitSeq'], workingDir)
                # NOTE(review): this tuple unpack rebinds the loop variable
                # hitId to the same value -- harmless, but easy to misread.
                (hitId, alignedHitSeq), (revHitId, alignedRevHitSeq), tooDivergedPred = values
                revHitData['alignedHitSeq'] = alignedHitSeq
                revHitData['alignedRevHitSeq'] = alignedRevHitSeq
                revHitData['tooDivergedPred'] = tooDivergedPred
            # filter by max divergence.
            revHitDataList = [revHitData for revHitData in revHitDataList if not revHitData['tooDivergedPred'](maxHitDiv)]
            # if the query is not in the reverese hits, there is no way we can find an ortholog
            if queryId not in [revHitData['revHitId'] for revHitData in revHitDataList]:
                continue
            # get distances of remaining reverse hits, discarding reverse hits for which paml generates no rst data.
            distancesRevHitDataList = []
            for revHitData in revHitDataList:
                try:
                    revHitData['distance'] = getDistanceForAlignedSeqPair(hitId, revHitData['alignedHitSeq'], revHitData['revHitId'], revHitData['alignedRevHitSeq'], workingDir)
                    distancesRevHitDataList.append(revHitData)
                except Exception as e:
                    if e.args and e.args[0] == PAML_ERROR_MSG:
                        continue
                    else:
                        raise
            # if passes div and evalue thresholds of the minimum hit and minimum reverse hit == query, write ortholog.
            # filter hits by specific div and evalue combinations.
            for divEvalue in minimumHitIdToDivEvalues[hitId]:
                div, evalue = divEvalue
                # collect hit datas that pass thresholds.
                goodRevHitDatas = []
                for revHitData in distancesRevHitDataList:
                    if revHitData['revHitEvalue'] < float(evalue) and not revHitData['tooDivergedPred'](float(div)):
                        goodRevHitDatas.append(revHitData)
                # get the minimum hit or hits.
                minimumRevHitDatas = minimumDicts(goodRevHitDatas, 'distance')
                # Reciprocality confirmed: record (query, subject, distance).
                if queryId in [revHitData['revHitId'] for revHitData in minimumRevHitDatas]:
                    divEvalueToOrthologs[divEvalue].append((queryId, hitId, hitData['distance']))
    return divEvalueToOrthologs
def computeOrthologsUsingOnTheFlyHits(queryFastaPath, subjectFastaPath, divEvalues, querySeqIds=None, workingDir='.'):
    '''
    Convenience wrapper around computeOrthologs() that runs blast on demand.
    querySeqIds: a list of sequence ids from query genome to find orthologs for.  If empty/falsy, will compute orthologs for every sequence in query genome.
    queryFastaPath: location and name of of fasta file and blast indexes of the query genome.  e.g. /groups/rodeo/roundup/genomes/current/Homo_sapiens.aa/Homo_sapiens.aa
    subjectFastaPath: location and name of of fasta file and blast indexes of the subject genome.
    workingDir: a directory in which to create, use, and delete temporary files and dirs.
    Because blast hits are computed on-the-fly this is slower than using
    precomputed hits when processing full genomes.
    '''
    # A single blast pass at the least stringent evalue covers every
    # (div, evalue) threshold; stricter pairs are filtered downstream.
    loosestEvalue = str(max(float(evalue) for div, evalue in divEvalues))
    forwardHits = makeGetHitsOnTheFly(subjectFastaPath, loosestEvalue, workingDir)
    reverseHits = makeGetHitsOnTheFly(queryFastaPath, loosestEvalue, workingDir)
    return computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, forwardHits,
                            reverseHits, querySeqIds, workingDir)
def computeOrthologsUsingSavedHits(queryFastaPath, subjectFastaPath, divEvalues, forwardHitsPath, reverseHitsPath, querySeqIds=None, workingDir='.'):
    '''
    Convenience wrapper around computeOrthologs() that reads precomputed blast
    hits from forwardHitsPath and reverseHitsPath instead of running blast.
    returns: a mapping from (div, evalue) pairs to lists of orthologs.
    '''
    forwardHits = makeGetSavedHits(forwardHitsPath)
    reverseHits = makeGetSavedHits(reverseHitsPath)
    return computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, forwardHits,
                            reverseHits, querySeqIds, workingDir)
def writeToOutfile(orthologs, outfile):
    '''
    orthologs: a list of tuples of (queryid, subjectid, distance).
    outfile: where to write the orthologs
    Write the orthologs in the canonical format: one ortholog per line, each
    line being tab-separated query id, subject id, and distance.
    '''
    # Stream one line per ortholog instead of materializing the whole payload.
    with open(outfile, 'w') as fh:
        for query, subject, distance in orthologs:
            fh.write('{}\t{}\t{}\n'.format(query, subject, distance))
###################################
# COMMAND-LINE PROCESSING FUNCTIONS
###################################
def copyFastaArg(srcFile, destDir):
    '''
    srcFile: FASTA format genome file.
    destDir: where to copy the fasta file.
    Copy the source file into the destination dir, skipping the copy when the
    source already lives there.
    return: path of the fasta file inside destDir.
    '''
    # Normalize both paths (expand ~, make absolute) before comparing them.
    src = os.path.abspath(os.path.expanduser(srcFile))
    dest = os.path.join(os.path.abspath(os.path.expanduser(destDir)), os.path.basename(src))
    if src != dest:
        shutil.copyfile(src, dest)
    return dest
def formatFastaArg(fastaFile):
    '''
    Build blast indexes for fastaFile; the indexes are written alongside it.
    returns: the normalized (absolute, ~-expanded) path of fastaFile.
    '''
    path = os.path.abspath(os.path.expanduser(fastaFile))
    formatForBlast(path)
    return path
if __name__ == '__main__':
    # Library module: no work is performed when executed directly.
    pass
|
todddeluca/reciprocal_smallest_distance
|
rsd/rsd.py
|
Python
|
mit
| 32,578
|
#!/bin/python
import os
import roomai.common
import copy
#
#0, 1, 2, 3, ..., 7, 8, 9, 10, 11, 12, 13, 14
#^ ^ ^ ^ ^
#| | | | |
#3, 10, J, Q, K, A, 2, r, R
#
class DouDiZhuActionElement:
    """Static card-rank tables shared by every DouDiZhu action."""
    # 'x' encodes a check (pass) and 'b' encodes a bid; 'r'/'R' are the jokers.
    str_to_rank = {'3': 0, '4': 1, '5': 2, '6': 3, '7': 4, '8': 5, '9': 6,
                   'T': 7, 'J': 8, 'Q': 9, 'K': 10, 'A': 11, '2': 12,
                   'r': 13, 'R': 14, 'x': 15, 'b': 16}
    # Exact inverse of str_to_rank, derived so the two tables cannot drift apart.
    rank_to_str = {rank: card for card, rank in str_to_rank.items()}
    total_normal_cards = 15
class DouDiZhuPokerAction(roomai.common.AbstractAction):
    """
    A DouDiZhu poker action: master cards (which define the pattern) plus
    optional slave cards (kickers).  Construction classifies the action into a
    pattern ("i_cheat" = check, "i_bid" = bid, "x_rocket" = both jokers,
    "i_invalid", or a generic "p_*" card pattern from AllPatterns).

    Fix: a redundant zero-argument __init__ used to precede the real
    constructor; it was dead code (immediately overridden) and was removed.
    """
    def __init__(self, masterCards, slaveCards):
        """masterCards/slaveCards: lists of integer ranks (see DouDiZhuActionElement)."""
        # Defensive copies so later mutation of the caller's lists cannot
        # change this action.
        self.__masterCards__ = [c for c in masterCards]
        self.__slaveCards__ = [c for c in slaveCards]
        self.__masterPoints2Count__ = None
        self.__slavePoints2Count__ = None
        self.__isMasterStraight__ = None
        self.__maxMasterPoint__ = None
        self.__minMasterPoint__ = None
        self.__pattern__ = None
        self.__action2pattern__()
        self.__key__ = DouDiZhuPokerAction.__master_slave_cards_to_key__(masterCards, slaveCards)
    def __get_key__(self): return self.__key__
    key = property(__get_key__, doc="The key of DouDiZhu Action")
    def __get_masterCards__(self): return self.__masterCards__
    masterCards = property(__get_masterCards__, doc="The cards act as the master cards")
    def __get_slaveCards__(self): return self.__slaveCards__
    slaveCards = property(__get_slaveCards__, doc="The cards act as the slave cards")
    def __get_masterPoints2Count__(self): return self.__masterPoints2Count__
    masterPoints2Count = property(__get_masterPoints2Count__, doc="The count of different points in the masterCards")
    def __get_slavePoints2Count__(self): return self.__slavePoints2Count__
    slavePoints2Count = property(__get_slavePoints2Count__, doc="The count of different points in the slaveCards")
    def __get_isMasterStraight__(self): return self.__isMasterStraight__
    isMasterStraight = property(__get_isMasterStraight__, doc="The master cards are straight")
    def __get_maxMasterPoint__(self): return self.__maxMasterPoint__
    maxMasterPoint = property(__get_maxMasterPoint__, doc="The max point in the master cards")
    def __get_minMasterPoint__(self): return self.__minMasterPoint__
    minMasterPoint = property(__get_minMasterPoint__, doc="The min point in the master cards")
    def __get_pattern__(self): return self.__pattern__
    pattern = property(__get_pattern__, doc="The pattern of the action")
    @classmethod
    def lookup(cls, key):
        """Return the canonical shared action instance for key (cards in any order)."""
        return AllActions["".join(sorted(key))]
    @classmethod
    def __master_slave_cards_to_key__(cls, masterCards, slaveCards):
        """Build the canonical key: every card's character, sorted."""
        key_int = (masterCards + slaveCards)
        key_str = []
        for key in key_int:
            key_str.append(DouDiZhuActionElement.rank_to_str[key])
        key_str.sort()
        return "".join(key_str)
    def __action2pattern__(self):
        """Compute the cached per-point counts and classify this action's pattern."""
        self.__masterPoints2Count__ = dict()
        for c in self.__masterCards__:
            if c in self.__masterPoints2Count__:
                self.__masterPoints2Count__[c] += 1
            else:
                self.__masterPoints2Count__[c] = 1
        self.__slavePoints2Count__ = dict()
        for c in self.__slaveCards__:
            if c in self.__slavePoints2Count__:
                self.__slavePoints2Count__[c] += 1
            else:
                self.__slavePoints2Count__[c] = 1
        # A straight requires consecutive master points, all below '2'
        # ('2' and the jokers never chain), and more than one distinct point.
        self.__isMasterStraight__ = 0
        num = 0
        for v in self.__masterPoints2Count__:
            if (v + 1) in self.__masterPoints2Count__ and (v + 1) < DouDiZhuActionElement.str_to_rank["2"]:
                num += 1
        if num == len(self.__masterPoints2Count__) - 1 and len(self.__masterPoints2Count__) != 1:
            self.__isMasterStraight__ = 1
        self.__maxMasterPoint__ = -1
        self.__minMasterPoint__ = 100
        for c in self.__masterPoints2Count__:
            if self.__maxMasterPoint__ < c:
                self.__maxMasterPoint__ = c
            if self.__minMasterPoint__ > c:
                self.__minMasterPoint__ = c
        ########################
        ## action 2 pattern ####
        ########################
        # is cheat? (a lone 'x' card means check/pass)
        if len(self.__masterCards__) == 1 \
                and len(self.__slaveCards__) == 0 \
                and self.__masterCards__[0] == DouDiZhuActionElement.str_to_rank["x"]:
            self.__pattern__ = AllPatterns["i_cheat"]
        # is roblord (a lone 'b' card means bid)
        elif len(self.__masterCards__) == 1 \
                and len(self.__slaveCards__) == 0 \
                and self.__masterCards__[0] == DouDiZhuActionElement.str_to_rank["b"]:
            self.__pattern__ = AllPatterns["i_bid"]
        # is twoKings (both jokers together form the rocket)
        elif len(self.__masterCards__) == 2 \
                and len(self.__masterPoints2Count__) == 2 \
                and len(self.__slaveCards__) == 0 \
                and self.__masterCards__[0] in [DouDiZhuActionElement.str_to_rank["r"], DouDiZhuActionElement.str_to_rank["R"]] \
                and self.__masterCards__[1] in [DouDiZhuActionElement.str_to_rank["r"], DouDiZhuActionElement.str_to_rank["R"]]:
            self.__pattern__ = AllPatterns["x_rocket"]
        else:
            ## process masterCards
            # Every master point must occur the same number of times
            # (e.g. a trio with different counts is invalid).
            masterPoints = self.__masterPoints2Count__
            if len(masterPoints) > 0:
                count = masterPoints[self.__masterCards__[0]]
                for c in masterPoints:
                    if masterPoints[c] != count:
                        self.__pattern__ = AllPatterns["i_invalid"]
            if self.__pattern__ is None:
                pattern = "p_%d_%d_%d_%d_%d" % (len(self.__masterCards__), len(masterPoints), \
                                                self.__isMasterStraight__, \
                                                len(self.__slaveCards__), 0)
                if pattern in AllPatterns:
                    self.__pattern__ = AllPatterns[pattern]
                else:
                    self.__pattern__ = AllPatterns["i_invalid"]
    def __deepcopy__(self, memodict=None, newinstance=None):
        # Actions are canonical and immutable, so "copying" returns the shared
        # instance from the lookup table.  (The unused memodict default was a
        # mutable {}; replaced with None -- backward compatible.)
        return self.lookup(self.key)
############## read data ################
# AllPatterns: pattern name -> [name, <numeric pattern properties>...]
# AllActions: canonical sorted key string -> shared DouDiZhuPokerAction instance
AllPatterns = dict()
AllActions = dict()
from roomai.doudizhu import doudizhu_action_data
from roomai.doudizhu import doudizhu_pattern_data
for line in doudizhu_pattern_data:
    # Strip spaces and anything after a '#' comment marker.
    line = line.replace(" ", "").strip()
    line = line.split("#")[0]
    if len(line) == 0 or len(line[0].strip()) == 0:
        continue
    lines = line.split(",")
    # First field is the pattern name; the rest are numeric properties.
    for i in range(1, len(lines)):
        lines[i] = int(lines[i])
    AllPatterns[lines[0]] = lines
for line in doudizhu_action_data:
    # Fields (tab-separated): key, master ranks, slave ranks, pattern name.
    line = line.replace(" ", "").strip()
    lines = line.split("\t")
    if lines[3] not in AllPatterns:
        continue
    m = [int(str1) for str1 in lines[1].split(",")]
    s = []
    if len(lines[2]) > 0:
        s = [int(str1) for str1 in lines[2].split(",")]
    action = DouDiZhuPokerAction(m, s)
    # (Removed dead code: an `if "b" in line: b = 0` statement whose local
    # variable was never read.)
    # Sanity check: the constructed action must round-trip to the data row.
    if action.key != lines[0] or action.pattern[0] != lines[3]:
        raise ValueError("%s is wrong. The generated action has key(%s) and pattern(%s)"%(line, action.key,action.pattern[0]))
    AllActions[action.key] = action
|
DMRookie/RoomAI
|
roomai/doudizhu/DouDiZhuPokerAction.py
|
Python
|
mit
| 7,614
|
from collections import ChainMap
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render, redirect, resolve_url
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import get_current_site
from django.views.generic import FormView, TemplateView
from django.contrib.auth import login, logout
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode, is_safe_url
from django.utils.encoding import force_bytes
from django.utils.decorators import method_decorator
from app.models.account import *
from app.forms.account import *
from app.views.edit import FormsetView
class SignUpView(FormsetView):
    """Registers a new user together with their address.

    On success an inactive user is created (RegistrationProfile sends the
    activation email) and the visitor is redirected to the sign-in page.
    """
    template_name = 'account/signup.html'
    form_class = SignUpUserForm
    formset_class = SignUpAddressFormSet
    http_method_names = ['get', 'post']
    success_url = reverse_lazy('account:signin')
    success_message = 'We have sent you an activation email at <strong>%(email)s</strong>. Please follow the ' \
                      'instructions in the mail to <strong>activate</strong> your account.'

    def form_valid(self, request, form, formset):
        # Merge the user form's and address formset's cleaned data; entries in
        # formset.cleaned_data take precedence (ChainMap lookup order).
        cleaned_data = dict(ChainMap(formset.cleaned_data, form.cleaned_data))
        self.sign_up(request, **cleaned_data)
        messages.success(request, self.success_message % {'email': cleaned_data['email']}, extra_tags='safe')
        return super(SignUpView, self).form_valid(request, form, formset)

    def sign_up(self, request, **cleaned_data):
        """Create an inactive user for the current site from cleaned_data.

        Fix: the previous revision unpacked every cleaned_data field into
        local variables that were never used; that dead code was removed.
        """
        return RegistrationProfile.objects.create_inactive_user(get_current_site(request), **cleaned_data)
class SignInView(FormView):
    """Authenticates a user, honouring a safe ?next= redirect target."""
    template_name = 'account/signin.html'
    form_class = SignInForm
    http_method_names = ['get', 'post']
    success_url = reverse_lazy('home')

    def get(self, request, *args, **kwargs):
        # Carry the redirect target (e.g. ?next=...) into the template so the
        # subsequent POST can send it back.
        redirect_to = self.request.REQUEST.get(REDIRECT_FIELD_NAME, '')
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        kwargs = {'form': form, 'REDIRECT_FIELD_NAME': REDIRECT_FIELD_NAME, 'redirect_to': redirect_to}
        return self.render_to_response(self.get_context_data(**kwargs))

    def form_valid(self, form):
        """Log the user in; only follow the requested redirect when it is safe."""
        redirect_to = self.request.REQUEST.get(REDIRECT_FIELD_NAME, '')
        # is_safe_url guards against open-redirect attacks; unsafe targets
        # fall back to the default success_url.
        if is_safe_url(url=redirect_to, host=self.request.get_host()):
            self.success_url = redirect_to
        login(self.request, form.get_user())
        return super(SignInView, self).form_valid(form)

    # Fix: removed a redundant form_invalid() override that only delegated to
    # super(); FormView's default behaviour is identical.
class SignOutView(FormView):
    """Ends the authenticated session and returns to the sign-in page."""
    # POST-only: prevents logging users out via crafted GET links.
    http_method_names = ['post']
    success_url = reverse_lazy('account:signin')
    success_message = 'You have signed out of your ToolShare account.'
    def post(self, request):
        """Log out, flash a confirmation, and redirect to sign-in."""
        logout(request)
        messages.success(request, self.success_message)
        return redirect(self.success_url)
class ActivateAccountView(TemplateView):
    """Consumes an emailed activation key and activates the matching account."""
    http_method_names = ['get']
    success_url = reverse_lazy('account:signin')
    success_message = 'Your ToolShare account is now ready to use. Please <strong>sign in</strong> to continue.'
    failure_url = reverse_lazy('account:signup')
    failure_message = 'It appears that the activation link is no longer valid. Please <strong>sign up</strong> for a ' \
                      'new account.'

    def get(self, request, activation_key):
        """Activate the account for activation_key, or send the visitor back to sign-up."""
        activated = RegistrationProfile.objects.activate_user(activation_key)
        # Guard clause: expired/unknown keys bounce back to registration.
        if not activated:
            messages.error(request, self.failure_message, extra_tags='safe')
            return redirect(self.failure_url)
        messages.success(request, self.success_message, extra_tags='safe')
        return redirect(self.success_url)
class RecoverAccountView(FormView):
    """Emails a password-reset link to the address submitted in the form."""
    template_name = 'account/recover.html'
    success_url = reverse_lazy('account:signin')
    success_message = 'We have sent an email with instructions to <strong>reset</strong> the password on your ' \
                      'ToolShare account.'
    form_class = RecoverAccountForm
    http_method_names = ['get', 'post']

    def form_valid(self, form):
        """Build the uid/token pair for the account and let the form send the mail."""
        account = User.objects.get(email=form.cleaned_data['email'])
        token = default_token_generator.make_token(account)
        uidb64 = urlsafe_base64_encode(force_bytes(account.pk))
        form.save(get_current_site(self.request), account, uidb64, token)
        messages.success(self.request, self.success_message, extra_tags='safe')
        return super(RecoverAccountView, self).form_valid(form)
class ResetAccountView(FormView):
    """Resets an account password from an emailed uid/token link."""
    template_name = 'account/reset.html'
    success_url = reverse_lazy('account:signin')
    success_message = 'The password on your ToolShare account was successfully reset. Please <strong>sign in' \
                      '</strong> with your new password.'
    failure_url = reverse_lazy('account:recover')
    failure_message = 'It appears that the URL you used to recover the account is no longer valid. Please try to ' \
                      'reset your password again.'
    form_class = ResetAccountForm
    http_method_names = ['get', 'post']

    def _get_user_and_validity(self, uidb64, token):
        """Resolve the user from uidb64 and report whether token is still valid."""
        uid = urlsafe_base64_decode(uidb64)
        user = User.objects.get(pk=uid)
        return user, default_token_generator.check_token(user, token)

    def get(self, request, uidb64, token):
        user, valid = self._get_user_and_validity(uidb64, token)
        if not valid:
            messages.error(self.request, self.failure_message, extra_tags='safe')
            return redirect(self.failure_url)
        return super(ResetAccountView, self).get(request)

    def post(self, request, uidb64, token):
        user, valid = self._get_user_and_validity(uidb64, token)
        # Security fix: the token was previously verified only on GET, so a
        # direct POST could reset the password with an invalid/expired token.
        if not valid:
            messages.error(self.request, self.failure_message, extra_tags='safe')
            return redirect(self.failure_url)
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        if form.is_valid():
            form.save(user)
            messages.success(self.request, self.success_message, extra_tags='safe')
            return super(ResetAccountView, self).form_valid(form)
        return super(ResetAccountView, self).form_invalid(form)
class UpdateAccountView(FormsetView):
    """Lets a signed-in user edit their profile and address.

    A change of share zone ("relocation") is refused while the user still has
    borrowed tools, tools in the community shed, or future reservations.
    """
    template_name = 'account/update.html'
    success_url = reverse_lazy('account:update')
    success_message = 'Your ToolShare account was successfully updated.'
    failure_message = \
        '''
        One or more reasons listed below is <strong>preventing</strong> the change from being saved:
        <ul>
            <li>You have <strong>borrowed tools</strong> in your possession that need to be returned.</li>
            <li>You have <strong>tools in the community shed</strong> that you need to collect.</li>
            <li>You have <strong>unresolved future reservations</strong>.</li>
        </ul>
        '''
    form_class = UpdateUserForm
    formset_class = UpdateAddressFormSet
    def get(self, request, *args, **kwargs):
        # Render the form/formset pre-populated from the current user/address.
        form_class = self.get_form_class()
        form = form_class(instance=request.user)
        formset_class = self.get_formset_class()
        formset = formset_class(instance=request.user.address)
        return render(request, self.template_name, {'form': form, 'formset': formset})
    def post(self, request, *args, **kwargs):
        # Remember the share zone before binding POST data, to detect relocation.
        prev_sz = request.user.share_zone
        form_class = self.get_form_class()
        form = form_class(request.POST, instance=request.user)
        formset_class = self.get_formset_class()
        formset = formset_class(request.POST, instance=request.user.address)
        if form.is_valid() and formset.is_valid():
            if self.is_relocation_allowed(prev_sz, formset.instance.share_zone, request.user):
                return self.form_valid(request, form, formset)
            else:
                # NOTE(review): failure_message contains no %(r)s placeholder,
                # so this '%' interpolation is a no-op -- confirm intent.
                messages.error(request, self.failure_message % {'r': reverse_lazy('reservation')}, extra_tags='safe')
                return self.form_invalid(request, form, formset)
        else:
            return self.form_invalid(request, form, formset)
    def form_valid(self, request, form, formset):
        """Persist both the user and address changes, then flash a confirmation."""
        form.save()
        formset.save()
        messages.success(request, self.success_message)
        return super(UpdateAccountView, self).form_valid(request, form, formset)
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for every HTTP method on this view.
        return super(UpdateAccountView, self).dispatch(request, *args, **kwargs)
    def is_relocation_allowed(self, prev_sz, curr_sz, user):
        """A share-zone change is allowed only when the user has no outstanding obligations."""
        if prev_sz != curr_sz and not user.is_ready_to_move():
            return False
        return True
class ChangePasswordView(FormView):
    """Lets an authenticated user change their password."""
    template_name = 'account/change_password.html'
    success_url = reverse_lazy('account:update')
    success_message = 'Password updated successfully.'
    form_class = ChangePasswordForm

    def get_form(self, form_class):
        """Bind the form to the signed-in user so it can validate the old password."""
        form_kwargs = self.get_form_kwargs()
        return form_class(user=self.request.user, **form_kwargs)

    def form_valid(self, form):
        """Persist the new password and flash a confirmation."""
        form.save()
        messages.success(self.request, self.success_message)
        return super(ChangePasswordView, self).form_valid(form)

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Anonymous users are redirected to the login page before any handler runs.
        return super(ChangePasswordView, self).dispatch(request, *args, **kwargs)
|
nm6061/toolshare
|
app/views/account.py
|
Python
|
mit
| 9,912
|
# -*- coding: utf-8 -*-
from openpyxl import load_workbook
from libs.xlsxInterface import getLevelTypes, travesalLevels
import models
from prettyprint import pp
from functools import partial
from django.db import transaction
def generateLevels(levelDict, levelTypes, cellAndValues):
    """Insert one worksheet row of level titles into the nested levelDict cache,
    creating and saving a models.Level for each title not seen before.

    levelDict: nested cache: title -> {'levelObject': Level, <child titles>: {...}}
    levelTypes: LevelType instances, one per column (parallel to cellAndValues).
    cellAndValues: (cell, value) pairs for one row; only the value is used.
    """
    currentDict = levelDict
    parentLevel = None
    # enumerate() replaces the old zip(range(len(...)), ...) construction;
    # the cell itself is never used, only its text value.
    for index, (_cell, value) in enumerate(cellAndValues):
        levelType = levelTypes[index]
        if value not in currentDict:
            # First occurrence of this title under this parent: persist it.
            currentDict[value] = {}
            level = models.Level()
            level.title = value
            currentDict[value]['levelObject'] = level
            if parentLevel is not None:
                level.parent = parentLevel
            level.levelType = levelType
            level.save()
            parentLevel = level
        else:
            parentLevel = currentDict[value]['levelObject']
        currentDict = currentDict[value]
@transaction.atomic
def generateLevelsFromExcelFile(xlsxFile, **args):
    """Build the Level/LevelType hierarchy from the first worksheet of xlsxFile.

    Required keyword args: leftTopLevelTypeCell, rightBottomLevelTypeCell
    (cell range holding the level-type headers), leftTopLevelCell,
    rightBottomLevelCell (cell range holding the level titles).
    Returns the nested levelDict cache produced by generateLevels.
    """
    ltlt = args.pop('leftTopLevelTypeCell')
    ltrb = args.pop('rightBottomLevelTypeCell')
    lt = args.pop('leftTopLevelCell')
    rb = args.pop('rightBottomLevelCell')
    ws = load_workbook(xlsxFile, read_only = False).worksheets[0]
    levelTypes = getLevelTypes(ws, ltlt, ltrb)
    # NOTE(review): relies on Python 2's eager map() returning a list --
    # levelTypes is indexed inside generateLevels and iterated again below;
    # a Python 3 map object would break both. Confirm before porting.
    levelTypes = map(lambda x: models.LevelType(x), levelTypes)
    levelDict = {}
    travesalLevels(ws, lt, rb, partial(generateLevels, levelDict, levelTypes))
    for levelType in levelTypes:
        levelType.save()
    pass
    return levelDict
@transaction.atomic
def test():
    """Ad-hoc manual test: dump the stored level tree as an HTML table.

    The commented-out block below was used to import the hierarchy from an
    xlsx fixture before dumping it.
    """
    # levelDict = generateLevelsFromExcelFile('test.xlsx',
    # leftTopLevelTypeCell = 'B6',
    # rightBottomLevelTypeCell = 'D7',
    # leftTopLevelCell = 'B8',
    # rightBottomLevelCell = 'D419')
    # levels = models.Level.objects.all()
    traversalLevels()
    pass
@transaction.atomic
def clearAll():
    """Delete every Level and LevelType row (children first).

    Fix: the previous revision used map() purely for its side effects, which
    silently does nothing under Python 3 where map() is lazy; a plain loop is
    correct on both versions and clearer.
    """
    for level in models.Level.objects.all():
        level.delete()
    for levelType in models.LevelType.objects.all():
        levelType.delete()
def traversalLevels():
    """Print the whole level hierarchy as an HTML table (one row-tree per root)."""
    # Roots are the levels without a parent.
    roots = models.Level.objects.filter(parent = None)
    tableTemplate = u"""
    <table>
    <tbody>
    {content}
    </tbody>
    </table>
    """
    content = u""
    for level in roots:
        # makeLevelsTable returns (row_count, html); only the markup is needed here.
        content += u"<TR>" + makeLevelsTable(level)[1]
    print tableTemplate.format(content = content)
def makeLevelsTable(level):
    """Recursively render a level subtree as HTML table cells.

    Returns (row_count, html): row_count is the number of leaf rows under this
    level (used as its cell's rowspan) and html is this cell's markup followed
    by its children's rows.
    NOTE(review): a title containing '{' or '}' would break the deferred
    .format() call below -- confirm titles are brace-free.
    """
    children = models.Level.objects.filter(parent = level)
    if len(children) == 0:
        # Leaf: a single cell that also closes its row.
        return (1, u"<td>{title}</td></tr>\n".format(title = level.title))
    # '{{rowspan}}' survives this .format() as '{rowspan}' and is filled in
    # once the subtree's total row count is known.
    ret = u"<TD rowspan='{{rowspan}}'>{title}</TD>".format(title = level.title)
    rowSpan = 0
    # enumerate() replaces the old zip(range(len(...)), ...) construction.
    for i, child in enumerate(children):
        childRowCount, childRows = makeLevelsTable(child)
        rowSpan += childRowCount
        # The first child continues the current row; later children open new rows.
        if i == 0:
            ret += childRows
        else:
            ret += u"<tr>" + childRows
    ret = ret.format(rowspan = rowSpan)
    return (rowSpan, ret)
def createTestObject(**args):
    """Create, populate and persist a TestObject.

    Keyword args: 'title' (required -- KeyError is raised before any DB write
    if missing), 'description' (optional, defaults to empty).
    """
    title = args.pop('title')
    description = args.pop('description', u'')
    obj = models.TestObject.objects.create()
    obj.title = title
    obj.description = description
    obj.save()
    return obj
def getAllTestObjects():
    """Return a queryset of every TestObject."""
    return models.TestObject.objects.all()
def getAllTestLevelTypes():
    """Return a queryset of every LevelType."""
    return models.LevelType.objects.all()
def addLevelTypeToTestObject(levelTypeId, testObjectId):
    """Attach the LevelType with levelTypeId to the TestObject with testObjectId."""
    try:
        lt = models.LevelType.objects.get(id = levelTypeId)
        to = models.TestObject.objects.get(id = testObjectId)
        # NOTE(review): 'to.add(lt)' looks suspect -- Django model instances
        # have no .add(); presumably this should target a related manager,
        # e.g. to.levelTypes.add(lt). Confirm against the TestObject model.
        to.add(lt)
    except Exception, e:
        # Log and re-raise so callers still see the original failure.
        print e
        raise
|
qinggeng/ceShiGuanLiXiTong
|
site/ceShiGuanLiSite/apps/testManage/utils.py
|
Python
|
mit
| 3,685
|
#!/usr/bin/env python3
import time
import random
import socket
from flask import Flask, render_template, redirect, url_for, request, jsonify
import config
# Global handle for the debug log file; (re)opened by _reset().
log = None
# classes
class Agent():
    """A robot in the ring: its IP, travel direction, protocol state and node."""
    def __init__(self, ip, cw=True, node=None, state='initial'):
        # cw=True means the agent travels clockwise around the ring.
        self.ip = ip
        self.cw = cw
        self.node = node
        self.state = state
    def __repr__(self):
        return f'Agent: ip {self.ip}, direction CW: {self.cw}, state: {self.state}, node: {self.node}'
class Node():
    """One position on the ring; tracks the IPs of the agents sitting on it."""
    def __init__(self, label):
        assert isinstance(label, int), 'Node constructor accepts numeric label only'
        self.label = label
        self.agents = []  # IPs of the agents currently in this node
    def add_agent(self, agent_ip):
        """Record that the agent with the given IP entered this node."""
        self.agents.append(agent_ip)
    def __repr__(self):
        occupants = ' | '.join(str(app.agents[ip]) for ip in self.agents)
        return f'<Node {self.label}: [{occupants}]>'
class Ring():
    """The ring topology: n_nodes Node objects plus movement/blocking rules."""
    def __init__(self, n_nodes):
        self._nodes = [Node(i) for i in range(n_nodes)]
        self.n_nodes = n_nodes
    def get_node(self, label):
        # Labels are 0..n_nodes-1 and double as list indexes.
        return self._nodes[label]
    def next(self, agent):
        """Return next node."""
        # Direction fixes the step sign; modulo wraps around the ring.
        i = 1 if agent.cw else -1
        return self._nodes[(agent.node+i) % self.n_nodes]
    def prev(self, agent):
        """Return prev node."""
        i = -1 if agent.cw else 1
        return self._nodes[(agent.node+i) % self.n_nodes]
    def blocked(self, agent):
        """Check if the next node is blocked."""
        next_node = self.next(agent)
        # The malicious agent is blocked by any occupant; honest agents are
        # blocked only by the malicious agent.
        if agent.ip == app.malicious_ip:
            return len(next_node.agents) > 0
        else:
            return app.malicious_ip in next_node.agents
    def random_place_agents(self):
        """Randomly place agents in the ring."""
        #a = app.agents[app.agents_ips[0]]
        #a.node = 1
        #self.get_node(1).add_agent(a.ip)
        #a.cw = False
        #a = app.agents[app.agents_ips[1]]
        #a.node = 2
        #self.get_node(2).add_agent(a.ip)
        #a.cw = False
        #a = app.agents[app.agents_ips[2]]
        #a.node = 4
        #self.get_node(4).add_agent(a.ip)
        #a.cw = True
        #a = app.agents[app.malicious_ip]
        #a.node = 6
        #self.get_node(6).add_agent(a.ip)
        #a.cw = True
        # True = clockwise
        # False = counterclockwise
        # NOTE(review): this hard-coded placement plus the early `return`
        # below disables the randomized placement loop that follows; it looks
        # like a debugging fixture -- confirm before running experiments.
        a = app.agents[app.agents_ips[0]]
        a.node = 3
        self.get_node(3).add_agent(a.ip)
        a.cw = False
        a = app.agents[app.agents_ips[1]]
        a.node = 6
        self.get_node(6).add_agent(a.ip)
        a.cw = False
        a = app.agents[app.agents_ips[2]]
        a.node = 5
        self.get_node(5).add_agent(a.ip)
        a.cw = True
        a = app.agents[app.malicious_ip]
        a.node = 1
        self.get_node(1).add_agent(a.ip)
        a.cw = False
        return
        # UNREACHABLE while the `return` above remains in place.
        # at most 1 agent per node, randomize direction in case of unoriented ring
        for agent, node in zip(app.agents.values(), random.sample(self._nodes, len(app.agents.keys()))):
            agent.cw = True if config.oriented else random.choice([True, False])
            agent.node = node.label
            self.get_node(node.label).add_agent(agent.ip)
    def dump(self):
        """Serialize the ring as {label: [(ip, cw, state, node), ...]} for JSON."""
        ring = dict()
        for node in self._nodes:
            ring[str(node.label)] = [(app.agents[a].ip, str(app.agents[a].cw), app.agents[a].state, app.agents[a].node) for a in node.agents]
        return ring
    def __repr__(self):
        return ', '.join(str(node) for node in self._nodes)
class MTFGRServer(Flask):
    '''Wrapper around the Flask class used to store additional information.'''
    def __init__(self, *args, **kwargs):
        super(MTFGRServer, self).__init__(*args, **kwargs)
        # Simulation state, initialised from the config module and shared by
        # every request handler below.
        self.ring = Ring(config.n_nodes)
        self.agents_ips = config.agents_ips
        self.agents = dict()
        self.malicious_ip = config.malicious_ip
        self.oriented = config.oriented
        self.started = False
# instance of the web application
# (module-level so the @app.route decorators below can register their views)
app = MTFGRServer(__name__)
# auxiliary functions
def _reset():
    """Reset the global variables by parsing again the config file."""
    import config
    global log
    app.ring = Ring(config.n_nodes)
    app.agents = {ip: Agent(ip) for ip in config.agents_ips}
    app.malicious_ip = config.malicious_ip
    # The malicious agent is tracked in the same dict, flagged by its state.
    app.agents[app.malicious_ip] = Agent(app.malicious_ip, state='malicious')
    app.oriented = config.oriented
    app.started = False
    app.ring.random_place_agents()
    # NOTE(review): the previous log handle is never closed before being
    # replaced, so repeated resets leak file descriptors.
    log = open('/tmp/ev3.log', 'a')
    log.write('\n\nIIIIIIIIIINNNNNNNNNIIIIIIIIIIITTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\n')
# views
def _communicate_start():
    """Instruct each bot to start.

    Opens (and immediately closes) a TCP connection to every honest agent in
    reverse order, then to the malicious one; the connection itself is the
    "go" signal, since the payload send is commented out.

    Raises:
        OSError: if any agent cannot be reached (handled by the caller).
    """
    port = 31337  # control port the agent bots listen on
    for ip in app.agents_ips[::-1] + [app.malicious_ip]:
        # Context manager guarantees the socket is closed even if connect()
        # raises; the original leaked the fd on a failed connect.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((ip, port))
            # s.sendall(b'Go!\n')
@app.route('/start')
def start():
    """Mark the simulation as started and signal every agent to begin."""
    app.started = True
    try:
        _communicate_start()
    except Exception:
        # Best effort: an unreachable agent must not break the web UI.
        pass
    return redirect(url_for('index'))
@app.route('/reset')
def reset():
    """Re-initialise the ring, the agents and the log, then go back home."""
    _reset()
    return redirect(url_for('index'))
@app.route('/status')
def global_status():
    """Get the whole ring status."""
    # dump() maps node labels to the agent tuples currently on each node
    return jsonify(**app.ring.dump())
@app.route('/get/<agent_ip>')
def get_status(agent_ip):
    """Get the list of agents in the current node."""
    agent = app.agents[agent_ip]
    # TODO (translated from Italian "aggiungere blocked" = "add blocked");
    # note that 'blocked' is in fact already included in the response below.
    return jsonify(agents=[app.agents[ip].state for ip in app.ring.get_node(agent.node).agents if ip != agent_ip],
                   blocked=app.ring.blocked(agent))
@app.route('/set/<agent_ip>', methods=['GET'])
def set_status(agent_ip):
    """Update an agent's state/direction and advance it one node if possible.

    Query parameters:
        turned:  '1' if the agent reversed its direction this step.
        state:   the agent's new state string.
        stopped: '1' if the agent chose not to move this step.

    Returns JSON ``{"blocked": bool}`` telling the agent whether it was
    blocked by the ring.
    """
    global log
    turned = request.args.get('turned') == '1'
    state = request.args.get('state')
    stopped = request.args.get('stopped') == '1'
    # logging: request parameters plus ring state before and after the move
    sss = '\n\n[Request] {} - ip: {}, turned: {}, state: {}, stopped: {}\n'.format(time.time(), agent_ip, turned, state, stopped)
    log.write(sss)
    log.write('[Status pre]\n')
    log.write(str(app.ring.dump()))
    agent = app.agents[agent_ip]
    agent.state = state
    agent.cw = agent.cw if not turned else not agent.cw  # flip direction on turn
    blocked = app.ring.blocked(agent)
    if not blocked and not stopped:
        # advance to the next node if not blocked
        node = app.ring.get_node(agent.node)
        next_node = app.ring.next(agent)
        agent.node = next_node.label
        node.agents.remove(agent_ip)
        next_node.add_agent(agent_ip)
    log.write('\n[Status post]\n')
    log.write(str(app.ring.dump()))
    return jsonify(blocked=blocked)
@app.route('/')
def index():
    """Render the main page, showing whether the simulation has started."""
    return render_template('base.html', started=app.started)
def main():
    """Run the development web server, reachable from any interface."""
    app.run(host='0.0.0.0', debug=config.debug)


if __name__ == '__main__':
    main()
|
secgroup/MTFGatheRing
|
code/web.py
|
Python
|
mit
| 7,006
|
import json
import logging
from math import ceil
import os
import click
import cligj
from .helpers import resolve_inout
from . import options
import rasterio
from rasterio.errors import CRSError
from rasterio.transform import Affine
from rasterio.coords import disjoint_bounds
logger = logging.getLogger('rio')


# Common options used below

# Unlike the version in cligj, this one doesn't require values.
# Positional arguments: one or more input paths followed by the output path;
# resolve_inout() below splits them apart.
files_inout_arg = click.argument(
    'files',
    nargs=-1,
    type=click.Path(resolve_path=True),
    metavar="INPUTS... OUTPUT")
@click.command(short_help='Rasterize features.')
@files_inout_arg
@options.output_opt
@cligj.format_opt
@options.like_file_opt
@options.bounds_opt
@options.dimensions_opt
@options.resolution_opt
@click.option('--src-crs', '--src_crs', 'src_crs', default=None,
              help='Source coordinate reference system. Limited to EPSG '
                   'codes for now. Used as output coordinate system if output '
                   'does not exist or --like option is not used. '
                   'Default: EPSG:4326')
@options.all_touched_opt
@click.option('--default-value', '--default_value', 'default_value',
              type=float, default=1, help='Default value for rasterized pixels')
@click.option('--fill', type=float, default=0,
              help='Fill value for all pixels not overlapping features. Will '
                   'be evaluated as NoData pixels for output. Default: 0')
@click.option('--property', 'prop', type=str, default=None, help='Property in '
              'GeoJSON features to use for rasterized values. Any features '
              'that lack this property will be given --default_value instead.')
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def rasterize(
        ctx,
        files,
        output,
        driver,
        like,
        bounds,
        dimensions,
        res,
        src_crs,
        all_touched,
        default_value,
        fill,
        prop,
        force_overwrite,
        creation_options):
    """Rasterize GeoJSON into a new or existing raster.

    If the output raster exists, rio-rasterize will rasterize feature values
    into all bands of that raster. The GeoJSON is assumed to be in the same
    coordinate reference system as the output unless --src-crs is provided.

    --default_value or property values when using --property must be using a
    data type valid for the data type of that raster.

    If a template raster is provided using the --like option, the affine
    transform and data type from that raster will be used to create the output.
    Only a single band will be output.

    The GeoJSON is assumed to be in the same coordinate reference system unless
    --src-crs is provided.

    --default_value or property values when using --property must be using a
    data type valid for the data type of that raster.

    --driver, --bounds, --dimensions, and --res are ignored when output exists
    or --like raster is provided

    If the output does not exist and --like raster is not provided, the input
    GeoJSON will be used to determine the bounds of the output unless
    provided using --bounds.

    --dimensions or --res are required in this case.

    If --res is provided, the bottom and right coordinates of bounds are
    ignored.

    Note:
    The GeoJSON is not projected to match the coordinate reference system
    of the output or --like rasters at this time. This functionality may be
    added in the future.
    """
    # Lazy imports keep plain `rio --help` startup fast.
    from rasterio.crs import CRS
    from rasterio.features import rasterize
    from rasterio.features import bounds as calculate_bounds

    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1

    # Split the positional FILES into (output, remaining inputs).
    output, files = resolve_inout(
        files=files, output=output, force_overwrite=force_overwrite)

    bad_param = click.BadParameter('invalid CRS. Must be an EPSG code.',
                                   ctx, param=src_crs, param_hint='--src_crs')
    has_src_crs = src_crs is not None
    try:
        src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string('EPSG:4326')
    except CRSError:
        raise bad_param

    # If values are actually meant to be integers, we need to cast them
    # as such or rasterize creates floating point outputs
    if default_value == int(default_value):
        default_value = int(default_value)
    if fill == int(fill):
        fill = int(fill)

    with rasterio.Env(CPL_DEBUG=verbosity > 2):

        def feature_value(feature):
            # Value burned for this feature: its --property value if present,
            # otherwise --default_value.
            if prop and 'properties' in feature:
                return feature['properties'].get(prop, default_value)
            return default_value

        # Read GeoJSON from the first input file, or stdin if none given.
        with click.open_file(files.pop(0) if files else '-') as gj_f:
            geojson = json.loads(gj_f.read())

        # Accept either a FeatureCollection or a single Feature/geometry.
        if 'features' in geojson:
            geometries = []
            for f in geojson['features']:
                geometries.append((f['geometry'], feature_value(f)))
        elif 'geometry' in geojson:
            geometries = ((geojson['geometry'], feature_value(geojson)), )
        else:
            # NOTE(review): `param=input` passes the *builtin* input function;
            # presumably the 'input' parameter name was meant — confirm.
            raise click.BadParameter('Invalid GeoJSON', param=input,
                                     param_hint='input')

        geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))

        if os.path.exists(output):
            # Existing raster: burn features into every band in place.
            with rasterio.open(output, 'r+') as out:
                if has_src_crs and src_crs != out.crs:
                    raise click.BadParameter('GeoJSON does not match crs of '
                                             'existing output raster',
                                             param='input', param_hint='input')

                if disjoint_bounds(geojson_bounds, out.bounds):
                    click.echo("GeoJSON outside bounds of existing output "
                               "raster. Are they in different coordinate "
                               "reference systems?",
                               err=True)

                meta = out.meta.copy()

                result = rasterize(
                    geometries,
                    out_shape=(meta['height'], meta['width']),
                    # 'affine' is the legacy meta key; fall back to 'transform'
                    transform=meta.get('affine', meta['transform']),
                    all_touched=all_touched,
                    dtype=meta.get('dtype', None),
                    default_value=default_value,
                    fill=fill)

                for bidx in range(1, meta['count'] + 1):
                    data = out.read(bidx, masked=True)
                    # Burn in any non-fill pixels, and update mask accordingly
                    ne = result != fill
                    data[ne] = result[ne]
                    data.mask[ne] = False
                    out.write(data, indexes=bidx)

        else:
            if like is not None:
                # Use the template raster for CRS, transform and dtype.
                template_ds = rasterio.open(like)

                if has_src_crs and src_crs != template_ds.crs:
                    raise click.BadParameter('GeoJSON does not match crs of '
                                             '--like raster',
                                             param='input', param_hint='input')

                if disjoint_bounds(geojson_bounds, template_ds.bounds):
                    click.echo("GeoJSON outside bounds of --like raster. "
                               "Are they in different coordinate reference "
                               "systems?",
                               err=True)

                kwargs = template_ds.meta.copy()
                kwargs['count'] = 1

                # DEPRECATED
                # upgrade transform to affine object or we may get an invalid
                # transform set on output
                kwargs['transform'] = template_ds.affine

                template_ds.close()

            else:
                # Derive an output grid from the GeoJSON bbox (or --bounds)
                # plus either --dimensions or --res.
                bounds = bounds or geojson_bounds

                if src_crs.is_geographic:
                    if (bounds[0] < -180 or bounds[2] > 180 or
                            bounds[1] < -80 or bounds[3] > 80):
                        raise click.BadParameter(
                            "Bounds are beyond the valid extent for "
                            "EPSG:4326.",
                            ctx, param=bounds, param_hint='--bounds')

                if dimensions:
                    # pixel size follows from fixed width/height
                    width, height = dimensions
                    res = (
                        (bounds[2] - bounds[0]) / float(width),
                        (bounds[3] - bounds[1]) / float(height)
                    )

                else:
                    if not res:
                        raise click.BadParameter(
                            'pixel dimensions are required',
                            ctx, param=res, param_hint='--res')
                    elif len(res) == 1:
                        # a single --res value means square pixels
                        res = (res[0], res[0])

                    # always produce at least a 1x1 raster
                    width = max(int(ceil((bounds[2] - bounds[0]) /
                                float(res[0]))), 1)
                    height = max(int(ceil((bounds[3] - bounds[1]) /
                                 float(res[1]))), 1)

                kwargs = {
                    'count': 1,
                    'crs': src_crs,
                    'width': width,
                    'height': height,
                    # north-up transform anchored at the top-left corner
                    'transform': Affine(res[0], 0, bounds[0], 0, -res[1],
                                        bounds[3]),
                    'driver': driver
                }
                kwargs.update(**creation_options)

            result = rasterize(
                geometries,
                out_shape=(kwargs['height'], kwargs['width']),
                transform=kwargs.get('affine', kwargs['transform']),
                all_touched=all_touched,
                dtype=kwargs.get('dtype', None),
                default_value=default_value,
                fill=fill)

            if 'dtype' not in kwargs:
                kwargs['dtype'] = result.dtype

            # pixels never touched by a feature are NoData
            kwargs['nodata'] = fill

            with rasterio.open(output, 'w', **kwargs) as out:
                out.write(result, indexes=1)
|
ryfeus/lambda-packs
|
Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/rio/rasterize.py
|
Python
|
mit
| 10,025
|
# My computer was failing to recognize wifi networks after being woken up from sleep so this uses the network manager command
# line tool to force my computer to recognize the network I type in to the terminal.

# NOTE(review): raw_input means this script targets Python 2; on Python 3 it
# would need to be input().
import subprocess

network_name = raw_input("What is the name of your network? ")

# 'nmcli c up id <name>' asks NetworkManager to bring the named connection up;
# check_call raises CalledProcessError if nmcli exits non-zero.
subprocess.check_call(['nmcli', 'c', 'up', 'id', network_name])
|
caryben/Ubuntu-bug-fixes
|
hidden_network_workaround.py
|
Python
|
mit
| 357
|
#Evaluate semantic space against MEN dataset
import sys
import utils
from scipy import stats
import numpy as np
from math import sqrt
#Note: this is scipy's spearman, without tie adjustment
def spearman(x, y):
    """Spearman rank correlation of *x* and *y* (scipy's, no tie adjustment)."""
    rho, _pvalue = stats.spearmanr(x, y)
    return rho
def readMEN(annotation_file):
    """Parse a MEN annotation file.

    Each line has the whitespace-separated form ``word1 word2 score``.

    Returns:
        (pairs, humans): list of (word1, word2) tuples and the parallel list
        of human similarity scores as floats.

    Raises:
        IOError/OSError: if the file cannot be opened.
        IndexError/ValueError: on a malformed line.
    """
    pairs = []
    humans = []
    # 'with' guarantees the handle is closed even if a line is malformed;
    # the original leaked the handle on a parse error.
    with open(annotation_file, 'r') as f:
        for line in f:
            items = line.split()  # split() discards the trailing newline too
            pairs.append((items[0], items[1]))
            humans.append(float(items[2]))
    return pairs, humans
def compute_men_spearman(dm_dict, annotation_file):
    """Evaluate a semantic space against the MEN dataset.

    For every MEN word pair whose both words are present in *dm_dict*,
    compare the cosine similarity of the two vectors with the human score.

    Args:
        dm_dict: mapping word -> vector (as accepted by
            utils.cosine_similarity).
        annotation_file: path to the MEN annotation file.

    Returns:
        (rho, count): Spearman correlation between system and human scores,
        and the number of pairs actually covered by the space.
    """
    pairs, humans = readMEN(annotation_file)
    system_actual = []
    human_actual = []
    # iterate the word pairs and their human scores in lockstep
    for (a, b), human in zip(pairs, humans):
        if a in dm_dict and b in dm_dict:
            system_actual.append(utils.cosine_similarity(dm_dict[a], dm_dict[b]))
            human_actual.append(human)
    sp = spearman(human_actual, system_actual)
    # len(human_actual) equals the covered-pair count the original tracked
    return sp, len(human_actual)
|
minimalparts/Tutorials
|
FruitFly/MEN.py
|
Python
|
mit
| 989
|
import unittest
from katas.beta.plus_1_array import up_array
class UpArrayTestCase(unittest.TestCase):
    """Tests for up_array: increment a number given as a list of digits."""

    def test_equals(self):
        # carry propagates: 239 + 1 -> 240
        self.assertEqual(up_array([2, 3, 9]), [2, 4, 0])

    def test_equals_2(self):
        # no carry: last digit simply incremented
        self.assertEqual(up_array([4, 3, 2, 5]), [4, 3, 2, 6])

    def test_none(self):
        # invalid digit (negative) -> None
        self.assertIsNone(up_array([1, -9]))

    def test_none_2(self):
        # empty input -> None
        self.assertIsNone(up_array([]))
|
the-zebulan/CodeWars
|
tests/beta_tests/test_plus_1_array.py
|
Python
|
mit
| 422
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
from base64 import b64encode, b64decode
from zope.interface import implementer
import twisted.internet.protocol
from twisted.internet.defer import maybeDeferred
from twisted.internet.interfaces import ITransport
from twisted.internet.error import ConnectionDone, ConnectionAborted, \
ConnectionLost
from autobahn.wamp import websocket
from autobahn.websocket import protocol
from autobahn.websocket import http
from autobahn.twisted.util import peer2str
from autobahn.logger import make_logger
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept, \
PerMessageDeflateResponse, \
PerMessageDeflateResponseAccept
__all__ = (
'WebSocketAdapterProtocol',
'WebSocketServerProtocol',
'WebSocketClientProtocol',
'WebSocketAdapterFactory',
'WebSocketServerFactory',
'WebSocketClientFactory',
'WrappingWebSocketAdapter',
'WrappingWebSocketServerProtocol',
'WrappingWebSocketClientProtocol',
'WrappingWebSocketServerFactory',
'WrappingWebSocketClientFactory',
'listenWS',
'connectWS',
'WampWebSocketServerProtocol',
'WampWebSocketServerFactory',
'WampWebSocketClientProtocol',
'WampWebSocketClientFactory',
)
class WebSocketAdapterProtocol(twisted.internet.protocol.Protocol):
    """
    Adapter class for Twisted WebSocket client and server protocols.
    """

    # human-readable peer identifier; filled in by connectionMade()
    peer = '<never connected>'

    def connectionMade(self):
        # the peer we are connected to
        try:
            peer = self.transport.getPeer()
        except AttributeError:
            # ProcessProtocols lack getPeer()
            self.peer = "process {}".format(self.transport.pid)
        else:
            self.peer = peer2str(peer)

        self._connectionMade()

        # Set "Nagle"
        try:
            self.transport.setTcpNoDelay(self.tcpNoDelay)
        except:  # don't touch this! does not work: AttributeError, OSError
            # eg Unix Domain sockets throw Errno 22 on this
            pass

    def connectionLost(self, reason):
        # Classify the disconnect cause and log it at a suitable level
        # before forwarding to the framing layer via _connectionLost().
        if isinstance(reason.value, ConnectionDone):
            self.factory.log.debug("Connection to/from {peer} was closed cleanly",
                                   peer=self.peer)

        elif isinstance(reason.value, ConnectionAborted):
            self.factory.log.debug("Connection to/from {peer} was aborted locally",
                                   peer=self.peer)

        elif isinstance(reason.value, ConnectionLost):
            # The following is ridiculous, but the treatment of reason.value.args
            # across py2/3 and tx and over various corner cases is deeply fucked up.
            if hasattr(reason.value, 'message'):
                message = reason.value.message
            elif hasattr(reason.value, 'args') and type(reason.value.args) == tuple and len(reason.value.args) > 0:
                message = reason.value.args[0]
            else:
                message = None
            if message:
                self.factory.log.debug("Connection to/from {peer} was lost in a non-clean fashion: {message}",
                                       peer=self.peer, message=message)
            else:
                self.factory.log.debug("Connection to/from {peer} was lost in a non-clean fashion",
                                       peer=self.peer)

        # at least: FileDescriptorOverrun, ConnectionFdescWentAway - but maybe others as well?
        else:
            self.factory.log.info("Connection to/from {peer} lost ({error_type}): {error})",
                                  peer=self.peer, error_type=type(reason.value), error=reason.value)

        self._connectionLost(reason)

    def dataReceived(self, data):
        # hand raw bytes to the WebSocket framing layer
        self._dataReceived(data)

    def _closeConnection(self, abort=False):
        if abort and hasattr(self.transport, 'abortConnection'):
            self.transport.abortConnection()
        else:
            # e.g. ProcessProtocol lacks abortConnection()
            self.transport.loseConnection()

    # The _on*() methods below are thin hooks: the framing layer invokes
    # them, and they forward to the user-overridable on*() callbacks.

    def _onOpen(self):
        self.onOpen()

    def _onMessageBegin(self, isBinary):
        self.onMessageBegin(isBinary)

    def _onMessageFrameBegin(self, length):
        self.onMessageFrameBegin(length)

    def _onMessageFrameData(self, payload):
        self.onMessageFrameData(payload)

    def _onMessageFrameEnd(self):
        self.onMessageFrameEnd()

    def _onMessageFrame(self, payload):
        self.onMessageFrame(payload)

    def _onMessageEnd(self):
        self.onMessageEnd()

    def _onMessage(self, payload, isBinary):
        self.onMessage(payload, isBinary)

    def _onPing(self, payload):
        self.onPing(payload)

    def _onPong(self, payload):
        self.onPong(payload)

    def _onClose(self, wasClean, code, reason):
        self.onClose(wasClean, code, reason)

    def registerProducer(self, producer, streaming):
        """
        Register a Twisted producer with this protocol.

        Modes: Hybi, Hixie

        :param producer: A Twisted push or pull producer.
        :type producer: object
        :param streaming: Producer type.
        :type streaming: bool
        """
        self.transport.registerProducer(producer, streaming)
class WebSocketServerProtocol(WebSocketAdapterProtocol, protocol.WebSocketServerProtocol):
    """
    Base class for Twisted-based WebSocket server protocols.
    """

    def _onConnect(self, request):
        # onConnect() will return the selected subprotocol or None
        # or a pair (protocol, headers) or raise an HttpException
        res = maybeDeferred(self.onConnect, request)

        res.addCallback(self.succeedHandshake)

        def forwardError(failure):
            # HttpException carries a deliberate status; anything else is an
            # unexpected error in user code and becomes a 500.
            if failure.check(http.HttpException):
                return self.failHandshake(failure.value.reason, failure.value.code)
            else:
                if self.debug:
                    self.factory._log("Unexpected exception in onConnect ['%s']" % failure.value)
                return self.failHandshake(http.INTERNAL_SERVER_ERROR[1], http.INTERNAL_SERVER_ERROR[0])

        res.addErrback(forwardError)
class WebSocketClientProtocol(WebSocketAdapterProtocol, protocol.WebSocketClientProtocol):
    """
    Base class for Twisted-based WebSocket client protocols.
    """

    def _onConnect(self, response):
        # forward the server's handshake response to user code
        self.onConnect(response)
class WebSocketAdapterFactory(object):
    """
    Adapter class for Twisted-based WebSocket client and server factories.
    """
    # logger shared by all factory subclasses
    log = make_logger("twisted")
# we deliberately subclass t.i.p.Factory, not t.i.p.ServerFactory. See https://github.com/tavendo/AutobahnPython/issues/389
class WebSocketServerFactory(WebSocketAdapterFactory, protocol.WebSocketServerFactory, twisted.internet.protocol.Factory):
    """
    Base class for Twisted-based WebSocket server factories.
    """

    def __init__(self, *args, **kwargs):
        """
        In addition to all arguments to the constructor of
        :class:`autobahn.websocket.protocol.WebSocketServerFactory`,
        you can supply a `reactor` keyword argument to specify the
        Twisted reactor to be used.
        """
        # lazy import to avoid reactor install upon module import
        reactor = kwargs.pop('reactor', None)
        if reactor is None:
            from twisted.internet import reactor
        self.reactor = reactor

        protocol.WebSocketServerFactory.__init__(self, *args, **kwargs)
# we deliberately subclass t.i.p.Factory, not t.i.p.ClientFactory. See https://github.com/tavendo/AutobahnPython/issues/389
class WebSocketClientFactory(WebSocketAdapterFactory, protocol.WebSocketClientFactory, twisted.internet.protocol.Factory):
    """
    Base class for Twisted-based WebSocket client factories.
    """

    def __init__(self, *args, **kwargs):
        """
        In addition to all arguments to the constructor of
        :class:`autobahn.websocket.protocol.WebSocketClientFactory`,
        you can supply a `reactor` keyword argument to specify the
        Twisted reactor to be used.
        """
        # lazy import to avoid reactor install upon module import
        reactor = kwargs.pop('reactor', None)
        if reactor is None:
            from twisted.internet import reactor
        self.reactor = reactor

        protocol.WebSocketClientFactory.__init__(self, *args, **kwargs)
@implementer(ITransport)
class WrappingWebSocketAdapter(object):
    """
    An adapter for stream-based transport over WebSocket.

    This follows `websockify <https://github.com/kanaka/websockify>`_
    and should be compatible with that.

    It uses WebSocket subprotocol negotiation and supports the
    following WebSocket subprotocols:

      - ``binary`` (or a compatible subprotocol)
      - ``base64``

    Octets are either transmitted as the payload of WebSocket binary
    messages when using the ``binary`` subprotocol (or an alternative
    binary compatible subprotocol), or encoded with Base64 and then
    transmitted as the payload of WebSocket text messages when using
    the ``base64`` subprotocol.
    """

    def onConnect(self, requestOrResponse):
        # Negotiate either the 'binary' or the 'base64' WebSocket subprotocol
        if isinstance(requestOrResponse, protocol.ConnectionRequest):
            request = requestOrResponse
            for p in request.protocols:
                if p in self.factory._subprotocols:
                    self._binaryMode = (p != 'base64')
                    return p
            raise http.HttpException(http.NOT_ACCEPTABLE[0], "this server only speaks %s WebSocket subprotocols" % self.factory._subprotocols)
        elif isinstance(requestOrResponse, protocol.ConnectionResponse):
            response = requestOrResponse
            if response.protocol not in self.factory._subprotocols:
                self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, "this client only speaks %s WebSocket subprotocols" % self.factory._subprotocols)
            self._binaryMode = (response.protocol != 'base64')
        else:
            # should not arrive here
            raise Exception("logic error")

    def onOpen(self):
        # WebSocket is up: the wrapped stream protocol sees a made connection
        self._proto.connectionMade()

    def onMessage(self, payload, isBinary):
        if isBinary != self._binaryMode:
            self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_UNSUPPORTED_DATA, "message payload type does not match the negotiated subprotocol")
            return
        if not isBinary:
            try:
                payload = b64decode(payload)
            except Exception as e:
                self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, "message payload base64 decoding error: {0}".format(e))
                # BUGFIX: previously fell through and forwarded the raw,
                # undecoded payload to the wrapped protocol after failing
                # the connection.
                return
        # print("forwarding payload: {0}".format(binascii.hexlify(payload)))
        self._proto.dataReceived(payload)

    # noinspection PyUnusedLocal
    def onClose(self, wasClean, code, reason):
        # WebSocket closed: propagate as a connection loss to the wrapped protocol
        self._proto.connectionLost(None)

    def write(self, data):
        # print("sending payload: {0}".format(binascii.hexlify(data)))
        # part of ITransport
        assert(type(data) == bytes)
        if self._binaryMode:
            self.sendMessage(data, isBinary=True)
        else:
            data = b64encode(data)
            self.sendMessage(data, isBinary=False)

    def writeSequence(self, data):
        # part of ITransport
        for d in data:
            self.write(d)

    def loseConnection(self):
        # part of ITransport
        self.sendClose()

    def getPeer(self):
        # part of ITransport
        return self.transport.getPeer()

    def getHost(self):
        # part of ITransport
        return self.transport.getHost()
# Concrete wrapping protocols: combine the byte-stream adapter with the
# real server/client WebSocket protocol implementations.
class WrappingWebSocketServerProtocol(WrappingWebSocketAdapter, WebSocketServerProtocol):
    """
    Server protocol for stream-based transport over WebSocket.
    """
class WrappingWebSocketClientProtocol(WrappingWebSocketAdapter, WebSocketClientProtocol):
    """
    Client protocol for stream-based transport over WebSocket.
    """
class WrappingWebSocketServerFactory(WebSocketServerFactory):
    """
    Wrapping server factory for stream-based transport over WebSocket.
    """

    def __init__(self,
                 factory,
                 url,
                 reactor=None,
                 enableCompression=True,
                 autoFragmentSize=0,
                 subprotocol=None,
                 debug=False):
        """
        :param factory: Stream-based factory to be wrapped.
        :type factory: A subclass of ``twisted.internet.protocol.Factory``
        :param url: WebSocket URL of the server this server factory will work for.
        :type url: unicode
        """
        self._factory = factory
        # 'binary' and 'base64' are always offered; an extra compatible
        # subprotocol may be appended by the caller
        self._subprotocols = ['binary', 'base64']
        if subprotocol:
            self._subprotocols.append(subprotocol)

        WebSocketServerFactory.__init__(self,
                                        url=url,
                                        reactor=reactor,
                                        protocols=self._subprotocols,
                                        debug=debug)

        # automatically fragment outgoing traffic into WebSocket frames
        # of this size
        self.setProtocolOptions(autoFragmentSize=autoFragmentSize)

        # play nice and perform WS closing handshake
        self.setProtocolOptions(failByDrop=False)

        if enableCompression:
            # Enable WebSocket extension "permessage-deflate".

            # Function to accept offers from the client ..
            def accept(offers):
                for offer in offers:
                    if isinstance(offer, PerMessageDeflateOffer):
                        return PerMessageDeflateOfferAccept(offer)

            self.setProtocolOptions(perMessageCompressionAccept=accept)

    def buildProtocol(self, addr):
        # Wire the wrapped stream protocol to the WebSocket protocol so that
        # the WebSocket connection acts as the stream protocol's transport.
        proto = WrappingWebSocketServerProtocol()
        proto.factory = self
        proto._proto = self._factory.buildProtocol(addr)
        proto._proto.transport = proto
        return proto

    def startFactory(self):
        # start the wrapped factory alongside the WebSocket one
        self._factory.startFactory()
        WebSocketServerFactory.startFactory(self)

    def stopFactory(self):
        self._factory.stopFactory()
        WebSocketServerFactory.stopFactory(self)
class WrappingWebSocketClientFactory(WebSocketClientFactory):
    """
    Wrapping client factory for stream-based transport over WebSocket.
    """

    def __init__(self,
                 factory,
                 url,
                 reactor=None,
                 enableCompression=True,
                 autoFragmentSize=0,
                 subprotocol=None,
                 debug=False):
        """
        :param factory: Stream-based factory to be wrapped.
        :type factory: A subclass of ``twisted.internet.protocol.Factory``
        :param url: WebSocket URL of the server this client factory will connect to.
        :type url: unicode
        """
        self._factory = factory
        # 'binary' and 'base64' are always offered; an extra compatible
        # subprotocol may be appended by the caller
        self._subprotocols = ['binary', 'base64']
        if subprotocol:
            self._subprotocols.append(subprotocol)

        WebSocketClientFactory.__init__(self,
                                        url=url,
                                        reactor=reactor,
                                        protocols=self._subprotocols,
                                        debug=debug)

        # automatically fragment outgoing traffic into WebSocket frames
        # of this size
        self.setProtocolOptions(autoFragmentSize=autoFragmentSize)

        # play nice and perform WS closing handshake
        self.setProtocolOptions(failByDrop=False)

        if enableCompression:
            # Enable WebSocket extension "permessage-deflate".

            # The extensions offered to the server ..
            offers = [PerMessageDeflateOffer()]
            self.setProtocolOptions(perMessageCompressionOffers=offers)

            # Function to accept responses from the server ..
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            self.setProtocolOptions(perMessageCompressionAccept=accept)

    def buildProtocol(self, addr):
        # Wire the wrapped stream protocol to the WebSocket protocol so that
        # the WebSocket connection acts as the stream protocol's transport.
        proto = WrappingWebSocketClientProtocol()
        proto.factory = self
        proto._proto = self._factory.buildProtocol(addr)
        proto._proto.transport = proto
        return proto
def connectWS(factory, contextFactory=None, timeout=30, bindAddress=None):
    """
    Establish WebSocket connection to a server. The connection parameters like target
    host, port, resource and others are provided via the factory.

    :param factory: The WebSocket protocol factory to be used for creating client protocol instances.
    :type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
    :param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
    :type contextFactory: A `twisted.internet.ssl.ClientContextFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.ssl.ClientContextFactory.html>`_ instance.
    :param timeout: Number of seconds to wait before assuming the connection has failed.
    :type timeout: int
    :param bindAddress: A (host, port) tuple of local address to bind to, or None.
    :type bindAddress: tuple

    :returns: The connector.
    :rtype: An object which implements `twisted.interface.IConnector <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IConnector.html>`_.
    """
    # lazy import to avoid reactor install upon module import
    if hasattr(factory, 'reactor'):
        reactor = factory.reactor
    else:
        from twisted.internet import reactor

    if factory.proxy is not None:
        # explicit HTTP proxy: connect to the proxy, not the target host
        if factory.isSecure:
            raise Exception("WSS over explicit proxies not implemented")
        else:
            conn = reactor.connectTCP(factory.proxy['host'], factory.proxy['port'], factory, timeout, bindAddress)
    else:
        if factory.isSecure:
            if contextFactory is None:
                # create default client SSL context factory when none given
                from twisted.internet import ssl
                contextFactory = ssl.ClientContextFactory()
            conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
        else:
            conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
    return conn
def listenWS(factory, contextFactory=None, backlog=50, interface=''):
    """
    Listen for incoming WebSocket connections from clients. The connection parameters like
    listening port and others are provided via the factory.

    :param factory: The WebSocket protocol factory to be used for creating server protocol instances.
    :type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
    :param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
    :type contextFactory: A twisted.internet.ssl.ContextFactory.
    :param backlog: Size of the listen queue.
    :type backlog: int
    :param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
    :type interface: str

    :returns: The listening port.
    :rtype: An object that implements `twisted.interface.IListeningPort <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html>`_.
    """
    # lazy import to avoid reactor install upon module import
    if hasattr(factory, 'reactor'):
        reactor = factory.reactor
    else:
        from twisted.internet import reactor

    if factory.isSecure:
        # wss:// requires a context factory; there is no sane default server cert
        if contextFactory is None:
            raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
        listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
    else:
        listener = reactor.listenTCP(factory.port, factory, backlog, interface)
    return listener
# WAMP-over-WebSocket glue: mix the generic WAMP transports with the
# Twisted-based WebSocket protocols/factories defined above.
class WampWebSocketServerProtocol(websocket.WampWebSocketServerProtocol, WebSocketServerProtocol):
    """
    Base class for Twisted-based WAMP-over-WebSocket server protocols.
    """
class WampWebSocketServerFactory(websocket.WampWebSocketServerFactory, WebSocketServerFactory):
    """
    Base class for Twisted-based WAMP-over-WebSocket server factories.
    """

    protocol = WampWebSocketServerProtocol

    def __init__(self, factory, *args, **kwargs):
        # peel off the WAMP-level kwargs before delegating the rest to the
        # plain WebSocket factory constructor
        serializers = kwargs.pop('serializers', None)
        debug_wamp = kwargs.pop('debug_wamp', False)

        websocket.WampWebSocketServerFactory.__init__(self, factory, serializers, debug_wamp=debug_wamp)

        kwargs['protocols'] = self._protocols

        # noinspection PyCallByClass
        WebSocketServerFactory.__init__(self, *args, **kwargs)
class WampWebSocketClientProtocol(websocket.WampWebSocketClientProtocol, WebSocketClientProtocol):
    """
    Base class for Twisted-based WAMP-over-WebSocket client protocols.
    """
class WampWebSocketClientFactory(websocket.WampWebSocketClientFactory, WebSocketClientFactory):
    """
    Base class for Twisted-based WAMP-over-WebSocket client factories.
    """

    protocol = WampWebSocketClientProtocol

    def __init__(self, factory, *args, **kwargs):
        # peel off the WAMP-level kwargs before delegating the rest to the
        # plain WebSocket factory constructor
        serializers = kwargs.pop('serializers', None)
        debug_wamp = kwargs.pop('debug_wamp', False)

        websocket.WampWebSocketClientFactory.__init__(self, factory, serializers, debug_wamp=debug_wamp)

        kwargs['protocols'] = self._protocols

        WebSocketClientFactory.__init__(self, *args, **kwargs)
|
inirudebwoy/AutobahnPython
|
autobahn/twisted/websocket.py
|
Python
|
mit
| 23,106
|
from flake8_quotes import QuoteChecker
import os
import subprocess
from unittest import TestCase
class TestChecks(TestCase):
    """Checks that are independent of the preferred quote style."""

    def test_get_noqa_lines(self):
        # the expected [2] says line 2 of data/no_qa.py carries a noqa marker
        checker = QuoteChecker(None, filename=get_absolute_path('data/no_qa.py'))
        self.assertEqual(checker.get_noqa_lines(checker.get_file_contents()), [2])
class TestFlake8Stdin(TestCase):
    """End-to-end check: run the real flake8 binary with the file on stdin."""

    def test_stdin(self):
        """Test using stdin."""
        filepath = get_absolute_path('data/doubles.py')
        with open(filepath, 'rb') as f:
            p = subprocess.Popen(['flake8', '--select=Q', '-'], stdin=f,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()

        stdout_lines = stdout.splitlines()
        self.assertEqual(stderr, b'')
        self.assertEqual(len(stdout_lines), 3)
        # column may be reported as 24 or 25 depending on the flake8 version
        self.assertRegex(
            stdout_lines[0],
            b'stdin:1:(24|25): Q000 Double quotes found but single quotes preferred')
        self.assertRegex(
            stdout_lines[1],
            b'stdin:2:(24|25): Q000 Double quotes found but single quotes preferred')
        self.assertRegex(
            stdout_lines[2],
            b'stdin:3:(24|25): Q000 Double quotes found but single quotes preferred')
class DoublesTestChecks(TestCase):
    """Checker behaviour when single quotes are the configured preference."""

    def setUp(self):
        # configure the checker to prefer single quotes (so doubles are flagged)
        class DoublesOptions():
            inline_quotes = "'"
            multiline_quotes = "'"
        QuoteChecker.parse_options(DoublesOptions)

    def test_multiline_string(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_multiline_string.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
        ])

    def test_multiline_string_using_lines(self):
        # same fixture, but fed in as pre-read lines instead of a filename
        with open(get_absolute_path('data/doubles_multiline_string.py')) as f:
            lines = f.readlines()
        doubles_checker = QuoteChecker(None, lines=lines)
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
        ])

    def test_wrapped(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])

    def test_doubles(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Double quotes found but single quotes preferred'},
        ])

    def test_noqa_doubles(self):
        # NOTE(review): filename passed positionally here, keyword elsewhere —
        # confirm the checker's second positional parameter is the filename.
        checker = QuoteChecker(None, get_absolute_path('data/doubles_noqa.py'))
        self.assertEqual(list(checker.run()), [])

    def test_escapes(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
        ])

    def test_escapes_allowed(self):
        # with avoid_escape disabled, Q003 must not be reported
        class Options():
            inline_quotes = "'"
            avoid_escape = False
        QuoteChecker.parse_options(Options)

        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])
class DoublesAliasTestChecks(TestCase):
    """Same double-quote checks, configured via the 'single' option alias."""

    def setUp(self):
        options = type('DoublesAliasOptions', (),
                       {'inline_quotes': 'single', 'multiline_quotes': 'single'})
        QuoteChecker.parse_options(options)

    def test_doubles(self):
        wrapped = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
        self.assertEqual(list(wrapped.get_quotes_errors(wrapped.get_file_contents())), [])
        checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
        expected = [
            {'col': 24, 'line': n, 'message': 'Q000 Double quotes found but single quotes preferred'}
            for n in (1, 2, 3)
        ]
        self.assertEqual(list(checker.get_quotes_errors(checker.get_file_contents())), expected)
class SinglesTestChecks(TestCase):
    """Checks with double quotes configured as the preferred style."""

    def setUp(self):
        options = type('SinglesOptions', (), {'inline_quotes': '"', 'multiline_quotes': '"'})
        QuoteChecker.parse_options(options)

    def _errors_for(self, relpath):
        """Collect quote errors for the fixture at *relpath*."""
        checker = QuoteChecker(None, filename=get_absolute_path(relpath))
        return list(checker.get_quotes_errors(checker.get_file_contents()))

    def test_multiline_string(self):
        self.assertEqual(self._errors_for('data/singles_multiline_string.py'), [
            {'col': 4, 'line': 1, 'message': 'Q001 Single quote multiline found but double quotes preferred'},
        ])

    def test_wrapped(self):
        self.assertEqual(self._errors_for('data/singles_wrapped.py'), [])

    def test_singles(self):
        expected = [
            {'col': 24, 'line': n, 'message': 'Q000 Single quotes found but double quotes preferred'}
            for n in (1, 2, 3)
        ]
        self.assertEqual(self._errors_for('data/singles.py'), expected)

    def test_noqa_singles(self):
        checker = QuoteChecker(None, get_absolute_path('data/singles_noqa.py'))
        self.assertEqual(list(checker.run()), [])

    def test_escapes(self):
        self.assertEqual(self._errors_for('data/singles_escaped.py'), [
            {'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
        ])

    def test_escapes_allowed(self):
        options = type('Options', (), {'inline_quotes': '"', 'avoid_escape': False})
        QuoteChecker.parse_options(options)
        self.assertEqual(self._errors_for('data/singles_escaped.py'), [])
class SinglesAliasTestChecks(TestCase):
    """Same single-quote checks, configured via the 'double' option alias."""

    def setUp(self):
        options = type('SinglesAliasOptions', (),
                       {'inline_quotes': 'double', 'multiline_quotes': 'double'})
        QuoteChecker.parse_options(options)

    def test_singles(self):
        wrapped = QuoteChecker(None, filename=get_absolute_path('data/singles_wrapped.py'))
        self.assertEqual(list(wrapped.get_quotes_errors(wrapped.get_file_contents())), [])
        checker = QuoteChecker(None, filename=get_absolute_path('data/singles.py'))
        expected = [
            {'col': 24, 'line': n, 'message': 'Q000 Single quotes found but double quotes preferred'}
            for n in (1, 2, 3)
        ]
        self.assertEqual(list(checker.get_quotes_errors(checker.get_file_contents())), expected)
class MultilineTestChecks(TestCase):
    """Mixed configurations: inline and multiline preferences differ."""

    def _check(self, inline, multiline, line, message):
        """Configure the checker and assert exactly one multiline error."""
        options = type('Options', (), {'inline_quotes': inline, 'multiline_quotes': multiline})
        QuoteChecker.parse_options(options)
        checker = QuoteChecker(None, filename=get_absolute_path('data/multiline_string.py'))
        errors = list(checker.get_quotes_errors(checker.get_file_contents()))
        self.assertEqual(errors, [{'col': 4, 'line': line, 'message': message}])

    def test_singles(self):
        self._check("'", '"', 10,
                    'Q001 Single quote multiline found but double quotes preferred')

    def test_singles_alias(self):
        self._check('single', 'double', 10,
                    'Q001 Single quote multiline found but double quotes preferred')

    def test_doubles(self):
        self._check('"', "'", 1,
                    'Q001 Double quote multiline found but single quotes preferred')

    def test_doubles_alias(self):
        self._check('double', 'single', 1,
                    'Q001 Double quote multiline found but single quotes preferred')
def get_absolute_path(filepath):
    """Resolve *filepath* relative to the directory containing this module."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, filepath)
|
zheller/flake8-quotes
|
test/test_checks.py
|
Python
|
mit
| 10,373
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import xml.etree.ElementTree as ET
from pprint import pprint

# Default input file; main() may be pointed at any other XML/GPX file.
filename = 'GeoLogger.gpx'


def main(path=None):
    """Parse the GPX/XML file at *path* and pretty-print its root element.

    :param path: optional file path; when omitted, the module-level
        ``filename`` is used, preserving the original no-argument behavior.
    """
    if path is None:
        path = filename
    tree = ET.parse(path)
    root = tree.getroot()
    # Show the root tag, its attributes, and its direct text content.
    pprint(root.tag)
    pprint(root.attrib)
    pprint(root.findtext('.'))


if __name__ == "__main__":
    main()
|
TheShellLand/pies
|
v3/Libraries/xml/xml-parse.py
|
Python
|
mit
| 321
|
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/run/video1/00001.jpeg
data_dir/run/video1/00002.jpeg
data_dir/run/video1/00003.jpeg
...
data_dir/run/video2/00001.jpeg
data_dir/run/video2/00002.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 64 and 8 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
raw/image/001:
...
raw/image/nnn: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always'JPEG'
image/filename: string containing the basename of the image file
e.g. '00001.JPEG' or '00002.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'walk'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# Command-line flags controlling input/output locations, sharding, clip
# length, and threading for the TFRecord conversion below.
tf.app.flags.DEFINE_string('train_directory', '/tmp/dataset/train_directory',
                           'Training data directory')
# NOTE(review): validation_directory defaults to the *train* directory --
# confirm this is intentional.
tf.app.flags.DEFINE_string('validation_directory', '/tmp/dataset/train_directory',
                           'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/dataset/result',
                           'Output data directory')
tf.app.flags.DEFINE_string('label_file', '/tmp/dataset/label.txt', 'Labels file')
tf.app.flags.DEFINE_integer('train_shards', 64,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 8,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('sequence_length', 16,
                            'The length of one video clips ')
tf.app.flags.DEFINE_integer('num_threads', 4,
                            'Number of threads to preprocess the images.')
tf.app.flags.DEFINE_boolean('sequence_random', True,
                            'Determine whether to shuffle the image order or not.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
    """Wrapper for inserting int64 features into Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrapper for inserting a single bytes feature into an Example proto."""
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
def _convert_to_example(foldername, images_buffer, label, text, height, width):
    """Build an Example proto for an example.
    Args:
      foldername: string, path to an image file, e.g., '/training_data/walk/video1'
      images_buffer: list, containing string of JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      text: string, unique human-readable, e.g. 'dog'
      height: integer, image height in pixels
      width: integer, image width in pixels
    Returns:
      Example proto
    """
    colorspace = 'RGB'
    channels = 3
    image_format = 'JPEG'
    # create the feature data for the TFRecord example
    # One 'raw/image/NNN' feature per frame; zero-padded keys keep the
    # frames in order under lexicographic sort.
    images = {}
    for index, image in enumerate(images_buffer):
        images['raw/image/%03d' % index] = _bytes_feature(image)
    feature_dict = {
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(colorspace),
        'image/channels': _int64_feature(channels),
        'image/class/label': _int64_feature(label),
        'image/class/text': _bytes_feature(text),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(os.path.basename(foldername)),
    }
    feature_dict.update(images)
    # create the TFRecord Example
    example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities."""
    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()
        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
    def png_to_jpeg(self, image_data):
        # Re-encode a PNG byte string as JPEG (RGB, quality 100).
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})
    def decode_jpeg(self, image_data):
        # Decode a JPEG byte string into an H x W x 3 image array.
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _split(arr, size):
"""Split an arrary according to the size parameters, the last element of the
output array takes the last `size` elemnts of `arr`
Args:
arr: array, input array
size: the size used to split the array
Returns:
sub-array
Examples:
_split([1,2,3,4,5,6], 5) #=> [[1,2,3,4,5], [2,3,4,5,6]]
_split([1,2,3,4,5,6], 3) #=> [[1,2,3], [4,5,6]]
"""
arr_size = len(arr)
if arr_size < size:
raise ValueError('sequence length is too long, please set the length '
'smaller than the video length')
elif arr_size == size:
return arr
result = []
last_element = arr[-size:]
iter_num = arr_size//size
for i in range(iter_num):
pice = arr[:size]
result.append(pice)
arr = arr[size:]
# insert the last element
result.append(last_element)
return result
def _process_video(foldername, coder):
    """Process a single video file.
    Args:
      foldernames: string, path to a video folder e.g., '/path/to/video'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      videos_buffer: list, contains list of video with specific sequence length.
        These video is actually list of strings of JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    se_size = FLAGS.sequence_length
    # Read the image file.
    images_data = []
    # NOTE(review): tf.gfile.Glob does not guarantee lexicographic order;
    # frame ordering may need an explicit sorted() -- confirm with the data.
    filenames = tf.gfile.Glob(foldername + '/*')
    for filename in filenames:
        image_data = tf.gfile.FastGFile(filename, 'r').read()
        # Convert any PNG to JPEG's for consistency.
        if _is_png(filename):
            image_data = coder.png_to_jpeg(image_data)
        # Decode the RGB JPEG.
        image = coder.decode_jpeg(image_data)
        # Check that image converted to RGB
        assert len(image.shape) == 3
        height = image.shape[0]
        width = image.shape[1]
        assert image.shape[2] == 3
        # Add the image to the images data
        images_data.append(image_data)
    # NOTE(review): height/width are unbound if the folder contains no
    # images -- an empty folder would raise NameError here.
    videos_data = _split(images_data, se_size)
    return videos_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, foldernames,
                               texts, labels, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      foldernames: list of strings; each string is a path to a video file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = num_shards // num_threads
    # Evenly subdivide this thread's slice of the input across its shards.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            foldername = foldernames[i]
            label = labels[i]
            text = texts[i]
            # One source folder can yield several fixed-length clips; each
            # clip becomes its own Example record.
            videos_buffer, height, width = _process_video(foldername, coder)
            for video_buffer in videos_buffer:
                example = _convert_to_example(foldername, video_buffer, label,
                                              text, height, width)
                writer.write(example.SerializeToString())
                counter += 1
                shard_counter += 1
            if not counter % 1000:
                print('%s [thread %d]: Processed %d videos in thread batch.' %
                      (datetime.now(), thread_index, counter))
                sys.stdout.flush()
        # NOTE(review): the TFRecordWriter is never explicitly closed --
        # consider writer.close() before moving to the next shard.
        print('%s [thread %d]: Wrote %d videos' %
              (datetime.now(), thread_index, shard_counter))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d videos in total' %
          (datetime.now(), thread_index, counter))
    sys.stdout.flush()
def _process_image_files(name, foldernames, texts, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      foldernames: list of strings; each string is a path to a video folder
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    assert len(foldernames) == len(texts)
    assert len(foldernames) == len(labels)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # `int` instead of the deprecated alias `np.int` (removed in NumPy 1.24).
    spacing = np.linspace(0, len(foldernames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    # (The redundant second `threads = []` initialization was removed.)
    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, foldernames,
                texts, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
def _find_video_folders(data_dir, label_file):
    """Build a list of all video folders and labels in the data set.
    Args:
      data_dir: string, path to the root directory of video folders.
        Assumes that the video data set resides in JPEG files located in
        the following directory structure.
          data_dir/walk/video1/00001.JPEG
          data_dir/walk/video1/00002.JPEG
          ...
          data_dir/walk/video2/00001.jpg
          ...
        where 'walk' is the label associated with these images.
        number 1..n means that all the images in folder video1 belongs to one video
      label_file: string, path to the label file.
        The list of valid labels are held in this file. Assumes that the file
        contains entries as such:
          walk
          run
          play
        where each line corresponds to a label. We map each label contained in
        the file to an integer starting with the integer 0 corresponding to the
        label contained in the first line.
    Returns:
      folders: list of strings; each string is a path to an video folder.
      texts: list of strings; each string is the class, e.g. 'walk'
      labels: list of integer; each integer identifies the ground truth.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    unique_labels = [l.strip() for l in tf.gfile.FastGFile(
        label_file, 'r').readlines()]
    labels = []
    folders = []
    texts = []
    # Leave label index 0 empty as a background class.
    label_index = 1
    # Construct the list of video files and labels.
    for text in unique_labels:
        jpeg_file_path = '%s/%s/*' % (data_dir, text)
        matching_files = tf.gfile.Glob(jpeg_file_path)
        labels.extend([label_index] * len(matching_files))
        texts.extend([text] * len(matching_files))
        folders.extend(matching_files)
        if not label_index % 100:
            print('Finished finding files in %d of %d classes.' % (
                label_index, len(labels)))
        label_index += 1
    # Shuffle the ordering of all video folder in order to guarantee
    # random ordering of the videos with respect to label in the
    # saved TFRecord files. Make the randomization repeatable.
    if FLAGS.sequence_random:
        # list() is required: on Python 3, random.shuffle(range(...)) raises
        # TypeError because a range object does not support item assignment.
        shuffled_index = list(range(len(folders)))
        random.seed(12345)
        random.shuffle(shuffled_index)
        folders = [folders[i] for i in shuffled_index]
        texts = [texts[i] for i in shuffled_index]
        labels = [labels[i] for i in shuffled_index]
    print('Found %d video files across %d labels inside %s.' %
          (len(folders), len(unique_labels), data_dir))
    return folders, texts, labels
def _process_dataset(name, directory, num_shards, label_file):
    """Process a complete data set and save it as a TFRecord.
    Args:
      name: string, unique identifier specifying the data set.
      directory: string, root path to the data set.
      num_shards: integer number of shards for this data set.
      label_file: string, path to the labels file.
    """
    # Enumerate the input, then fan the conversion out across worker threads.
    folders, class_texts, class_labels = _find_video_folders(directory, label_file)
    _process_image_files(name, folders, class_texts, class_labels, num_shards)
def main(unused_argv):
    """Entry point: validate thread/shard flags, then convert the training set."""
    # Each thread writes an integer number of shards, so the shard counts
    # must divide evenly by the thread count.
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)
    # Run it!
    # Validation conversion is currently disabled; re-enable when needed.
    #_process_dataset('validation', FLAGS.validation_directory,
    # FLAGS.validation_shards, FLAGS.label_file)
    _process_dataset('train', FLAGS.train_directory,
                     FLAGS.train_shards, FLAGS.label_file)
if __name__ == '__main__':
    tf.app.run()
|
frankgu/tensorflow_video_rnn
|
data_preparation/convert_to_records.py
|
Python
|
mit
| 16,235
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class RegulatingControl(PowerSystemResource):
    """Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.
    """
    def __init__(self, mode="fixed", targetRange=0.0, discrete=False, targetValue=0.0, monitoredPhase="s12N", RegulatingCondEq=None, Terminal=None, RegulationSchedule=None, *args, **kw_args):
        """Initialises a new 'RegulatingControl' instance.
        @param mode: The regulating control mode presently available. This specifications allows for determining the kind of regualation without need for obtaining the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
        @param targetRange: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units of those appropriate for the mode.
        @param discrete: The regulation is performed in a discrete mode.
        @param targetValue: The target value specified for case input. This value can be used for the target value wihout the use of schedules. The value has the units appropriate to the mode attribute.
        @param monitoredPhase: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
        @param RegulatingCondEq: The equipment that participates in this regulating control scheme.
        @param Terminal: The terminal associated with this regulating control.
        @param RegulationSchedule: Schedule for this Regulating regulating control.
        """
        #: The regulating control mode presently available. This specifications allows for determining the kind of regualation without need for obtaining the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
        self.mode = mode
        #: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units of those appropriate for the mode.
        self.targetRange = targetRange
        #: The regulation is performed in a discrete mode.
        self.discrete = discrete
        #: The target value specified for case input. This value can be used for the target value wihout the use of schedules. The value has the units appropriate to the mode attribute.
        self.targetValue = targetValue
        #: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
        self.monitoredPhase = monitoredPhase
        # Associations are assigned through their property setters so that
        # the reverse references on the related objects stay consistent.
        self._RegulatingCondEq = []
        self.RegulatingCondEq = [] if RegulatingCondEq is None else RegulatingCondEq
        self._Terminal = None
        self.Terminal = Terminal
        self._RegulationSchedule = []
        self.RegulationSchedule = [] if RegulationSchedule is None else RegulationSchedule
        super(RegulatingControl, self).__init__(*args, **kw_args)
    # Introspection metadata used by the CIM (de)serialization machinery.
    _attrs = ["mode", "targetRange", "discrete", "targetValue", "monitoredPhase"]
    _attr_types = {"mode": str, "targetRange": float, "discrete": bool, "targetValue": float, "monitoredPhase": str}
    _defaults = {"mode": "fixed", "targetRange": 0.0, "discrete": False, "targetValue": 0.0, "monitoredPhase": "s12N"}
    _enums = {"mode": "RegulatingControlModeKind", "monitoredPhase": "PhaseCode"}
    _refs = ["RegulatingCondEq", "Terminal", "RegulationSchedule"]
    _many_refs = ["RegulatingCondEq", "RegulationSchedule"]
    def getRegulatingCondEq(self):
        """The equipment that participates in this regulating control scheme.
        """
        return self._RegulatingCondEq
    def setRegulatingCondEq(self, value):
        # Detach all currently associated equipment, then attach the new set,
        # keeping the one-to-many reverse reference in sync on both sides.
        for x in self._RegulatingCondEq:
            x.RegulatingControl = None
        for y in value:
            y._RegulatingControl = self
        self._RegulatingCondEq = value
    RegulatingCondEq = property(getRegulatingCondEq, setRegulatingCondEq)
    def addRegulatingCondEq(self, *RegulatingCondEq):
        # Assigning the reverse reference also appends to our collection.
        for obj in RegulatingCondEq:
            obj.RegulatingControl = self
    def removeRegulatingCondEq(self, *RegulatingCondEq):
        for obj in RegulatingCondEq:
            obj.RegulatingControl = None
    def getTerminal(self):
        """The terminal associated with this regulating control.
        """
        return self._Terminal
    def setTerminal(self, value):
        # Remove self from the old terminal's reverse list before linking
        # to the new terminal (one-to-many association).
        if self._Terminal is not None:
            filtered = [x for x in self.Terminal.RegulatingControl if x != self]
            self._Terminal._RegulatingControl = filtered
        self._Terminal = value
        if self._Terminal is not None:
            if self not in self._Terminal._RegulatingControl:
                self._Terminal._RegulatingControl.append(self)
    Terminal = property(getTerminal, setTerminal)
    def getRegulationSchedule(self):
        """Schedule for this Regulating regulating control.
        """
        return self._RegulationSchedule
    def setRegulationSchedule(self, value):
        # Same detach/attach protocol as setRegulatingCondEq.
        for x in self._RegulationSchedule:
            x.RegulatingControl = None
        for y in value:
            y._RegulatingControl = self
        self._RegulationSchedule = value
    RegulationSchedule = property(getRegulationSchedule, setRegulationSchedule)
    def addRegulationSchedule(self, *RegulationSchedule):
        for obj in RegulationSchedule:
            obj.RegulatingControl = self
    def removeRegulationSchedule(self, *RegulationSchedule):
        for obj in RegulationSchedule:
            obj.RegulatingControl = None
|
rwl/PyCIM
|
CIM15/IEC61970/Wires/RegulatingControl.py
|
Python
|
mit
| 7,227
|
"""
Authors: Jeremy Haugen, UCLA, Anthony Nguyen, UCLA
Created: June 2015
Copyright notice in LICENSE file
This file is used to take the raw files generated by
the SAM, which are in CSV format and processes it into
json format. Then it will contact the sMAP server and
upload the json data into sMAP
"""
import uuid_gen
import subprocess
import time
import json
import os
def curl_file_smap(filename):
    """POST the JSON file *filename* to the sMAP archiver via curl.

    The upload is fire-and-forget: the curl process is started but not
    waited on, matching the original behavior.
    """
    # Build the command as an argument list and drop shell=True so that a
    # filename containing shell metacharacters cannot inject commands.
    args = ["/usr/bin/curl", "-v", "-XPOST", "-d", "@" + filename,
            "-H", "Content-Type: application/json",
            "http://128.97.93.240:8079/add/mHRzALUD7OtL9TFi0MbJDm6mKWdA2DJp5wJT"]
    subprocess.Popen(args)
def parse(file):
    # Convert one SAM CSV dump into an sMAP-format JSON file, then upload it.
    # (Python 2 code: print statements and dict.iteritems below.)
    # Output name: smap_<basename-without-extension>.json in the current dir.
    op_fname = "smap_" + os.path.basename(file).split(".")[0] + ".json"
    if os.path.isfile(op_fname):
        print "skipping",op_fname
        return
    else:
        print "processing",op_fname
    with open(file) as f, open(op_fname , "w+") as op:
        j = 0
        data = {}
        for line in f.readlines():
            # Row layout: [?, meter-name, epoch-seconds, then repeating
            # triples of (channel, value, unit)].
            row = line.split(",")
            #print row
            #print j
            path = "/ManisHouse/" + row[1]
            # Legacy rename for the main meter stream.
            if path == "/ManisHouse/MainMeter":
                path = "/ManisHouse/ShenitechMainMeter"
            num_cols = (len(row) - 3)/3
            #print num_cols
            for i in range(num_cols):
                channel = row[3*i + 3]
                value = row[3*i + 4]
                unit = row[3*i + 5]
                full_path = path + "/" + channel
                #uuid = uuid_gen.get_uuid(full_path)
                # sMAP expects millisecond timestamps.
                timestamp = int(float(row[2])) * 1000
                if full_path not in data:
                    data[full_path] = []
                data[full_path].append([timestamp, float(value)])
                #op.write(str(uuid) + "," + full_path + "," + str(timestamp) + "," + value + "\n")
            j += 1
            #if j == 10:
            #    break
        # One sMAP stream per path: readings plus a deterministic uuid.
        full_json = {}
        #print "data", data
        for full_path, value in data.iteritems():
            uuid = uuid_gen.get_uuid(full_path)
            d = {
                "Readings": data[full_path],
                "uuid": str(uuid)
            }
            full_json[full_path] = d
        op.write(json.dumps(full_json, indent=2))
    curl_file_smap(op_fname)
if __name__ == "__main__":
    # Convert every regular .txt dump in the current directory.
    for entry in os.listdir('.'):
        if os.path.isfile(entry) and entry.split(".")[-1] == "txt":
            parse(entry)
|
resolutedreamer/NESLDashboard
|
src/sam/sam_out/smap_out_parser_json.py
|
Python
|
mit
| 2,605
|
from subprocess import PIPE, Popen
from sqlalchemy import create_engine
def run(p):
    """Execute the shell command in ``p['action']['query']`` and log the result.

    :param p: task context dict carrying a ``log`` object (with ``info``,
        ``success`` and ``error`` methods) and an ``action`` dict whose
        ``query`` entry is the command line to run.
    :return: always ``True``; failures are only reported through the log.
    """
    try:
        query = p["action"]['query']
        p["log"].info(query)
        # NOTE(review): shell=True passes the query through the shell; the
        # query string must come from a trusted source.
        proc = Popen(query, shell=True,
                     stdin=PIPE, stdout=PIPE, stderr=PIPE,
                     universal_newlines=True)
        out, err = proc.communicate()
        # Text mode above keeps this concatenation working on Python 3,
        # where pipes would otherwise yield bytes and raise TypeError here.
        message = ''.join(s + '\n' for s in (out, err) if s)
        p["log"].success(message)
    except Exception as e:
        # Python 2-only 'except Exception, e' replaced with the portable
        # 'as' form; the dead 'AllGood' local was removed.
        p["log"].error("command line execution failed", e)
    return True
|
unkyulee/elastic-cms
|
src/task/modules/CMD.py
|
Python
|
mit
| 530
|
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class PysamInstaller(ClusterSetup):
    """StarCluster plugin that installs pysam 0.8.4 on every node and
    registers it as an environment module."""

    def run(self, nodes, master, user, user_shell, volumes):
        """Install pysam into /opt/software/pysam on each node and create
        the matching modulefile under /usr/local/Modules."""
        for node in nodes:
            log.info("Installing PySam 0.8.4 on %s" % (node.alias))
            node.ssh.execute('mkdir -p /opt/software/pysam')
            # Fixed install target: the previous value ('d:\/opt/...') had a
            # stray Windows-style 'd:\' prefix, which is not a valid path on
            # the Linux cluster nodes. Also pin the version to 0.8.4 to match
            # the log message and the modulefile root below.
            node.ssh.execute('pip install --target=/opt/software/pysam pysam==0.8.4')  # https://github.com/pysam-developers/pysam/archive/v0.8.4.tar.gz
            node.ssh.execute('mkdir -p /usr/local/Modules/applications/pysam/;touch /usr/local/Modules/applications/pysam/0.8.4')
            node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/pysam/0.8.4')
            node.ssh.execute('echo "set root /opt/software/pysam/pysam-0.8.4" >> /usr/local/Modules/applications/pysam/0.8.4')
            node.ssh.execute('echo -e "prepend-path\tPATH\t\$root" >> /usr/local/Modules/applications/pysam/0.8.4')
|
meissnert/StarCluster-Plugins
|
Pysam_0_8_4.py
|
Python
|
mit
| 905
|
"""
Author: Eric J. Ma
Affiliation: Massachusetts Institute of Technology
"""
from random import choice
from generate_id import generate_id
class Sequence(object):
    """
    The Sequence object is the lowest level object in the pathogen simulator.

    It provides a container for storing seed sequences for the pathogens
    present in the environment.  This can be subclassed to store seed
    sequences for other pathogens, rather than using a generated sequence.

    Note that when a virus replicates, the full sequence object is not copied
    for each of its segments; rather, each segment only keeps track of the
    mutations that have happened.
    """

    def __init__(self, length=1000, sequence=None, id=None):
        """
        Initialize with a random nucleotide sequence of `length` if
        `sequence` is not specified, otherwise with the given sequence.
        An `id` is generated via generate_id() unless supplied.
        """
        super(Sequence, self).__init__()
        # Fix: identity comparison with None (was `== None`).
        if sequence is None:
            self.sequence = self.generate_sequence(length)
        else:
            self.sequence = sequence
        if id is None:
            self.id = generate_id()
        else:
            self.id = id

    def __repr__(self):
        return self.id

    def generate_sequence(self, length):
        """
        Return a random nucleotide string of the given length.

        Uses a single join over random choices instead of the original
        quadratic `+=` string accumulation.
        """
        return ''.join(choice('ATGC') for _ in range(length))
|
ericmjl/reassortment-simulation-and-reconstruction
|
sequence.py
|
Python
|
mit
| 1,435
|
# Copyright 2009-2014 Ram Rachum.
# This program is distributed under the MIT license.
'''
This module defines scripts for selecting stuff.
See their documentation for more information.
'''
from __future__ import with_statement
import bisect
import re
import _ast
import os.path, sys
sys.path += [
os.path.dirname(__file__),
os.path.join(os.path.dirname(__file__), 'third_party.zip'),
]
import wingapi
import shared
SAFETY_LIMIT = 60
'''The maximum number of times we'll do `select-more` before giving up.'''
def _ast_parse(string):
return compile(string, '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
def _is_expression(string):
'''Is `string` a Python expression?'''
# Throwing out '\r' characters because `ast` can't process them for some
# reason:
string = string.replace('\r', '')
try:
nodes = _ast_parse(string).body
except SyntaxError:
return False
else:
if len(nodes) != 1:
return False
else:
(node,) = nodes
return type(node) == _ast.Expr
variable_name_pattern_text = r'[a-zA-Z_][0-9a-zA-Z_]*'
dotted_name_pattern = re.compile(
r'\.?^%s(\.%s)*$' %
(variable_name_pattern_text, variable_name_pattern_text)
)
def _is_dotted_name(string):
'''Is `string` a dotted name?'''
assert isinstance(string, str)
return bool(dotted_name_pattern.match(string.strip()))
whitespace_characters = ' \n\r\t\f\v'
def _is_whitespaceless_name(string):
'''Is `string` a whitespace-less name?'''
assert isinstance(string, str)
return not any((whitespace_character in string for whitespace_character
in whitespace_characters))
def _select_more_until_biggest_match(condition, editor=wingapi.kArgEditor):
    '''`select-more` until reaching biggest text that satisfies `condition`.

    Repeatedly grows the selection (up to SAFETY_LIMIT times), remembering
    the last iteration at which the selected text satisfied `condition`,
    then replays that many `select-more` steps to land on the biggest match.
    '''
    assert isinstance(editor, wingapi.CAPIEditor)
    document = editor.GetDocument()
    select_more = lambda: wingapi.gApplication.ExecuteCommand('select-more')
    is_selection_good = lambda: condition(
        document.GetCharRange(*editor.GetSelection()).strip()
    )
    last_success_n_iterations = None
    last_start, last_end = original_selection = editor.GetSelection()
    with shared.ScrollRestorer(editor):
        with shared.SelectionRestorer(editor):
            # Probe phase: grow the selection and record the last good size.
            for i in range(SAFETY_LIMIT):
                select_more()
                current_start, current_end = editor.GetSelection()
                # Selection stopped growing — nothing more to probe.
                if (current_start == last_start) and (current_end == last_end):
                    break
                if is_selection_good():
                    last_success_n_iterations = i
                last_start, last_end = current_start, current_end
    # Replay phase: selection was restored above; redo the winning number
    # of `select-more` steps for real.
    if last_success_n_iterations is not None:
        for i in range(last_success_n_iterations+1):
            select_more()
def select_expression(editor=wingapi.kArgEditor):
    '''
    Select the Python expression that the cursor is currently on.

    This does `select-more` until the biggest possible legal Python expression
    is selected.

    Suggested key combination: `Ctrl-Alt-Plus`
    '''
    # Thin wrapper: delegates the grow-and-test loop to the shared helper.
    _select_more_until_biggest_match(_is_expression, editor)
def select_dotted_name(editor=wingapi.kArgEditor):
    '''
    Select the dotted name that the cursor is currently on, like `foo.bar.baz`.

    This does `select-more` until the biggest possible dotted name is selected.

    Suggested key combination: `Alt-Plus`
    '''
    _select_more_until_biggest_match(_is_dotted_name, editor)
def select_whitespaceless_name(editor=wingapi.kArgEditor):
    '''
    Select the whitespace-less name that the cursor is currently on.

    Example: `foo.bar.baz(e=3)`.

    This does `select-more` until the biggest possible whitespace-less name is
    selected.

    Suggested key combination: `Ctrl-Alt-Equal`
    '''
    _select_more_until_biggest_match(_is_whitespaceless_name, editor)
# Finds `def foo(` / `class Foo:` headers; group 1 captures just the name.
_scope_name_pattern = re.compile(
    r'''(?:^|[ \t\r\n])(?:def|class) +([a-zA-Z_][0-9a-zA-Z_]*)'''
    r'''[ \t\r\n]*[(:]''',
    flags=re.DOTALL
)

def _get_scope_name_positions(document):
    '''Return (start, end) character spans of every scope name in `document`.'''
    document_text = shared.get_text(document)
    matches = _scope_name_pattern.finditer(document_text)
    return tuple(match.span(1) for match in matches)
def select_next_scope_name(editor=wingapi.kArgEditor,
                           app=wingapi.kArgApplication):
    '''
    Select the next scope name like `def thing():` or `class Thing():`.

    (Selects just the name.)

    Suggested key combination: `Alt-Semicolon`
    '''
    assert isinstance(editor, wingapi.CAPIEditor)
    _, position = editor.GetSelection()
    position += 1  # move past the current selection end
    scope_name_positions = _get_scope_name_positions(editor.GetDocument())
    scope_name_ends = tuple(scope_name_position[1] for scope_name_position in
                            scope_name_positions)
    # First scope whose name ends at/after the cursor.
    scope_name_index = bisect.bisect_left(scope_name_ends, position)
    if 0 <= scope_name_index < len(scope_name_ends):
        app.ExecuteCommand('set-visit-history-anchor')
        editor.SetSelection(*scope_name_positions[scope_name_index])
def select_prev_scope_name(editor=wingapi.kArgEditor,
                           app=wingapi.kArgApplication):
    '''
    Select the previous scope name like `def thing():` or `class Thing():`.

    (Selects just the name.)

    Suggested key combination: `Alt-Colon`
    '''
    assert isinstance(editor, wingapi.CAPIEditor)
    position, _ = editor.GetSelection()
    position -= 1  # move before the current selection start
    scope_name_positions = _get_scope_name_positions(editor.GetDocument())
    scope_name_starts = tuple(scope_name_position[0] for scope_name_position
                              in scope_name_positions)
    # Last scope whose name starts before the cursor.
    scope_name_index = bisect.bisect_left(scope_name_starts, position) - 1
    if 0 <= scope_name_index < len(scope_name_starts):
        app.ExecuteCommand('set-visit-history-anchor')
        editor.SetSelection(*scope_name_positions[scope_name_index])
|
cool-RR/cute-wing-stuff
|
scripts/selecting_stuff.py
|
Python
|
mit
| 6,144
|
from .routes_test_fixture import app # noqa
def test_stats_wiki_campaign(client):
    """Per-campaign stats pages render for both active and archived campaigns."""
    # test active campaign
    assert client.get("/stats/enwiki/1")._status_code == 200
    # test archived campaign
    assert client.get("/stats/enwiki/7")._status_code == 200
def test_stats(client):
    """The top-level stats index renders."""
    assert client.get("/stats/")._status_code == 200
|
wiki-ai/wikilabels
|
tests/test_stats_routes.py
|
Python
|
mit
| 342
|
from pyramid.security import (
_get_authentication_policy
)
def my_get_authentication_policy(request):
    """Expose Pyramid's private ``_get_authentication_policy`` helper.

    CRITICAL: this returns the instantiated singleton policy object that
    handles authentication in the Pyramid app; the policy object stores the
    configuration keys used to generate tokens.
    """
    return _get_authentication_policy(request)
|
NaturalSolutions/NsPortal
|
Back/ns_portal/utils/utils.py
|
Python
|
mit
| 380
|
#!/usr/bin/env python
# coding=utf-8
import requests
import time
import json
"""
ansible 运行结果回调
"""
class CallbackModule(object):
    """Experimental Ansible v2 callback plugin (Python 2).

    Dumps runner/playbook event payloads to stdout for inspection and, for
    on_ok/on_failed, POSTs the raw result dict to a local debug endpoint.
    """

    def v2_runner_item_on_ok(self, *args, **kwargs):
        # args[0] is the TaskResult; list its non-dunder attributes.
        for i in dir(args[0]):
            if not i.startswith('__'):
                print i
        print '======'
        print json.dumps(args[0]._result, indent=4)
        print args[0]._task
        print 'runner item on ok'

    def v2_runner_item_on_failed(self, *args, **kwargs):
        print dir(args[0])
        print 'runner item on failed'
        print json.dumps(args[0]._result, indent=4)
        print args[0]._task
        print '======'

    def v2_runner_item_on_skipped(self, *args, **kwargs):
        print dir(args[0])
        print 'runner item on skipped'

    def v2_runner_retry(self, *args, **kwargs):
        print dir(args[0])
        print 'runner on retry'

    def v2_runner_on_ok(self, *args, **kwargs):
        print 'runner on ok'
        for i in dir(args[0]):
            if not i.startswith('__'):
                print i
        print json.dumps(args[0]._result, indent=4)
        print args[0]._task
        # Forward the result to the local debug collector.
        requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._result)

    def v2_runner_on_unreachable(self, *args, **kwargs):
        print 'runner on unreacheable'
        print dir(args[0])

    def v2_runner_on_failed(self, *args, **kwargs):
        print dir(args[0])
        print 'runner on failed'
        print json.dumps(args[0]._result, indent=4)
        print args[0]._task
        # Forward both result and task to the local debug collector.
        requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._result)
        requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._task)
        print args[0].is_failed(), '-*/***********'
        print '------'

    def v2_runner_on_skipped(self, *args, **kwargs):
        print 'runner on skipped'

    def v2_playbook_on_stats(self, *args, **kwargs):
        # args[0] is the AggregateStats object.
        for i in dir(args[0]):
            if not i.startswith('__'):
                print i
        print args[0].failures, 'failures'
        print 'on stats'
if __name__ == "__main__":
    # Smoke check when run directly; Ansible loads this module as a plugin.
    print 'callback'
|
tao12345666333/Talk-Is-Cheap
|
ansible/plugins/callback/test.py
|
Python
|
mit
| 3,110
|
import json
import os
from errata_tool import ErrataConnector, Erratum
from errata_tool.products import ProductList
import requests
import pytest
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(TESTS_DIR, 'fixtures')
class MockResponse(object):
    """Stand-in for a ``requests`` response backed by on-disk fixture files."""

    status_code = 200
    encoding = 'utf-8'
    headers = {'content-type': 'application/json; charset=utf-8'}

    def raise_for_status(self):
        # Always a 200 — never raises.
        pass

    @property
    def _fixture(self):
        """ Return path to our static fixture file. """
        # Maps the live URL onto the fixtures directory tree.
        return self.url.replace('https://errata.devel.redhat.com/',
                                os.path.join(FIXTURES_DIR,
                                             'errata.devel.redhat.com/'))

    def json(self):
        """Parse the fixture file as JSON; hint how to create it if missing."""
        try:
            with open(self._fixture) as fp:
                return json.load(fp)
        except IOError:
            print('Try ./new-fixture.sh %s' % self.url)
            raise

    @property
    def text(self):
        """ Return contents of our static fixture file. """
        try:
            with open(self._fixture) as fp:
                return fp.read()
        except IOError:
            print('Try ./new-fixture.sh %s' % self.url)
            raise
class RequestRecorder(object):
    """ Record args to requests.get() or requests.post() """

    def __call__(self, url, **kwargs):
        """ mocking requests.get() or requests.post() """
        response = MockResponse()
        response.url = url
        self.response = response
        self.kwargs = kwargs
        return response
@pytest.fixture
def mock_get():
    # Recorder to be monkeypatched over requests.get.
    return RequestRecorder()
@pytest.fixture
def mock_post():
    # Recorder to be monkeypatched over requests.post.
    return RequestRecorder()
@pytest.fixture
def mock_put():
    # Recorder to be monkeypatched over requests.put.
    return RequestRecorder()
@pytest.fixture
def advisory(monkeypatch, mock_get):
    """An RHBA Erratum loaded entirely from fixtures (no network)."""
    # Deleting Session.request guarantees any un-mocked HTTP call fails loudly.
    monkeypatch.delattr('requests.sessions.Session.request')
    monkeypatch.setattr(ErrataConnector, '_auth', None)
    monkeypatch.setattr(requests, 'get', mock_get)
    return Erratum(errata_id=26175)
@pytest.fixture
def rhsa(monkeypatch, mock_get):
    """ Like the advisory() fixture above, but an RHSA. """
    monkeypatch.delattr('requests.sessions.Session.request')
    monkeypatch.setattr(ErrataConnector, '_auth', None)
    monkeypatch.setattr(requests, 'get', mock_get)
    return Erratum(errata_id=25856)
@pytest.fixture
def productlist(monkeypatch, mock_get):
    """A ProductList backed by fixture data (no network)."""
    monkeypatch.delattr('requests.sessions.Session.request')
    monkeypatch.setattr(ErrataConnector, '_auth', None)
    monkeypatch.setattr(requests, 'get', mock_get)
    return ProductList()
|
mmuzila/errata-tool
|
errata_tool/tests/conftest.py
|
Python
|
mit
| 2,567
|
# -*- coding: utf-8 -*-
"""Test events classes.
This file is part of PyVISA.
:copyright: 2019-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import logging
import pytest
from pyvisa import constants, errors
from pyvisa.events import Event
from . import BaseTestCase
class TestEvent(BaseTestCase):
    """Test Event functionalities."""

    def setup_method(self):
        # Snapshot the class-level registry so each test can mutate it safely.
        self.old = Event._event_classes.copy()

    def teardown_method(self):
        Event._event_classes = self.old

    def test_register(self):
        # The base Event class is pre-registered for the `clear` event type.
        assert Event._event_classes[constants.EventType.clear] is Event

    def test_double_register_event_cls(self, caplog):
        class SubEvent(Event):
            pass

        # Re-registering an already-mapped event type logs a debug message
        # and replaces the registered class.
        with caplog.at_level(logging.DEBUG, logger="pyvisa"):
            Event.register(constants.EventType.clear)(SubEvent)
        assert caplog.records
        assert Event._event_classes[constants.EventType.clear] is SubEvent

    def test_register_event_cls_missing_attr(self):
        class SubEvent(Event):
            pass

        # The `exception` event type requires extra attributes; registering a
        # class without them raises and leaves the registry unchanged.
        with pytest.raises(TypeError):
            Event.register(constants.EventType.exception)(SubEvent)
        assert Event._event_classes[constants.EventType.exception] is not SubEvent

    def test_event_context(self):
        event = Event(None, constants.EventType.clear, 1)
        assert event.context == 1
        event.close()
        # Accessing the context after close() raises InvalidSession.
        with pytest.raises(errors.InvalidSession):
            event.context
|
pyvisa/pyvisa
|
pyvisa/testsuite/test_event.py
|
Python
|
mit
| 1,508
|
"""Findall regex operations in python.
findall(string[, pos[, endpos]])
Returns a list:
not like search and match which returns objects
Otherwise, it returns an empty list.
"""
import re
# Demonstration script for re.findall (Python 2).

# look for every word in a string
pattern = re.compile(r"\w+")
result = pattern.findall("hey bro")
print result

patt = re.compile(r"a*b")
# returns ['ab', 'ab', 'ab', 'b'] — matches are non-overlapping, left to right
res = patt.findall("abababb")
print res

# match a group of words: with groups, findall returns tuples
p = re.compile(r"(\w+) (\w+)")
rv = p.findall("Hello world, i lived")
print rv

# Using unicode characters
print re.findall(ur"\w+", u"这是一个例子", re.UNICODE)

# using named groups inside pattern itself (backreference to 'word')
patt = re.compile(r"(?P<word>\w+) (?P=word)")
|
andela-ggikera/regex
|
findall.py
|
Python
|
mit
| 700
|
from OpenGLCffi.GL import params
# Auto-generated cffi stub signatures for GL_ARB_vertex_type_2_10_10_10_rev.
# The @params decorator supplies the real FFI dispatch; bodies are `pass`.

# --- packed generic vertex attributes ---
@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP1ui(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP1uiv(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP2ui(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP2uiv(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP3ui(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP3uiv(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP4ui(index, type, normalized, value):
	pass

@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP4uiv(index, type, normalized, value):
	pass

# --- packed vertex positions ---
@params(api='gl', prms=['type', 'value'])
def glVertexP2ui(type, value):
	pass

@params(api='gl', prms=['type', 'value'])
def glVertexP2uiv(type, value):
	pass

@params(api='gl', prms=['type', 'value'])
def glVertexP3ui(type, value):
	pass

@params(api='gl', prms=['type', 'value'])
def glVertexP3uiv(type, value):
	pass

@params(api='gl', prms=['type', 'value'])
def glVertexP4ui(type, value):
	pass

@params(api='gl', prms=['type', 'value'])
def glVertexP4uiv(type, value):
	pass

# --- packed texture coordinates ---
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP1ui(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP1uiv(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP2ui(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP2uiv(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP3ui(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP3uiv(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP4ui(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glTexCoordP4uiv(type, coords):
	pass

# --- packed multi-texture coordinates ---
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP1ui(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP1uiv(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP2ui(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP2uiv(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP3ui(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP3uiv(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP4ui(texture, type, coords):
	pass

@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP4uiv(texture, type, coords):
	pass

# --- packed normals and colors ---
@params(api='gl', prms=['type', 'coords'])
def glNormalP3ui(type, coords):
	pass

@params(api='gl', prms=['type', 'coords'])
def glNormalP3uiv(type, coords):
	pass

@params(api='gl', prms=['type', 'color'])
def glColorP3ui(type, color):
	pass

@params(api='gl', prms=['type', 'color'])
def glColorP3uiv(type, color):
	pass

@params(api='gl', prms=['type', 'color'])
def glColorP4ui(type, color):
	pass

@params(api='gl', prms=['type', 'color'])
def glColorP4uiv(type, color):
	pass

@params(api='gl', prms=['type', 'color'])
def glSecondaryColorP3ui(type, color):
	pass

@params(api='gl', prms=['type', 'color'])
def glSecondaryColorP3uiv(type, color):
	pass
|
cydenix/OpenGLCffi
|
OpenGLCffi/GL/EXT/ARB/vertex_type_2_10_10_10_rev.py
|
Python
|
mit
| 3,794
|
public_key = b"\x30\x81\x9f\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00\x30\x81\x89\x02\x81\x81\x00\xaf\x15\xe8" + \
b"\x75\x00\x06\xe4\xc5\xd5\xda\x2c\xc5\x63\x6a\xef\xa3\x06\x81\x99\x19\x8f\x2a\xb5\xd3\x2e\x50\x76\x94\xe1\xc1\x5a\xa2\x84\xd7\xad" + \
b"\x91\x2b\xbf\x42\xe6\xb1\x08\x2f\x15\x53\x80\xc1\xa7\xa9\xaf\x22\xd7\x81\x95\xc4\x1e\xea\x4b\x60\x60\xf8\x00\xe5\x9e\x9d\x8a\xe1" + \
b"\x4f\x37\x41\xe7\x4d\xc6\x2e\x9c\xbb\x5c\x03\x5e\x60\x04\x9b\x8b\x3f\x8c\x27\xfc\x1c\x9c\x82\xec\xec\xa1\x30\x0e\x42\x9c\xd3\xaa" + \
b"\x91\x8a\xf4\xcf\x0c\x60\x9b\xb3\xb4\x77\x14\x24\xe3\x22\xcb\xb8\x79\xa6\x3c\x20\xe3\x8d\x09\x28\x34\xda\x78\xfe\x8d\x02\x03\x01" + \
b"\x00\x01"
private_key = b"\x30\x82\x02\x78\x02\x01\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x62\x30\x82\x02\x5e\x02\x01" + \
b"\x00\x02\x81\x81\x00\xaf\x15\xe8\x75\x00\x06\xe4\xc5\xd5\xda\x2c\xc5\x63\x6a\xef\xa3\x06\x81\x99\x19\x8f\x2a\xb5\xd3\x2e\x50\x76" + \
b"\x94\xe1\xc1\x5a\xa2\x84\xd7\xad\x91\x2b\xbf\x42\xe6\xb1\x08\x2f\x15\x53\x80\xc1\xa7\xa9\xaf\x22\xd7\x81\x95\xc4\x1e\xea\x4b\x60" + \
b"\x60\xf8\x00\xe5\x9e\x9d\x8a\xe1\x4f\x37\x41\xe7\x4d\xc6\x2e\x9c\xbb\x5c\x03\x5e\x60\x04\x9b\x8b\x3f\x8c\x27\xfc\x1c\x9c\x82\xec" + \
b"\xec\xa1\x30\x0e\x42\x9c\xd3\xaa\x91\x8a\xf4\xcf\x0c\x60\x9b\xb3\xb4\x77\x14\x24\xe3\x22\xcb\xb8\x79\xa6\x3c\x20\xe3\x8d\x09\x28" + \
b"\x34\xda\x78\xfe\x8d\x02\x03\x01\x00\x01\x02\x81\x80\x36\xeb\x4b\x50\x2f\xe2\xf9\xad\xa8\xa7\xd7\xf5\x4e\x7b\x03\x92\x02\x7f\x72" + \
b"\x53\x97\x19\xd1\x90\xdd\x6d\x35\xd4\xfb\x7f\x57\xfb\xb4\x69\xa6\xb2\xeb\xa3\x01\xcc\x34\xe9\x99\x43\x3a\x3f\x1f\xff\x84\x75\x40" + \
b"\x1b\x93\x35\x34\x20\x72\x63\x94\x66\xb6\x44\x29\xc1\xf1\xdd\xd4\x65\x3a\x30\xa8\x05\xe4\x53\x54\x44\x78\x8f\x92\x0a\x43\x4d\x82" + \
b"\x51\x54\xdc\x41\xc9\x87\xa5\x98\xc8\x80\xf3\x1b\x91\xc3\x2f\x3d\x32\xf5\xec\x86\x4c\xa7\x4d\xde\x61\x12\xf7\xaf\xd7\x16\x66\xb0" + \
b"\x1f\xf4\xd1\x53\xf6\x8a\x4f\x44\xce\xcd\x22\x85\x81\x02\x41\x00\xf5\xff\xd3\x2a\x39\x3b\x0f\xaf\x64\xc8\x04\x60\x87\xa3\x1b\xe5" + \
b"\x34\x99\x69\x2d\xa8\x2f\x6c\x17\xef\x4e\xc1\xb8\x75\x61\x88\x60\x5e\xbe\x3c\x16\xa0\x87\xda\x3e\x9d\x43\x56\x67\x84\x8d\xbe\x86" + \
b"\x70\x91\x8d\x8f\xf9\x3a\xa4\x0e\x59\xa0\x42\x24\x69\xaa\xb0\x6d\x02\x41\x00\xb6\x34\x11\x08\x6e\x0b\x65\xc0\x9f\x53\xe2\x11\x52" + \
b"\x13\xf0\x6a\x55\x2c\x4a\xb4\xe3\x50\xe8\xe9\x66\x11\xd4\x7a\x85\xbf\x98\xfd\x11\xd9\x9a\x09\xf3\x8f\x69\x53\x70\xbf\xca\xc3\xc5" + \
b"\x72\x8d\x47\xf5\x6a\x37\xc8\x07\x2c\xfd\x43\x9b\x78\x8d\x05\xeb\x1d\xf2\xa1\x02\x41\x00\xac\x7e\x20\x11\xa1\x63\xba\x91\xdf\xf7" + \
b"\x28\xaa\x8f\x31\x5e\x24\x10\x07\xea\x6a\x6b\x5e\x25\x4b\x7b\x30\x1c\x42\x3d\x7c\x90\x66\x12\xc9\x0d\xd5\x47\xe7\x3a\xaf\x61\x12" + \
b"\x90\x89\xb1\xb6\xba\x7c\x06\x7e\xe9\x66\xa4\xf9\xeb\x83\x6c\x71\x25\x2f\xe7\x30\x1a\xd9\x02\x41\x00\x89\x60\xfc\xae\xc4\x7a\x67" + \
b"\x80\x33\x21\xc6\x44\x95\x04\x5f\xb3\x6d\x00\xf6\x5b\x29\x42\x2a\x3b\x41\x30\x94\x6a\xc5\x49\xcf\x8a\x90\xd8\xe7\x62\x35\x78\x9e" + \
b"\x4b\xc1\xa9\x7a\xb2\xdd\xbf\x1f\x73\x70\x41\x64\x49\xb7\xcf\x5e\x2e\x89\x9c\xfd\x87\xc6\xdd\x4f\xc1\x02\x41\x00\xec\xa2\x72\x69" + \
b"\x0b\xd0\x77\x0e\xdc\x8e\x6e\x18\x07\x50\xcb\x22\x37\x95\x87\x38\x6c\xd7\xa7\x2a\xb9\x8e\x83\x66\xb1\x79\x05\x73\xf8\xbc\x50\x57" + \
b"\xd1\x2a\x19\xe5\x49\x85\x5f\xdf\x28\xe0\x96\x9e\xf3\x9d\x70\x6b\x1f\xf8\x60\xb8\xc8\x56\x04\xb1\xfc\x0c\x2c\xcc"
certs = [b"\x30\x82\x01\xac\x30\x82\x01\x15\xa0\x03\x02\x01\x02\x02\x01\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x30" + \
b"\x1c\x31\x1a\x30\x18\x06\x03\x55\x04\x03\x0c\x11\x75\x6e\x69\x63\x6f\x64\x65\x5f\x70\x61\x73\x73\x77\x6f\x72\x64\x73\x30\x1e\x17" + \
b"\x0d\x31\x37\x30\x38\x31\x31\x31\x39\x31\x30\x35\x31\x5a\x17\x0d\x31\x39\x30\x38\x31\x31\x31\x39\x31\x30\x35\x31\x5a\x30\x1c\x31" + \
b"\x1a\x30\x18\x06\x03\x55\x04\x03\x0c\x11\x75\x6e\x69\x63\x6f\x64\x65\x5f\x70\x61\x73\x73\x77\x6f\x72\x64\x73\x30\x81\x9f\x30\x0d" + \
b"\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00\x30\x81\x89\x02\x81\x81\x00\xaf\x15\xe8\x75\x00\x06\xe4\xc5" + \
b"\xd5\xda\x2c\xc5\x63\x6a\xef\xa3\x06\x81\x99\x19\x8f\x2a\xb5\xd3\x2e\x50\x76\x94\xe1\xc1\x5a\xa2\x84\xd7\xad\x91\x2b\xbf\x42\xe6" + \
b"\xb1\x08\x2f\x15\x53\x80\xc1\xa7\xa9\xaf\x22\xd7\x81\x95\xc4\x1e\xea\x4b\x60\x60\xf8\x00\xe5\x9e\x9d\x8a\xe1\x4f\x37\x41\xe7\x4d" + \
b"\xc6\x2e\x9c\xbb\x5c\x03\x5e\x60\x04\x9b\x8b\x3f\x8c\x27\xfc\x1c\x9c\x82\xec\xec\xa1\x30\x0e\x42\x9c\xd3\xaa\x91\x8a\xf4\xcf\x0c" + \
b"\x60\x9b\xb3\xb4\x77\x14\x24\xe3\x22\xcb\xb8\x79\xa6\x3c\x20\xe3\x8d\x09\x28\x34\xda\x78\xfe\x8d\x02\x03\x01\x00\x01\x30\x0d\x06" + \
b"\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x03\x81\x81\x00\x42\x54\xd2\x1b\xcb\xdf\x28\x2d\xa0\x5c\x7f\x8e\x82\x90\xae\x79" + \
b"\x3c\x37\x9a\x57\x10\xcb\x43\x09\xb1\x09\xf1\x3e\xa9\x58\x0c\x4c\x16\x9a\xf8\xd2\xa8\x35\x70\xb4\x0c\x9b\xb0\xd3\xef\xce\x54\xbf" + \
b"\x0e\xf0\x19\xf5\x7e\x66\x07\xcb\xcb\x48\x6d\x92\x75\xca\x5c\x54\xa6\x8f\xa8\x47\x8a\x82\x6d\x38\xec\x07\xda\x52\x91\x28\x9b\x5d" + \
b"\x0d\x07\xda\xc3\x22\xd2\x13\x0e\x70\x1e\xc6\xd4\xda\x63\xb3\x3d\xc1\xd3\xfa\xa0\xb5\x1b\x5e\x08\xc5\xfa\x53\x03\x9d\xab\x87\xdc" + \
b"\x63\x19\xf0\x7a\x9e\x93\xfd\xbc\xdc\xbe\x44\x5c\xa5\x82\x73\x0e"]
|
voetsjoeba/pyjks
|
tests/expected/unicode_passwords.py
|
Python
|
mit
| 5,713
|
####
#Importing modules can serve 3 purposes if done right
#1-Allows you to add features to bare bones python
#2-Only importing what you need lets your imports serve as documentation for
# someone else reading your code later.
#3-By forcing you to import things, python helps you keep your programs small
####
#import the 'hook' which lets python read command line arguments
from sys import argv
#argv is a list. This line puts all the elements into variables
script, first, second, third, fourth = argv
#You understand printing by now
print "Your script is called ", script, ", which was the zeroth variable."
print "Your first variable was ", first
print "Your second variable was ", second
print "Your third variable was ", third
print "Your first variable was ", first
|
isaac-friedman/lphw
|
ex13.py
|
Python
|
mit
| 777
|
from __future__ import print_function
from helpers import flatten, merge, add, search
import sys
import os
import yaml
import boto3
import termcolor
def str_presenter(dumper, data):
    """YAML representer: pick a scalar style based on the string's shape.

    Single line ending in '\n' -> folded ('>'); multi-line -> literal ('|');
    otherwise plain, with surrounding whitespace stripped.
    """
    # ''.splitlines() is [] so the short-circuit protects data[-1] on empty strings.
    if len(data.splitlines()) == 1 and data[-1] == '\n':
        return dumper.represent_scalar(
            'tag:yaml.org,2002:str', data, style='>')
    if len(data.splitlines()) > 1:
        return dumper.represent_scalar(
            'tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar(
        'tag:yaml.org,2002:str', data.strip())

yaml.SafeDumper.add_representer(str, str_presenter)
class SecureTag(yaml.YAMLObject):
    """Wrapper for SSM SecureString values, serialized as `!secure` in YAML."""

    yaml_tag = u'!secure'

    def __init__(self, secure):
        # The plaintext secret value.
        self.secure = secure

    def __repr__(self):
        return self.secure

    def __str__(self):
        # Colored for terminal display so secure values stand out.
        return termcolor.colored(self.secure, 'magenta')

    def __eq__(self, other):
        return self.secure == other.secure if isinstance(other, SecureTag) else False

    def __hash__(self):
        return hash(self.secure)

    def __ne__(self, other):
        return (not self.__eq__(other))

    @classmethod
    def from_yaml(cls, loader, node):
        # Construct from a `!secure` scalar node.
        return SecureTag(node.value)

    @classmethod
    def to_yaml(cls, dumper, data):
        # Multi-line secrets use literal block style.
        if len(data.secure.splitlines()) > 1:
            return dumper.represent_scalar(cls.yaml_tag, data.secure, style='|')
        return dumper.represent_scalar(cls.yaml_tag, data.secure)

yaml.SafeLoader.add_constructor('!secure', SecureTag.from_yaml)
yaml.SafeDumper.add_multi_representer(SecureTag, SecureTag.to_yaml)
class LocalState(object):
    """Parameter state stored in a local YAML file."""

    def __init__(self, filename):
        self.filename = filename

    def get(self, paths, flat=True):
        """Load the YAML state, optionally filtered to `paths`.

        A path of '/' (or anything that strips to empty) returns the whole
        tree; otherwise the matching subtrees are merged.  `flat=True`
        flattens the nested dict to `{'/full/path': value}`.
        """
        try:
            output = {}
            with open(self.filename, 'rb') as f:
                l = yaml.safe_load(f.read())
            for path in paths:
                if path.strip('/'):
                    output = merge(output, search(l, path))
                else:
                    # Root path requested: short-circuit with everything.
                    return flatten(l) if flat else l
            return flatten(output) if flat else output
        except IOError as e:
            print(e, file=sys.stderr)
            if e.errno == 2:  # ENOENT — state file missing
                print("Please, run init before doing plan!")
                sys.exit(1)
        except TypeError as e:
            # An empty YAML file loads as None; treat as empty state.
            if 'object is not iterable' in e.args[0]:
                return dict()
            raise

    def save(self, state):
        """Write `state` back to the YAML file; exit on any failure."""
        try:
            # NOTE(review): 'wb' with yaml.safe_dump's str output works on
            # Python 2; on Python 3 this would need text mode — confirm
            # the supported interpreter.
            with open(self.filename, 'wb') as f:
                f.write(yaml.safe_dump(
                    state,
                    default_flow_style=False))
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
class RemoteState(object):
    """Parameter state stored in AWS SSM Parameter Store."""

    def __init__(self, profile):
        # Optional named AWS profile for credentials.
        if profile:
            boto3.setup_default_session(profile_name=profile)
        self.ssm = boto3.client('ssm')

    def get(self, paths=['/'], flat=True):
        """Fetch (decrypted) parameters under each path, recursively.

        NOTE: mutable default argument is safe here only because `paths`
        is never mutated.
        """
        paginator = self.ssm.get_paginator('get_parameters_by_path')
        output = {}
        for path in paths:
            for page in paginator.paginate(
                    Path=path,
                    Recursive=True,
                    WithDecryption=True):
                for param in page['Parameters']:
                    add(
                        obj=output,
                        path=param['Name'],
                        value=self._read_param(param['Value'], param['Type']))
        return flatten(output) if flat else output

    def _read_param(self, value, ssm_type='String'):
        # SecureStrings are wrapped so they round-trip as `!secure` in YAML.
        return SecureTag(value) if ssm_type == 'SecureString' else str(value)

    def apply(self, diff):
        """Push a computed diff (added/removed/changed keys) to SSM."""
        for k in diff.added():
            ssm_type = 'String'
            if isinstance(diff.target[k], list):
                ssm_type = 'StringList'
            if isinstance(diff.target[k], SecureTag):
                ssm_type = 'SecureString'
            self.ssm.put_parameter(
                Name=k,
                Value=repr(diff.target[k]) if type(diff.target[k]) == SecureTag else str(diff.target[k]),
                Type=ssm_type)
        for k in diff.removed():
            self.ssm.delete_parameter(Name=k)
        for k in diff.changed():
            ssm_type = 'SecureString' if isinstance(diff.target[k], SecureTag) else 'String'
            self.ssm.put_parameter(
                Name=k,
                Value=repr(diff.target[k]) if type(diff.target[k]) == SecureTag else str(diff.target[k]),
                Overwrite=True,
                Type=ssm_type)
|
AndrewChubatiuk/ssm-diff
|
states/states.py
|
Python
|
mit
| 4,515
|
import random
from ..simulator import Simulator
class RandomMover(Simulator):
    """Simulation where every drone takes a uniformly random move each tick."""

    ACTIONS = ('up', 'down', 'left', 'right')

    def start(self):
        """Run the simulation loop forever (until the process is killed)."""
        self.init_game()
        while True:
            self._check_pygame_events()
            for drone in self.drones:
                drone.do_move(random.choice(self.ACTIONS))
            self.print_map()
            self._draw()
|
dev-coop/plithos
|
src/plithos/simulations/random_mover.py
|
Python
|
mit
| 388
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Tests for RecordView module and view
Note: this module tests for rendering specifically for RecordView values, using
view description sitedata files, and as such duplicates some tests covered by
module test_entitygenericedit.
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import json
import unittest
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from django.http import QueryDict
from django.contrib.auth.models import User
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
from django.test.client import Client
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist import message
from annalist.models.site import Site
from annalist.models.sitedata import SiteData
from annalist.models.collection import Collection
from annalist.models.recordview import RecordView
from annalist.models.recordfield import RecordField
from annalist.views.uri_builder import uri_with_params
from annalist.views.recordviewdelete import RecordViewDeleteConfirmedView
from annalist.views.form_utils.fieldchoice import FieldChoice
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
init_annalist_test_site,
init_annalist_test_coll,
install_annalist_named_coll,
create_test_coll_inheriting,
init_annalist_named_test_coll,
resetSitedata
)
from .entity_testutils import (
make_message, make_quoted_message,
site_dir, collection_dir,
site_view_url, collection_edit_url,
collection_entity_view_url,
collection_create_values,
render_select_options,
create_test_user,
context_field_map,
context_view_field,
context_bind_fields,
check_context_field, check_context_field_value,
)
from .entity_testviewdata import (
recordview_dir,
recordview_coll_url, recordview_url, recordview_edit_url,
recordview_value_keys, recordview_load_keys,
recordview_create_values, recordview_values, recordview_read_values,
view_view_context_data,
default_view_fields_list, view_view_fields_list,
view_view_form_data,
recordview_delete_confirm_form_data
)
from .entity_testentitydata import (
entity_url, entitydata_edit_url, entitydata_list_type_url,
default_fields, default_label, default_comment, error_label,
layout_classes
)
from .entity_testsitedata import (
make_field_choices, no_selection,
get_site_default_entity_fields_sorted,
get_site_bibentry_fields_sorted
)
# -----------------------------------------------------------------------------
#
# RecordView tests
#
# -----------------------------------------------------------------------------
class RecordViewTest(AnnalistTestCase):
    """
    Tests for the RecordView entity model: construction, value access,
    create/load round-trip, and loading of the site default view data.
    """
    def setUp(self):
        # Fresh test site, site data and collection for each test.
        # self.layout maps short names to layout identifiers, used to
        # interpolate expected URL paths in assertions below.
        init_annalist_test_site()
        self.testsite = Site(TestBaseUri, TestBaseDir)
        self.sitedata = SiteData(self.testsite)
        self.testcoll = Collection(self.testsite, "testcoll")
        self.layout = (
            { 'enum_field_placement_id': layout.ENUM_FIELD_PLACEMENT_ID
            , 'enum_list_type_id': layout.ENUM_LIST_TYPE_ID
            , 'enum_render_type_id': layout.ENUM_RENDER_TYPE_ID
            , 'enum_value_type_id': layout.ENUM_VALUE_TYPE_ID
            , 'enum_value_mode_id': layout.ENUM_VALUE_MODE_ID
            , 'field_typeid': layout.FIELD_TYPEID
            , 'group_typeid': layout.GROUP_TYPEID
            , 'list_typeid': layout.LIST_TYPEID
            , 'type_typeid': layout.TYPE_TYPEID
            , 'user_typeid': layout.USER_TYPEID
            , 'view_typeid': layout.VIEW_TYPEID
            , 'vocab_typeid': layout.VOCAB_TYPEID
            , 'field_dir': layout.FIELD_DIR
            , 'group_dir': layout.GROUP_DIR
            , 'list_dir': layout.LIST_DIR
            , 'type_dir': layout.TYPE_DIR
            , 'user_dir': layout.USER_DIR
            , 'view_dir': layout.VIEW_DIR
            , 'vocab_dir': layout.VOCAB_DIR
            })
        return
    def tearDown(self):
        # No per-test teardown; class-level teardown resets site data.
        return
    @classmethod
    def setUpClass(cls):
        super(RecordViewTest, cls).setUpClass()
        return
    @classmethod
    def tearDownClass(cls):
        # Reset collection-scope site data created by tests in this class
        super(RecordViewTest, cls).tearDownClass()
        resetSitedata(scope="collections")
        return
    def test_RecordViewTest(self):
        # Sanity check that the expected Collection class is in scope
        self.assertEqual(Collection.__name__, "Collection", "Check Collection class name")
        return
    def test_recordview_init(self):
        # Check internal attributes of a freshly constructed (unsaved) RecordView
        t = RecordView(self.testcoll, "testview")
        u = recordview_coll_url(self.testsite, coll_id="testcoll", view_id="testview")
        self.assertEqual(t._entitytype, ANNAL.CURIE.View)
        self.assertEqual(t._entityfile, layout.VIEW_META_FILE)
        self.assertEqual(t._entityref, layout.COLL_BASE_VIEW_REF%{'id': "testview"})
        self.assertEqual(t._entityid, "testview")
        self.assertEqual(t._entityurl, u)
        self.assertEqual(t._entitydir, recordview_dir(view_id="testview"))
        self.assertEqual(t._values, None)
        return
    def test_recordview1_data(self):
        # Check id, URLs and values of a RecordView populated via set_values
        t = RecordView(self.testcoll, "view1")
        self.assertEqual(t.get_id(), "view1")
        self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
        self.assertIn(
            "/c/testcoll/d/%(view_dir)s/view1/"%self.layout,
            t.get_url()
            )
        self.assertEqual(
            TestBaseUri + "/c/testcoll/d/%(view_typeid)s/view1/"%self.layout,
            t.get_view_url()
            )
        t.set_values(recordview_create_values(view_id="view1"))
        td = t.get_values()
        self.assertEqual(set(td.keys()), set(recordview_value_keys()))
        v = recordview_values(view_id="view1")
        self.assertDictionaryMatch(td, v)
        return
    def test_recordview2_data(self):
        # As test_recordview1_data, using a second view id
        t = RecordView(self.testcoll, "view2")
        self.assertEqual(t.get_id(), "view2")
        self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
        self.assertIn(
            "/c/testcoll/d/%(view_dir)s/view2/"%self.layout,
            t.get_url()
            )
        self.assertEqual(
            TestBaseUri + "/c/testcoll/d/%(view_typeid)s/view2/"%self.layout,
            t.get_view_url()
            )
        t.set_values(recordview_create_values(view_id="view2"))
        td = t.get_values()
        self.assertEqual(set(td.keys()), set(recordview_value_keys()))
        v = recordview_values(view_id="view2")
        self.assertDictionaryMatch(td, v)
        return
    def test_recordview_create_load(self):
        # Round-trip: values written by create are read back by load
        t = RecordView.create(self.testcoll, "view1", recordview_create_values(view_id="view1"))
        td = RecordView.load(self.testcoll, "view1").get_values()
        v = recordview_read_values(view_id="view1")
        self.assertKeysMatch(td, v)
        self.assertDictionaryMatch(td, v)
        return
    def test_recordview_default_data(self):
        # Load the site-wide default view definition (altscope="all")
        # and check its id, URLs and stored values.
        t = RecordView.load(self.testcoll, "Default_view", altscope="all")
        self.assertEqual(t.get_id(), "Default_view")
        self.assertIn(
            "/c/_annalist_site/d/%(view_dir)s/Default_view"%self.layout,
            t.get_url()
            )
        self.assertIn(
            "/c/testcoll/d/%(view_typeid)s/Default_view"%self.layout,
            t.get_view_url()
            )
        self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
        td = t.get_values()
        self.assertEqual(
            set(td.keys()),
            set(recordview_load_keys(view_uri=True, view_entity_type=True))
            )
        v = recordview_read_values(view_id="Default_view")
        v.update(
            { 'rdfs:label': 'Default record view'
            , 'annal:uri': 'annal:display/Default_view'
            })
        # Keys not expected in the loaded site default data
        v.pop('rdfs:comment', None)
        v.pop('annal:view_entity_type', None)
        self.assertDictionaryMatch(td, v) # actual, expect
        return
# -----------------------------------------------------------------------------
#
# RecordView edit view tests
#
# -----------------------------------------------------------------------------
class RecordViewEditViewTest(AnnalistTestCase):
"""
Tests for record view edit views
"""
    def setUp(self):
        """
        Create a test site, collection and logged-in test user, and build
        the field-choice option lists used by the form rendering checks.
        """
        init_annalist_test_site()
        self.testsite = Site(TestBaseUri, TestBaseDir)
        self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll"))
        self.no_options = [ FieldChoice('', label="(no options)") ]
        def special_field(fid):
            # True for field ids belonging to Annalist's built-in type
            # families (excluded from some option lists below)
            return (
                fid == "Entity_see_also" or
                fid.startswith("Field_") or
                fid.startswith("List_") or
                fid.startswith("Type_") or
                fid.startswith("View_") or
                fid.startswith("User_") or
                fid.startswith("Coll_") or
                fid.startswith("Vocab_") or
                fid.startswith("Enum_") or
                fid.startswith("Group_") or
                False
                )
        # Sorted field-id option lists with progressively more exclusions.
        # NOTE(review): "field_options_bib_no_special" excludes special
        # fields but retains "Bib_" ids, despite the name — confirm intent.
        self.field_options = sorted(
            [ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
              if fid != layout.INITIAL_VALUES_ID
            ])
        self.field_options_no_bibentry = sorted(
            [ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
              if fid != layout.INITIAL_VALUES_ID and not fid.startswith("Bib_")
            ])
        self.field_options_bib_no_special = sorted(
            [ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
              if fid != layout.INITIAL_VALUES_ID and not special_field(fid)
            ])
        self.field_options_no_special = sorted(
            [ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
              if fid != layout.INITIAL_VALUES_ID and
                 not ((fid.startswith("Bib_") or special_field(fid)))
            ])
        # log.info(self.field_options_no_bibentry)
        # For checking Location: header values...
        self.continuation_path = entitydata_list_type_url(
            coll_id="testcoll", type_id=layout.VIEW_TYPEID
            )
        self.continuation_url = self.continuation_path
        create_test_user(self.testcoll, "testuser", "testpassword")
        self.client = Client(HTTP_HOST=TestHost)
        loggedin = self.client.login(username="testuser", password="testpassword")
        self.assertTrue(loggedin)
        return
    def tearDown(self):
        # Remove collection-scope data created by each test
        resetSitedata(scope="collections")
        return
    @classmethod
    def setUpClass(cls):
        # No class-level fixtures beyond the superclass setup
        super(RecordViewEditViewTest, cls).setUpClass()
        return
    @classmethod
    def tearDownClass(cls):
        # Full site data reset after all tests in this class have run
        super(RecordViewEditViewTest, cls).tearDownClass()
        resetSitedata()
        return
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def _create_record_view(
self, view_id,
view_entity_type="annal:View",
extra_field=None, extra_field_uri=None
):
"Helper function creates record view entry with supplied view_id"
t = RecordView.create(
self.testcoll, view_id,
recordview_create_values(
view_id=view_id,
view_entity_type=view_entity_type,
extra_field=extra_field, extra_field_uri=extra_field_uri
)
)
return t
    def _check_recordview_values(
        self, view_id, view_uri=None,
        view_entity_type="annal:View",
        update="RecordView",
        num_fields=4, field3_placement="small:0,12",
        extra_field=None, extra_field_uri=None,
        update_dict=None,
        ):
        """
        Check stored content of record view entry with supplied view_id
        against expected test values; returns the loaded RecordView.
        Entries in update_dict override expected values; a value of None
        means the key is expected to be absent.
        """
        self.assertTrue(RecordView.exists(self.testcoll, view_id))
        t = RecordView.load(self.testcoll, view_id)
        self.assertEqual(t.get_id(), view_id)
        self.assertEqual(t.get_view_url(), TestHostUri + recordview_url("testcoll", view_id))
        v = recordview_values(
            view_id=view_id, view_uri=view_uri, update=update,
            view_entity_type=view_entity_type,
            num_fields=num_fields, field3_placement=field3_placement,
            extra_field=extra_field, extra_field_uri=extra_field_uri
            )
        if update_dict:
            v.update(update_dict)
            # None-valued overrides mean "key should not be present"
            for k in update_dict:
                if update_dict[k] is None:
                    v.pop(k, None)
        # log.info("*** actual: %r"%(t.get_values(),))
        # log.info("*** expect: %r"%(v,))
        self.assertDictionaryMatch(t.get_values(), v)
        return t
# Check context values common to all view fields
#@@TODO: remove when references below replaced
# see: _check_view_view_context_fields
    def _check_common_view_context_fields(self, response,
            action="",
            view_id="(?view_id)", orig_view_id=None,
            view_label="(?view_label)",
            view_entity_type="(?view_entity_type)",
            view_edit_view=True
            ):
        """
        Check response context values common to all view-editing forms:
        entity/type/collection ids, action, and the first five fields of
        the View_view form (id, label, help, entity type, editable flag).
        """
        self.assertEqual(response.context['entity_id'], view_id)
        self.assertEqual(response.context['orig_id'], orig_view_id)
        self.assertEqual(response.context['type_id'], '_view')
        self.assertEqual(response.context['orig_type'], '_view')
        self.assertEqual(response.context['coll_id'], 'testcoll')
        self.assertEqual(response.context['action'], action)
        self.assertEqual(response.context['view_id'], 'View_view')
        # Fields
        #
        # NOTE: context['fields'][i]['field_id'] comes from FieldDescription instance via
        # bound_field, so type prefix is stripped. This does not apply to the field
        # ids actually coming from the view form.
        #
        self.assertEqual(len(response.context['fields']), 6)
        f0 = context_view_field(response.context, 0, 0)
        f1 = context_view_field(response.context, 1, 0)
        f2 = context_view_field(response.context, 2, 0)
        f3 = context_view_field(response.context, 3, 0)
        f4 = context_view_field(response.context, 4, 0)
        # 1st field - Id
        check_context_field(self, f0,
            field_id= "View_id",
            field_name= "entity_id",
            field_label= "View Id",
            field_placeholder= "(view id)",
            field_property_uri= "annal:id",
            field_render_type= "EntityId",
            field_value_mode= "Value_direct",
            field_value_type= "annal:EntityRef",
            field_placement= "small-12 medium-6 columns",
            field_value= view_id,
            options= self.no_options
            )
        # 2nd field - Label
        check_context_field(self, f1,
            field_id= "View_label",
            field_name= "View_label",
            field_label= "Label",
            field_placeholder= "(view label)",
            field_property_uri= "rdfs:label",
            field_render_type= "Text",
            field_value_mode= "Value_direct",
            field_value_type= "annal:Text",
            field_placement= "small-12 columns",
            field_value= view_label,
            options= self.no_options
            )
        # 3rd field - comment
        check_context_field(self, f2,
            field_id= "View_comment",
            field_name= "View_comment",
            field_label= "Help",
            field_property_uri= "rdfs:comment",
            field_render_type= "Markdown",
            field_value_mode= "Value_direct",
            field_value_type= "annal:Richtext",
            field_placement= "small-12 columns",
            options= self.no_options
            )
        # 4th field - type of entity for view
        check_context_field(self, f3,
            field_id= "View_entity_type",
            field_name= "View_entity_type",
            field_property_uri= "annal:view_entity_type",
            field_render_type= "Identifier",
            field_value_mode= "Value_direct",
            field_value_type= "annal:Identifier",
            field_value= view_entity_type,
            options= self.no_options
            )
        # 5th field - editable view option
        check_context_field(self, f4,
            field_id= "View_edit_view",
            field_name= "View_edit_view",
            field_property_uri= "annal:open_view",
            field_render_type= "CheckBox",
            field_value_mode= "Value_direct",
            field_value_type= "annal:Boolean",
            field_value= view_edit_view,
            options= self.no_options
            )
        return
# Check context values for view using default record view
    def _check_default_view_context_fields(self, response,
            action="",
            view_id="(?view_id)", orig_view_id=None,
            view_uri=None,
            view_label="(?view_label)",
            view_descr=None,
            view_entity_type="(?view_entity_type)",
            view_edit_view=True,
            view_fields=None, field_choices=None,
            add_field=None, remove_field=None,
            move_up=None, move_down=None,
            update="RecordView",
            continuation_url=None
            ):
        """
        Check full response context for a form using the default record
        view, by comparing against a generated expected-context dictionary.
        """
        expect_context = view_view_context_data(
            coll_id="testcoll", view_id=view_id, orig_id=orig_view_id,
            action=action,
            view_uri=view_uri,
            view_label=view_label,
            view_descr=view_descr,
            view_entity_type=view_entity_type,
            view_edit_view=view_edit_view,
            view_fields=view_fields, field_choices=field_choices,
            add_field=add_field, remove_field=remove_field,
            move_up=move_up, move_down=move_down,
            update=update,
            continuation_url=continuation_url
            )
        actual_context = context_bind_fields(response.context)
        self.assertEqual(len(response.context['fields']), 6)
        self.assertDictionaryMatch(actual_context, expect_context)
        return
# The View_view test case checks descriptions of repeat-field-groups that are not
# covererd by the Default_view case.
    def _check_view_view_context_fields(self, response,
            action="",
            num_fields=6):
        """
        Check response context for the View_view form itself, including
        the repeated field-list (6th field); num_fields=7 indicates a
        newly added blank field row is expected.
        """
        # Common fields
        self._check_common_view_context_fields(response,
            action=action,
            view_id="View_view", orig_view_id="View_view",
            view_label="View definition",
            view_entity_type="annal:View",
            view_edit_view=False
            )
        # 6th field - field list
        f5 = context_view_field(response.context, 5, 0)
        expect_field_data = (
            [
              { 'annal:field_placement': 'small:0,12;medium:0,6'
              , 'annal:field_id': layout.FIELD_TYPEID+'/View_id'
              }
            , { 'annal:field_placement': 'small:0,12'
              , 'annal:field_id': layout.FIELD_TYPEID+'/View_label'
              }
            , { 'annal:field_placement': 'small:0,12'
              , 'annal:field_id': layout.FIELD_TYPEID+'/View_comment'
              }
            , { 'annal:field_placement': 'small:0,12'
              , 'annal:field_id': layout.FIELD_TYPEID+'/View_entity_type'
              }
            , { 'annal:field_placement': 'small:0,12;medium:0,6'
              , 'annal:field_id': layout.FIELD_TYPEID+'/View_edit_view'
              }
            , { 'annal:field_placement': 'small:0,12'
              , 'annal:field_id': layout.FIELD_TYPEID+'/View_fields'
              }
            ])
        if num_fields == 7:
            # New blank field, if selected
            expect_field_data.append(
                { 'annal:property_uri': None
                , 'annal:field_placement': None
                , 'annal:field_id': None
                })
        # log.info(repr(r.context['fields'][5]['field_value']))
        check_context_field(self, f5,
            field_id= "View_fields",
            field_name= "View_fields",
            field_label= "Fields",
            field_property_uri= "annal:view_fields",
            field_render_type= "Group_Seq_Row",
            field_value_mode= "Value_direct",
            field_value_type= "annal:View_field",
            field_value= expect_field_data,
            options= self.no_options
            )
        return
# -----------------------------------------------------------------------------
# Form rendering tests
# -----------------------------------------------------------------------------
    def test_get_form_rendering(self):
        """
        GET a new-view form and check that the rendered HTML contains the
        expected form rows for each field of the View_view form.
        """
        u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
        r = self.client.get(u+"?continuation_url=/xyzzy/")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Tooltips are read back from the response context so the expected
        # HTML matches whatever help text the field descriptions carry.
        # NOTE(review): 'field_tooltip_test' appears to be a test-support
        # key on FieldDescription — confirm against FieldDescription source.
        field_vals = default_fields(
            coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="00000001",
            tooltip1=context_view_field(r.context, 0, 0)['field_tooltip'],
            tooltip2=context_view_field(r.context, 1, 0)['field_tooltip'],
            tooltip3=context_view_field(r.context, 2, 0)['field_tooltip'],
            tooltip4=context_view_field(r.context, 3, 0)['field_tooltip'],
            tooltip5=context_view_field(r.context, 4, 0)['field_tooltip'],
            tooltip6f1=context_view_field(r.context, 5, 0).
                       _field_description['group_field_descs'][0]['field_tooltip_test']
            )
        # Expected form row: View Id
        formrow1 = """
            <div class="small-12 medium-6 columns" title="%(tooltip1)s">
              <div class="row view-value-row">
                <div class="%(label_classes)s">
                  <span>View Id</span>
                </div>
                <div class="%(input_classes)s">
                  <input type="text" size="64" name="entity_id"
                         placeholder="(view id)" value="%(entity_id)s"/>
                </div>
              </div>
            </div>
            """%field_vals(width=6)
        # Expected form row: Label
        formrow2 = """
            <div class="small-12 columns" title="%(tooltip2)s">
              <div class="row view-value-row">
                <div class="%(label_classes)s">
                  <span>Label</span>
                </div>
                <div class="%(input_classes)s">
                  <input type="text" size="64" name="View_label"
                         placeholder="(view label)"
                         value="%(default_label_esc)s"/>
                </div>
              </div>
            </div>
            """%field_vals(width=12)
        # Expected form row: Help (comment)
        formrow3 = """
            <div class="small-12 columns" title="%(tooltip3)s">
              <div class="row view-value-row">
                <div class="%(label_classes)s">
                  <span>Help</span>
                </div>
                <div class="%(input_classes)s">
                  <textarea cols="64" rows="6" name="View_comment"
                            class="small-rows-4 medium-rows-8"
                            placeholder="(description of record view)">
                      %(default_comment_esc)s
                  </textarea>
                </div>
              </div>
            </div>
            """%field_vals(width=12)
        # Expected form row: View entity type
        formrow4 = """
            <div class="small-12 columns" title="%(tooltip4)s">
              <div class="row view-value-row">
                <div class="%(label_classes)s">
                  <span>View entity type</span>
                </div>
                <div class="%(input_classes)s">
                  <input type="text" size="64" name="View_entity_type"
                         placeholder="(Entity type URI/CURIE displayed by view)"
                         value=""/>
                </div>
              </div>
            </div>
            """%field_vals(width=12)
        # Expected form row: editable-view checkbox
        formrow5 = """
            <div class="small-12 medium-6 columns" title="%(tooltip5)s">
              <div class="row view-value-row">
                <div class="%(label_classes)s">
                  <span>Editable view?</span>
                </div>
                <div class="%(input_classes)s">
                    <input type="checkbox" name="View_edit_view" value="Yes" checked="checked" />
                    <span class="value-placeholder">(edit view from edit entity form)</span>
                </div>
              </div>
            </div>
            """%field_vals(width=6)
        # Expected form row: field-list row select checkbox
        formrow6 = """
            <div class="small-1 columns checkbox-in-edit-padding">
              <input type="checkbox" class="select-box right"
                     name="View_fields__select_fields"
                     value="0" />
            </div>
            """
        # Expected form row: first field-reference selector in field list
        formrow6f1 = ("""
            <div class="small-12 medium-4 columns" title="%(tooltip6f1)s">
              <div class="row show-for-small-only">
                <div class="view-label small-12 columns">
                  <span>Field ref</span>
                </div>
              </div>
              <div class="row view-value-col">
                <div class="view-value small-12 columns">
            """+
              render_select_options(
                "View_fields__0__View_field_sel", "Field ref",
                no_selection("(field ref)") + get_site_default_entity_fields_sorted(),
                layout.FIELD_TYPEID+"/Entity_id",
                placeholder="(field reference)"
                )+
            """
                </div>
              </div>
            </div>
            """)%field_vals(width=4)
        # log.info("*** View content: "+r.content)
        self.assertContains(r, formrow1, html=True)
        self.assertContains(r, formrow2, html=True)
        self.assertContains(r, formrow3, html=True)
        self.assertContains(r, formrow4, html=True)
        self.assertContains(r, formrow5, html=True)
        self.assertContains(r, formrow6, html=True)
        self.assertContains(r, formrow6f1, html=True)
        return
    def test_get_new(self):
        """
        GET a new-view form and check the response context values and
        initially-created fields for a new record view.
        """
        u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
        r = self.client.get(u+"?continuation_url=/xyzzy/")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context
        # view_url = collection_entity_view_url(
        #     coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="00000001"
        #     )
        self.assertEqual(r.context['coll_id'], "testcoll")
        self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
        self.assertEqual(r.context['entity_id'], "00000001")
        self.assertEqual(r.context['orig_id'], None)
        self.assertEqual(r.context['entity_uri'], None)
        self.assertEqual(r.context['action'], "new")
        self.assertEqual(r.context['edit_view_button'], False)
        self.assertEqual(r.context['continuation_url'], "/xyzzy/")
        # Fields initially created
        self._check_default_view_context_fields(r,
            action="new",
            view_id="00000001", orig_view_id=None,
            view_label="", # default_label("testcoll", layout.VIEW_TYPEID, "00000001"),
            view_entity_type="",
            # view_url=recordview_url("testcoll", "00000001"),
            field_choices=self.field_options_no_special,
            continuation_url="/xyzzy/"
            )
        return
    def test_get_copy(self):
        """
        GET a copy-view form for Default_view and check the response
        context; the copy is presented with a new id "Default_view_01".
        """
        u = entitydata_edit_url(
            "copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
            )
        r = self.client.get(u)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context (values read from test data fixture)
        # view_url = collection_entity_view_url(
        #     coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="Default_view"
        #     )
        self.assertEqual(r.context['coll_id'], "testcoll")
        self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
        self.assertEqual(r.context['entity_id'], "Default_view_01")
        self.assertEqual(r.context['orig_id'], "Default_view")
        self.assertEqual(r.context['entity_uri'], None)
        self.assertEqual(r.context['action'], "copy")
        self.assertEqual(r.context['edit_view_button'], False)
        self.assertEqual(r.context['continuation_url'], "")
        # Fields
        self._check_default_view_context_fields(r,
            action="copy",
            view_id="Default_view_01", orig_view_id="Default_view",
            view_label="Default record view",
            # view_url=view_url,
            view_uri=None,
            view_entity_type="",
            field_choices=self.field_options_no_special,
            continuation_url=""
            )
        return
def test_get_copy_not_exists(self):
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID,
entity_id="noview", view_id="View_view"
)
r = self.client.get(u)
# log.info(r.content)
self.check_entity_not_found_response(r,
err_msg=make_message(
message.ENTITY_DOES_NOT_EXIST,
type_id=layout.VIEW_TYPEID,
id="noview",
label=error_label("testcoll", layout.VIEW_TYPEID, "noview")
)
)
return
    def test_get_edit(self):
        """
        GET an edit-view form for Default_view and check the response
        context values and form fields.
        """
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
            )
        r = self.client.get(u)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context (values read from test data fixture)
        # view_url = collection_entity_view_url(
        #     coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="Default_view"
        #     )
        self.assertEqual(r.context['coll_id'], "testcoll")
        self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
        self.assertEqual(r.context['entity_id'], "Default_view")
        self.assertEqual(r.context['orig_id'], "Default_view")
        self.assertEqual(r.context['entity_uri'], "annal:display/Default_view")
        self.assertEqual(r.context['action'], "edit")
        self.assertEqual(r.context['edit_view_button'], False)
        self.assertEqual(r.context['continuation_url'], "")
        # Fields
        self._check_default_view_context_fields(r,
            action="edit",
            view_id="Default_view", orig_view_id="Default_view",
            view_label="Default record view",
            # view_url=view_url,
            view_uri="annal:display/Default_view",
            view_entity_type="",
            field_choices=self.field_options_no_special,
            continuation_url=""
            )
        return
def test_get_edit_not_exists(self):
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID,
entity_id="noview", view_id="View_view"
)
r = self.client.get(u)
# log.info(r.content)
self.check_entity_not_found_response(r,
err_msg=make_message(
message.ENTITY_DOES_NOT_EXIST,
type_id=layout.VIEW_TYPEID,
id="noview",
label=error_label("testcoll", layout.VIEW_TYPEID, "noview")
)
)
return
# Test rendering of view with repeated field structure - in this case, View_view
    # Test rendering of view with repeated field structure - in this case, View_view
    def test_get_recordview_edit(self):
        """
        GET an edit form for View_view itself, exercising the repeated
        field-group rendering, and check the response context.
        """
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="View_view",
            view_id="View_view"
            )
        r = self.client.get(u)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context (values read from test data fixture)
        # view_url = collection_entity_view_url(
        #     coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="View_view"
        #     )
        self.assertEqual(r.context['coll_id'], "testcoll")
        self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
        self.assertEqual(r.context['entity_id'], "View_view")
        self.assertEqual(r.context['orig_id'], "View_view")
        self.assertEqual(r.context['entity_uri'], "annal:display/View_view")
        self.assertEqual(r.context['action'], "edit")
        self.assertEqual(r.context['continuation_url'], "")
        # Fields
        self._check_view_view_context_fields(r, action="edit")
        return
    def test_get_recordview_edit_add_field(self):
        """
        GET an edit form for View_view with an 'add_field' query parameter
        and check that the context shows an extra (7th) blank field row.
        """
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="View_view",
            view_id="View_view"
            )
        u = uri_with_params(u, {'add_field': 'View_fields'})
        r = self.client.get(u)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context (values read from test data fixture)
        # view_url = collection_entity_view_url(
        #     coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="View_view"
        #     )
        self.assertEqual(r.context['coll_id'], "testcoll")
        self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
        self.assertEqual(r.context['entity_id'], "View_view")
        self.assertEqual(r.context['orig_id'], "View_view")
        self.assertEqual(r.context['entity_uri'], "annal:display/View_view")
        self.assertEqual(r.context['action'], "edit")
        self.assertEqual(r.context['continuation_url'], "")
        # View context
        self._check_view_view_context_fields(r, action="edit", num_fields=7)
        return
# -----------------------------------------------------------------------------
# Form response tests
# -----------------------------------------------------------------------------
# -------- new view --------
    def test_post_new_view(self):
        """
        POST a new-view form; expect a redirect to the continuation URL
        and creation of the new record view.
        """
        self.assertFalse(RecordView.exists(self.testcoll, "newview"))
        f = view_view_form_data(view_id="newview", action="new", update="NewView")
        u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
        r = self.client.post(u, f)
        # print r.content
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that new record type exists
        self._check_recordview_values("newview", update="NewView", num_fields=0)
        return
    def test_post_new_view_cancel(self):
        """
        POST a new-view form with Cancel; expect a redirect and no new
        record view created.
        """
        self.assertFalse(RecordView.exists(self.testcoll, "newview"))
        f = view_view_form_data(
            view_id="newview",
            action="new", cancel="Cancel", update="Updated RecordView"
            )
        u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that new record type still does not exist
        self.assertFalse(RecordView.exists(self.testcoll, "newview"))
        return
    def test_post_new_view_missing_id(self):
        """
        POST a new-view form with no view id; expect the form to be
        redisplayed (200) with an error heading.
        """
        f = view_view_form_data(
            view_id="",
            action="new", update="RecordView"
            )
        u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
        # log.info("u %s, f %r"%(u,f))
        r = self.client.post(u, f)
        # print r.content
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
        # Test context
        # NOTE(review): orig_view_id="orig_view_id" presumably matches the
        # form-data default original id — confirm against view_view_form_data
        self._check_default_view_context_fields(r,
            action="new",
            view_id="", orig_view_id="orig_view_id",
            view_label=None,
            view_entity_type="annal:View",
            )
        return
    def test_post_new_view_invalid_id(self):
        """
        POST a new-view form with a syntactically invalid view id; expect
        the form to be redisplayed (200) with an error heading.
        """
        f = view_view_form_data(
            view_id="!badview", orig_id="orig_view_id",
            action="new", update="RecordView"
            )
        u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
        # log.info("u %s, f %r"%(u,f))
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
        # Check context
        self._check_default_view_context_fields(r,
            action="new",
            view_id="!badview", orig_view_id="orig_view_id",
            view_label=None,
            view_entity_type="annal:View",
            )
        return
# -------- copy view --------
    def test_post_copy_view(self):
        """
        POST a copy-view form copying Default_view to a new id; expect a
        redirect and creation of the copied record view.
        """
        self.assertFalse(RecordView.exists(self.testcoll, "copyview"))
        f = view_view_form_data(
            view_id="copyview",
            orig_id="Default_view", orig_coll="_annalist_site", action="copy",
            update="RecordView"
            )
        u = entitydata_edit_url(
            "copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that new record type exists
        self._check_recordview_values("copyview", update="RecordView")
        return
    def test_post_copy_view_cancel(self):
        """
        POST a copy-view form with Cancel; expect a redirect and no copied
        record view created.
        """
        self.assertFalse(RecordView.exists(self.testcoll, "copyview"))
        f = view_view_form_data(
            view_id="copyview", orig_id="Default_view",
            action="copy", cancel="Cancel", update="RecordView"
            )
        u = entitydata_edit_url(
            "copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that target record view still does not exist
        self.assertFalse(RecordView.exists(self.testcoll, "copyview"))
        return
    def test_post_copy_view_missing_id(self):
        """
        POST a copy-view form with no new id; expect the form to be
        redisplayed (200) with an error heading.
        """
        f = view_view_form_data(
            view_id="", orig_id="Default_view",
            action="copy", update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
        # Test context
        self._check_default_view_context_fields(r,
            action="copy",
            view_id="", orig_view_id="Default_view",
            view_label=None,
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        return
    def test_post_copy_view_invalid_id(self):
        """
        POST a copy-view form with an invalid new id; expect the form to
        be redisplayed (200) with an error heading.
        """
        f = view_view_form_data(
            view_id="!badview", orig_id="Default_view", action="copy", update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
        # Test context
        self._check_default_view_context_fields(r,
            action="copy",
            view_id="!badview", orig_view_id="Default_view",
            view_label=None,
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        return
# -------- edit view --------
    def test_post_edit_view(self):
        """
        POST an edit-view form for an existing view; expect a redirect
        and the stored record view values to be updated.
        """
        self._create_record_view("editview")
        self._check_recordview_values("editview")
        f = view_view_form_data(
            view_id="editview", orig_id="editview",
            action="edit",
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that new record view exists
        self._check_recordview_values("editview", update="Updated RecordView")
        return
    def test_post_edit_view_new_id(self):
        """
        POST an edit-view form that renames the view; expect a redirect,
        the new id to exist, and the original id to be removed.
        """
        self._create_record_view("editview1")
        self._check_recordview_values("editview1")
        f = view_view_form_data(
            view_id="editview2", orig_id="editview1",
            action="edit",
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview1", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that new record view exists and old does not
        self.assertFalse(RecordView.exists(self.testcoll, "editview1"))
        self._check_recordview_values("editview2", update="Updated RecordView")
        return
    def test_post_edit_view_cancel(self):
        """
        POST an edit-view form with Cancel; expect a redirect and the
        stored record view to remain unchanged.
        """
        self._create_record_view("editview")
        self._check_recordview_values("editview")
        f = view_view_form_data(
            view_id="editview", orig_id="editview",
            action="edit", cancel="Cancel",
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that target record view still does not exist and unchanged
        self._check_recordview_values("editview")
        return
    def test_post_edit_view_missing_id(self):
        """
        POST an edit-view form with the id blanked; expect the form to
        be redisplayed (200) and the stored view left unchanged.
        """
        self._create_record_view("editview")
        self._check_recordview_values("editview")
        # Form post with ID missing
        f = view_view_form_data(
            view_id="", orig_id="editview",
            action="edit",
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
        # Test context
        self._check_default_view_context_fields(r,
            action="edit",
            view_id="", orig_view_id="editview",
            view_label=None,
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        # Check original data is unchanged
        self._check_recordview_values("editview")
        return
    def test_post_edit_view_invalid_id(self):
        """An edit post with a syntactically invalid view id ('!badview')
        redisplays the form with an error, leaving the record unchanged."""
        self._create_record_view("editview")
        self._check_recordview_values("editview")
        # Form post with invalid ID
        f = view_view_form_data(
            view_id="!badview", orig_id="editview", action="edit", update="Updated RecordView"
            )
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
        # Test context
        self._check_default_view_context_fields(r,
            action="edit",
            view_id="!badview", orig_view_id="editview",
            view_label=None,
            view_entity_type="annal:View",
            update="Updated RecordView"
            )
        # Check original data is unchanged
        self._check_recordview_values("editview")
        return
    def test_post_edit_view_field_placement_missing(self):
        """Editing with an empty field-placement value is accepted: the
        post succeeds (302) and the stored record reflects the empty
        placement."""
        self._create_record_view("editview")
        self._check_recordview_values("editview")
        f = view_view_form_data(
            view_id="editview", orig_id="editview",
            action="edit", update="Updated RecordView",
            field3_placement=""
            )
        u = entitydata_edit_url(
            "edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        self.assertEqual(r['location'], self.continuation_url)
        # Check that new record view exists
        self._check_recordview_values("editview", update="Updated RecordView", field3_placement="")
        return
# -----------------------------------------------------------------------------
# Form response tests for view descriptions with repeating fields
# -----------------------------------------------------------------------------
    def test_post_add_field(self):
        """Posting 'add field' redirects back to the edit form with a
        continuation URL; the re-rendered form shows the extra field."""
        self._create_record_view("addfieldview")
        self._check_recordview_values("addfieldview")
        f = view_view_form_data(
            view_id="addfieldview", orig_id="addfieldview",
            action="edit",
            view_entity_type="annal:View",
            add_field=True
            )
        u = entitydata_edit_url(
            action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="addfieldview",
            view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        # Redirects to the same edit URL with the continuation appended
        v = u + "?continuation_url=" + self.continuation_path
        self.assertEqual(v, r['location'])
        # Retrieve from redirect location, and test result
        r = self.client.get(v)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context
        self._check_default_view_context_fields(r,
            action="edit",
            add_field=True,
            view_id="addfieldview", orig_view_id="addfieldview",
            view_label=None,
            view_entity_type="annal:View",
            )
        return
    def test_post_remove_field(self):
        """Posting 'remove field' for a selected field (index 3) redirects
        back to the edit form, which re-renders without that field."""
        self._create_record_view("removefieldview")
        self._check_recordview_values("removefieldview")
        f = view_view_form_data(
            view_id="removefieldview", orig_id="removefieldview",
            action="edit",
            remove_fields=['3']
            )
        u = entitydata_edit_url(
            action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="removefieldview",
            view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = u + "?continuation_url=" + self.continuation_path
        self.assertEqual(v, r['location'])
        # Retrieve from redirect location, and test result
        r = self.client.get(v)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context
        self._check_default_view_context_fields(r,
            action="edit",
            remove_field=True,
            view_id="removefieldview", orig_view_id="removefieldview",
            view_label=None,
            view_entity_type="annal:View",
            )
        return
    def test_post_remove_no_field_selected(self):
        """Posting 'remove field' with nothing selected redisplays the form
        (200) with an error heading and message; nothing is removed."""
        self._create_record_view("removefieldview")
        self._check_recordview_values("removefieldview")
        f = view_view_form_data(
            view_id="removefieldview", orig_id="removefieldview",
            action="edit",
            remove_fields="no-selection"
            )
        u = entitydata_edit_url(
            action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="removefieldview",
            view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        self.assertContains(r, "<h3>%s</h3>"%(message.REMOVE_FIELD_ERROR,))
        self.assertContains(r, """<p class="messages">%s</p>"""%(message.NO_FIELD_SELECTED,))
        # Test context
        self._check_default_view_context_fields(r,
            action="edit",
            view_id="removefieldview", orig_view_id="removefieldview",
            view_label=None,
            view_entity_type="annal:View",
            )
        return
    def test_post_move_up_fields(self):
        """Posting 'move up' for fields 2 and 3 redirects back to the edit
        form; the re-rendered context shows the new field order."""
        self._create_record_view("movefieldview")
        self._check_recordview_values("movefieldview")
        f = view_view_form_data(
            view_id="movefieldview", orig_id="movefieldview",
            action="edit",
            view_entity_type="annal:View",
            move_up_fields=["2","3"]
            )
        u = entitydata_edit_url(
            action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="movefieldview",
            view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = u + "?continuation_url=" + self.continuation_path
        self.assertEqual(v, r['location'])
        # Retrieve from redirect location, and test result
        r = self.client.get(v)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        # Test context
        self._check_default_view_context_fields(r,
            action="edit",
            move_up=[2,3],
            view_id="movefieldview", orig_view_id="movefieldview",
            view_label=None,
            view_entity_type="annal:View",
            )
        return
    def test_post_move_down_fields(self):
        """Posting 'move down' for field 1 redirects back to the edit form.

        NOTE(review): unlike test_post_move_up_fields, this test does not
        re-check the rendered context fields after the redirect -- confirm
        whether that omission is intentional.
        """
        self._create_record_view("movefieldview")
        self._check_recordview_values("movefieldview")
        f = view_view_form_data(
            view_id="movefieldview", orig_id="movefieldview",
            action="edit",
            view_entity_type="annal:View",
            move_down_fields=["1"]
            )
        u = entitydata_edit_url(
            action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="movefieldview",
            view_id="View_view"
            )
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = u + "?continuation_url=" + self.continuation_path
        self.assertEqual(v, r['location'])
        # Retrieve from redirect location, and test result
        r = self.client.get(v)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.reason_phrase, "OK")
        return
# -----------------------------------------------------------------------------
#
# ConfirmRecordViewDeleteTests tests for completion of record deletion
#
# -----------------------------------------------------------------------------
class ConfirmRecordViewDeleteTests(AnnalistTestCase):
    """
    Tests for record view deletion on response to confirmation form
    """
    def setUp(self):
        """Create a test site/collection with a logged-in test user."""
        init_annalist_test_site()
        self.testsite = Site(TestBaseUri, TestBaseDir)
        self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll"))
        # Login and permissions
        create_test_user(self.testcoll, "testuser", "testpassword")
        self.client = Client(HTTP_HOST=TestHost)
        loggedin = self.client.login(username="testuser", password="testpassword")
        self.assertTrue(loggedin)
        return
    def tearDown(self):
        return
    def test_CollectionActionViewTest(self):
        self.assertEqual(RecordViewDeleteConfirmedView.__name__, "RecordViewDeleteConfirmedView", "Check RecordViewDeleteConfirmedView class name")
        return
    # NOTE: test_collection checks the appropriate response from clicking the delete button,
    # so here only need to test completion code.
    def test_post_confirmed_remove_view(self):
        """Positive confirmation deletes the record view and redirects to
        the collection edit page with info message parameters."""
        t = RecordView.create(self.testcoll, "deleteview", recordview_create_values("deleteview"))
        self.assertTrue(RecordView.exists(self.testcoll, "deleteview"))
        # Submit positive confirmation
        u = TestHostUri + recordview_edit_url("delete", "testcoll")
        f = recordview_delete_confirm_form_data("deleteview")
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = collection_edit_url("testcoll")
        e1 = "info_head="
        e2 = "info_message="
        e3 = "deleteview"
        e4 = "testcoll"
        self.assertIn(v, r['location'])
        self.assertIn(e1, r['location'])
        self.assertIn(e2, r['location'])
        self.assertIn(e3, r['location'])
        # Fix: e4 was defined but never checked -- assert the collection id
        # also appears in the redirect location, matching e1..e3 above.
        self.assertIn(e4, r['location'])
        # Confirm deletion
        self.assertFalse(RecordView.exists(self.testcoll, "deleteview"))
        return
# End.
#........1.........2.........3.........4.........5.........6.........7.........8
|
gklyne/annalist
|
src/annalist_root/annalist/tests/test_recordview.py
|
Python
|
mit
| 56,603
|
'''
Created on 17-Feb-2015
@author: Asawari.Vaidya
'''
from PythonNetBanxSDK.common.DomainObject import DomainObject
class Pagination(DomainObject):
    '''
    Pagination window for list requests: limit/offset plus an optional
    startDate/endDate range, populated via DomainObject.setProperties.
    '''
    def __init__(self, obj):
        '''
        Build from ``obj``, dispatching each known key to its setter.
        '''
        # Map incoming property names to their setter methods; unknown
        # keys are left to DomainObject.setProperties to deal with.
        handler = {
            'limit':     self.limit,
            'offset':    self.offset,
            'startDate': self.startDate,
            'endDate':   self.endDate,
        }
        if obj is not None:
            self.setProperties(obj, handler=handler)
    def limit(self, value):
        '''Setter for the page-size limit.'''
        # Write through __dict__ so the stored value does not shadow the
        # setter method via normal attribute assignment.
        self.__dict__['limit'] = value
    def offset(self, value):
        '''Setter for the page offset.'''
        self.__dict__['offset'] = value
    def startDate(self, value):
        '''Setter for the range start date.'''
        self.__dict__['startDate'] = value
    def endDate(self, value):
        '''Setter for the range end date.'''
        self.__dict__['endDate'] = value
|
OptimalPayments/Python_SDK
|
src/PythonNetBanxSDK/CardPayments/Pagination.py
|
Python
|
mit
| 1,081
|
from pikka_bird_collector.parsers.table import Table as Parser
from .base_port_command import BasePortCommand, Base
class Mysql(BasePortCommand):
    """
    Collector for MySQL (https://www.mysql.com/).
    The collector is enabled whenever non-empty settings are passed.
    Multiple instances running on the same box are supported; just specify
    each port within settings.
    By default, core status, master status, slave status, and slave hosts
    are gathered. Optionally, variables can be gathered.
    Because MySQL metrics are inconsistent in their representation of
    booleans (e.g. `ON`, `YES`, `Yes`) and to minimise payload size and
    downstream storage, all values are remapped if they match these. This
    probably won't cause you problems, but if encounter a string which is no
    longer a string, this is probably why. :)
    DEPENDENCIES:
        mysql
            Available in PATH.
    SETTINGS:
        minimal:
            {
                3306: None}
        supported:
            {
                3306: {
                    'user': "USER",
                    'password': "PASSWORD",
                    'collect': {
                        'master_status': False,
                        'slave_status': False,
                        'slave_hosts': False,
                        'variables': True}}}
    """
    # Sections gathered when the per-port 'collect' setting omits them.
    COLLECT_SETTING_DEFAULTS = {
        'master_status': True,
        'slave_hosts': True,
        'slave_status': True,
        'variables': False}
    CMD_SHOW_MASTER_STATUS = 'SHOW MASTER STATUS'
    CMD_SHOW_SLAVE_HOSTS = 'SHOW SLAVE HOSTS'
    CMD_SHOW_SLAVE_STATUS = 'SHOW SLAVE STATUS'
    CMD_SHOW_STATUS = 'SHOW /*!50002 GLOBAL */ STATUS'
    CMD_SHOW_VARIABLES = 'SHOW VARIABLES'
    PARSE_BOOLS = { # the stringy booleans are inconsistent
        'ON': True,
        'OFF': False,
        'YES': True,
        'NO': False,
        'Yes': True,
        'No': False}
    @staticmethod
    def command_tool(port, settings, command):
        # Build the `mysql` CLI argv used to run `command` against the
        # server on `port` (TCP via 127.0.0.1 only; socket not supported).
        settings = settings or {}
        c = ['mysql',
            '--host', '127.0.0.1', # socket not (yet) supported
            '--port', port,
            '--execute', command,
            '--batch',
            '--raw',
            '--column-names']
        if settings.get('user'):
            c.append('--user=%s' % settings['user'])
        if settings.get('password'):
            c.append('--password=%s' % settings['password'])
        return c
    def collect_port(self, port, settings):
        # Gather the configured metric sections for one MySQL instance.
        # Returns {} when the initial SHOW STATUS yields nothing, which is
        # treated as "service down".
        metrics = {}
        o = self.command_output(port, settings, self.CMD_SHOW_STATUS)
        parser = Parser(
            converter_key=Base.parse_str_setting_key,
            converter_value=Mysql.__parse_str_setting_value)
        ms = parser.parse(o)
        if len(ms):
            metrics['status'] = ms
        else:
            return metrics # service down; give up
        if self.collect_setting('master_status', settings):
            o = self.command_output(port, settings, self.CMD_SHOW_MASTER_STATUS)
            parser = Parser(
                converter_key=Base.parse_str_setting_key,
                converter_value=Mysql.__parse_str_setting_value,
                tag_header_col='file')
            ms = parser.parse(o)
            if len(ms):
                metrics['master_status'] = ms
        if self.collect_setting('slave_status', settings):
            o = self.command_output(port, settings, self.CMD_SHOW_SLAVE_STATUS)
            parser = Parser(
                converter_key=Base.parse_str_setting_key,
                converter_value=Mysql.__parse_str_setting_value,
                transpose=True)
            ms = parser.parse(o)
            if len(ms):
                metrics['slave_status'] = ms
        if self.collect_setting('slave_hosts', settings):
            o = self.command_output(port, settings, self.CMD_SHOW_SLAVE_HOSTS)
            parser = Parser(
                converter_key=Base.parse_str_setting_key,
                converter_value=Mysql.__parse_str_setting_value,
                tag_header_col='server_id')
            ms = parser.parse(o)
            if len(ms):
                metrics['slave_hosts'] = ms
        if self.collect_setting('variables', settings):
            o = self.command_output(port, settings, self.CMD_SHOW_VARIABLES)
            parser = Parser(
                converter_key=Base.parse_str_setting_key,
                converter_value=Mysql.__parse_str_setting_value)
            ms = parser.parse(o)
            if len(ms):
                metrics['variables'] = ms
        return metrics
    @staticmethod
    def __parse_str_setting_value(value):
        # Normal value parsing, then remap MySQL's stringy booleans
        # (ON/OFF, YES/NO, Yes/No) to real bools.
        v = Base.parse_str_setting_value(value)
        if v in Mysql.PARSE_BOOLS:
            v = Mysql.PARSE_BOOLS[v]
        return v
|
tiredpixel/pikka-bird-collector-py
|
pikka_bird_collector/collectors/mysql.py
|
Python
|
mit
| 5,160
|
from django.db.models import signals
from django.utils.functional import curry
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.contrib.admin.models import LogEntry
from django.contrib.sessions.models import Session
from django_extlog.models import ExtLog
class AuditLoggingMiddleware(object):
    """Old-style Django middleware that audit-logs model saves and deletes
    performed during state-changing requests.

    Signal receivers are connected per request (keyed by a dispatch_uid
    containing the request object) and disconnected in process_response.

    NOTE(review): ip_address is stored on the middleware instance, which is
    shared across requests -- concurrent requests could interleave
    addresses; confirm thread-safety requirements for this deployment.
    """
    # Last-seen client address; written per request in process_request.
    ip_address = None
    def process_request(self, request):
        # Only audit methods that can mutate state.
        if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if hasattr(request, 'user') and request.user.is_authenticated():
                user = request.user
            else:
                user = None
            session = request.session.session_key
            self.ip_address = request.META.get('REMOTE_ADDR', None)
            # Pre-bind user/session into the signal receivers for this request.
            update_post_save_info = curry(
                self._update_post_save_info,
                user,
                session,
            )
            update_post_delete_info = curry(
                self._update_post_delete_info,
                user,
                session,
            )
            # dispatch_uid includes the request so process_response can
            # disconnect exactly these receivers.
            signals.post_save.connect(
                update_post_save_info,
                dispatch_uid=(self.__class__, request,),
                weak=False
            )
            signals.post_delete.connect(
                update_post_delete_info,
                dispatch_uid=(self.__class__, request,),
                weak=False
            )
    def process_response(self, request, response):
        # Tear down the per-request receivers connected above.
        signals.post_save.disconnect(dispatch_uid=(self.__class__, request,))
        signals.post_delete.disconnect(dispatch_uid=(self.__class__, request,))
        return response
    def _save_to_log(self, instance, action, user):
        """Serialize `instance` and record an ExtLog row for `action`.

        Skips django_extlog's own models (avoids recursive logging) and
        anonymous actions (user is None).
        """
        content_type = ContentType.objects.get_for_model(instance)
        if content_type.app_label != 'django_extlog' and user:
            object_id = instance.id if hasattr(instance, 'id') else 0
            ExtLog.objects.create(
                object_id=object_id,
                app_name=content_type.app_label,
                model_name=content_type.model,
                action=action,
                object_instance=serializers.serialize('json', [instance]),
                user=user,
                ip=self.ip_address,
            )
    def _update_post_save_info(
        self,
        user,
        session,
        sender,
        instance,
        **kwargs
    ):
        # Ignore Django's own bookkeeping models.
        if sender in [LogEntry, Session]:
            return
        if kwargs['created']:
            self._save_to_log(instance, ExtLog.ACTION_TYPE_CREATE, user)
        else:
            self._save_to_log(instance, ExtLog.ACTION_TYPE_UPDATE, user)
    def _update_post_delete_info(
        self,
        user,
        session,
        sender,
        instance,
        **kwargs
    ):
        # Ignore Django's own bookkeeping models.
        if sender in [LogEntry, Session]:
            return
        self._save_to_log(instance, ExtLog.ACTION_TYPE_DELETE, user)
|
AxiaCore/django-extlog
|
django_extlog/middleware.py
|
Python
|
mit
| 2,967
|
import io
import os
import requests
import shutil
import sys
import zipfile
from waxe_image import __version__
API_RELEASES_URL = 'https://api.github.com/repos/waxe/waxe-image/releases'
NG_BUILD_FOLDER = 'website'
def main(argv=sys.argv):
    """Download the waxe-image-ng release asset matching the current
    package version and unpack it into NG_BUILD_FOLDER.

    argv[1], if given, overrides the output folder name. Exits with
    status 1 on bad arguments or an unremovable output folder; raises
    on HTTP errors or a missing release/asset.
    """
    if len(argv) > 2:
        print('Too many arguments')
        sys.exit(1)
    global NG_BUILD_FOLDER
    if len(argv) == 2:
        NG_BUILD_FOLDER = argv[1]
    # Start from a clean output folder.
    if os.path.isdir(NG_BUILD_FOLDER):
        shutil.rmtree(NG_BUILD_FOLDER)
    if os.path.exists(NG_BUILD_FOLDER):
        print('There is an issue with the folder %s' % NG_BUILD_FOLDER)
        sys.exit(1)
    r = requests.get(API_RELEASES_URL)
    if r.status_code != 200:
        raise ValueError('Bad status code %s' % r.status_code)
    releases = r.json()
    # Find the release tagged with this package's version.
    release = None
    for rel in releases:
        if rel['tag_name'] == __version__:
            release = rel
            break
    if not release:
        raise Exception('No release found for the current version %s' %
                        __version__)
    # Find the ng build asset within that release.
    ng_asset = None
    for asset in release['assets']:
        if 'waxe-image-ng.zip' in asset['browser_download_url']:
            ng_asset = asset
            break
    assert(ng_asset)
    url = ng_asset['browser_download_url']
    r = requests.get(url, stream=True)
    if r.status_code != 200:
        raise ValueError('Bad status code %s' % r.status_code)
    # Fix: r.content is bytes, so it must be wrapped in BytesIO --
    # io.StringIO(r.content) raises TypeError.
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(NG_BUILD_FOLDER)
|
waxe/waxe-image
|
waxe_image/scripts/get_ng_build.py
|
Python
|
mit
| 1,483
|
from django.conf import settings
from django.test.client import Client
from .base import BaseRenderer
from django_perseus.exceptions import RendererException
import logging
import mimetypes
import os
logger = logging.getLogger('perseus')
class DefaultRenderer(BaseRenderer):
    """Render site pages to static files under PERSEUS_SOURCE_DIR using
    Django's test Client, mirroring the URL structure on disk."""

    def render_path(self, path=None, view=None):
        """Fetch `path` via the test client and write the response body to
        a file whose location mirrors the URL path."""
        if path:
            # create deploy dir if not exists
            deploy_dir = settings.PERSEUS_SOURCE_DIR
            outpath = os.path.join(deploy_dir, '')
            if not os.path.exists(deploy_dir):
                os.makedirs(deploy_dir)
            # create index page
            if path == '/':
                response, mime = self.render_page(path)
                outpath = os.path.join(outpath, 'index{0}'.format(mime))
                self.save_page(response, outpath)
                return
            # Strip leading/trailing slashes to ready the path for
            # mimetyping. Fix: previously `realpath` was only bound when
            # the path started with '/', raising NameError otherwise.
            realpath = path[1:] if path.startswith('/') else path
            if realpath.endswith('/'):
                realpath = realpath[:-1]
            # split paths to find subdirs
            paths = [p for p in path.split('/') if p != '']
            # if found more than one, subdirectories exist
            if len(paths) > 1:
                outdir = os.path.abspath(os.path.join(deploy_dir, *paths[:-1]))
                if not os.path.exists(outdir):
                    os.makedirs(outdir)
                response, mime = self.render_page(path)
                outpath = os.path.join(outdir, '{0}{1}'.format(paths[-1], mime))
                self.save_page(response, outpath)
            else:
                response, mime = self.render_page(path)
                outpath = os.path.join(outpath, '{0}{1}'.format(realpath, mime))
                self.save_page(response, outpath)

    def render_page(self, path):
        """GET `path`; raise RendererException on any non-200 response.
        Returns (response, guessed file extension)."""
        response = self.client.get(path)
        # Fix: `is not 200` compared by identity and only passed because
        # CPython caches small ints; compare by value instead.
        if response.status_code != 200:
            raise RendererException(
                'Path: {0} returns status code: {1}.'.format(path, response.status_code))
        return response, self.get_mime(response)

    def get_mime(self, response):
        """Guess a file extension from the response Content-Type header."""
        mime = response['Content-Type']
        encoding = mime.split(';', 1)[0]
        return mimetypes.guess_extension(encoding)

    def save_page(self, response, outpath):
        """Write the response body to `outpath` in binary mode."""
        logger.debug(outpath)
        with open(outpath, 'wb') as f:
            f.write(response.content)

    def generate(self):
        """Render every path reported by self.paths()."""
        self.client = Client()
        for path in self.paths():
            self.render_path(path=path)
|
lockwooddev/django-perseus
|
django_perseus/renderers/default.py
|
Python
|
mit
| 2,589
|
#!/usr/bin/env python
# LIST NODES PY
# Extract just the nodes from the JSON file for human inspection
import argparse, json
# Parse the single positional argument: the JSON plan file to inspect.
parser = argparse.ArgumentParser()
parser.add_argument('plan', type=str, help='Plan data file')
args = parser.parse_args()
try:
    with open(args.plan) as fp:
        J = json.load(fp)
except Exception as e:
    # Report the filename plus the underlying error, then fail.
    print("could not read JSON in file: %s\n" % args.plan + str(e))
    exit(1)
# Print only the top-level keys (the node names) for human inspection.
for k in J.keys():
    print(k)
|
ECP-CANDLE/Supervisor
|
workflows/cp-leaveout/scripts/list-nodes.py
|
Python
|
mit
| 450
|
"""
Task
You are the manager of a supermarket.
You have a list of N items together with their prices that consumers bought on a particular day.
Your task is to print each item_name and net_price in order of its first occurrence.
item_name = Name of the item.
net_price = Quantity of the item sold multiplied by the price of each item.
Input Format
The first line contains the number of items, N.
The next N lines contains the item's name and price, separated by a space.
Constraints
0<N<=00
Output Format
Print the item_name and net_price in order of its first occurrence.
Sample Input
9
BANANA FRIES 12
POTATO CHIPS 30
APPLE JUICE 10
CANDY 5
APPLE JUICE 10
CANDY 5
CANDY 5
CANDY 5
POTATO CHIPS 30
Sample Output
BANANA FRIES 12
POTATO CHIPS 60
APPLE JUICE 20
CANDY 20
Explanation
BANANA FRIES: Quantity bought: 1, Price: 12
Net Price: 12
POTATO CHIPS: Quantity bought: 2, Price: 30
Net Price: 60
APPLE JUICE: Quantity bought: 2, Price: 10
Net Price: 20
CANDY: Quantity bought: 4, Price: 5
Net Price: 20
"""
from collections import OrderedDict
N = int(raw_input())
ordered_dict = OrderedDict()
for i in range(N):
l = raw_input().split()
item = " ".join(l[0:-1])
price = int("".join(l[-1:]))
if ordered_dict.has_key(item):
total_price = ordered_dict.get(item)
total_price += price
ordered_dict[item] = total_price
else:
ordered_dict[item] = price
for k, v in ordered_dict.items():
print k, v
|
spradeepv/dive-into-python
|
hackerrank/domain/python/collections/ordered-dict.py
|
Python
|
mit
| 1,458
|
# -*- coding: utf-8 -*-
"""Python library module for LSM6DS33 accelerometer and gyroscope.
This module for the Raspberry Pi computer helps interface the LSM6DS33
accelerometer and gyro.The library makes it easy to read
the raw accelerometer and gyro data through I²C interface and it also provides
methods for getting angular velocity and g forces.
The datasheet for the LSM6DS33 is available at
[https://www.pololu.com/file/download/LSM6DS33.pdf?file_id=0J1087]
"""
import math
from i2c import I2C
from time import sleep
from constants import *
class LSM6DS33(I2C):
    """ Set up and access LSM6DS33 accelerometer and gyroscope.
    """
    # Output registers used by the gyroscope
    gyro_registers = [
        LSM6DS33_OUTX_L_G,  # low byte of X value
        LSM6DS33_OUTX_H_G,  # high byte of X value
        LSM6DS33_OUTY_L_G,  # low byte of Y value
        LSM6DS33_OUTY_H_G,  # high byte of Y value
        LSM6DS33_OUTZ_L_G,  # low byte of Z value
        LSM6DS33_OUTZ_H_G,  # high byte of Z value
    ]
    # Output registers used by the accelerometer
    accel_registers = [
        LSM6DS33_OUTX_L_XL,  # low byte of X value
        LSM6DS33_OUTX_H_XL,  # high byte of X value
        LSM6DS33_OUTY_L_XL,  # low byte of Y value
        LSM6DS33_OUTY_H_XL,  # high byte of Y value
        LSM6DS33_OUTZ_L_XL,  # low byte of Z value
        LSM6DS33_OUTZ_H_XL,  # high byte of Z value
    ]
    def __init__(self, bus_id=1):
        """ Set up I2C connection and initialize some flags and values.
        """
        super(LSM6DS33, self).__init__(bus_id)
        self.is_accel_enabled = False
        self.is_gyro_enabled = False
        self.is_gyro_calibrated = False
        self.gyro_cal = [0, 0, 0]
        self.is_accel_calibrated = False
        self.accel_angle_cal = [0, 0]
    def __del__(self):
        """ Clean up."""
        try:
            # Power down accelerometer and gyro.
            # Fix: was self.writeRegister(...), but every other call site in
            # this class uses write_register (presumably the I2C base method
            # name -- confirm against i2c.I2C); the typo was silently
            # swallowed by the bare except below, so power-down never ran.
            self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL1_XL, 0x00)
            self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL2_G, 0x00)
            super(LSM6DS33, self).__del__()
            print('Destroying')
        except:
            # Deliberately best-effort: __del__ must never raise.
            pass
    def enable(self, accelerometer=True, gyroscope=True, calibration=True):
        """ Enable and set up the given sensors in the IMU."""
        if accelerometer:
            # 1.66 kHz (high performance) / +/- 4g
            # binary value -> 0b01011000, hex value -> 0x58
            self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL1_XL, 0x58)
            self.is_accel_enabled = True
        if gyroscope:
            # 208 Hz (high performance) / 1000 dps
            # binary value -> 0b01011000, hex value -> 0x58
            self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL2_G, 0x58)
            self.is_gyro_enabled = True
        if calibration:
            self.calibrate()
            self.is_gyro_calibrated = True
            self.is_accel_calibrated = True
    def calibrate(self, iterations=2000):
        """ Calibrate the gyro's raw values by averaging `iterations`
        at-rest samples for both gyro axes and accelerometer angles."""
        print('Calibrating Gryo and Accelerometer...')
        for i in range(iterations):
            gyro_raw = self.get_gyroscope_raw()
            accel_angles = self.get_accelerometer_angles()
            self.gyro_cal[0] += gyro_raw[0]
            self.gyro_cal[1] += gyro_raw[1]
            self.gyro_cal[2] += gyro_raw[2]
            self.accel_angle_cal[0] += accel_angles[0]
            self.accel_angle_cal[1] += accel_angles[1]
            sleep(0.004)
        self.gyro_cal[0] /= iterations
        self.gyro_cal[1] /= iterations
        self.gyro_cal[2] /= iterations
        self.accel_angle_cal[0] /= iterations
        self.accel_angle_cal[1] /= iterations
        print('Calibration Done')
    def get_gyroscope_raw(self):
        """ Return a 3D vector of raw gyro data.
        """
        # Check if gyroscope has been enabled
        if not self.is_gyro_enabled:
            raise(Exception('Gyroscope is not enabled!'))
        sensor_data = self.read_3d_sensor(LSM6DS33_ADDR, self.gyro_registers)
        # Return the vector, offset-corrected when calibration has run
        if self.is_gyro_calibrated:
            calibrated_gyro_data = sensor_data
            calibrated_gyro_data[0] -= self.gyro_cal[0]
            calibrated_gyro_data[1] -= self.gyro_cal[1]
            calibrated_gyro_data[2] -= self.gyro_cal[2]
            return calibrated_gyro_data
        else:
            return sensor_data
    def get_gyro_angular_velocity(self):
        """ Return a 3D vector of the angular velocity measured by the gyro
        in degrees/second.
        """
        # Check if gyroscope has been enabled
        if not self.is_gyro_enabled:
            raise(Exception('Gyroscope is not enabled!'))
        # Check if gyroscope has been calibrated
        if not self.is_gyro_calibrated:
            raise(Exception('Gyroscope is not calibrated!'))
        gyro_data = self.get_gyroscope_raw()
        gyro_data[0] = (gyro_data[0] * GYRO_GAIN) / 1000
        gyro_data[1] = (gyro_data[1] * GYRO_GAIN) / 1000
        gyro_data[2] = (gyro_data[2] * GYRO_GAIN) / 1000
        return gyro_data
    def get_accelerometer_raw(self):
        """ Return a 3D vector of raw accelerometer data.
        """
        # Check if accelerometer has been enabled
        if not self.is_accel_enabled:
            raise(Exception('Accelerometer is not enabled!'))
        return self.read_3d_sensor(LSM6DS33_ADDR, self.accel_registers)
    def get_accelerometer_g_forces(self):
        """ Return a 3D vector of the g forces measured by the accelerometer"""
        [x_val, y_val, z_val] = self.get_accelerometer_raw()
        x_val = (x_val * ACCEL_CONVERSION_FACTOR) / 1000
        y_val = (y_val * ACCEL_CONVERSION_FACTOR) / 1000
        z_val = (z_val * ACCEL_CONVERSION_FACTOR) / 1000
        return [x_val, y_val, z_val]
    def get_accelerometer_angles(self, round_digits=0):
        """ Return a 2D vector of roll and pitch angles,
        based on accelerometer g forces
        """
        # Get raw accelerometer g forces
        [acc_xg_force, acc_yg_force, acc_zg_force] = self.get_accelerometer_g_forces()
        # Calculate angles
        xz_dist = self._get_dist(acc_xg_force, acc_zg_force)
        yz_dist = self._get_dist(acc_yg_force, acc_zg_force)
        accel_roll_angle = math.degrees(math.atan2(acc_yg_force, xz_dist))
        accel_pitch_angle = -math.degrees(math.atan2(acc_xg_force, yz_dist))
        if self.is_accel_calibrated:
            accel_roll_angle -= self.accel_angle_cal[0]
            accel_pitch_angle -= self.accel_angle_cal[1]
            if round_digits != 0:
                return [round(accel_roll_angle, round_digits), round(accel_pitch_angle, round_digits)]
            else:
                return [accel_roll_angle, accel_pitch_angle]
        else:
            return [accel_roll_angle, accel_pitch_angle]
    def _get_dist(self, a, b):
        # Euclidean norm of the two components.
        return math.sqrt((a * a) + (b * b))
|
SvetoslavKuzmanov/altimu10v5
|
altimu10v5/lsm6ds33.py
|
Python
|
mit
| 6,967
|
from glob import glob
import numpy
import pickle
import os
# Run iterations of "count" to count the number of terms in each folder of zipped up pubmed articles
home = os.environ["HOME"]
scripts = "%s/SCRIPT/repofish/analysis/methods" %(home)
base = "%s/data/pubmed" %os.environ["LAB"]
outfolder = "%s/repos" %(base)
articles_folder = "%s/articles" %(base)
if not os.path.exists(outfolder):
os.mkdir(outfolder)
folders = [x for x in glob("%s/*" %articles_folder) if os.path.isdir(x)]
batch_size = 1000.0
iters = int(numpy.ceil(len(folders)/batch_size))
# Prepare and submit a job for each
for i in range(iters):
start = i*int(batch_size)
if i != iters:
end = start + int(batch_size)
else:
end = len(folders)
subset = folders[start:end]
script_file = "%s/findgithub_%s.job" %(scripts,i)
filey = open(script_file,'w')
filey.writelines("#!/bin/bash\n")
filey.writelines("#SBATCH --job-name=%s\n" %i)
filey.writelines("#SBATCH --output=.out/%s.out\n" %i)
filey.writelines("#SBATCH --error=.out/%s.err\n" %i)
filey.writelines("#SBATCH --time=2:00:00\n")
for folder in subset:
filey.writelines('python %s/1.find_repos.py "%s" %s\n' % (scripts,folder,outfolder))
filey.close()
os.system("sbatch -A Analysis_Lonestar -p normal -n 24 findgithub_%s.job" %i)
|
vsoch/repofish
|
analysis/methods/1.run_find_repos.py
|
Python
|
mit
| 1,341
|
#!/usr/bin/env python
# Fix: this file contained unresolved Git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), which made it a SyntaxError. Resolved to
# the branch that imports `re` and `sys`: `re` is required by the version
# lookup below, and `gspread` must NOT be imported here (importing the
# package during setup would require its dependencies to be installed).
import os.path
import re
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

def read(filename):
    """Return the contents of `filename` relative to this setup.py."""
    return open(os.path.join(os.path.dirname(__file__), filename)).read()

description = 'Google Spreadsheets Python API'

long_description = """
{index}
License
-------
MIT
Download
========
"""

long_description = long_description.lstrip("\n").format(index=read('docs/index.txt'))

# Extract __version__ from the package source without importing it.
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                    read('gspread/__init__.py'), re.MULTILINE).group(1)

setup(
    name='gspread',
    packages=['gspread'],
    description=description,
    long_description=long_description,
    version=version,
    author='Anton Burnashev',
    author_email='fuss.here@gmail.com',
    url='https://github.com/burnash/gspread',
    keywords=['spreadsheets', 'google-spreadsheets'],
    install_requires=['requests>=2.2.1'],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Science/Research",
        "Topic :: Office/Business :: Financial :: Spreadsheet",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    license='MIT'
)
|
ShivaShinde/gspread
|
setup.py
|
Python
|
mit
| 1,845
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
    """A drawable circle with position, radius, fixed red color, and a
    1-pixel outline thickness."""

    def __init__(self, pos, radius):
        # Fix: the original signature used Python-2-only tuple parameter
        # unpacking, `def __init__(self, (x, y), radius)`, which is a
        # SyntaxError on Python 3 (PEP 3113). Accepting the pair as a
        # single `pos` argument is call-compatible: Particle((150, 100), 20)
        # works unchanged.
        self.x, self.y = pos
        self.radius = radius
        self.color = (255, 0, 0)
        self.thickness = 1

    def display(self):
        # Draws onto the module-level `screen` surface.
        pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
# Draw a single particle at the window centre.
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
# Event loop: keep the window open until the user closes it.
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
|
withtwoemms/pygame-explorations
|
render_particle.py
|
Python
|
mit
| 926
|
import unittest
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
class SitemapTest(unittest.TestCase):
    def test_sitemap(self):
        """A plain urlset sitemap parses into one dict per <url> element,
        keyed by the child tag names."""
        s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc>http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc>http://www.example.com/Special-Offers.html</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
</urlset>""")
        assert s.type == 'urlset'
        self.assertEqual(list(s),
            [{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'}, {'priority': '0.8', 'loc': 'http://www.example.com/Special-Offers.html', 'lastmod': '2009-08-16', 'changefreq': 'weekly'}])
    def test_sitemap_index(self):
        """A sitemapindex document is detected by type and yields one dict
        per <sitemap> entry."""
        s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>http://www.example.com/sitemap1.xml.gz</loc>
<lastmod>2004-10-01T18:23:17+00:00</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml.gz</loc>
<lastmod>2005-01-01</lastmod>
</sitemap>
</sitemapindex>""")
        assert s.type == 'sitemapindex'
        self.assertEqual(list(s), [{'loc': 'http://www.example.com/sitemap1.xml.gz', 'lastmod': '2004-10-01T18:23:17+00:00'}, {'loc': 'http://www.example.com/sitemap2.xml.gz', 'lastmod': '2005-01-01'}])
def test_sitemap_strip(self):
"""Assert we can deal with trailing spaces inside <loc> tags - we've
seen those
"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_wrong_ns(self):
"""We have seen sitemaps with wrongs ns. Presumably, Google still works
with these, though is not 100% confirmed"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_wrong_ns2(self):
"""We have seen sitemaps with wrongs ns. Presumably, Google still works
with these, though is not 100% confirmed"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset>
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
assert s.type == 'urlset'
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_urls_from_robots(self):
robots = """User-agent: *
Disallow: /aff/
Disallow: /wl/
# Search and shopping refining
Disallow: /s*/*facet
Disallow: /s*/*tags
# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
# Forums
Disallow: /forum/search/
Disallow: /forum/active/
"""
self.assertEqual(list(sitemap_urls_from_robots(robots)),
['http://example.com/sitemap.xml', 'http://example.com/sitemap-product-index.xml'])
def test_sitemap_blanklines(self):
"""Assert we can deal with starting blank lines before <xml> tag"""
s = Sitemap("""\
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<!-- cache: cached = yes name = sitemap_jspCache key = sitemap -->
<sitemap>
<loc>http://www.example.com/sitemap1.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap3.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<!-- end cache -->
</sitemapindex>
""")
self.assertEqual(list(s), [
{'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap1.xml'},
{'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap2.xml'},
{'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap3.xml'},
])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
Mitali-Sodhi/CodeLingo
|
Dataset/python/test_utils_sitemap.py
|
Python
|
mit
| 5,404
|
#!/usr/bin/python3
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import time
import datetime
import Adafruit_CharLCD as LCD
def file_get_contents(filename):
    """Return the entire contents of *filename* as a string.

    The file is decoded as UTF-8 explicitly (fix): previously the
    platform's default locale encoding was used, so the same file could
    decode differently across machines.

    :param filename: path of the text file to read
    :return: full file contents as a ``str``
    :raises OSError: if the file cannot be opened
    """
    with open(filename, encoding='utf-8') as f:
        return f.read()
# Raspberry Pi pin configuration:
# BCM pin numbers wired to the HD44780-style character LCD.
lcd_rs = 24 # Note this might need to be changed to 21 for older revision Pi's.
lcd_en = 23
lcd_d4 = 9
lcd_d5 = 11
lcd_d6 = 10
lcd_d7 = 18
lcd_backlight = 8
# BeagleBone Black configuration:
# lcd_rs = 'P8_8'
# lcd_en = 'P8_10'
# lcd_d4 = 'P8_18'
# lcd_d5 = 'P8_16'
# lcd_d6 = 'P8_14'
# lcd_d7 = 'P8_12'
# lcd_backlight = 'P8_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
                           lcd_columns, lcd_rows, lcd_backlight)
# NOTE(review): datestring is computed but never displayed - presumably a
# leftover from an earlier version of this script; confirm before removing.
datestring = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
# Replace whatever is on the LCD with the contents of the data file.
lcd.clear()
lcd.message(file_get_contents("../data/lcd.txt") );
|
mhkyg/OrangePIStuff
|
lcd/lcd_update.py
|
Python
|
mit
| 1,190
|
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
# Fall back to the local settings module when Django has not been
# configured yet (e.g. when the celery worker is started standalone).
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')  # pragma: no cover
# Module-level Celery application instance used by the config below.
app = Celery('swagger_ui')
class CeleryConfig(AppConfig):
    """Django app config that finishes wiring Celery once the Django app
    registry is fully loaded."""

    name = 'swagger_ui.taskapp'
    verbose_name = 'Celery Config'

    def ready(self):
        # Configure Celery from the Django settings module. Passing the
        # dotted path as a string means the worker never has to pickle the
        # settings object (important on Windows).
        app.config_from_object('django.conf:settings')
        # Ask Celery to look for a tasks module in every installed app.
        discovered = [django_app.name for django_app in apps.get_app_configs()]
        app.autodiscover_tasks(lambda: discovered, force=True)
@app.task(bind=True)
def debug_task(self):
    """Print the current task request context; useful for smoke-testing
    that a worker picks up and executes tasks."""
    message = 'Request: {0!r}'.format(self.request)
    print(message)  # pragma: no cover
|
cuongnb14/swagger-ui
|
swagger_ui/taskapp/celery.py
|
Python
|
mit
| 911
|
def runProgram(programLines, swapLine):
    """Run the handheld-console program (Advent of Code 2020, day 8).

    :param programLines: list of "op arg" strings, op in {nop, acc, jmp}
    :param swapLine: index whose instruction is flipped (nop <-> jmp)
        before execution; pass an out-of-range index (e.g. -1) to run
        the program unmodified
    :return: [True, accumulator] if execution runs past the last line,
        or [False, accumulator] if an instruction is about to execute a
        second time (infinite loop).

    Fixes: instruction indices are tracked in a set instead of a list
    (membership tests were O(n), making the whole run O(n^2)), and the
    loop-detection path now returns the accumulator instead of always 0.
    """
    accumulator = 0
    executionIndex = 0
    seenInstructions = set()
    while (executionIndex not in seenInstructions) and (executionIndex < len(programLines)):
        seenInstructions.add(executionIndex)
        instructionParts = programLines[executionIndex].split(" ")
        instruction = instructionParts[0]
        # Apply the one-instruction patch the caller is testing.
        if executionIndex == swapLine:
            instruction = "jmp" if instruction == "nop" else "nop"
        argument = instructionParts[1]
        if instruction == "nop":
            executionIndex += 1
        elif instruction == "acc":
            accumulator += int(argument)
            executionIndex += 1
        else:  # jmp
            executionIndex += int(argument)
    if executionIndex in seenInstructions:
        print("Loop detected on line: " + str(executionIndex) + " accumulator value: " + str(accumulator))
        return [False, accumulator]
    else:
        print("program executed successfully")
        return [True, accumulator]
# Load the puzzle input: one instruction per line.
filename = "inputs\\2020\\input-day8.txt"
with open(filename) as f:
    originalProgramLines = f.readlines()
# Go through program swapping each nop/jmp and executing program to see if corrected
# If still incorrect continue to look for a different instruction to swap
curLine = 0
result = [False,0]
while not result[0]:
    instructionLine = originalProgramLines[curLine]
    # Only nop/jmp instructions are candidates for the single swap.
    if instructionLine.startswith("nop") or instructionLine.startswith("jmp"):
        result = runProgram(originalProgramLines,curLine)
    curLine += 1
# NOTE: "accumlator" typo is in the user-visible output string; left as-is.
print("When fixed, accumlator value is: " + str(result[1]))
|
caw13/adventofcode
|
python/2020/day_eight_part2.py
|
Python
|
mit
| 1,872
|
import unittest
import os
import sys
import responses
current_dir = (os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.join(current_dir, '..', '..'))
from processes.get_tmdb import Main, RequestAPI, StandardiseResponse, GatherException
class TestMain(unittest.TestCase):
    """Testing GetAPI"""
    # NOTE(review): the docstring says GetAPI but this class exercises
    # Main.run - presumably a copy/paste slip; confirm before renaming.

    @classmethod
    def setUpClass(cls):
        cls.main = Main()

    @responses.activate
    def test_get_info(self):
        # Mock the request to the API with a trimmed-down TMDB payload for
        # Blade Runner (tt0083658), including credits, keywords and videos.
        responses.add(responses.GET, 'https://api.themoviedb.org/3/movie/tt0083658',
                      json={'budget': 28000000,
                            'overview': 'In the smog-choked dystopian Los Angeles of 2019, blade runner Rick Deckard '
                                        'is called out of retirement to terminate a quartet of replicants who have'
                                        ' escaped to Earth seeking their creator for a way to extend their'
                                        ' short life spans.',
                            'tagline': "Man has made his match... now it's his problem.",
                            'release_date': '1982-06-25',
                            'id': 78,
                            'status': 'Released',
                            'title': 'Blade Runner',
                            'popularity': 102.026128,
                            'credits': {
                                'crew': [{
                                    'name': 'Ridley Scott',
                                    'credit_id': '52fe4214c3a36847f8002595',
                                    'gender': 2,
                                    'profile_path': '/oTAL0z0vsjipCruxXUsDUIieuhk.jpg',
                                    'id': 578,
                                    'job': 'Director',
                                    'department': 'Directing'
                                }, {
                                    'name': 'Michael Deeley',
                                    'credit_id': '52fe4214c3a36847f800259b',
                                    'gender': 2,
                                    'profile_path': None,
                                    'id': 581,
                                    'job': 'Producer',
                                    'department': 'Production'
                                }, {
                                    'name': 'Jordan Cronenweth',
                                    'credit_id': '52fe4214c3a36847f80025c9',
                                    'gender': 2,
                                    'profile_path': None,
                                    'id': 594,
                                    'job': 'Director of Photography',
                                    'department': 'Camera'
                                }],
                                'cast': [{
                                    'cast_id': 6,
                                    'character': 'Rick Deckard',
                                    'credit_id': '52fe4214c3a36847f800259f',
                                    'order': 0,
                                    'gender': 2,
                                    'id': 3,
                                    'name': 'Harrison Ford',
                                    'profile_path': '/7CcoVFTogQgex2kJkXKMe8qHZrC.jpg'
                                }, {
                                    'cast_id': 7,
                                    'character': 'Roy Batty',
                                    'credit_id': '52fe4214c3a36847f80025a3',
                                    'order': 1,
                                    'gender': 2,
                                    'id': 585,
                                    'name': 'Rutger Hauer',
                                    'profile_path': '/2x1S2VAUvZXZuDjZ4E9iEKINvNu.jpg'
                                }, {
                                    'cast_id': 8,
                                    'character': 'Rachael',
                                    'credit_id': '52fe4214c3a36847f80025a7',
                                    'order': 2,
                                    'gender': 1,
                                    'id': 586,
                                    'name': 'Sean Young',
                                    'profile_path': '/4zgkRFQruIlaJ4JakNZLoKJ70fH.jpg'
                                }]
                            },
                            'backdrop_path': '/5hJ0XDCxE3qGfp1H3h7HQP9rLfU.jpg',
                            'original_title': 'Blade Runner',
                            'belongs_to_collection': {
                                'poster_path': '/foT46aJ7QPUFDl3CK8ArDl0JaZX.jpg',
                                'backdrop_path': '/57zhlMYblPute6qb8v16ZmGSPVv.jpg',
                                'id': 422837,
                                'name': 'Blade Runner Collection'
                            },
                            'vote_average': 7.9,
                            'production_companies': [{
                                'id': 5798,
                                'name': 'Shaw Brothers'
                            }, {
                                'id': 6194,
                                'name': 'Warner Bros.'
                            }, {
                                'id': 7965,
                                'name': 'The Ladd Company'
                            }],
                            'adult': False,
                            'original_language': 'en',
                            'spoken_languages': [{
                                'iso_639_1': 'en',
                                'name': 'English'
                            }, {
                                'iso_639_1': 'de',
                                'name': 'Deutsch'
                            }, {
                                'iso_639_1': 'cn',
                                'name': '广州话 / 廣州話'
                            }, {
                                'iso_639_1': 'ja',
                                'name': '日本語'
                            }, {
                                'iso_639_1': 'hu',
                                'name': 'Magyar'
                            }],
                            'imdb_id': 'tt0083658',
                            'genres': [{
                                'id': 878,
                                'name': 'Science Fiction'
                            }, {
                                'id': 18,
                                'name': 'Drama'
                            }, {
                                'id': 53,
                                'name': 'Thriller'
                            }],
                            'production_countries': [{
                                'iso_3166_1': 'US',
                                'name': 'United States of America'
                            }, {
                                'iso_3166_1': 'HK',
                                'name': 'Hong Kong'
                            }, {
                                'iso_3166_1': 'GB',
                                'name': 'United Kingdom'
                            }],
                            'keywords': {
                                'keywords': [{
                                    'id': 310,
                                    'name': 'artificial intelligence'
                                }, {
                                    'id': 801,
                                    'name': 'bounty hunter'
                                }]
                            },
                            'video': False,
                            'poster_path': '/p64TtbZGCElxQHpAMWmDHkWJlH2.jpg',
                            'homepage': 'http://www.warnerbros.com/blade-runner',
                            'videos': {
                                'results': [{
                                    'key': 'PSIiGE105iA',
                                    'type': 'Featurette',
                                    'name': 'Harrison Ford On Blade Runner',
                                    'iso_639_1': 'en',
                                    'id': '533ec651c3a368544800008a',
                                    'site': 'YouTube',
                                    'iso_3166_1': 'US',
                                    'size': 480
                                }, {
                                    'key': 'W_9rhPDLHWk',
                                    'type': 'Trailer',
                                    'name': 'The Final Cut trailer',
                                    'iso_639_1': 'en',
                                    'id': '54ff5ca09251413d9b00032c',
                                    'site': 'YouTube',
                                    'iso_3166_1': 'US',
                                    'size': 1080
                                }, {
                                    'key': 'AQL9hRRYDIw',
                                    'type': 'Trailer',
                                    'name': 'Trailer',
                                    'iso_639_1': 'en',
                                    'id': '586522349251412b8701d59c',
                                    'site': 'YouTube',
                                    'iso_3166_1': 'US',
                                    'size': 480
                                }]
                            },
                            'vote_count': 3912,
                            'revenue': 33139618,
                            'runtime': 117
                            },
                      status=200)
        # Check get_info for a correct imdb_id
        # run() should produce one entry per downstream table.
        request = {'imdb_id': 'tt0083658'}
        expected_keys = ['tmdb_main', 'tmdb_cast', 'tmdb_crew', 'tmdb_company', 'tmdb_genre', 'tmdb_keywords',
                         'tmdb_trailer']
        info = self.main.run(request)
        self.assertEqual(set(info.keys()), set(expected_keys))
class TestRequestAPI(unittest.TestCase):
    """Testing RequestAPI"""

    @classmethod
    def setUpClass(cls):
        cls.req = RequestAPI()

    @responses.activate
    def test_get_tmdb_good(self):
        """A valid IMDb id returns the parsed TMDB payload."""
        # Mock the request to the API
        responses.add(responses.GET, 'https://api.themoviedb.org/3/movie/tt0083658',
                      json={'imdb_id': 'tt0083658', 'title': 'Blade Runner'},
                      status=200)
        # Blade Runner
        imdb_id = 'tt0083658'
        response = self.req.get_tmdb(imdb_id)
        self.assertEqual(response['title'], 'Blade Runner')
        self.assertEqual(response['imdb_id'], imdb_id)

    @responses.activate
    def test_get_tmdb_bad(self):
        """An unknown id makes get_tmdb raise GatherException."""
        # Mock the request to the API
        responses.add(responses.GET, 'https://api.themoviedb.org/3/movie/invalid',
                      json={'status_message': 'The resource you requested could not be found.', 'status_code': 34},
                      status=200)
        imdb_id = 'invalid'
        # Fix: failUnlessRaises is a long-deprecated unittest alias
        # (removed in Python 3.12); use the canonical assertRaises.
        self.assertRaises(GatherException, self.req.get_tmdb, imdb_id)
class TestStandardiseResponse(unittest.TestCase):
    """Testing StandardiseResponse"""

    @classmethod
    def setUpClass(cls):
        cls.stan = StandardiseResponse()
        cls.imdb_id = 'tt0083658'
        # Shortened response for Blade Runner from the TMDB API.
        cls.response = {
            'budget': 28000000,
            'overview': 'In the smog-choked dystopian Los Angeles of 2019, blade runner Rick Deckard is called out'
                        ' of retirement to terminate a quartet of replicants who have escaped to Earth seeking their'
                        ' creator for a way to extend their short life spans.',
            'tagline': "Man has made his match... now it's his problem.",
            'release_date': '1982-06-25',
            'id': 78,
            'status': 'Released',
            'title': 'Blade Runner',
            'popularity': 102.026128,
            'credits': {
                'crew': [{
                    'name': 'Ridley Scott',
                    'credit_id': '52fe4214c3a36847f8002595',
                    'gender': 2,
                    'profile_path': '/oTAL0z0vsjipCruxXUsDUIieuhk.jpg',
                    'id': 578,
                    'job': 'Director',
                    'department': 'Directing'
                }, {
                    'name': 'Michael Deeley',
                    'credit_id': '52fe4214c3a36847f800259b',
                    'gender': 2,
                    'profile_path': None,
                    'id': 581,
                    'job': 'Producer',
                    'department': 'Production'
                }, {
                    'name': 'Jordan Cronenweth',
                    'credit_id': '52fe4214c3a36847f80025c9',
                    'gender': 2,
                    'profile_path': None,
                    'id': 594,
                    'job': 'Director of Photography',
                    'department': 'Camera'
                }],
                'cast': [{
                    'cast_id': 6,
                    'character': 'Rick Deckard',
                    'credit_id': '52fe4214c3a36847f800259f',
                    'order': 0,
                    'gender': 2,
                    'id': 3,
                    'name': 'Harrison Ford',
                    'profile_path': '/7CcoVFTogQgex2kJkXKMe8qHZrC.jpg'
                }, {
                    'cast_id': 7,
                    'character': 'Roy Batty',
                    'credit_id': '52fe4214c3a36847f80025a3',
                    'order': 1,
                    'gender': 2,
                    'id': 585,
                    'name': 'Rutger Hauer',
                    'profile_path': '/2x1S2VAUvZXZuDjZ4E9iEKINvNu.jpg'
                }, {
                    'cast_id': 8,
                    'character': 'Rachael',
                    'credit_id': '52fe4214c3a36847f80025a7',
                    'order': 2,
                    'gender': 1,
                    'id': 586,
                    'name': 'Sean Young',
                    'profile_path': '/4zgkRFQruIlaJ4JakNZLoKJ70fH.jpg'
                }]
            },
            'backdrop_path': '/5hJ0XDCxE3qGfp1H3h7HQP9rLfU.jpg',
            'original_title': 'Blade Runner',
            'belongs_to_collection': {
                'poster_path': '/foT46aJ7QPUFDl3CK8ArDl0JaZX.jpg',
                'backdrop_path': '/57zhlMYblPute6qb8v16ZmGSPVv.jpg',
                'id': 422837,
                'name': 'Blade Runner Collection'
            },
            'vote_average': 7.9,
            'production_companies': [{
                'id': 5798,
                'name': 'Shaw Brothers'
            }, {
                'id': 6194,
                'name': 'Warner Bros.'
            }, {
                'id': 7965,
                'name': 'The Ladd Company'
            }],
            'adult': False,
            'original_language': 'en',
            'spoken_languages': [{
                'iso_639_1': 'en',
                'name': 'English'
            }, {
                'iso_639_1': 'de',
                'name': 'Deutsch'
            }, {
                'iso_639_1': 'cn',
                'name': '广州话 / 廣州話'
            }, {
                'iso_639_1': 'ja',
                'name': '日本語'
            }, {
                'iso_639_1': 'hu',
                'name': 'Magyar'
            }],
            'imdb_id': 'tt0083658',
            'genres': [{
                'id': 878,
                'name': 'Science Fiction'
            }, {
                'id': 18,
                'name': 'Drama'
            }, {
                'id': 53,
                'name': 'Thriller'
            }],
            'production_countries': [{
                'iso_3166_1': 'US',
                'name': 'United States of America'
            }, {
                'iso_3166_1': 'HK',
                'name': 'Hong Kong'
            }, {
                'iso_3166_1': 'GB',
                'name': 'United Kingdom'
            }],
            'keywords': {
                'keywords': [{
                    'id': 310,
                    'name': 'artificial intelligence'
                }, {
                    'id': 801,
                    'name': 'bounty hunter'
                }]
            },
            'video': False,
            'poster_path': '/p64TtbZGCElxQHpAMWmDHkWJlH2.jpg',
            'homepage': 'http://www.warnerbros.com/blade-runner',
            'videos': {
                'results': [{
                    'key': 'PSIiGE105iA',
                    'type': 'Featurette',
                    'name': 'Harrison Ford On Blade Runner',
                    'iso_639_1': 'en',
                    'id': '533ec651c3a368544800008a',
                    'site': 'YouTube',
                    'iso_3166_1': 'US',
                    'size': 480
                }, {
                    'key': 'W_9rhPDLHWk',
                    'type': 'Trailer',
                    'name': 'The Final Cut trailer',
                    'iso_639_1': 'en',
                    'id': '54ff5ca09251413d9b00032c',
                    'site': 'YouTube',
                    'iso_3166_1': 'US',
                    'size': 1080
                }, {
                    'key': 'AQL9hRRYDIw',
                    'type': 'Trailer',
                    'name': 'Trailer',
                    'iso_639_1': 'en',
                    'id': '586522349251412b8701d59c',
                    'site': 'YouTube',
                    'iso_3166_1': 'US',
                    'size': 480
                }]
            },
            'vote_count': 3912,
            'revenue': 33139618,
            'runtime': 117
        }

    def test_get_main_data(self):
        # Core movie attributes are flattened into a single-row list.
        expected_result = [{'title': 'Blade Runner', 'runtime': 117, 'revenue': 33139618, 'budget': 28000000,
                            'imdb_id': 'tt0083658', 'original_language': 'en', 'release_date': '1982-06-25',
                            'plot': 'In the smog-choked dystopian Los Angeles of 2019, blade runner Rick Deckard is'
                                    ' called out of retirement to terminate a quartet of replicants who have escaped'
                                    ' to Earth seeking their creator for a way to extend their short life spans.'}]
        main_data = self.stan.get_main_data(self.imdb_id, self.response)
        self.assertEqual(expected_result, main_data)

    def test_get_crew_data(self):
        # Crew roles are lower-cased in the standardised output.
        expected_result = [{'name': 'Ridley Scott', 'imdb_id': 'tt0083658', 'role': 'director'},
                           {'name': 'Michael Deeley', 'imdb_id': 'tt0083658', 'role': 'producer'},
                           {'name': 'Jordan Cronenweth', 'imdb_id': 'tt0083658', 'role': 'director of photography'}]
        crew_data = self.stan.get_crew_data(self.imdb_id, self.response)
        self.assertEqual(crew_data, expected_result)

    def test_get_cast_data(self):
        # Cast entries keep their billing order.
        expected_result = [{'imdb_id': 'tt0083658', 'name': 'Harrison Ford', 'role': 'actor', 'cast_order': 0},
                           {'imdb_id': 'tt0083658', 'name': 'Rutger Hauer', 'role': 'actor', 'cast_order': 1},
                           {'imdb_id': 'tt0083658', 'name': 'Sean Young', 'role': 'actor', 'cast_order': 2}]
        cast_data = self.stan.get_cast_data(self.imdb_id, self.response)
        self.assertEqual(cast_data, expected_result)

    def test_get_keyword_data(self):
        expected_result = [{'keyword': 'artificial intelligence', 'imdb_id': 'tt0083658'},
                           {'keyword': 'bounty hunter', 'imdb_id': 'tt0083658'}]
        keywords_data = self.stan.get_keywords_data(self.imdb_id, self.response)
        self.assertEqual(keywords_data, expected_result)

    def test_get_genre_data(self):
        expected_result = [{'genre': 'Science Fiction', 'imdb_id': 'tt0083658'},
                           {'genre': 'Drama', 'imdb_id': 'tt0083658'},
                           {'genre': 'Thriller', 'imdb_id': 'tt0083658'}]
        genre_data = self.stan.get_genre_data(self.imdb_id, self.response)
        self.assertEqual(genre_data, expected_result)

    def test_get_company_data(self):
        expected_result = [{'name': 'Shaw Brothers', 'imdb_id': 'tt0083658'},
                           {'name': 'Warner Bros.', 'imdb_id': 'tt0083658'},
                           {'name': 'The Ladd Company', 'imdb_id': 'tt0083658'}]
        company_data = self.stan.get_company_data(self.imdb_id, self.response)
        self.assertEqual(company_data, expected_result)

    def test_get_trailer_data(self):
        # Only the best trailer (highest resolution 'Trailer' entry) is kept.
        expected_result = [{'video_id': 'W_9rhPDLHWk', 'imdb_id': 'tt0083658', 'size': 1080}]
        trailer_data = self.stan.get_trailer_data(self.imdb_id, self.response)
        self.assertEqual(trailer_data, expected_result)

    def test_get_trailer_data_bad(self):
        # A payload containing no 'Trailer'-typed video yields an empty list.
        response = {
            'videos': {
                'results': [{
                    'key': 'PSIiGE105iA',
                    'type': 'Featurette',
                    'name': 'Harrison Ford On Blade Runner',
                    'iso_639_1': 'en',
                    'id': '533ec651c3a368544800008a',
                    'site': 'YouTube',
                    'iso_3166_1': 'US',
                    'size': 480
                }]
            }}
        expected_result = []
        trailer_data = self.stan.get_trailer_data(self.imdb_id, response)
        self.assertEqual(trailer_data, expected_result)

    def test_sort_video_list_trailer(self):
        # Videos named 'Trailer' sort ahead of other names.
        video_list = [{
            'name': 'Tv Promo',
            'iso_639_1': 'en',
            'size': 1080
        }, {
            'name': 'Trailer',
            'iso_639_1': 'en',
            'size': 1080
        }]
        result = self.stan.sort_videos_list(video_list)
        expected = [{
            'name': 'Trailer',
            'iso_639_1': 'en',
            'size': 1080
        }, {
            'name': 'Tv Promo',
            'iso_639_1': 'en',
            'size': 1080
        }]
        self.assertEqual(result, expected)

    def test_sort_video_list_official(self):
        # 'Official' videos sort ahead of unofficial ones.
        video_list = [{
            'name': 'Official video',
            'iso_639_1': 'en',
            'size': 1080
        }, {
            'name': 'Video',
            'iso_639_1': 'en',
            'size': 1080
        }]
        result = self.stan.sort_videos_list(video_list)
        expected = [{
            'name': 'Official video',
            'iso_639_1': 'en',
            'size': 1080
        }, {
            'name': 'Video',
            'iso_639_1': 'en',
            'size': 1080
        }]
        self.assertEqual(result, expected)

    def test_sort_video_list_size(self):
        # Higher resolution sorts first when names are equal.
        video_list = [{
            'name': 'Official Trailer',
            'iso_639_1': 'en',
            'size': 720
        }, {
            'name': 'Official Trailer',
            'iso_639_1': 'en',
            'size': 1080
        }]
        video_list = self.stan.sort_videos_list(video_list)
        expected = [{
            'name': 'Official Trailer',
            'iso_639_1': 'en',
            'size': 1080
        }, {
            'name': 'Official Trailer',
            'iso_639_1': 'en',
            'size': 720
        }]
        self.assertEqual(video_list, expected)

    def test_sort_video_list_(self):
        # Combined ordering: Trailer beats Official beats anything else,
        # regardless of resolution.
        video_list = [{
            'name': 'Official Video',
            'iso_639_1': 'en',
            'size': 720
        }, {
            'name': 'Trailer',
            'iso_639_1': 'en',
            'size': 480
        }, {
            'name': 'Random Video',
            'iso_639_1': 'en',
            'size': 1080
        }]
        result = self.stan.sort_videos_list(video_list)
        expected = [{
            'name': 'Trailer',
            'iso_639_1': 'en',
            'size': 480
        }, {
            'name': 'Official Video',
            'iso_639_1': 'en',
            'size': 720
        }, {
            'name': 'Random Video',
            'iso_639_1': 'en',
            'size': 1080
        }]
        self.assertEqual(result, expected)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
kinoreel/kino-gather
|
processes/tests/test_get_tmdb.py
|
Python
|
mit
| 24,240
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import time
from marshmallow import fields
from marshmallow.decorators import post_dump
from indico.core.config import config
from indico.core.marshmallow import mm
class HTTPAPIError(Exception):
    """Error raised by the HTTP API, carrying an optional status code.

    :param message: human-readable error description
    :param code: optional HTTP status code to return to the client
    """

    def __init__(self, message, code=None):
        # Fix: forward the message to Exception.__init__ so that
        # ``str(exc)`` and tracebacks show it instead of an empty string.
        super().__init__(message)
        self.message = message
        self.code = code
class HTTPAPIResult:
    """Container for one HTTP API response: the payload plus metadata
    about the request that produced it (path, query string, timestamp)."""

    def __init__(self, results, path='', query='', ts=None, extra=None):
        self.results = results
        self.path = path
        self.query = query
        # Default the timestamp to "now" and the extra payload to {}.
        self.ts = int(time.time()) if ts is None else ts
        self.extra = extra if extra else {}

    @property
    def url(self):
        """Full URL of the request, rebuilt from the configured base URL,
        the path and (when present) the query string."""
        base = config.BASE_URL
        if not self.query:
            return base + self.path
        return f'{base}{self.path}?{self.query}'

    @property
    def count(self):
        """Number of entries in the result payload."""
        return len(self.results)
class HTTPAPIResultSchema(mm.Schema):
    """Marshmallow schema that serialises an HTTPAPIResult for JSON output."""

    count = fields.Integer()  # number of entries in `results`
    extra = fields.Raw(data_key='additionalInfo')  # exposed as 'additionalInfo'
    ts = fields.Integer()  # unix timestamp of the response
    url = fields.String()  # full request URL
    results = fields.Raw()  # payload, passed through unchanged

    @post_dump
    def _add_type(self, data, **kwargs):
        # Clients expect a '_type' discriminator on every serialised payload.
        data['_type'] = 'HTTPAPIResult'
        return data
|
indico/indico
|
indico/web/http_api/responses.py
|
Python
|
mit
| 1,367
|
'''
Created on Jun 20, 2016
@author: ionut
'''
import logging
# Root logger: DEBUG with timestamped, named records.
logging.basicConfig(level=logging.DEBUG,
                    format='[%(asctime)s] - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# Silence chatty third-party loggers so DEBUG output stays readable.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("spotify.search").setLevel(logging.WARNING)
logging.getLogger("spotify.session").setLevel(logging.WARNING)
#Database connection - Postgres / Amazon RDS
DSN = "dbname='spotlipy' user='postgres' host='127.0.0.1' password='password'"
#dogstarradio search URL
SEARCH_URL = 'http://www.dogstarradio.com/search_playlist.php'
# Substrings that mark a scraped title as noise rather than a real track.
JUNK_INDICATORS = ['@', '#', '.com', 'Hip Hop Nation', 'SiriusXM']
#for stations numbers and names see stations.txt
STATIONS = [
    34, 44
]
#if MONTH or DATE are None we will use yesterday for searching
MONTH = None
DATE = None
#Spotify settings
# Placeholder credentials - override in a local settings file.
SPOTIFY = {
    'username': 'username',
    'client_id': 'client_id',
    'client_secret': 'client_secret',
    'redirect_url': 'redirect_url',
    'api_scopes': 'playlist-read-private playlist-modify-public playlist-modify-private'
}
|
iticus/spotlipy
|
settings_default.py
|
Python
|
mit
| 1,141
|
import math
from param import *
class Sample:
    """One vision-system measurement of both teams' robots and the ball.

    Values are stored in field units: positions in meters, angles in
    radians (converted from the raw pixel/degree camera message by
    ``setDataFromSample``). ``getDiscreteSample`` converts back to
    pixels/degrees via the helpers from ``param``.
    """

    def __init__(self):
        self.time = 0            # sample timestamp (seconds)
        self.home1_x = 0.0
        self.home1_y = 0.0
        self.home1_theta = 0.0
        self.home2_x = 0.0
        self.home2_y = 0.0
        self.home2_theta = 0.0
        self.away1_x = 0.0
        self.away1_y = 0.0
        self.away1_theta = 0.0
        self.away2_x = 0.0
        self.away2_y = 0.0
        self.away2_theta = 0.0
        self.ball_x = 0.0
        self.ball_y = 0.0
        self.kill = 0.0          # NOTE(review): never written in this class - confirm use

    def setDataFromSample(self, data):
        """Populate this sample from a raw camera message *data*.

        home1 additionally gets a parallax correction: the camera sees
        the top of the robot, so its apparent position is shifted away
        from the field center; the offset is computed from the camera
        and robot heights and subtracted along the line to the origin.
        """
        self.time = round(timeToInt(data.header.stamp), 2)
        self.home1_theta = round(degreeToRadian(data.home1_theta), 3)
        home1_x = pixelToMeter(data.home1_x)
        home1_y = pixelToMeter(data.home1_y)
        angleField = math.atan2(home1_y, home1_x)
        mag = math.sqrt(home1_x ** 2 + home1_y ** 2)
        angleCamera = math.atan(HEIGHT_CAMERA / mag)
        offset = HEIGHT_ROBOT / math.tan(angleCamera)
        home1_x = home1_x - offset * math.cos(angleField)
        home1_y = home1_y - offset * math.sin(angleField)
        self.home1_x = round(home1_x, 3)
        self.home1_y = round(home1_y, 3)
        self.home2_x = pixelToMeter(data.home2_x)
        self.home2_y = pixelToMeter(data.home2_y)
        self.home2_theta = degreeToRadian(data.home2_theta)
        self.away1_x = pixelToMeter(data.away1_x)
        self.away1_y = pixelToMeter(data.away1_y)
        self.away1_theta = degreeToRadian(data.away1_theta)
        self.away2_x = pixelToMeter(data.away2_x)
        self.away2_y = pixelToMeter(data.away2_y)
        self.away2_theta = degreeToRadian(data.away2_theta)
        self.ball_x = pixelToMeter(data.ball_x)
        self.ball_y = pixelToMeter(data.ball_y)

    def getDiscreteSample(self):
        """Return the sample as a camera-unit tuple:
        (x, y, theta) per robot (home1, home2, away1, away2) followed by
        (ball_x, ball_y), with positions in pixels and angles in degrees.

        Fix: previously the tuple contained ``away2_x`` twice and never
        included ``away2_y``.
        """
        home1_x = meterToPixel(self.home1_x)
        home1_y = meterToPixel(self.home1_y)
        home1_theta = radianToDegree(self.home1_theta)
        home2_x = meterToPixel(self.home2_x)
        home2_y = meterToPixel(self.home2_y)
        home2_theta = radianToDegree(self.home2_theta)
        away1_x = meterToPixel(self.away1_x)
        away1_y = meterToPixel(self.away1_y)
        away1_theta = radianToDegree(self.away1_theta)
        away2_x = meterToPixel(self.away2_x)
        away2_y = meterToPixel(self.away2_y)
        away2_theta = radianToDegree(self.away2_theta)
        ball_x = meterToPixel(self.ball_x)
        ball_y = meterToPixel(self.ball_y)
        return (home1_x, home1_y, home1_theta,
                home2_x, home2_y, home2_theta,
                away1_x, away1_y, away1_theta,
                away2_x, away2_y, away2_theta,
                ball_x, ball_y)
|
lukehsiao/RobotSoccer
|
MotionControl/scripts/kalman_filter/Sample.py
|
Python
|
mit
| 2,688
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``scatterternary.hoverlabel.align`` property.

    Auto-generated plotly validator: accepts 'left', 'right' or 'auto',
    either as a scalar or as an array of per-point values.
    """

    def __init__(
        self, plotly_name="align", parent_name="scatterternary.hoverlabel", **kwargs
    ):
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),  # per-point arrays allowed
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "style"),
            values=kwargs.pop("values", ["left", "right", "auto"]),
            **kwargs
        )
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatterternary/hoverlabel/_align.py
|
Python
|
mit
| 591
|
import numpy as np
import cv2
import math
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rot_vec_to_euler(r):
    """Convert an OpenCV rotation vector *r* to Euler angles [x, y, z].

    The rotation matrix from cv2.Rodrigues is first composed with a
    180-degree rotation about the x axis so that a forward-facing pose
    maps to angles near [0, 0, 0]. Near the gimbal-lock singularity
    (sy ~ 0) the z angle is fixed to 0.
    """
    # Rotate around x axis by 180 degrees to have [0, 0, 0] when facing forward
    R = np.dot(np.array([[1, 0, 0],
                         [0, -1, 0],
                         [0, 0, -1]]),
               np.array(cv2.Rodrigues(r)[0]))
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        # Gimbal lock: x and z are coupled; conventionally set z = 0.
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])
# Calculates Rotation Matrix given euler angles.
def euler_to_rot_vec(theta):
    """Convert Euler angles *theta* = [x, y, z] back to an OpenCV
    rotation vector; inverse of rot_vec_to_euler (same 180-degree
    x-axis pre-rotation is applied before cv2.Rodrigues).
    """
    # Elementary rotations about x, y and z.
    r_x = np.array([[1, 0, 0],
                    [0, math.cos(theta[0]), -math.sin(theta[0])],
                    [0, math.sin(theta[0]), math.cos(theta[0])]
                    ])
    r_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
                    [0, 1, 0],
                    [-math.sin(theta[1]), 0, math.cos(theta[1])]
                    ])
    r_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
                    [math.sin(theta[2]), math.cos(theta[2]), 0],
                    [0, 0, 1]
                    ])
    # Compose z*y*x, undo the forward-facing flip, and re-encode as a vector.
    return np.array(cv2.Rodrigues(np.dot(np.array([[1, 0, 0],
                                                   [0, -1, 0],
                                                   [0, 0, -1]]),
                                         np.dot(r_z, np.dot(r_y, r_x))))[0])
class poseExtractor:
    """Estimates head pose (translation + Euler rotation) from 2D facial
    landmarks via cv2.solvePnP against a fixed 3D face model.

    NOTE(review): the landmark indices look like the 68-point dlib
    layout (nose bridge, eye corners, eyebrows) - confirm against the
    landmark detector used upstream.
    """

    def __init__(self):
        # Indices of the 2D landmarks used for PnP (order matches
        # model_points below).
        self.image_points = np.array([30, 29, 28, 27, 33, 32, 34, 31, 35,
                                      36, 45, 39, 42,
                                      21, 22, 20, 23, 19, 24, 18, 25
                                      ], dtype=np.intp)
        # 3D face model, nose tip at the origin. Left-side points are
        # zero placeholders and get mirrored from the right side below.
        self.model_points = np.array([
            (0.0, 0.0, 0.0),  # Nose tip
            (0.0, 0.40412, -0.35702),  # Nose 1
            (0.0, 0.87034, -0.65485),  # Nose 2
            (0, 1.33462, -0.92843),  # Nose 3
            (0, -0.63441, -0.65887),  # Under Nose #0
            (0, 0, 0),  # Under Nose #1, L
            (0.25466, -0.59679, -0.80215),  # Under Nose #1, R
            (0, 0, 0),  # Under Nose #2, L
            (0.49277, -0.56169, -0.96709),  # Under Nose #2, R
            (0, 0, 0),  # Left eye outer corner
            (1.60745, 1.21855, -1.9585),  # Right eye outer corner
            (0, 0, 0),  # Left eye inner corner
            (0.53823, 1.15389, -1.37273),  # Right eye inner corner
            (0, 0, 0),  # Eyebrow #0, L
            (0.34309, 1.67208, -0.96486),  # Eyebrow #0, R
            (0, 0, 0),  # Eyebrow #1, L
            (0.65806, 1.85405, -1.04975),  # Eyebrow #1, R
            (0, 0, 0),  # Eyebrow #2, L
            (0.96421, 1.95277, -1.23015),  # Eyebrow #2, R
            (0, 0, 0),  # Eyebrow #3, L
            (1.32075, 1.95305, -1.48482)  # Eyebrow #3, R
        ])
        # Fill each left-side placeholder by mirroring the following
        # right-side point across the x = 0 plane.
        for i in range(5, self.model_points.shape[0], 2):
            self.model_points[i, 0] = -self.model_points[i + 1, 0]
            self.model_points[i, 1:3] = self.model_points[i + 1, 1:3]
        self.camera_matrix = None  # Hack so camera matrix can be used for printing later
        self.dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        # Previous solvePnP solution, reused as the initial guess.
        self.rvec = None
        self.tvec = None

    def get_head_rotation(self, landmarks, img_size):
        """Run solvePnP on *landmarks*; stores rvec/tvec and returns the
        solver's success flag."""
        # Camera internals: focal length approximated by the image width,
        # principal point at the image centre.
        focal_length = img_size[1]
        center = (img_size[1] / 2, img_size[0] / 2)
        self.camera_matrix = np.array(
            [[focal_length, 0, center[0]],
             [0, focal_length, center[1]],
             [0, 0, 1]], dtype="double"
        )
        if self.rvec is None:
            # First frame: solve from scratch.
            (success, self.rvec, self.tvec) = cv2.solvePnP(
                self.model_points, landmarks[self.image_points[:, np.newaxis], :],
                self.camera_matrix, self.dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
        else:
            # Subsequent frames: warm-start from the previous solution.
            (success, self.rvec, self.tvec) = cv2.solvePnP(
                self.model_points, landmarks[self.image_points[:, np.newaxis], :],
                self.camera_matrix, self.dist_coeffs, flags=cv2.SOLVEPNP_EPNP,
                rvec=self.rvec, tvec=self.tvec, useExtrinsicGuess=True)
        return success

    def get_positional_features(self, landmarks, img_size):
        """Return (translation vector, Euler angles) for the head pose,
        or None if solvePnP fails."""
        rotation_success = self.get_head_rotation(landmarks, img_size)
        if not rotation_success:
            return None
        return self.tvec, rot_vec_to_euler(self.rvec)
def get_position_by_average(landmarks, img_size):
    """Return the normalized face location as [x, y, size].

    The centre is the mean of all landmark coordinates and the size is
    twice the mean distance of the landmarks from that centre; all three
    values are divided by img_size[0] to normalize them.
    """
    centre = landmarks.mean(axis=0)
    offsets = np.linalg.norm(landmarks - centre, ord=2, axis=1)
    diameter = 2 * offsets.mean()
    scale = img_size[0]
    return np.append(centre / scale, diameter / scale)
|
JustusSchwan/MasterThesis
|
trash/utility_positional.py
|
Python
|
mit
| 4,965
|
import os
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py so builds work from any cwd.
here = os.path.abspath(os.path.dirname(__file__))
# Runtime dependencies are maintained in requirements.txt.
with open(os.path.join(here, 'requirements.txt')) as fp:
    requires = fp.readlines()
setup(
    name='cebulany manager',
    version='0.0.4',
    classifiers=[],
    author='Firemark',
    author_email='marpiechula@gmail.com',
    url='https://github.com/hackerspace-silesia/cebulany-manager',
    packages=find_packages(),
    install_requires=requires,
    tests_require=requires,
)
|
hackerspace-silesia/cebulany-manager
|
setup.py
|
Python
|
mit
| 495
|
# -*- coding: utf-8 -*-
#
# hash-ring-ctypes documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 2 18:10:26 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hash-ring-ctypes'
copyright = u'2013, Matt Dennewitz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hash-ring-ctypesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'hash-ring-ctypes.tex', u'hash-ring-ctypes Documentation',
u'Matt Dennewitz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hash-ring-ctypes', u'hash-ring-ctypes Documentation',
[u'Matt Dennewitz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hash-ring-ctypes', u'hash-ring-ctypes Documentation',
u'Matt Dennewitz', 'hash-ring-ctypes', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mattdennewitz/hash-ring-ctypes
|
docs/conf.py
|
Python
|
mit
| 8,253
|
import itertools
from typing import List

from test_framework import generic_test
def expression_synthesis(digits: List[int], target: int) -> bool:
    """Decide whether '+' and '*' operators (or nothing, i.e. digit
    concatenation) can be inserted between the digits so the resulting
    arithmetic expression evaluates to ``target``.

    Normal precedence applies: '*' binds tighter than '+'.  All
    3**(n-1) gap assignments are brute-forced, which is fine for the
    judge-sized inputs.  The previous body was a TODO stub that always
    returned True.

    Returns False for an empty digit list (no expression can be formed).
    """
    if not digits:
        return False
    for ops in itertools.product(('', '+', '*'), repeat=len(digits) - 1):
        # Interleave digits and gap operators, e.g. [1, 2, 3] with
        # ('+', '*') builds the expression string '1+2*3'.
        expr = ''.join(
            str(digit) + op for digit, op in zip(digits, ops + ('',)))
        # Evaluate with precedence: the expression is a '+'-separated
        # sum of '*'-separated products.
        total = 0
        for term in expr.split('+'):
            product = 1
            for factor in term.split('*'):
                product *= int(factor)
            total += product
        if total == target:
            return True
    return False
if __name__ == '__main__':
    # Run the EPI judge harness: it feeds the cases from the .tsv file to
    # expression_synthesis and exits with the judge's status code.
    exit(
        generic_test.generic_test_main('insert_operators_in_string.py',
                                       'insert_operators_in_string.tsv',
                                       expression_synthesis))
|
shobhitmishra/CodingProblems
|
epi_judge_python/insert_operators_in_string.py
|
Python
|
mit
| 426
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for autotagging functionality.
"""
from __future__ import division, absolute_import, print_function
import re
import copy
from test import _common
from test._common import unittest
from beets import autotag
from beets.autotag import match
from beets.autotag.hooks import Distance, string_dist
from beets.library import Item
from beets.util import plurality
from beets.autotag import AlbumInfo, TrackInfo
from beets import config
class PluralityTest(_common.TestCase):
    """Tests for beets.util.plurality and match.current_metadata, which
    pick the most common value of each field across a set of items.
    """
    def test_plurality_consensus(self):
        objs = [1, 1, 1, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 4)
    def test_plurality_near_consensus(self):
        objs = [1, 1, 2, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 3)
    def test_plurality_conflict(self):
        # On a tie either winner is acceptable; only the frequency is fixed.
        objs = [1, 1, 2, 2, 3]
        obj, freq = plurality(objs)
        self.assertTrue(obj in (1, 2))
        self.assertEqual(freq, 2)
    def test_plurality_empty_sequence_raises_error(self):
        with self.assertRaises(ValueError):
            plurality([])
    def test_current_metadata_finds_pluralities(self):
        items = [Item(artist='The Beetles', album='The White Album'),
                 Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='Teh White Album')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertFalse(consensus['artist'])
    def test_current_metadata_artist_consensus(self):
        items = [Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='Teh White Album')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertTrue(consensus['artist'])
    def test_albumartist_consensus(self):
        # A unanimous albumartist should win over three distinct artists.
        items = [Item(artist='tartist1', album='album',
                      albumartist='aartist'),
                 Item(artist='tartist2', album='album',
                      albumartist='aartist'),
                 Item(artist='tartist3', album='album',
                      albumartist='aartist')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'aartist')
        self.assertFalse(consensus['artist'])
    def test_current_metadata_likelies(self):
        fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
                  'mb_albumid', 'label', 'catalognum', 'country', 'media',
                  'albumdisambig']
        # 'i or 1' maps i=0 and i=1 to the same value, so '<field>_1'
        # occurs twice and is the plurality for every field.
        items = [Item(**dict((f, '%s_%s' % (f, i or 1)) for f in fields))
                 for i in range(5)]
        likelies, _ = match.current_metadata(items)
        for f in fields:
            self.assertEqual(likelies[f], '%s_1' % f)
def _make_item(title, track, artist=u'some artist'):
    """Build a library Item with fixed album metadata for the tests.

    Only title, track number, and (optionally) artist vary; everything
    else is held constant so distance comparisons are deterministic.
    """
    fields = {
        'title': title,
        'track': track,
        'artist': artist,
        'album': u'some album',
        'length': 1,
        'mb_trackid': '',
        'mb_albumid': '',
        'mb_artistid': '',
    }
    return Item(**fields)
def _make_trackinfo():
    """Return three TrackInfo objects titled one/two/three, each one
    second long, credited to the same artist, indexed 1..3.
    """
    titles = (u'one', u'two', u'three')
    return [
        TrackInfo(title, None, u'some artist', length=1, index=position)
        for position, title in enumerate(titles, start=1)
    ]
def _clear_weights():
    """Hack around the lazy descriptor used to cache weights for
    Distance calculations.
    """
    # Reaching through the class __dict__ bypasses the descriptor
    # protocol, letting us flip the cache flag on the descriptor object
    # itself instead of triggering a weight computation.
    Distance.__dict__['_weights'].computed = False
class DistanceTest(_common.TestCase):
    """Tests for the Distance penalty accumulator
    (beets.autotag.hooks.Distance): each add_* method appends penalties
    in [0, 1] under a key, and the weighted aggregate is exposed via
    distance / max_distance / raw_distance and operator overloads.
    """
    def tearDown(self):
        super(DistanceTest, self).tearDown()
        # Reset the lazily cached weights so per-test changes to
        # config['match']['distance_weights'] do not leak between tests.
        _clear_weights()
    def test_add(self):
        dist = Distance()
        dist.add('add', 1.0)
        self.assertEqual(dist._penalties, {'add': [1.0]})
    def test_add_equality(self):
        dist = Distance()
        dist.add_equality('equality', 'ghi', ['abc', 'def', 'ghi'])
        self.assertEqual(dist._penalties['equality'], [0.0])
        dist.add_equality('equality', 'xyz', ['abc', 'def', 'ghi'])
        self.assertEqual(dist._penalties['equality'], [0.0, 1.0])
        # Options may also be a regex instead of a list.
        dist.add_equality('equality', 'abc', re.compile(r'ABC', re.I))
        self.assertEqual(dist._penalties['equality'], [0.0, 1.0, 0.0])
    def test_add_expr(self):
        dist = Distance()
        dist.add_expr('expr', True)
        self.assertEqual(dist._penalties['expr'], [1.0])
        dist.add_expr('expr', False)
        self.assertEqual(dist._penalties['expr'], [1.0, 0.0])
    def test_add_number(self):
        dist = Distance()
        # Add a full penalty for each number of difference between two numbers.
        dist.add_number('number', 1, 1)
        self.assertEqual(dist._penalties['number'], [0.0])
        dist.add_number('number', 1, 2)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0])
        dist.add_number('number', 2, 1)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0, 1.0])
        dist.add_number('number', -1, 2)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0, 1.0, 1.0,
                                                     1.0, 1.0])
    def test_add_priority(self):
        dist = Distance()
        dist.add_priority('priority', 'abc', 'abc')
        self.assertEqual(dist._penalties['priority'], [0.0])
        dist.add_priority('priority', 'def', ['abc', 'def'])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5])
        dist.add_priority('priority', 'gh', ['ab', 'cd', 'ef',
                                             re.compile('GH', re.I)])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5, 0.75])
        dist.add_priority('priority', 'xyz', ['abc', 'def'])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5, 0.75,
                                                       1.0])
    def test_add_ratio(self):
        dist = Distance()
        dist.add_ratio('ratio', 25, 100)
        self.assertEqual(dist._penalties['ratio'], [0.25])
        dist.add_ratio('ratio', 10, 5)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0])
        dist.add_ratio('ratio', -5, 5)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0, 0.0])
        dist.add_ratio('ratio', 5, 0)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0, 0.0, 0.0])
    def test_add_string(self):
        dist = Distance()
        sdist = string_dist(u'abc', u'bcd')
        dist.add_string('string', u'abc', u'bcd')
        self.assertEqual(dist._penalties['string'], [sdist])
        self.assertNotEqual(dist._penalties['string'], [0])
    def test_add_string_none(self):
        dist = Distance()
        dist.add_string('string', None, 'string')
        self.assertEqual(dist._penalties['string'], [1])
    def test_add_string_both_none(self):
        dist = Distance()
        dist.add_string('string', None, None)
        self.assertEqual(dist._penalties['string'], [0])
    def test_distance(self):
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.5)
        dist.add('media', 0.25)
        dist.add('media', 0.75)
        self.assertEqual(dist.distance, 0.5)
        # __getitem__()
        self.assertEqual(dist['album'], 0.25)
        self.assertEqual(dist['media'], 0.25)
    def test_max_distance(self):
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.5)
        dist.add('medium', 0.0)
        dist.add('medium', 0.0)
        self.assertEqual(dist.max_distance, 5.0)
    def test_operators(self):
        config['match']['distance_weights']['source'] = 1.0
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('source', 0.0)
        dist.add('album', 0.5)
        dist.add('medium', 0.25)
        dist.add('medium', 0.75)
        self.assertEqual(len(dist), 2)
        self.assertEqual(list(dist), [('album', 0.2), ('medium', 0.2)])
        self.assertTrue(dist == 0.4)
        self.assertTrue(dist < 1.0)
        self.assertTrue(dist > 0.0)
        self.assertEqual(dist - 0.4, 0.0)
        self.assertEqual(0.4 - dist, 0.0)
        self.assertEqual(float(dist), 0.4)
    def test_raw_distance(self):
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.5)
        dist.add('medium', 0.25)
        dist.add('medium', 0.5)
        self.assertEqual(dist.raw_distance, 2.25)
    def test_items(self):
        config['match']['distance_weights']['album'] = 4.0
        config['match']['distance_weights']['medium'] = 2.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.1875)
        dist.add('medium', 0.75)
        self.assertEqual(dist.items(), [('medium', 0.25), ('album', 0.125)])
        # Sort by key if distance is equal.
        dist = Distance()
        dist.add('album', 0.375)
        dist.add('medium', 0.75)
        self.assertEqual(dist.items(), [('album', 0.25), ('medium', 0.25)])
    def test_update(self):
        dist1 = Distance()
        dist1.add('album', 0.5)
        dist1.add('media', 1.0)
        dist2 = Distance()
        dist2.add('album', 0.75)
        dist2.add('album', 0.25)
        dist2.add('media', 0.05)
        dist1.update(dist2)
        self.assertEqual(dist1._penalties, {'album': [0.5, 0.75, 0.25],
                                            'media': [1.0, 0.05]})
class TrackDistanceTest(_common.TestCase):
    """Tests for match.track_distance between a single Item and a
    TrackInfo, using the _make_item/_make_trackinfo fixtures.
    """
    def test_identical_tracks(self):
        item = _make_item(u'one', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)
    def test_different_title(self):
        item = _make_item(u'foo', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)
    def test_different_artist(self):
        item = _make_item(u'one', 1)
        item.artist = u'foo'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)
    def test_various_artists_tolerated(self):
        # A 'Various Artists' item artist must not be penalized.
        item = _make_item(u'one', 1)
        item.artist = u'Various Artists'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)
class AlbumDistanceTest(_common.TestCase):
    """Tests for match.distance over whole albums: missing tracks,
    artist mismatches, compilations (va=True), track ordering, and
    multi-medium numbering.
    """
    def _mapping(self, items, info):
        # Pair each item with the track info at the same position.
        out = {}
        for i, t in zip(items, info.tracks):
            out[i] = t
        return out
    def _dist(self, items, info):
        # Convenience wrapper computing the album-level distance.
        return match.distance(items, info, self._mapping(items, info))
    def test_identical_albums(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        self.assertEqual(self._dist(items, info), 0)
    def test_incomplete_album(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        dist = self._dist(items, info)
        self.assertNotEqual(dist, 0)
        # Make sure the distance is not too great
        self.assertTrue(dist < 0.2)
    def test_global_artists_differ(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'someone else',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        self.assertNotEqual(self._dist(items, info), 0)
    def test_comp_track_artists_match(self):
        # On a compilation (va=True) the album artist is ignored.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'should be ignored',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        self.assertEqual(self._dist(items, info), 0)
    def test_comp_no_track_artists(self):
        # Some VA releases don't have track artists (incomplete metadata).
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'should be ignored',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].artist = None
        info.tracks[1].artist = None
        info.tracks[2].artist = None
        self.assertEqual(self._dist(items, info), 0)
    def test_comp_track_artists_do_not_match(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2, u'someone else'))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        self.assertNotEqual(self._dist(items, info), 0)
    def test_tracks_out_of_order(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 2))
        items.append(_make_item(u'two', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        dist = self._dist(items, info)
        # Out-of-order tracks cost something, but not much.
        self.assertTrue(0 < dist < 0.2)
    def test_two_medium_release(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)
    def test_per_medium_track_numbers(self):
        # Item track numbers restart per medium; this must not be penalized.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 1))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)
class AssignmentTest(unittest.TestCase):
    """Tests for match.assign_items, which pairs library items with
    candidate TrackInfo objects and reports unmatched leftovers on
    either side.
    """
    def item(self, title, track):
        # Minimal Item fixture; only title and track number matter here.
        return Item(
            title=title, track=track,
            mb_trackid='', mb_albumid='', mb_artistid='',
        )
    def test_reorder_when_track_numbers_incorrect(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 2))
        items.append(self.item(u'two', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })
    def test_order_works_with_invalid_track_numbers(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 1))
        items.append(self.item(u'two', 1))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })
    def test_order_works_with_missing_tracks(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [trackinfo[1]])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
        })
    def test_order_works_with_extra_tracks(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'two', 2))
        items.append(self.item(u'three', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [items[1]])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[2]: trackinfo[1],
        })
    def test_order_works_when_track_names_are_entirely_wrong(self):
        # A real-world test case contributed by a user.
        # Titles carry no signal here, so matching must fall back on the
        # track lengths, which line up pairwise between items and infos.
        def item(i, length):
            return Item(
                artist=u'ben harper',
                album=u'burn to shine',
                title=u'ben harper - Burn to Shine {0}'.format(i),
                track=i,
                length=length,
                mb_trackid='', mb_albumid='', mb_artistid='',
            )
        items = []
        items.append(item(1, 241.37243007106997))
        items.append(item(2, 342.27781704375036))
        items.append(item(3, 245.95070222338137))
        items.append(item(4, 472.87662515485437))
        items.append(item(5, 279.1759535763187))
        items.append(item(6, 270.33333768012))
        items.append(item(7, 247.83435613222923))
        items.append(item(8, 216.54504531525072))
        items.append(item(9, 225.72775379800484))
        items.append(item(10, 317.7643606963552))
        items.append(item(11, 243.57001238834192))
        items.append(item(12, 186.45916150485752))
        def info(index, title, length):
            return TrackInfo(title, None, length=length, index=index)
        trackinfo = []
        trackinfo.append(info(1, u'Alone', 238.893))
        trackinfo.append(info(2, u'The Woman in You', 341.44))
        trackinfo.append(info(3, u'Less', 245.59999999999999))
        trackinfo.append(info(4, u'Two Hands of a Prayer', 470.49299999999999))
        trackinfo.append(info(5, u'Please Bleed', 277.86599999999999))
        trackinfo.append(info(6, u'Suzie Blue', 269.30599999999998))
        trackinfo.append(info(7, u'Steal My Kisses', 245.36000000000001))
        trackinfo.append(info(8, u'Burn to Shine', 214.90600000000001))
        trackinfo.append(info(9, u'Show Me a Little Shame', 224.0929999999999))
        trackinfo.append(info(10, u'Forgiven', 317.19999999999999))
        trackinfo.append(info(11, u'Beloved One', 243.733))
        trackinfo.append(info(12, u'In the Lord\'s Arms', 186.13300000000001))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        for item, info in mapping.items():
            self.assertEqual(items.index(item), trackinfo.index(info))
class ApplyTestUtil(object):
    """Mixin supplying a helper that runs autotag.apply_metadata over
    ``self.items`` using ``self.info`` (or an explicitly passed info).
    """
    def _apply(self, info=None, per_disc_numbering=False):
        """Apply album metadata to the test items.

        ``info`` falls back to ``self.info`` when falsy;
        ``per_disc_numbering`` is stored into the global beets config
        before applying.
        """
        album_info = info or self.info
        # Pair each item with the track info at the same position.
        mapping = dict(zip(self.items, album_info.tracks))
        config['per_disc_numbering'] = per_disc_numbering
        autotag.apply_metadata(album_info, mapping)
class ApplyTest(_common.TestCase, ApplyTestUtil):
    """Tests for autotag.apply_metadata on a regular (non-VA) release:
    two blank items matched against a two-track, two-medium AlbumInfo.
    """
    def setUp(self):
        super(ApplyTest, self).setUp()
        # Two empty items to receive metadata.
        self.items = []
        self.items.append(Item({}))
        self.items.append(Item({}))
        # One track on each of two mediums; only track one carries
        # per-track artist credit/sort values.
        trackinfo = []
        trackinfo.append(TrackInfo(
            u'oneNew',
            u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
            medium=1,
            medium_index=1,
            medium_total=1,
            index=1,
            artist_credit='trackArtistCredit',
            artist_sort='trackArtistSort',
        ))
        trackinfo.append(TrackInfo(
            u'twoNew',
            u'40130ed1-a27c-42fd-a328-1ebefb6caef4',
            medium=2,
            medium_index=1,
            index=2,
            medium_total=1,
        ))
        self.info = AlbumInfo(
            tracks=trackinfo,
            artist=u'artistNew',
            album=u'albumNew',
            album_id='7edb51cb-77d6-4416-a23c-3a8c2994a2c7',
            artist_id='a6623d39-2d8e-4f70-8242-0a9553b91e50',
            artist_credit=u'albumArtistCredit',
            artist_sort=u'albumArtistSort',
            albumtype=u'album',
            va=False,
            mediums=2,
        )
    def test_titles_applied(self):
        self._apply()
        self.assertEqual(self.items[0].title, 'oneNew')
        self.assertEqual(self.items[1].title, 'twoNew')
    def test_album_and_artist_applied_to_all(self):
        self._apply()
        self.assertEqual(self.items[0].album, 'albumNew')
        self.assertEqual(self.items[1].album, 'albumNew')
        self.assertEqual(self.items[0].artist, 'artistNew')
        self.assertEqual(self.items[1].artist, 'artistNew')
    def test_track_index_applied(self):
        self._apply()
        self.assertEqual(self.items[0].track, 1)
        self.assertEqual(self.items[1].track, 2)
    def test_track_total_applied(self):
        self._apply()
        self.assertEqual(self.items[0].tracktotal, 2)
        self.assertEqual(self.items[1].tracktotal, 2)
    def test_disc_index_applied(self):
        self._apply()
        self.assertEqual(self.items[0].disc, 1)
        self.assertEqual(self.items[1].disc, 2)
    def test_disc_total_applied(self):
        self._apply()
        self.assertEqual(self.items[0].disctotal, 2)
        self.assertEqual(self.items[1].disctotal, 2)
    def test_per_disc_numbering(self):
        # With per-disc numbering, each track is #1 on its own medium.
        self._apply(per_disc_numbering=True)
        self.assertEqual(self.items[0].track, 1)
        self.assertEqual(self.items[1].track, 1)
    def test_per_disc_numbering_track_total(self):
        self._apply(per_disc_numbering=True)
        self.assertEqual(self.items[0].tracktotal, 1)
        self.assertEqual(self.items[1].tracktotal, 1)
    def test_mb_trackid_applied(self):
        self._apply()
        self.assertEqual(self.items[0].mb_trackid,
                         'dfa939ec-118c-4d0f-84a0-60f3d1e6522c')
        self.assertEqual(self.items[1].mb_trackid,
                         '40130ed1-a27c-42fd-a328-1ebefb6caef4')
    def test_mb_albumid_and_artistid_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.mb_albumid,
                             '7edb51cb-77d6-4416-a23c-3a8c2994a2c7')
            self.assertEqual(item.mb_artistid,
                             'a6623d39-2d8e-4f70-8242-0a9553b91e50')
    def test_albumtype_applied(self):
        self._apply()
        self.assertEqual(self.items[0].albumtype, 'album')
        self.assertEqual(self.items[1].albumtype, 'album')
    def test_album_artist_overrides_empty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        self._apply(info=my_info)
        self.assertEqual(self.items[0].artist, 'artistNew')
        self.assertEqual(self.items[1].artist, 'artistNew')
    def test_album_artist_overriden_by_nonempty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        my_info.tracks[0].artist = 'artist1!'
        my_info.tracks[1].artist = 'artist2!'
        self._apply(info=my_info)
        self.assertEqual(self.items[0].artist, 'artist1!')
        self.assertEqual(self.items[1].artist, 'artist2!')
    def test_artist_credit_applied(self):
        # Track two has no per-track credit, so the album credit is used.
        self._apply()
        self.assertEqual(self.items[0].albumartist_credit, 'albumArtistCredit')
        self.assertEqual(self.items[0].artist_credit, 'trackArtistCredit')
        self.assertEqual(self.items[1].albumartist_credit, 'albumArtistCredit')
        self.assertEqual(self.items[1].artist_credit, 'albumArtistCredit')
    def test_artist_sort_applied(self):
        self._apply()
        self.assertEqual(self.items[0].albumartist_sort, 'albumArtistSort')
        self.assertEqual(self.items[0].artist_sort, 'trackArtistSort')
        self.assertEqual(self.items[1].albumartist_sort, 'albumArtistSort')
        self.assertEqual(self.items[1].artist_sort, 'albumArtistSort')
    def test_full_date_applied(self):
        my_info = copy.deepcopy(self.info)
        my_info.year = 2013
        my_info.month = 12
        my_info.day = 18
        self._apply(info=my_info)
        self.assertEqual(self.items[0].year, 2013)
        self.assertEqual(self.items[0].month, 12)
        self.assertEqual(self.items[0].day, 18)
    def test_date_only_zeros_month_and_day(self):
        # Applying a year without month/day clears the old month/day.
        self.items = []
        self.items.append(Item(year=1, month=2, day=3))
        self.items.append(Item(year=4, month=5, day=6))
        my_info = copy.deepcopy(self.info)
        my_info.year = 2013
        self._apply(info=my_info)
        self.assertEqual(self.items[0].year, 2013)
        self.assertEqual(self.items[0].month, 0)
        self.assertEqual(self.items[0].day, 0)
    def test_missing_date_applies_nothing(self):
        self.items = []
        self.items.append(Item(year=1, month=2, day=3))
        self.items.append(Item(year=4, month=5, day=6))
        self._apply()
        self.assertEqual(self.items[0].year, 1)
        self.assertEqual(self.items[0].month, 2)
        self.assertEqual(self.items[0].day, 3)
    def test_data_source_applied(self):
        my_info = copy.deepcopy(self.info)
        my_info.data_source = 'MusicBrainz'
        self._apply(info=my_info)
        self.assertEqual(self.items[0].data_source, 'MusicBrainz')
class ApplyCompilationTest(_common.TestCase, ApplyTestUtil):
    """Tests for autotag.apply_metadata on a compilation release, where
    each track has its own artist and the album artist differs.
    """
    def setUp(self):
        super(ApplyCompilationTest, self).setUp()
        self.items = []
        self.items.append(Item({}))
        self.items.append(Item({}))
        trackinfo = []
        trackinfo.append(TrackInfo(
            u'oneNew',
            u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
            u'artistOneNew',
            u'a05686fc-9db2-4c23-b99e-77f5db3e5282',
            index=1,
        ))
        trackinfo.append(TrackInfo(
            u'twoNew',
            u'40130ed1-a27c-42fd-a328-1ebefb6caef4',
            u'artistTwoNew',
            u'80b3cf5e-18fe-4c59-98c7-e5bb87210710',
            index=2,
        ))
        # Note: va is deliberately left unset (False) here; individual
        # tests flip it via a deep copy.
        self.info = AlbumInfo(
            tracks=trackinfo,
            artist=u'variousNew',
            album=u'albumNew',
            album_id='3b69ea40-39b8-487f-8818-04b6eff8c21a',
            artist_id='89ad4ac3-39f7-470e-963a-56509c546377',
            albumtype=u'compilation',
        )
    def test_album_and_track_artists_separate(self):
        self._apply()
        self.assertEqual(self.items[0].artist, 'artistOneNew')
        self.assertEqual(self.items[1].artist, 'artistTwoNew')
        self.assertEqual(self.items[0].albumartist, 'variousNew')
        self.assertEqual(self.items[1].albumartist, 'variousNew')
    def test_mb_albumartistid_applied(self):
        self._apply()
        self.assertEqual(self.items[0].mb_albumartistid,
                         '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[1].mb_albumartistid,
                         '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[0].mb_artistid,
                         'a05686fc-9db2-4c23-b99e-77f5db3e5282')
        self.assertEqual(self.items[1].mb_artistid,
                         '80b3cf5e-18fe-4c59-98c7-e5bb87210710')
    def test_va_flag_cleared_does_not_set_comp(self):
        self._apply()
        self.assertFalse(self.items[0].comp)
        self.assertFalse(self.items[1].comp)
    def test_va_flag_sets_comp(self):
        va_info = copy.deepcopy(self.info)
        va_info.va = True
        self._apply(info=va_info)
        self.assertTrue(self.items[0].comp)
        self.assertTrue(self.items[1].comp)
class StringDistanceTest(unittest.TestCase):
    """Tests for the string_dist() similarity metric.

    A distance of 0.0 means the strings are considered equivalent; the
    metric ignores case/punctuation and down-weights common noise such
    as leading articles, parentheticals, and "feat." credits.
    """
    def test_equal_strings(self):
        dist = string_dist(u'Some String', u'Some String')
        self.assertEqual(dist, 0.0)
    def test_different_strings(self):
        dist = string_dist(u'Some String', u'Totally Different')
        self.assertNotEqual(dist, 0.0)
    def test_punctuation_ignored(self):
        dist = string_dist(u'Some String', u'Some.String!')
        self.assertEqual(dist, 0.0)
    def test_case_ignored(self):
        dist = string_dist(u'Some String', u'sOME sTring')
        self.assertEqual(dist, 0.0)
    def test_leading_the_has_lower_weight(self):
        # A leading article mismatch costs less than an arbitrary token.
        dist1 = string_dist(u'XXX Band Name', u'Band Name')
        dist2 = string_dist(u'The Band Name', u'Band Name')
        self.assertTrue(dist2 < dist1)
    def test_parens_have_lower_weight(self):
        dist1 = string_dist(u'One .Two.', u'One')
        dist2 = string_dist(u'One (Two)', u'One')
        self.assertTrue(dist2 < dist1)
    def test_brackets_have_lower_weight(self):
        dist1 = string_dist(u'One .Two.', u'One')
        dist2 = string_dist(u'One [Two]', u'One')
        self.assertTrue(dist2 < dist1)
    def test_ep_label_has_zero_weight(self):
        dist = string_dist(u'My Song (EP)', u'My Song')
        self.assertEqual(dist, 0.0)
    def test_featured_has_lower_weight(self):
        dist1 = string_dist(u'My Song blah Someone', u'My Song')
        dist2 = string_dist(u'My Song feat Someone', u'My Song')
        self.assertTrue(dist2 < dist1)
    def test_postfix_the(self):
        # "Title, The" normalizes to "The Title".
        dist = string_dist(u'The Song Title', u'Song Title, The')
        self.assertEqual(dist, 0.0)
    def test_postfix_a(self):
        dist = string_dist(u'A Song Title', u'Song Title, A')
        self.assertEqual(dist, 0.0)
    def test_postfix_an(self):
        dist = string_dist(u'An Album Title', u'Album Title, An')
        self.assertEqual(dist, 0.0)
    def test_empty_strings(self):
        dist = string_dist(u'', u'')
        self.assertEqual(dist, 0.0)
    def test_solo_pattern(self):
        # Just make sure these don't crash.
        string_dist(u'The ', u'')
        string_dist(u'(EP)', u'(EP)')
        string_dist(u', An', u'')
    def test_heuristic_does_not_harm_distance(self):
        dist = string_dist(u'Untitled', u'[Untitled]')
        self.assertEqual(dist, 0.0)
    def test_ampersand_expansion(self):
        dist = string_dist(u'And', u'&')
        self.assertEqual(dist, 0.0)
    def test_accented_characters(self):
        # Accented characters compare equal to their ASCII equivalents.
        dist = string_dist(u'\xe9\xe1\xf1', u'ean')
        self.assertEqual(dist, 0.0)
class EnumTest(_common.TestCase):
    """
    Test Enum Subclasses defined in beets.util.enumeration
    """
    def test_ordered_enum(self):
        # Members must order strictly by definition position: a < b < c.
        OrderedEnumClass = match.OrderedEnum('OrderedEnumTest', ['a', 'b', 'c'])  # noqa
        self.assertLess(OrderedEnumClass.a, OrderedEnumClass.b)
        self.assertLess(OrderedEnumClass.a, OrderedEnumClass.c)
        self.assertLess(OrderedEnumClass.b, OrderedEnumClass.c)
        self.assertGreater(OrderedEnumClass.b, OrderedEnumClass.a)
        self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.a)
        self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.b)
def suite():
    """Build and return the test suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly; delegates to suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
jcoady9/beets
|
test/test_autotag.py
|
Python
|
mit
| 34,286
|
"""1449. Form Largest Integer With Digits That Add up to Target
https://leetcode.com/problems/form-largest-integer-with-digits-that-add-up-to-target/
"""
from functools import lru_cache
from typing import List
class Solution:
    """LeetCode 1449: largest integer whose digit costs sum to ``target``.

    Digit ``i + 1`` costs ``cost[i]``; the answer is '0' when no digit
    string has an exact total cost of ``target``.
    """

    def largest_number(self, cost: List[int], target: int) -> str:
        """Top-down memoized search over the remaining budget."""
        def better(a: str, b: str) -> bool:
            # A longer numeric string is always larger; equal lengths
            # fall back to lexicographic comparison.
            if len(a) != len(b):
                return len(a) > len(b)
            return a > b

        @lru_cache(None)
        def search(remaining: int) -> str:
            if remaining == 0:
                return ''
            best = '0'  # sentinel: "no valid string"
            for digit_idx, price in enumerate(cost):
                if price > remaining:
                    continue
                tail = search(remaining - price)
                if tail == '0':
                    continue
                candidate = str(digit_idx + 1) + tail
                if better(candidate, best):
                    best = candidate
            return best

        return search(target)

    def largest_number3(self, cost: List[int], target: int) -> str:
        """Bottom-up DP: best string for every exact budget 1..target."""
        def better(a: str, b: str) -> bool:
            if len(a) != len(b):
                return len(a) > len(b)
            return a > b

        best = [''] * (target + 1)
        for budget in range(1, target + 1):
            best[budget] = '0'
            for digit in range(1, 10):
                price = cost[digit - 1]
                if price > budget:
                    continue
                prefix = best[budget - price]
                if prefix == '0':
                    continue
                candidate = str(digit) + prefix
                if better(candidate, best[budget]):
                    best[budget] = candidate
        return best[target]

    def largest_number2(self, cost: List[int], target: int) -> str:
        """Two-phase DP: maximize digit count, then rebuild greedily
        from the highest digit downward."""
        rows, cols = len(cost) + 1, target + 1
        table = [[0] * cols for _ in range(rows)]

        def max_digit_count() -> int:
            # table[i][j]: most digits buyable at exact cost j using
            # digits 1..i (each digit reusable — unbounded knapsack).
            for i in range(1, rows):
                price = cost[i - 1]
                for j in range(1, cols):
                    table[i][j] = table[i - 1][j]
                    if price == j:
                        table[i][j] = max(table[i][j], 1)
                    elif price < j and table[i][j - price] != 0:
                        table[i][j] = max(table[i][j], 1 + table[i][j - price])
            return table[len(cost)][target]

        if max_digit_count() <= 0:
            return '0'
        ans = ''
        remaining = target
        # Greedy reconstruction: take the largest digit whose removal
        # keeps the optimal digit count.
        for digit in range(len(cost), 0, -1):
            price = cost[digit - 1]
            while remaining >= price and \
                    table[-1][remaining] == 1 + table[-1][remaining - price]:
                if remaining == price:
                    return ans + str(digit)
                elif table[-1][remaining - price] != 0:
                    ans += str(digit)
                    remaining -= price
                else:
                    break
        return ans
if __name__ == '__main__':
    # Smoke test with the LeetCode sample input; expected output: 7772.
    sol = Solution()
    print(sol.largest_number2([4, 3, 2, 5, 6, 7, 2, 5, 5], 9))
    print(sol.largest_number3([4, 3, 2, 5, 6, 7, 2, 5, 5], 9))
|
isudox/leetcode-solution
|
python-algorithm/leetcode/problem_1449.py
|
Python
|
mit
| 2,700
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
from xml.etree import ElementTree as et
from lib.core.data import conf
from lib.core.data import logger
from lib.core.data import paths
from lib.core.datatype import AttribDict
from lib.core.exception import SqlmapInstallationException
def cleanupVals(text, tag):
    """Normalize a raw XML text value (Python 2 code — uses basestring).

    'clause' and 'where' element values are comma-separated lists and are
    always returned as lists; all other values are coerced to int when
    fully numeric, otherwise kept as strings.
    """
    if tag in ("clause", "where"):
        text = text.split(',')
    if isinstance(text, basestring):
        text = int(text) if text.isdigit() else str(text)
    elif isinstance(text, list):
        count = 0
        for _ in text:
            text[count] = int(_) if _.isdigit() else str(_)
            count += 1
        # Collapse one-element lists back to a scalar, except for the
        # 'clause'/'where' tags which must stay list-valued.
        if len(text) == 1 and tag not in ("clause", "where"):
            text = text[0]
    return text
def parseXmlNode(node):
    """Parse one payloads/boundaries XML root into global conf.

    Each <boundary> element becomes an AttribDict appended to
    conf.boundaries; each <test> element becomes an AttribDict appended
    to conf.tests. Child tags become dict keys; values pass through
    cleanupVals() for type coercion.
    """
    for element in node.getiterator('boundary'):
        boundary = AttribDict()
        for child in element.getchildren():
            if child.text:
                values = cleanupVals(child.text, child.tag)
                boundary[child.tag] = values
            else:
                # Empty element -> explicit None placeholder.
                boundary[child.tag] = None
        conf.boundaries.append(boundary)
    for element in node.getiterator('test'):
        test = AttribDict()
        for child in element.getchildren():
            if child.text and child.text.strip():
                values = cleanupVals(child.text, child.tag)
                test[child.tag] = values
            else:
                if len(child.getchildren()) == 0:
                    # Childless empty element -> None placeholder.
                    test[child.tag] = None
                    continue
                else:
                    # Nested element: collect grandchildren; a repeated
                    # tag accumulates into a [previous, current] pair.
                    test[child.tag] = AttribDict()
                    for gchild in child.getchildren():
                        if gchild.tag in test[child.tag]:
                            prevtext = test[child.tag][gchild.tag]
                            test[child.tag][gchild.tag] = [prevtext, gchild.text]
                        else:
                            test[child.tag][gchild.tag] = gchild.text
        conf.tests.append(test)
def loadBoundaries():
    """Parse the boundaries XML file into conf.boundaries.

    Raises SqlmapInstallationException when the file is malformed, since
    a broken bundled XML indicates a damaged installation.
    """
    try:
        doc = et.parse(paths.BOUNDARIES_XML)
    except Exception, ex:
        errMsg = "something seems to be wrong with "
        errMsg += "the file '%s' ('%s'). Please make " % (paths.BOUNDARIES_XML, ex)
        errMsg += "sure that you haven't made any changes to it"
        raise SqlmapInstallationException, errMsg
    root = doc.getroot()
    parseXmlNode(root)
def loadPayloads():
    """Parse every payload XML file into conf.tests.

    Files are processed in sorted name order so the resulting test list
    is deterministic across runs.
    """
    payloadFiles = os.listdir(paths.SQLMAP_XML_PAYLOADS_PATH)
    payloadFiles.sort()
    for payloadFile in payloadFiles:
        payloadFilePath = os.path.join(paths.SQLMAP_XML_PAYLOADS_PATH, payloadFile)
        #logger.debug("Parsing payloads from file '%s'" % payloadFile)
        try:
            doc = et.parse(payloadFilePath)
        except Exception, ex:
            errMsg = "something seems to be wrong with "
            errMsg += "the file '%s' ('%s'). Please make " % (payloadFilePath, ex)
            errMsg += "sure that you haven't made any changes to it"
            raise SqlmapInstallationException, errMsg
        root = doc.getroot()
        parseXmlNode(root)
|
V11/volcano
|
server/sqlmap/lib/parse/payloads.py
|
Python
|
mit
| 3,204
|
import codecs
import logging
import random
def import_url(path, lo, hi):
    """Read a URL list file and return numbered URLs from rows [lo, hi).

    Each line is expected to look like ``<url>:<extra>``; only the part
    before the first ':' is kept. ``lo`` and ``hi`` may be None (or 0)
    to default to the start/end of the file.

    Returns a list of (zero-padded 5-digit row number, url) tuples.
    """
    with codecs.open(path, encoding='utf-8') as f:
        lines = f.read().split('\n')
    if not lo:
        lo = 0
    if not hi:
        hi = len(lines)
    # NOTE: the original also tested ``i + lo in range(lo, hi)`` inside
    # the loop, which is always true after slicing — the redundant
    # membership check (an O(n) list scan per line under Python 2) is
    # dropped.
    url_arr = []
    for i, line in enumerate(lines[lo:hi], start=lo):
        url = line.split(':')[0]
        url_arr.append((str(i).zfill(5), url))
    return url_arr
def import_proxy(path, mode):
    """Parse a proxy list file into shuffled http/https proxy dicts.

    The file must end with a trailing newline (the final empty split
    entry is discarded). In mode 'comma' each line is
    ``addr,port[,...]``; otherwise each line is used verbatim as
    ``addr:port``. The result order is randomized.
    """
    with open(path) as handle:
        entries = handle.read().split('\n')
    del entries[-1]
    proxies = []
    for entry in entries:
        if mode == 'comma':
            fields = entry.split(',')
            entry = fields[0] + ':' + fields[1]
        proxies.append({
            'http': 'http://' + entry,
            'https': 'https://' + entry,
        })
    random.shuffle(proxies)
    return proxies
def setLogger(path):
    """Configure and return (console_logger, result_logger).

    console_logger emits timestamped records both to ./console.log and
    to the console; result_logger writes bare message text to ``path``
    (UTF-8, created in the current working directory).

    NOTE(review): calling this more than once per process appends
    duplicate handlers to the same named loggers — confirm it is only
    invoked once at startup.
    """
    console_logger = logging.getLogger('consoleLogger')
    hdlr = logging.FileHandler('./console.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(formatter)
    console_logger.addHandler(hdlr)
    console_logger.addHandler(consoleHandler)
    console_logger.setLevel(logging.DEBUG)
    # Result log: message text only, written to the caller-supplied path.
    result_logger = logging.getLogger('resultLogger')
    hdlr2 = logging.FileHandler('./'+path,encoding='utf-8')
    formatter2 = logging.Formatter('%(message)s')
    hdlr2.setFormatter(formatter2)
    result_logger.addHandler(hdlr2)
    result_logger.setLevel(logging.DEBUG)
    return console_logger, result_logger
|
xmeng17/Malicious-URL-Detection
|
host/http_proxy/helper.py
|
Python
|
mit
| 1,790
|
"""Templatetags for the ``document_library`` app."""
from django import template
from ..models import Document
register = template.Library()
@register.assignment_tag
def get_files_for_document(document):
    """
    Returns the available files for all languages.

    Each file already seen under another language is not re-added; every
    returned file is tagged with the language code of the translation it
    came from.
    """
    collected = []
    for translation in document.translations.all():
        filer_file = translation.filer_file
        if filer_file is not None and filer_file not in collected:
            filer_file.language = translation.language_code
            collected.append(filer_file)
    return collected
@register.assignment_tag(takes_context=True)
def get_frontpage_documents(context):
    """Returns the library favs that should be shown on the front page."""
    request = context.get('request')
    published = Document.objects.published(request)
    return published.filter(is_on_front_page=True)
@register.assignment_tag(takes_context=True)
def get_latest_documents(context, count=5):
    """
    Returns the latest documents.

    :param count: Number of documents to be returned. Defaults to 5.
    """
    request = context.get('request')
    return Document.objects.published(request)[:count]
|
bitmazk/django-document-library
|
document_library/templatetags/document_library_tags.py
|
Python
|
mit
| 1,256
|
# -*- coding: utf-8 -*-
from Instanssi.common.auth import user_access_required
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib import auth
from django.urls import reverse
from Instanssi.users.forms import OpenIDLoginForm, DjangoLoginForm, ProfileForm
from Instanssi.common.misc import get_url_local_path
# Supported third-party login providers, rendered on the login and
# profile pages. Tuple layout below:
AUTH_METHODS = [
    # Short name, social-auth, friendly name
    ('facebook', 'facebook', 'Facebook'),
    ('google', 'google-oauth2', 'Google'),
    ('twitter', 'twitter', 'Twitter'),
    ('github', 'github', 'Github'),
    ('battlenet', 'battlenet-oauth2', 'Battle.net'),
    ('steam', 'steam', 'Steam'),
]
def login(request):
    """Render the login page and handle Django-form login POSTs.

    Already-authenticated users are redirected straight to their profile.
    The post-login redirect target comes from ?next= or the (local-only)
    referer; OpenID/social logins are rendered here but handled by their
    own endpoints.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:profile'))
    # Get referer for redirect
    # Make sure that the referrer is a local path.
    if 'next' in request.GET:
        next_page = get_url_local_path(request.GET['next'])
    else:
        next_page = get_url_local_path(request.META.get('HTTP_REFERER', reverse('users:profile')))
    # Test django login form
    if request.method == "POST":
        djangoform = DjangoLoginForm(request.POST)
        if djangoform.is_valid():
            djangoform.login(request)
            return HttpResponseRedirect(djangoform.cleaned_data['next'])
        # An invalid POST falls through and re-renders the bound form
        # with its validation errors.
    else:
        djangoform = DjangoLoginForm(next=next_page)
    # Openid login form
    # The form will be handled elsewhere; this is only for rendering the form.
    openidform = OpenIDLoginForm(next=next_page)
    # Render response
    return render(request, "users/login.html", {
        'djangoform': djangoform,
        'openidform': openidform,
        'next': next_page,
        'AUTH_METHODS': AUTH_METHODS
    })
def loggedout(request):
    """Render the post-logout confirmation page."""
    template_name = "users/loggedout.html"
    return render(request, template_name)
@user_access_required
def profile(request):
    """Show/update the user's profile and list linked social providers."""
    from social_django.models import DjangoStorage
    if request.method == "POST":
        profileform = ProfileForm(request.POST, instance=request.user, user=request.user)
        if profileform.is_valid():
            profileform.save()
            return HttpResponseRedirect(reverse('users:profile'))
        # An invalid POST falls through and re-renders the bound form
        # with its validation errors.
    else:
        profileform = ProfileForm(instance=request.user, user=request.user)
    # Get all active providers for this user
    active_providers = []
    for social_auth in DjangoStorage.user.get_social_auth_for_user(request.user):
        active_providers.append(social_auth.provider)
    # Providers list
    # Each entry becomes (short name, social-auth name, friendly name,
    # is-linked flag) for template rendering.
    methods = []
    for method in AUTH_METHODS:
        methods.append(method + (method[1] in active_providers, ))
    return render(request, "users/profile.html", {
        'profileform': profileform,
        'active_providers': active_providers,
        'AUTH_METHODS': methods
    })
def logout(request):
    """End the current session, then redirect to the logged-out page."""
    auth.logout(request)
    target = reverse('users:loggedout')
    return HttpResponseRedirect(target)
|
Instanssi/Instanssi.org
|
Instanssi/users/views.py
|
Python
|
mit
| 2,915
|