| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_bond_type import AbstractBondType
class HarmonicPotentialBondType(AbstractBondType):
__slots__ = ['length', 'k', 'order', 'c']
@accepts_compatible_units(None, None,
length=units.nanometers,
k=units.kilojoules_per_mole * units.nanometers ** (-2),
order=None,
c=None)
def __init__(self, bondingtype1, bondingtype2,
length=0.0 * units.nanometers,
k=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
order=1, c=False):
AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
self.length = length
self.k = k
class HarmonicPotentialBond(HarmonicPotentialBondType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
length=0.0 * units.nanometers,
k=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
order=1, c=False):
self.atom1 = atom1
self.atom2 = atom2
HarmonicPotentialBondType.__init__(self, bondingtype1, bondingtype2,
length=length,
k=k,
order=order, c=c)
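# Minimal usage sketch (hypothetical bonding-type names and force constant,
# purely illustrative; it only exercises the unit checking done by the decorator):
if __name__ == '__main__':
    example = HarmonicPotentialBondType(
        'opls_135', 'opls_140',
        length=0.109 * units.nanometers,
        k=284512.0 * units.kilojoules_per_mole * units.nanometers ** (-2))
    print(example.length, example.k)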
|
ctk3b/InterMol
|
intermol/forces/harmonic_potential_bond_type.py
|
Python
|
mit
| 1,407
|
# pylint: disable-all
import unittest
from circleci.error import CircleCIException, BadKeyError, BadVerbError, InvalidFilterError
class TestCircleCIError(unittest.TestCase):
def setUp(self):
self.base = CircleCIException('fake')
self.key = BadKeyError('fake')
self.verb = BadVerbError('fake')
self.filter = InvalidFilterError('fake', 'status')
self.afilter = InvalidFilterError('fake', 'artifacts')
def test_error_implements_str(self):
self.assertTrue(self.base.__str__ is not object.__str__)
string = self.base.__str__()
self.assertIn('invalid', string)
def test_verb_message(self):
self.assertIn('DELETE', self.verb.message)
def test_key_message(self):
self.assertIn('deploy-key', self.key.message)
def test_filter_message(self):
self.assertIn('running', self.filter.message)
self.assertIn('completed', self.afilter.message)
|
levlaz/circleci.py
|
tests/circle/test_error.py
|
Python
|
mit
| 951
|
from .commutative import Commutative
class Product(Commutative):
def __init__(self, *args):
super(Product, self).__init__(*self.simplified(*args))
def simplified(self, *args):
"""
Returns a sequence containing expressions that make a simplified Product.
Used when ``Product`` is initialized to simplify.
Uses ``self.exprs`` when no arguments are provided.
:type: args: int or Expression
:rtype: seq
"""
coefficient = 1
args = args or self._exprs
for arg in args:
if isinstance(arg, int):
# If any part is 0 the whole thing is 0
if arg == 0:
yield None
# 1 can be eliminated because 1 * x = x
if arg == 1:
continue
coefficient *= arg
else:
yield arg
if coefficient != 0:
yield coefficient
def __call__(self, val):
prod = 1
for expr in self._exprs:
prod *= self._val_of_exp(expr, val)
return prod
def degree(self):
"""
Returns total degree (ex degree x is 1, degree 3x^3 is 3) of product.
:rtype: int
"""
deg = 0
for expr in self._exprs:
deg += self._calc_degree(expr)
return deg
def order(self, ascending=True):
"""
Converts ``frozenset`` exprs into ``list`` ordered by degree.
:rtype: list
"""
return super(Product, self).order(ascending=ascending)
def same_base(self, other):
return isinstance(other, self.__class__) and \
self.rem_int() == other.rem_int()
def rem_int(self):
return frozenset([expr for expr in self._exprs if not isinstance(expr, int)])
def __str__(self):
return ''.join("{} * ".format(expr) for expr in self.order())[:-2] # Removes leftover *
def __mul__(self, other):
if not isinstance(other, self.__class__):
return Product(*self._exprs, other)
no_overlap = self._exprs.union(other.exprs) - self._exprs.intersection(other.exprs)
overlap = set([expr**2 for expr in self._exprs.intersection(other.exprs)])
return no_overlap.union(overlap)
def __pow__(self, power, modulo=None):
return Product(*[expr**power for expr in self._exprs])
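# Illustrative sketch of the simplification rules above (hypothetical symbol x,
# assuming the Commutative base class stores the arguments in self._exprs):
#   Product(x, 2, 3, 1) -> the integer factors fold into a single coefficient 6
#   and the 1 is dropped, leaving roughly {x, 6} as the simplified expressions;
#   a 0 factor is flagged by yielding None, per the comment in simplified().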
|
LordDarkula/polypy
|
polypy/product.py
|
Python
|
mit
| 2,416
|
# coding: utf-8
from datetime import datetime
from flask import Flask
from flask import render_template
from views.todos import todos_view
app = Flask(__name__)
app.register_blueprint(todos_view, url_prefix='/todos')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/time')
def time():
return str(datetime.now())
@app.route('/1/ping')
def ping():
"""健康监测
LeanEngine 会根据 `/1/ping` 判断应用是否正常运行。
如果返回状态码为 200 则认为正常。
其他状态码或者超过 5 秒没响应则认为应用运行异常。
"""
return 'pong'
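# Hypothetical local entry point (not part of the original file; on LeanEngine
# the `app` object above is served by the platform's own WSGI wrapper):
# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=3000, debug=True)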
|
sdjcw/python-getting-started
|
app.py
|
Python
|
mit
| 638
|
"""
:created: 2017-09
:author: Alex BROSSARD <abrossard@artfx.fr>
"""
from PySide2 import QtWidgets, QtCore
from pymel import core as pmc
from auri.auri_lib import AuriScriptView, AuriScriptController, AuriScriptModel, grpbox
from auri.scripts.Maya_Scripts import rig_lib
from auri.scripts.Maya_Scripts.rig_lib import RigController
reload(rig_lib)
class View(AuriScriptView):
def __init__(self, *args, **kwargs):
self.modules_cbbox = QtWidgets.QComboBox()
self.outputs_cbbox = QtWidgets.QComboBox()
self.refresh_btn = QtWidgets.QPushButton("Refresh")
self.prebuild_btn = QtWidgets.QPushButton("Prebuild")
super(View, self).__init__(*args, **kwargs)
def set_controller(self):
self.ctrl = Controller(self.model, self)
def set_model(self):
self.model = Model()
def refresh_view(self):
self.ctrl.look_for_parent()
def setup_ui(self):
self.modules_cbbox.setModel(self.ctrl.modules_with_output)
self.modules_cbbox.currentTextChanged.connect(self.ctrl.on_modules_cbbox_changed)
self.outputs_cbbox.setModel(self.ctrl.outputs_model)
self.outputs_cbbox.currentTextChanged.connect(self.ctrl.on_outputs_cbbox_changed)
self.refresh_btn.clicked.connect(self.ctrl.look_for_parent)
self.prebuild_btn.clicked.connect(self.ctrl.prebuild)
main_layout = QtWidgets.QVBoxLayout()
select_parent_layout = QtWidgets.QVBoxLayout()
select_parent_grp = grpbox("Select parent", select_parent_layout)
cbbox_layout = QtWidgets.QHBoxLayout()
cbbox_layout.addWidget(self.modules_cbbox)
cbbox_layout.addWidget(self.outputs_cbbox)
select_parent_layout.addLayout(cbbox_layout)
select_parent_layout.addWidget(self.refresh_btn)
main_layout.addWidget(select_parent_grp)
main_layout.addWidget(self.prebuild_btn)
self.setLayout(main_layout)
class Controller(RigController):
def __init__(self, model, view):
"""
Args:
model (Model):
view (View):
"""
self.guides_grp = None
self.guide = None
self.guide_name = "None"
RigController.__init__(self, model, view)
def prebuild(self):
self.create_temporary_outputs(["OUTPUT"])
self.guide_name = "{0}_GUIDE".format(self.model.module_name)
if self.guide_check(self.guide_name):
self.guide = pmc.ls(self.guide_name)
self.guides_grp = pmc.ls("{0}_guides".format(self.model.module_name))[0]
self.guides_grp.setAttr("visibility", 1)
self.view.refresh_view()
pmc.select(cl=1)
return
self.guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guide_name)
self.guide.setAttr("translate", (0, 7.5, 0))
self.guides_grp = self.group_guides(self.guide)
self.view.refresh_view()
pmc.select(cl=1)
def execute(self):
self.prebuild()
self.delete_existing_objects()
self.connect_to_parent()
cog_shape = rig_lib.large_box_curve("{0}_CTRL_shape".format(self.model.module_name))
cog_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_CTRL".format(self.model.module_name), shape=cog_shape,
drawstyle=2)
cog_ofs = pmc.group(cog_ctrl, n="{0}_ctrl_OFS".format(self.model.module_name))
cog_ofs.setAttr("translate", pmc.xform(self.guide, q=1, ws=1, translation=1))
pmc.parent(cog_ofs, self.ctrl_input_grp)
rig_lib.create_output(name="{0}_OUTPUT".format(self.model.module_name), parent=cog_ctrl)
rig_lib.clean_ctrl(cog_ctrl, 20, trs="s")
self.jnt_input_grp.setAttr("visibility", 0)
self.parts_grp.setAttr("visibility", 0)
self.guides_grp.setAttr("visibility", 0)
info_crv = rig_lib.signature_shape_curve("{0}_INFO".format(self.model.module_name))
info_crv.getShape().setAttr("visibility", 0)
info_crv.setAttr("hiddenInOutliner", 1)
info_crv.setAttr("translateX", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("translateY", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("translateZ", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("rotateX", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("rotateY", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("rotateZ", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("scaleX", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("scaleY", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("scaleZ", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("visibility", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("overrideEnabled", 1)
info_crv.setAttr("overrideDisplayType", 2)
pmc.parent(info_crv, self.parts_grp)
rig_lib.add_parameter_as_extra_attr(info_crv, "Module", "cog")
rig_lib.add_parameter_as_extra_attr(info_crv, "parent_Module", self.model.selected_module)
rig_lib.add_parameter_as_extra_attr(info_crv, "parent_output", self.model.selected_output)
pmc.select(cl=1)
class Model(AuriScriptModel):
def __init__(self):
AuriScriptModel.__init__(self)
self.selected_module = None
self.selected_output = None
|
Sookhaal/auri_maya_rigging_scripts
|
general/center_of_gravity.py
|
Python
|
mit
| 5,456
|
#import factorial
#import square
x = int(raw_input("What is 'x'?\n"))
y = int(raw_input("What is y?\n"))
# question0 = str(raw_input("Define a y value? (y/n)\n"))
# if (question0 == "y","Y","yes","Yes"):
# y = int(raw_input("What will 'y' be?\n"))
# elif (y == "n","N","no","No"):
# question2 = str(raw_input("Is y = 10 ok?\n"))
# if (question2 == "y","Y","yes","Yes"):
# y = 10
# elif (question2 == "n","N","no","No"):
# y = int(raw_input("What will 'y' be?\n"))
# else:
# print "Please insert and interger"
# else:
# print "Please insert an interger."
print "Using that information, we can do some mathematical equations."
if x > y: #is not None:
print "x, %d, is greater than y, %d." % (x, y)
elif x == y: #is not None:
print "x, %d, is equal to y, %d." % (x, y)
elif x < y: #is not None:
print "x, %d, is less than y, %d." % (x, y)
elif x is not int:
print "x should be a interger, you put it as %d" % (x)
elif x is None:
print "Please rerun the code."
else:
print "Something went wrong!"
add = (x + y)
sub = (x - y)
mult = (x * y)
div = (x / y)
rem = (x % y)
xeven = (x % 2 == 0)
xodd = (x % 2 != 0)
yeven = (y % 2 == 0)
yodd = (y % 2 != 0)
# xfact = (factorial(x))
# yfact = (factorial(y))
print "If you add x and y, you'll get %s." % add
print "If you subtract x and y, you'll get %s." % sub
print "If you multiply x and y, you'll get %s." % mult
print "If you divide x and y, you'll get %s, with a remainder of %s." % (div, rem)
if (x % 2 == 0):
print "x is even."
if (x % 2 != 0):
print "x is odd."
if (y % 2 == 0):
print "y is even."
if (y % 2 != 0):
print "y is odd."
print "If you square x, you get %s, and y squared is %s." % ((x^2),(y^2))
print "If you cube x, you get %s, and y cubed is %s." % ((x^3), (y^3))
#print "If you take x factorial, you get %s, and y factorial is %s." % ((xfact), (yfact))
#print "The square root of x is %s, and the square root of y is %s." % (square(x), square(y))
print ""
# from sys import argv
# import random
# value = (1,2,3,4,5,6)
# roll, string = argv
# def choice(roll):
# random.choice(dice)
# return choice
# choice(roll)
# dice = choice(value)
|
chrisortman/CIS-121
|
k0459866/Lessons/ex12.py
|
Python
|
mit
| 2,216
|
from pyparsing import *
TOP = Forward()
BOTTOM = Forward()
HAND = Forward()
GRAVEYARD = Forward()
LIBRARY = Forward()
BATTLEFIELD = Forward()
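# These are pyparsing Forward() placeholders: the actual zone grammars are
# presumably bound elsewhere in the package with the <<= operator, e.g.
# (hypothetical): HAND <<= CaselessKeyword("hand")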
|
jrgdiz/cardwalker
|
grammar/constants/zones/decl.py
|
Python
|
mit
| 143
|
import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad,tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from sigmas import sig0
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
Planck13 = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
cosmo = Planck13
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
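# m2R and R2m invert each other through M = (4*pi/3) * rhobar * RL**3, with
# rhobar the mean matter density (Msun/Mpc^3) returned by cd.cosmo_densities.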
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
dmS = n.load('m2S.npz')
MLtemp,SLtemp = dmS['arr_0'],dmS['arr_1']
fs2m = interp1d(SLtemp,MLtemp,kind='cubic')
def S2M(S):
return fs2m(S)
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth
######################## SIZE DISTRIBUTION #############################
####################### FZH04 ##############################
def fFZH(S,zeta,B0,B1):
res = B0/n.sqrt(2*n.pi*S**3)*n.exp(-B0**2/2/S-B0*B1-B1**2*S/2)
return res
def BFZH(S0,deltac,smin,K):
return deltac-n.sqrt(2*(smin-S0))*K
def BFZHlin(S0,deltac,smin,K):
b0 = deltac-K*n.sqrt(2*smin)
b1 = K/n.sqrt(2*smin)
return b0+b1*S0
def dlnBFdlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZH(S0+d,deltac,smin,K), BFZH(S0,deltac,smin,K), BFZH(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
def dlnBFlindlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZHlin(S0+d,deltac,smin,K), BFZHlin(S0,deltac,smin,K), BFZHlin(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
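# Both helpers above approximate the logarithmic derivative of the barrier with a
# centered finite difference: dlnB/dlnS0 ~ (S0/B(S0)) * (B(S0+d) - B(S0-d)) / (2*d).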
##### m_min
dDoZ = n.load('theta.npz')
thetal,DoZl = dDoZ['arr_0'],dDoZ['arr_1']
ftheta = interp1d(DoZl,thetal,kind='cubic')
def theta(z,del0):
return ftheta(del0/(1+z))
def RphysoR0(del0,z):
th = theta(z,del0)
return 3./10/del0*(1-n.cos(th))
def RcovEul(del0,z):
return RphysoR0(del0,z)*(1+z)
def dlinSdlnR(lnR,d=0.001):
res = (n.log(sig0(n.exp(lnR+d)))-n.log(sig0(n.exp(lnR-d))))/d/2
return n.abs(res)
################################## MAIN ######################################
for z in [12., 16.]:
PLOT = True
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
Tvir = 1.E4
#z = 12.
deltac = Deltac(z)
mm = mmin(z)
M0min = zeta*mm
RLmin,R0min = m2R(mm), m2R(M0min)
print 'R',RLmin
smin = sig0(RLmin)
Rmin = R0min*RcovEul(deltac,z) #S0=smin, so del0=deltac; conversion from Lagrangian to comoving Eulerian
####### FZH04 #######
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
#bFZH = deltac-n.sqrt(2*(smin-S0))*K
#bFZHlin = bFZH0+bFZH1*S0
def dlnRdlnR0(lnR0,S0,del0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
th = theta(z,del0)
thfactor = 1-3./2*th*(th-n.sin(th))/(1-n.cos(th))**2
res = 1-dlinSdlnR(lnR0)*dlnBFdlnS0(S0,deltac,smin,K)*thfactor
return res
def V0dndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
return S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZHlin(S0,deltac,smin,K)
#lnR0 = n.log(n.exp(lnR)/RcovEul(del0,z))
VoV0 = (RcovEul(del0,z))**3
#return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
return VoV0*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
VoV0 = (RcovEul(del0,z))**3
return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
if True:
print 'computing z=',z
#Q = quad(lambda lnR: VdndlnR(lnR),n.log(Rmin),3.5) #integrated over eulerian coordinates
Q = quad(lambda lnR0: VdndlnR0(lnR0),n.log(R0min),3.5) #integrated over eulerian coordinates
print 'Q=',Q
Q = Q[0]
#######
lnR0 = n.arange(n.log(R0min),3,0.03)
S0list = []
for lnr0 in lnR0: S0list.append(sig0(n.exp(lnr0)))
S0list = n.array(S0list)
#lnR = n.arange(n.log(Rmin),3,0.1)
del0list = BFZH(S0list,deltac,smin,K)
lnR = n.log(n.exp(lnR0)*RcovEul(del0list,z))
normsize = []
for lnr0 in lnR0:
res = VdndlnR(lnr0)/Q
print n.exp(lnr0),res
normsize.append(res)
p.figure(1)
p.semilogx(n.exp(lnR),normsize,label=str(z))
p.legend()
if True:
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.figure(2)
p.plot(S0,bFZH,'b', label=str(z))
p.plot(S0,bFZHlin,'b.-')
p.ylim([0,20])
p.xlim([0,25])
p.legend()
if False: #for benchmark
for i in range(1000):
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.show()
################
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
###########################
# dlist = n.linspace(8,10,10)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
# if False:
# p.figure()
# p.plot(res[1],res[2])
# p.show()
#tplquad(All,mm,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))
|
yunfanz/ReionBub
|
Choud14/FZH04.py
|
Python
|
mit
| 5,526
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import uuid
from operator import attrgetter
from flask import flash, jsonify, redirect, request, session
from sqlalchemy.orm import undefer
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core.cache import make_scoped_cache
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode, render_acl
from indico.core.permissions import get_principal_permissions, update_permissions
from indico.legacy.pdfinterface.latex import ContribsToPDF, ContributionBook
from indico.modules.attachments.controllers.event_package import AttachmentPackageGeneratorMixin
from indico.modules.events.abstracts.forms import AbstractContentSettingsForm
from indico.modules.events.abstracts.settings import abstracts_settings
from indico.modules.events.contributions import contribution_settings, get_contrib_field_types
from indico.modules.events.contributions.clone import ContributionCloner
from indico.modules.events.contributions.controllers.common import ContributionListMixin
from indico.modules.events.contributions.forms import (ContributionDefaultDurationForm, ContributionDurationForm,
ContributionExportTeXForm, ContributionProtectionForm,
ContributionStartDateForm, ContributionTypeForm,
SubContributionForm)
from indico.modules.events.contributions.lists import ContributionListGenerator
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.fields import ContributionField
from indico.modules.events.contributions.models.references import ContributionReference, SubContributionReference
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.contributions.models.types import ContributionType
from indico.modules.events.contributions.operations import (create_contribution, create_subcontribution,
delete_contribution, delete_subcontribution,
update_contribution, update_subcontribution)
from indico.modules.events.contributions.util import (contribution_type_row, generate_spreadsheet_from_contributions,
get_boa_export_formats, import_contributions_from_csv,
make_contribution_form)
from indico.modules.events.contributions.views import WPManageContributions
from indico.modules.events.logs import EventLogKind, EventLogRealm
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.management.controllers.base import RHContributionPersonListMixin
from indico.modules.events.management.util import flash_if_unregistered
from indico.modules.events.models.references import ReferenceType
from indico.modules.events.sessions import Session
from indico.modules.events.timetable.forms import ImportContributionsForm
from indico.modules.events.timetable.operations import update_timetable_entry
from indico.modules.events.tracks.models.tracks import Track
from indico.modules.events.util import check_event_locked, get_field_values, track_location_changes, track_time_changes
from indico.util.date_time import format_datetime, format_human_timedelta
from indico.util.i18n import _, ngettext
from indico.util.spreadsheets import send_csv, send_xlsx
from indico.util.string import handle_legacy_description
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.forms.base import FormDefaults
from indico.web.forms.fields.principals import serialize_principal
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
export_list_cache = make_scoped_cache('contrib-export-list')
def _render_subcontribution_list(contrib):
tpl = get_template_module('events/contributions/management/_subcontribution_list.html')
subcontribs = (SubContribution.query.with_parent(contrib)
.options(undefer('attachment_count'))
.order_by(SubContribution.position)
.all())
return tpl.render_subcontribution_list(contrib.event, contrib, subcontribs)
class RHManageContributionsBase(RHManageEventBase):
"""Base class for all contributions management RHs."""
def _process_args(self):
RHManageEventBase._process_args(self)
self.list_generator = ContributionListGenerator(event=self.event)
class RHManageContributionBase(RHManageContributionsBase):
"""Base class for a specific contribution."""
normalize_url_spec = {
'locators': {
lambda self: self.contrib
}
}
def _process_args(self):
RHManageContributionsBase._process_args(self)
self.contrib = Contribution.query.filter_by(id=request.view_args['contrib_id'], is_deleted=False).one()
def _check_access(self):
if not self.contrib.can_manage(session.user):
raise Forbidden
check_event_locked(self, self.event)
class RHManageSubContributionBase(RHManageContributionBase):
"""Base RH for a specific subcontribution."""
normalize_url_spec = {
'locators': {
lambda self: self.subcontrib
}
}
def _process_args(self):
RHManageContributionBase._process_args(self)
self.subcontrib = SubContribution.get_or_404(request.view_args['subcontrib_id'], is_deleted=False)
class RHManageContributionsActionsBase(RHManageContributionsBase):
"""Base class for classes performing actions on event contributions."""
def _process_args(self):
RHManageContributionsBase._process_args(self)
self.contrib_ids = [int(x) for x in request.form.getlist('contribution_id')]
self.contribs = Contribution.query.with_parent(self.event).filter(Contribution.id.in_(self.contrib_ids)).all()
class RHManageSubContributionsActionsBase(RHManageContributionBase):
"""Base class for RHs performing actions on subcontributions."""
def _process_args(self):
RHManageContributionBase._process_args(self)
ids = {int(x) for x in request.form.getlist('subcontribution_id')}
self.subcontribs = (SubContribution.query
.with_parent(self.contrib)
.filter(SubContribution.id.in_(ids))
.all())
class RHContributions(ContributionListMixin, RHManageContributionsBase):
"""Display contributions management page."""
template = 'management/contributions.html'
view_class = WPManageContributions
class RHContributionListCustomize(RHManageContributionsBase):
"""Filter options for the contributions list of an event."""
ALLOW_LOCKED = True
def _process_GET(self):
return jsonify_template('events/contributions/contrib_list_filter.html',
filters=self.list_generator.list_config['filters'],
static_items=self.list_generator.static_items)
def _process_POST(self):
self.list_generator.store_configuration()
return jsonify_data(**self.list_generator.render_list())
class RHContributionListStaticURL(RHManageContributionsBase):
"""Generate a static URL for the configuration of the contribution list."""
ALLOW_LOCKED = True
def _process(self):
return jsonify(url=self.list_generator.generate_static_url())
class RHCreateContribution(RHManageContributionsBase):
def _process(self):
inherited_location = self.event.location_data
inherited_location['inheriting'] = True
default_duration = contribution_settings.get(self.event, 'default_duration')
contrib_form_class = make_contribution_form(self.event)
form = contrib_form_class(obj=FormDefaults(location_data=inherited_location, duration=default_duration),
event=self.event)
if form.validate_on_submit():
# Create empty contribution so it can be compared to the new one in flash_if_unregistered
contrib = Contribution()
with flash_if_unregistered(self.event, lambda: contrib.person_links):
contrib = create_contribution(self.event, *get_field_values(form.data))
flash(_("Contribution '{}' created successfully").format(contrib.title), 'success')
tpl_components = self.list_generator.render_list(contrib)
if tpl_components['hide_contrib']:
self.list_generator.flash_info_message(contrib)
return jsonify_data(**tpl_components)
return jsonify_template('events/contributions/forms/contribution.html', form=form)
class RHEditContribution(RHManageContributionBase):
def _process(self):
contrib_form_class = make_contribution_form(self.event)
custom_field_values = {f'custom_{x.contribution_field_id}': x.data for x in self.contrib.field_values}
parent_session_block = (self.contrib.timetable_entry.parent.session_block
if (self.contrib.timetable_entry and self.contrib.timetable_entry.parent) else None)
form = contrib_form_class(obj=FormDefaults(self.contrib, start_date=self.contrib.start_dt,
**custom_field_values),
event=self.event, contrib=self.contrib, session_block=parent_session_block)
if form.validate_on_submit():
with (
track_time_changes(),
track_location_changes(),
flash_if_unregistered(self.event, lambda: self.contrib.person_links)
):
update_contribution(self.contrib, *get_field_values(form.data))
flash(_("Contribution '{}' successfully updated").format(self.contrib.title), 'success')
tpl_components = self.list_generator.render_list(self.contrib)
if tpl_components['hide_contrib']:
self.list_generator.flash_info_message(self.contrib)
return jsonify_data(flash=(request.args.get('flash') == '1'), **tpl_components)
elif not form.is_submitted():
handle_legacy_description(form.description, self.contrib)
self.commit = False
return jsonify_template('events/contributions/forms/contribution.html', form=form)
class RHDeleteContributions(RHManageContributionsActionsBase):
def _process(self):
for contrib in self.contribs:
delete_contribution(contrib)
deleted_count = len(self.contribs)
flash(ngettext('The contribution has been deleted.',
'{count} contributions have been deleted.', deleted_count)
.format(count=deleted_count), 'success')
return jsonify_data(**self.list_generator.render_list())
class RHContributionACL(RHManageContributionBase):
"""Display the ACL of the contribution."""
def _process(self):
return render_acl(self.contrib)
class RHContributionACLMessage(RHManageContributionBase):
"""Render the inheriting ACL message."""
def _process(self):
mode = ProtectionMode[request.args['mode']]
return jsonify_template('forms/protection_field_acl_message.html', object=self.contrib, mode=mode,
endpoint='contributions.acl')
class RHContributionREST(RHManageContributionBase):
def _process_DELETE(self):
delete_contribution(self.contrib)
flash(_("Contribution '{}' successfully deleted").format(self.contrib.title), 'success')
return jsonify_data(**self.list_generator.render_list())
def _process_PATCH(self):
data = request.json
updates = {}
if set(data.keys()) > {'session_id', 'track_id'}:
raise BadRequest
if 'session_id' in data:
updates.update(self._get_contribution_session_updates(data['session_id']))
if 'track_id' in data:
updates.update(self._get_contribution_track_updates(data['track_id']))
rv = {}
if updates:
rv = update_contribution(self.contrib, updates)
return jsonify(unscheduled=rv.get('unscheduled', False), undo_unschedule=rv.get('undo_unschedule'))
def _get_contribution_session_updates(self, session_id):
updates = {}
if session_id is None:
updates['session'] = None
else:
session = Session.query.with_parent(self.event).filter_by(id=session_id).first()
if not session:
raise BadRequest('Invalid session id')
if session != self.contrib.session:
updates['session'] = session
return updates
def _get_contribution_track_updates(self, track_id):
updates = {}
if track_id is None:
updates['track_id'] = None
else:
track = Track.get(track_id)
if not track:
raise BadRequest('Invalid track id')
if track_id != self.contrib.track_id:
updates['track_id'] = track_id
return updates
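# Example PATCH payloads handled by _process_PATCH above (hypothetical ids):
#   {"session_id": 12}  -> move the contribution into session 12
#   {"track_id": null}  -> detach the contribution from its track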
class RHContributionPersonList(RHContributionPersonListMixin, RHManageContributionsActionsBase):
"""List of persons in the contribution."""
template = 'events/contributions/management/contribution_person_list.html'
ALLOW_LOCKED = True
@property
def _membership_filter(self):
contribution_ids = {contrib.id for contrib in self.contribs}
return Contribution.id.in_(contribution_ids)
class RHContributionProtection(RHManageContributionBase):
"""Manage contribution protection."""
def _process(self):
form = ContributionProtectionForm(obj=FormDefaults(**self._get_defaults()), contrib=self.contrib,
prefix='contribution-protection-')
if form.validate_on_submit():
update_permissions(self.contrib, form)
update_contribution(self.contrib, {'protection_mode': form.protection_mode.data})
return jsonify_data(flash=False, **self.list_generator.render_list(self.contrib))
return jsonify_template('events/management/protection_dialog.html', form=form)
def _get_defaults(self):
permissions = [[serialize_principal(p.principal), list(get_principal_permissions(p, Contribution))]
for p in self.contrib.acl_entries]
permissions = [item for item in permissions if item[1]]
return {'permissions': permissions, 'protection_mode': self.contrib.protection_mode}
class RHContributionSubContributions(RHManageContributionBase):
"""Get a list of subcontributions."""
def _process(self):
return jsonify_data(html=_render_subcontribution_list(self.contrib))
class RHCreateSubContribution(RHManageContributionBase):
"""Create a subcontribution."""
def _process(self):
form = SubContributionForm(event=self.event)
if form.validate_on_submit():
subcontrib = create_subcontribution(self.contrib, form.data)
flash(_("Subcontribution '{}' created successfully").format(subcontrib.title), 'success')
return jsonify_data(html=_render_subcontribution_list(self.contrib))
return jsonify_template('events/contributions/forms/subcontribution.html', form=form)
class RHEditSubContribution(RHManageSubContributionBase):
"""Edit the subcontribution."""
def _process(self):
form = SubContributionForm(obj=FormDefaults(self.subcontrib), event=self.event, subcontrib=self.subcontrib)
if form.validate_on_submit():
update_subcontribution(self.subcontrib, form.data)
flash(_("Subcontribution '{}' updated successfully").format(self.subcontrib.title), 'success')
return jsonify_data(html=_render_subcontribution_list(self.contrib))
elif not form.is_submitted():
handle_legacy_description(form.description, self.subcontrib)
self.commit = False
return jsonify_template('events/contributions/forms/subcontribution.html', form=form)
class RHSubContributionREST(RHManageSubContributionBase):
"""REST endpoint for management of a single subcontribution."""
def _process_DELETE(self):
delete_subcontribution(self.subcontrib)
flash(_("Subcontribution '{}' deleted successfully").format(self.subcontrib.title), 'success')
return jsonify_data(html=_render_subcontribution_list(self.contrib))
class RHCreateSubContributionREST(RHManageContributionBase):
"""REST endpoint to create a subcontribution."""
def _process_POST(self):
form = SubContributionForm(event=self.event)
if form.validate_on_submit():
subcontrib = create_subcontribution(self.contrib, form.data)
return jsonify(id=subcontrib.id, contribution_id=subcontrib.contribution_id, event_id=self.event.id)
return jsonify_data(success=False, errors=form.errors), 400
class RHDeleteSubContributions(RHManageSubContributionsActionsBase):
def _process(self):
for subcontrib in self.subcontribs:
delete_subcontribution(subcontrib)
return jsonify_data(html=_render_subcontribution_list(self.contrib))
class RHSortSubContributions(RHManageContributionBase):
def _process(self):
subcontrib_ids = request.form.getlist('subcontrib_ids', type=int)
subcontribs = {s.id: s for s in self.contrib.subcontributions}
for position, subcontrib_id in enumerate(subcontrib_ids, 1):
if subcontrib_id in subcontribs:
subcontribs[subcontrib_id].position = position
class RHContributionUpdateStartDate(RHManageContributionBase):
def _process_args(self):
RHManageContributionBase._process_args(self)
if self.contrib.session_block:
raise BadRequest
def _process(self):
form = ContributionStartDateForm(obj=FormDefaults(start_dt=self.contrib.start_dt), contrib=self.contrib)
if form.validate_on_submit():
with track_time_changes():
update_timetable_entry(self.contrib.timetable_entry, {'start_dt': form.start_dt.data})
return jsonify_data(new_value=format_datetime(self.contrib.start_dt, 'short'))
return jsonify_form(form, back_button=False, disabled_until_change=True)
class RHContributionUpdateDuration(RHManageContributionBase):
def _process_args(self):
RHManageContributionBase._process_args(self)
if self.contrib.session_block:
raise BadRequest
def _process(self):
form = ContributionDurationForm(obj=FormDefaults(self.contrib), contrib=self.contrib)
if form.validate_on_submit():
with track_time_changes():
update_contribution(self.contrib, {'duration': form.duration.data})
return jsonify_data(new_value=format_human_timedelta(self.contrib.duration, narrow=True))
return jsonify_form(form, back_button=False, disabled_until_change=True)
class RHManageContributionsExportActionsBase(RHManageContributionsActionsBase):
ALLOW_LOCKED = True
def _process_args(self):
RHManageContributionsActionsBase._process_args(self)
# some PDF export options do not sort the contribution list so we keep
# the order in which they were displayed when the user selected them
self.contribs.sort(key=lambda c: self.contrib_ids.index(c.id))
class RHContributionsMaterialPackage(RHManageContributionsExportActionsBase, AttachmentPackageGeneratorMixin):
"""Generate a ZIP file with materials for a given list of contributions."""
ALLOW_UNSCHEDULED = True
ALLOW_LOCKED = True
def _process(self):
attachments = self._filter_by_contributions({c.id for c in self.contribs}, None)
if not attachments:
flash(_('The selected contributions do not have any materials.'), 'warning')
return redirect(url_for('.manage_contributions', self.event))
return self._generate_zip_file(attachments, name_suffix=self.event.id)
class RHContributionsExportCSV(RHManageContributionsExportActionsBase):
"""Export list of contributions to CSV."""
def _process(self):
headers, rows = generate_spreadsheet_from_contributions(self.contribs)
return send_csv('contributions.csv', headers, rows)
class RHContributionsExportExcel(RHManageContributionsExportActionsBase):
"""Export list of contributions to XLSX."""
def _process(self):
headers, rows = generate_spreadsheet_from_contributions(self.contribs)
return send_xlsx('contributions.xlsx', headers, rows, tz=self.event.tzinfo)
class RHContributionsExportPDF(RHManageContributionsExportActionsBase):
def _process(self):
if not config.LATEX_ENABLED:
raise NotFound
pdf = ContribsToPDF(self.event, self.contribs)
return send_file('contributions.pdf', pdf.generate(), 'application/pdf')
class RHContributionsExportTeX(RHManageContributionsExportActionsBase):
def _process(self):
tex = ContribsToPDF(self.event, self.contribs)
archive = tex.generate_source_archive()
return send_file('contributions-tex.zip', archive, 'application/zip', inline=False)
class RHContributionExportTexConfig(RHManageContributionsExportActionsBase):
"""Configure Export via LaTeX."""
ALLOW_LOCKED = True
def _process(self):
form = ContributionExportTeXForm(contribs=self.contribs)
form.format.choices = [(k, v[0]) for k, v in get_boa_export_formats().items()]
if form.validate_on_submit():
data = form.data
data.pop('submitted', None)
key = str(uuid.uuid4())
export_list_cache.set(key, data, timeout=1800)
download_url = url_for('.contributions_tex_export_book', self.event, uuid=key)
return jsonify_data(flash=False, redirect=download_url, redirect_no_loading=True)
return jsonify_form(form, submit=_('Export'), disabled_until_change=False)
class RHContributionsExportTeXBook(RHManageContributionsExportActionsBase):
"""Handle export contributions via LaTeX."""
def _process(self):
config_params = export_list_cache.get(request.view_args['uuid'])
output_format = config_params['format']
sort_by = config_params['sort_by']
contribs = (Contribution.query.with_parent(self.event)
.filter(Contribution.id.in_(config_params['contribution_ids']))
.all())
func = get_boa_export_formats()[output_format][1]
return func(self.event, contribs, sort_by, ContributionBook)
class RHContributionsImportCSV(RHManageContributionsBase):
"""Import contributions from a CSV file."""
def _process(self):
form = ImportContributionsForm()
if form.validate_on_submit():
contributions, changes = import_contributions_from_csv(self.event, form.source_file.data)
flash(ngettext('{} contribution has been imported.',
'{} contributions have been imported.',
len(contributions)).format(len(contributions)), 'success')
if changes:
flash(_('Event dates/times adjusted due to imported data.'), 'warning')
return jsonify_data(flash=False, redirect=url_for('.manage_contributions', self.event))
return jsonify_template('events/contributions/management/import_contributions.html', form=form,
event=self.event)
class RHManageContributionTypes(RHManageContributionsBase):
"""Dialog to manage the ContributionTypes of an event."""
def _process(self):
contrib_types = self.event.contribution_types.order_by(db.func.lower(ContributionType.name)).all()
return jsonify_template('events/contributions/management/types_dialog.html', event=self.event,
contrib_types=contrib_types)
class RHManageDefaultContributionDuration(RHManageContributionsBase):
"""Dialog to manage the default contribution duration."""
def _process(self):
form = ContributionDefaultDurationForm(duration=contribution_settings.get(self.event, 'default_duration'))
if form.validate_on_submit():
contribution_settings.set(self.event, 'default_duration', form.duration.data)
flash(_('Default contribution duration was changed successfully'))
return jsonify_data()
return jsonify_form(form)
class RHManageContributionPublicationREST(RHManageContributionsBase):
"""Manage contribution publication setting."""
def _process_GET(self):
return jsonify(contribution_settings.get(self.event, 'published'))
def _process_PUT(self):
contribution_settings.set(self.event, 'published', True)
self.event.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
'Contributions published', session.user)
return '', 204
def _process_DELETE(self):
contribution_settings.set(self.event, 'published', False)
self.event.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
'Contributions unpublished', session.user)
return '', 204
class RHManageContributionTypeBase(RHManageContributionsBase):
"""Manage a contribution type of an event."""
normalize_url_spec = {
'locators': {
lambda self: self.contrib_type
}
}
def _process_args(self):
RHManageContributionsBase._process_args(self)
self.contrib_type = ContributionType.get_or_404(request.view_args['contrib_type_id'])
class RHEditContributionType(RHManageContributionTypeBase):
"""Dialog to edit a ContributionType."""
def _process(self):
form = ContributionTypeForm(event=self.event, obj=self.contrib_type)
if form.validate_on_submit():
old_name = self.contrib_type.name
form.populate_obj(self.contrib_type)
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
f'Updated type: {old_name}', session.user)
return contribution_type_row(self.contrib_type)
return jsonify_form(form)
class RHCreateContributionType(RHManageContributionsBase):
"""Dialog to add a ContributionType."""
def _process(self):
form = ContributionTypeForm(event=self.event)
if form.validate_on_submit():
contrib_type = ContributionType()
form.populate_obj(contrib_type)
self.event.contribution_types.append(contrib_type)
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
f'Added type: {contrib_type.name}', session.user)
return contribution_type_row(contrib_type)
return jsonify_form(form)
class RHDeleteContributionType(RHManageContributionTypeBase):
"""Dialog to delete a ContributionType."""
def _process(self):
db.session.delete(self.contrib_type)
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
f'Deleted type: {self.contrib_type.name}', session.user)
return jsonify_data(flash=False)
class RHManageContributionFields(RHManageContributionsBase):
"""Dialog to manage the custom contribution fields of an event."""
def _process(self):
custom_fields = self.event.contribution_fields.order_by(ContributionField.position)
custom_field_types = sorted(list(get_contrib_field_types().values()), key=attrgetter('friendly_name'))
return jsonify_template('events/contributions/management/fields_dialog.html', event=self.event,
custom_fields=custom_fields, custom_field_types=custom_field_types)
class RHSortContributionFields(RHManageContributionsBase):
"""Sort the custom contribution fields of an event."""
def _process(self):
field_by_id = {field.id: field for field in self.event.contribution_fields}
field_ids = request.form.getlist('field_ids', type=int)
for index, field_id in enumerate(field_ids, 0):
field_by_id[field_id].position = index
del field_by_id[field_id]
for index, field in enumerate(sorted(list(field_by_id.values()), key=attrgetter('position')), len(field_ids)):
field.position = index
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
'Custom fields reordered', session.user)
return jsonify_data(flash=False)
class RHCreateContributionField(RHManageContributionsBase):
"""Dialog to create a new custom field."""
def _process_args(self):
RHManageContributionsBase._process_args(self)
field_types = get_contrib_field_types()
try:
self.field_cls = field_types[request.view_args['field_type']]
except KeyError:
raise NotFound
def _process(self):
form = self.field_cls.create_config_form()
if form.validate_on_submit():
contrib_field = ContributionField()
field = self.field_cls(contrib_field)
field.update_object(form.data)
self.event.contribution_fields.append(contrib_field)
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
f'Added field: {contrib_field.title}', session.user)
return jsonify_data(flash=False)
return jsonify_template('events/contributions/forms/contribution_field_form.html', form=form)
class RHManageContributionFieldBase(RHManageContributionsBase):
"""Manage a custom contribution field of an event."""
normalize_url_spec = {
'locators': {
lambda self: self.contrib_field
}
}
def _process_args(self):
RHManageContributionsBase._process_args(self)
self.contrib_field = ContributionField.get_or_404(request.view_args['contrib_field_id'])
class RHEditContributionField(RHManageContributionFieldBase):
"""Dialog to edit a custom field."""
def _process(self):
field_class = get_contrib_field_types()[self.contrib_field.field_type]
form = field_class.create_config_form(obj=FormDefaults(self.contrib_field, **self.contrib_field.field_data))
if form.validate_on_submit():
old_title = self.contrib_field.title
self.contrib_field.mgmt_field.update_object(form.data)
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
f'Modified field: {old_title}', session.user)
return jsonify_data(flash=False)
return jsonify_template('events/contributions/forms/contribution_field_form.html', form=form)
class RHDeleteContributionField(RHManageContributionFieldBase):
"""Dialog to delete a custom contribution field."""
def _process(self):
db.session.delete(self.contrib_field)
db.session.flush()
self.event.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
f'Deleted field: {self.contrib_field.title}', session.user)
class RHManageDescriptionField(RHManageContributionsBase):
"""Manage the description field used by the abstracts."""
def _process(self):
description_settings = abstracts_settings.get(self.event, 'description_settings')
form = AbstractContentSettingsForm(obj=FormDefaults(description_settings))
if form.validate_on_submit():
abstracts_settings.set(self.event, 'description_settings', form.data)
return jsonify_data(flash=False)
return jsonify_form(form)
class RHCreateReferenceMixin:
"""
Common methods for RH classes creating a ContributionReference
or SubContributionReference.
"""
def _process_args(self):
self.reference_value = request.form['value']
reference_type_name = request.form['type']
self.reference_type = (ReferenceType.query
.filter(db.func.lower(ReferenceType.name) == reference_type_name.lower())
.one())
@staticmethod
def jsonify_reference(reference):
return jsonify(id=reference.id)
class RHCreateContributionReferenceREST(RHCreateReferenceMixin, RHManageContributionBase):
"""REST endpoint to add a reference to a Contribution."""
def _process_args(self):
RHManageContributionBase._process_args(self)
RHCreateReferenceMixin._process_args(self)
def _process_POST(self):
reference = ContributionReference(reference_type=self.reference_type, value=self.reference_value,
contribution=self.contrib)
db.session.flush()
return self.jsonify_reference(reference)
class RHCreateSubContributionReferenceREST(RHCreateReferenceMixin, RHManageSubContributionBase):
"""REST endpoint to add a reference to a SubContribution."""
def _process_args(self):
RHManageSubContributionBase._process_args(self)
RHCreateReferenceMixin._process_args(self)
def _process_POST(self):
reference = SubContributionReference(reference_type=self.reference_type, value=self.reference_value)
self.subcontrib.references.append(reference)
db.session.flush()
return self.jsonify_reference(reference)
class RHCloneContribution(RHManageContributionBase):
def _check_access(self):
# Just like creating contributions, cloning one requires full event management access
RHManageContributionsBase._check_access(self)
def _process(self):
ContributionCloner.clone_single_contribution(self.contrib)
return jsonify_data(**self.list_generator.render_list())
|
ThiefMaster/indico
|
indico/modules/events/contributions/controllers/management.py
|
Python
|
mit
| 34,546
|
try:
import json
except ImportError:
import simplejson as json
from . import TestCase
from server import ListHostsHandler
class TestListHostsHandler(TestCase):
def setUp(self):
"""
Create an instance each time for testing.
"""
self.instance = ListHostsHandler()
def test_call(self):
"""
Verify running ListHostsHandler returns proper information.
"""
environ = {}
buffer = {}
def start_response(code, headers):
buffer['code'] = code
buffer['headers'] = headers
result = self.instance.__call__(environ, start_response)
assert buffer['code'] == '200 OK'
assert buffer['headers'] == [("Content-Type", "application/json")]
assert type(result) == str
results = json.loads(result)
assert results == self.instance._conf['hosts']
|
RHInception/talook
|
test/test_listhostshandler.py
|
Python
|
mit
| 899
|
basedir = '/data/t3serv014/snarayan/deep/v_deepgen_4_akt_small/'
figsdir = '/home/snarayan/public_html/figs/deepgen/v4_akt/'
|
sidnarayanan/BAdNet
|
train/gen/akt/paths.py
|
Python
|
mit
| 125
|
#This bot was written by /u/GoldenSights for /u/FourMakesTwoUNLESS on behalf of /r/pkmntcgtrades. Uploaded to GitHub with permission.
import praw
import time
import datetime
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter Bot"
SUBREDDIT = ""
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
MAXPOSTS = 30
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
DELAY = 518400
#This is the time limit between a user's posts, IN SECONDS. 1h = 3600 || 12h = 43200 || 24h = 86400 || 144h = 518400
'''All done!'''
WAITS = str(WAIT)
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users(name TEXT, lastpost TEXT)')
print('Loaded Users')
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
print('Loaded Oldposts')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def getTime(bool):
timeNow = datetime.datetime.now(datetime.timezone.utc)
timeUnix = timeNow.timestamp()
if bool == False:
return timeNow
else:
return timeUnix
def scan():
print('Scanning ' + SUBREDDIT)
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_new(limit=MAXPOSTS)
for post in posts:
try:
pauthor = post.author.name
except Exception:
pauthor = '[deleted]'
pid = post.id
plink = post.short_link
ptime = post.created_utc
cur.execute('SELECT * FROM oldposts WHERE id=?', [pid])
if not cur.fetchone():
cur.execute('SELECT * FROM users WHERE name=?', [pauthor])
if not cur.fetchone():
print('Found new user: ' + pauthor)
cur.execute('INSERT INTO users VALUES(?, ?)', (pauthor, pid))
r.send_message(pauthor, 'Welcome','Dear ' + pauthor + ',\n\n This appears to be your first time here', captcha=None)
sql.commit()
print('\t' + pauthor + ' has been added to the database.')
time.sleep(5)
else:
cur.execute('SELECT * FROM users WHERE name=?', [pauthor])
fetch = cur.fetchone()
print('Found post by known user: ' + pauthor)
previousid = fetch[1]
previous = r.get_info(thing_id='t3_'+previousid)
previoustime = previous.created_utc
if ptime > previoustime:
curtime = getTime(True)
difference = curtime - previoustime
if difference >= DELAY:
print('\tPost complies with timelimit guidelines. Permitting')
cur.execute('DELETE FROM users WHERE name=?', [pauthor])
cur.execute('INSERT INTO users VALUES(?, ?)', (pauthor, pid))
sql.commit()
print('\t' + pauthor + "'s database info has been reset.")
else:
differences = '%.0f' % (DELAY - difference)
timestring = str(datetime.timedelta(seconds=float(differences)))
timestring = timestring.replace(':', ' hours, and ', 1)
timestring = timestring.replace(':', ' minutes.x', 1)
timestring = timestring.split('x')
timestring = timestring[0]
print('\tPost does not comply with timelimit guidelines. Author must wait ' + timestring)
print('\t' + pauthor + "'s database info remains unchanged")
response = post.add_comment('You are posting here too frequently, so your post has been deleted. You may post again in ' + str(timestring))
response.distinguish()
post.remove(spam=False)
time.sleep(5)
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
while True:
try:
scan()
except Exception as e:
print('An error has occurred:', e)
print('Running again in ' + WAITS + ' seconds.\n')
time.sleep(WAIT)
|
TacticalGoat/reddit
|
DelayBot/delaybot.py
|
Python
|
mit
| 3,987
|
'''
Pulse characterization
Created Fri May 12 2017
@author: cpkmanchee
'''
import numpy as np
import os.path
import inspect
from beamtools.constants import h,c,pi
from beamtools.common import normalize, gaussian, sech2, alias_dict
from beamtools.import_data_file import import_data_file as _import
from beamtools.import_data_file import objdict
from scipy.optimize import curve_fit
__all__ = ['spectrumFT', 'fit_ac', 'ac_x2t', 'sigma_fwhm']
class FitResult():
def __init__(self, ffunc, ftype, popt, pcov=0, indep_var='time'):
self.ffunc = ffunc
self.ftype = ftype
self.popt = popt
self.pcov = pcov
self.iv=indep_var
def subs(self,x):
return self.ffunc(x,*self.popt)
def get_args(self):
return inspect.getargspec(self.ffunc)
def spectrumFT(data,from_file = False, file_type='oo_spec', units_wl='nm', n_interp=0):
'''Compute transform limited pulse from spectrum.
data = wavelength vs. PSD (intensity) if from_file=False
= filename of spectrum file to be imported if from_file=True
Units assumed to be nm for wavelength.
If from_file is set True, data should be filename
Optional file_format, default is oceanoptics_spectrometer. Currently
can not change this (filetype handling for x/y).
n_interp = bit depth of frequency interpolation, n = 2**n_interp. 0 = auto
'''
if from_file:
if type(data) is str:
if not os.path.exists(data):
print('File does not exist')
return -1
imported_data = _import(data,file_type)
#insert testing for wavelength/intensity location in dataobject
wavelength = imported_data.wavelength
intensity = imported_data.intensity
#get units from dataobject
else:
print('invalid filetype')
return -1
else:
wavelength = data[0]
intensity = data[1]
imported_data = data
if n_interp == 0:
#insert here later - round up to nearest power of two.
n = 2**12
else:
n = 2**n_interp
#use units to convert wavelength to SI
wl = wavelength*1E-9
psd = normalize(intensity)
nu = c/wl #nu is SI
#interpolate psd, linear freq spacing
nui = np.linspace(min(nu),max(nu),n)
df = (max(nu)-min(nu))/(n-1)
psdi = normalize(np.interp(nui,np.flipud(nu),np.flipud(psd)))
#i = (np.abs(nui-nu0)).argmin() #centre freq index
#perform FT-1, remove centre spike
t = np.fft.ifftshift(np.fft.fftfreq(n,df)[1:-1])
ac =np.fft.ifftshift((np.fft.ifft(np.fft.ifftshift(psdi)))[1:-1])
output_dict = {'time': t, 'ac': ac, 'nu': nui, 'psd': psdi}
output = objdict(output_dict)
return output, imported_data
def ac_x2t(position,aoi=15,config='sym'):
'''Convert autocorrelation position to time
Symmetric - stage moves perp to normal.
Asymmetric - stage moves along incoming optical axis
'''
if type(config) is not str:
print('Unrecognized configuration. Must be symmetric or asymmetric.')
return position
if config.lower() in alias_dict['symmetric']:
time = (1/c)*position*2*np.cos(aoi*pi/180)
elif config.lower() in alias_dict['asymmetric']:
time = (1/c)*position*(1+np.cos(2*aoi*pi/180))
else:
print('Unrecognized configuration. Must be symmetric or asymmetric.')
return position
return time
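# Quick sanity check (symmetric configuration, aoi=15 deg): a 1 mm stage move gives
#   ac_x2t(1e-3) = (1/c) * 1e-3 * 2*cos(15*pi/180) ~ 6.4e-12 s, i.e. ~6.4 ps of delay.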
def fit_ac(data, from_file = False, file_type='bt_ac', form='all', bgform = 'constant'):
'''Fit autocorrelation peak.
data must be either:
1. 2 x n array - data[0] = time(delay), data[1] = intensity
2. datafile name --> from_file must be True
If there is no 'delay' parameter in data file (only position), the position is
auto converted to time delay.
'''
if from_file:
if type(data) is str:
if not os.path.exists(data):
print('File does not exist')
return -1
imported_data = _import(data,file_type)
#insert testing for power location in dataobject
position = imported_data.position
intensity = imported_data.power
if 'delay' in imported_data.__dict__:
delay = imported_data.delay
else:
delay = ac_x2t(position,aoi=15,config='sym')
#get units from dataobject
else:
print('invalid filetype')
return -1
else:
imported_data = data
delay = data[0]
intensity = data[1]
x = delay
y = intensity
bgpar, bgform = _background(x,y,form = bgform)
mean = np.average(x,weights = y)
stdv = np.sqrt(np.average((x-mean)**2 ,weights = y))
#set fitting function (including background)
if bgform is None:
def fitfuncGaus(x,sigma,a,x0):
return gaussian(x,sigma,a,x0)
def fitfuncSech2(x,sigma,a,x0):
return sech2(x,sigma,a,x0)
if bgform.lower() in alias_dict['constant']:
def fitfuncGaus(x,sigma,a,x0,p0):
return gaussian(x,sigma,a,x0) + p0
def fitfuncSech2(x,sigma,a,x0,p0):
return sech2(x,sigma,a,x0) + p0
elif bgform.lower() in alias_dict['linear']:
def fitfuncGaus(x,sigma,a,x0,p0,p1):
return gaussian(x,sigma,a,x0) + p1*x + p0
def fitfuncSech2(x,sigma,a,x0,p0,p1):
return sech2(x,sigma,a,x0) + p1*x + p0
elif bgform.lower() in alias_dict['quadratic']:
def fitfuncGaus(x,sigma,a,x0,p0,p1,p2):
return gaussian(x,sigma,a,x0) + p2*x**2 + p1*x + p0
def fitfuncSech2(x,sigma,a,x0,p0,p1,p2):
return sech2(x,sigma,a,x0) + p2*x**2 + p1*x + p0
else:
def fitfuncGaus(x,sigma,a,x0):
return gaussian(x,sigma,a,x0)
def fitfuncSech2(x,sigma,a,x0):
return sech2(x,sigma,a,x0)
nFitArgs = len(inspect.getargspec(fitfuncGaus).args) - 1
#sets which functions are to be fit... this can be streamlined i think
if form.lower() in ['both', 'all']:
fitGaus = True
fitSech2 = True
elif form.lower() in alias_dict['gaus']:
fitGaus = True
fitSech2 = False
elif form.lower() in alias_dict['sech2']:
fitGaus = False
fitSech2 = True
else:
print('Unknown fit form: '+form[0])
fitGaus = False
fitSech2 = False
#start fitting
popt=[]
pcov=[]
fit_results=[]
if type(bgpar) is np.float64:
p0=[stdv,max(y)-min(y),mean,bgpar]
elif type(bgpar) is np.ndarray:
p0=[stdv,max(y)-min(y),mean]+bgpar.tolist()
else:
p0=None
if fitGaus:
try:
poptGaus,pcovGaus = curve_fit(fitfuncGaus,x,y,p0)
except RuntimeError:
poptGaus = np.zeros(nFitArgs)
pcovGaus = np.zeros((nFitArgs,nFitArgs))
popt.append(poptGaus)
pcov.append(pcovGaus)
fit_results.append(FitResult(ffunc=fitfuncGaus, ftype='gaussian',
popt=poptGaus, pcov=pcovGaus))
if fitSech2:
try:
poptSech2,pcovSech2 = curve_fit(fitfuncSech2,x,y,p0)
except RuntimeError:
poptSech2 = np.zeros(nFitArgs)
pcovSech2 = np.zeros((nFitArgs,nFitArgs))
popt.append(poptSech2)
pcov.append(pcovSech2)
fit_results.append(FitResult(ffunc=fitfuncSech2, ftype='sech2',
popt=poptSech2, pcov=pcovSech2))
return fit_results, imported_data
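# Usage sketch (illustrative): fit a measured trace held as a 2 x n array and
# convert the fitted width to a FWHM. This assumes FitResult exposes the optimal
# parameters as `popt`, with sigma as the first entry, matching the fit
# functions defined above.
#
#     fits, raw = fit_ac(np.vstack((delay, intensity)), form='gaus', bgform='constant')
#     fwhm = sigma_fwhm(fits[0].popt[0], shape='gaus')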
def sigma_fwhm(sigma, shape='gaus'):
'''Convert sigma to full-width half-max
'''
if shape.lower() in alias_dict['gaus']:
A = 2*np.sqrt(2*np.log(2))
elif shape.lower() in alias_dict['sech2']:
A = 2*np.arccosh(np.sqrt(2))
else:
A = 1
return A*sigma
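# Quick numeric check of the factors used above: 2*sqrt(2*ln(2)) ~ 2.355 for a
# Gaussian and 2*arccosh(sqrt(2)) ~ 1.763 for a sech^2 pulse, so a 100 fs sigma
# maps to ~235.5 fs and ~176.3 fs FWHM respectively. 'gaus' and 'sech2' are
# assumed to be recognised aliases in alias_dict.
def _demo_sigma_fwhm(sigma=100e-15):
    '''Illustrative helper only.'''
    return sigma_fwhm(sigma, shape='gaus'), sigma_fwhm(sigma, shape='sech2')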
def _background(x,y,form = 'constant'):
'''Provides starting values for background parameters.
Takes x,y data and the desired background form (default to constant)
returns p, the polynomial coefficients. p is variable in length.
'''
if form is None:
        p = np.array([])
    elif form.lower() in ['const','constant']:
p = min(y)
#p = np.hstack((p,[0,0]))
elif form.lower() in ['lin','linear']:
p = np.linalg.solve([[1,x[0]],[1,x[-1]]], [y[0],y[-1]])
#p = np.hstack((p,0))
elif form.lower() in ['quad','quadratic']:
index = np.argmin(y)
if index == 0:
x3 = 2*x[0]-x[-1]
y3 = y[-1]
elif index == len(y)-1:
x3 = 2*x[-1]-x[0]
y3 = y[0]
else:
x3 = x[index]
y3 = y[index]
a = [[1,x[0],x[0]**2],[1,x[-1],x[-1]**2],[1,x3,x3**2]]
b = [y[0],y[-1],y3]
p = np.linalg.solve(a,b)
else:
print('Unknown background form')
        p = np.array([])
return p, form
|
kikimaroca/beamtools
|
build/lib/beamtools/pulse.py
|
Python
|
mit
| 9,048
|
#!/usr/bin/env python -tt -Wall
def prime_sieve(upper):
marked = [False] * (upper-2)
def next_prime():
for i,v in enumerate(marked):
if not v:
yield i+2
next_prime_gen = next_prime()
for p in next_prime_gen:
for n in xrange(2*p - 2, len(marked), p):
marked[n] = True
yield p
def main():
print(sum(prime_sieve(2000000)))
if __name__ == '__main__':
main()
|
cveazey/ProjectEuler
|
10/e10.py
|
Python
|
mit
| 380
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
location: str,
*,
shared_to: Optional[Union[str, "_models.SharedToValues"]] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"location": _SERIALIZER.url("location", location, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if shared_to is not None:
query_parameters['sharedTo'] = _SERIALIZER.query("shared_to", shared_to, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
location: str,
gallery_unique_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"location": _SERIALIZER.url("location", location, 'str'),
"galleryUniqueName": _SERIALIZER.url("gallery_unique_name", gallery_unique_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class SharedGalleriesOperations(object):
"""SharedGalleriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
location: str,
shared_to: Optional[Union[str, "_models.SharedToValues"]] = None,
**kwargs: Any
) -> Iterable["_models.SharedGalleryList"]:
"""List shared galleries by subscription id or tenant id.
:param location: Resource location.
:type location: str
:param shared_to: The query parameter to decide what shared galleries to fetch when doing
listing operations.
:type shared_to: str or ~azure.mgmt.compute.v2020_09_30.models.SharedToValues
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedGalleryList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_09_30.models.SharedGalleryList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedGalleryList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
shared_to=shared_to,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
shared_to=shared_to,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SharedGalleryList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries'} # type: ignore
@distributed_trace
def get(
self,
location: str,
gallery_unique_name: str,
**kwargs: Any
) -> "_models.SharedGallery":
"""Get a shared gallery by subscription id or tenant id.
:param location: Resource location.
:type location: str
:param gallery_unique_name: The unique name of the Shared Gallery.
:type gallery_unique_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedGallery, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.SharedGallery
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedGallery"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
location=location,
gallery_unique_name=gallery_unique_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedGallery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/operations/_shared_galleries_operations.py
|
Python
|
mit
| 9,946
|
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# these are system modules
import numpy
import sys
# these are my local ones
from env import gidgetConfigVars
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def cleanUpName(aName):
bName = ''
aName = aName.upper()
## ii = aName.find(" - Homo sapiens (human)")
ii = aName.find(" - HOMO SAPIENS (HUMAN)")
if (ii >= 0):
aName = aName[:ii]
aName = aName.strip()
ii = aName.find("(")
while (ii >= 0):
jj = aName.find(")", ii)
aName = aName[:ii] + aName[jj + 1:]
ii = aName.find("(")
aName = aName.strip()
ii = aName.find("<")
while (ii >= 0):
jj = aName.find(">", ii)
aName = aName[:ii] + aName[jj + 1:]
ii = aName.find("<")
aName = aName.strip()
for ii in range(len(aName)):
if (aName[ii] == ','):
continue
elif (aName[ii] == '('):
bName += '_'
elif (aName[ii] == ')'):
bName += '_'
elif (aName[ii] == '-'):
bName += '_'
elif (aName[ii] == '/'):
bName += '_'
elif (aName[ii] == ';'):
bName += '_'
elif (aName[ii] == '&'):
continue
elif (aName[ii] == '#'):
continue
elif (aName[ii] == ' '):
bName += '_'
else:
bName += aName[ii].upper()
ii = bName.find("__")
while (ii >= 0):
print " ", ii, bName
bName = bName[:ii] + bName[ii + 1:]
print " ", bName
ii = bName.find("__")
return (bName)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def readPathways():
fh = file(
gidgetConfigVars['TCGAFMP_BIOINFORMATICS_REFERENCES'] + "/nci_pid/only_NCI_Nature_ver4.tab", 'r')
pwDict = {}
for aLine in fh:
aLine = aLine.strip()
aLine = aLine.upper()
tokenList = aLine.split('\t')
if (len(tokenList) != 3):
continue
if (tokenList[0] == "pathway"):
continue
longPathwayName = tokenList[0]
shortPathwayName = tokenList[1]
geneTokens = tokenList[2].strip()
geneList = geneTokens.split(',')
geneList.sort()
if (len(geneList) > 0):
while (geneList[0] == ''):
geneList = geneList[1:]
if (len(geneList) == 0):
continue
if (len(geneList) == 0):
continue
pathwayName = cleanUpName(shortPathwayName)
if (pathwayName not in pwDict.keys()):
# print " adding pathway %s (%d) " % ( pathwayName, len(geneList) )
pwDict[pathwayName] = geneList
else:
if (len(pwDict[pathwayName]) < len(geneList)):
# print " substituting shorter list of genes for %s (%d) " % (
# pathwayName, len(geneList) )
pwDict[pathwayName] = geneList
# else:
# print " NOT substituing list for %s " % pathwayName
fh.close()
print " "
print " have pathway dictionary with %d pathways " % len(pwDict)
print " --> now looking for duplicate pathways ... "
pwList = pwDict.keys()
pwList.sort()
delList = []
pairDict = {}
for ii in range(len(pwList) - 1):
iiName = pwList[ii]
iiLen = len(pwDict[iiName])
for jj in range(ii + 1, len(pwList)):
jjName = pwList[jj]
jjLen = len(pwDict[jjName])
if (jjLen != iiLen):
continue
if (pwDict[iiName] == pwDict[jjName]):
print "\n\n SAME !!! "
print iiName, iiLen
print pwDict[iiName]
print jjName, jjLen
print pwDict[jjName]
iiSplit = iiName.split('__')
jjSplit = jjName.split('__')
if (iiSplit[1] <= jjSplit[1]):
pairNames = (iiSplit[1], jjSplit[1])
else:
pairNames = (jjSplit[1], iiSplit[1])
if (pairNames in pairDict.keys()):
pairDict[pairNames] += 1
else:
pairDict[pairNames] = 1
if (iiSplit[1] == jjSplit[1]):
if (len(iiName) <= len(jjName)):
delList += [jjName]
else:
delList += [iiName]
else:
if (iiSplit[1] == "NCI-NATURE"):
delList += [jjName]
elif (jjSplit[1] == "NCI-NATURE"):
delList += [iiName]
elif (iiSplit[1] == "PID"):
delList += [jjName]
elif (jjSplit[1] == "PID"):
delList += [iiName]
elif (iiSplit[1] == "KEGG"):
delList += [jjName]
elif (jjSplit[1] == "KEGG"):
delList += [iiName]
elif (iiSplit[1] == "PWCOMMONS"):
delList += [jjName]
elif (jjSplit[1] == "PWCOMMONS"):
delList += [iiName]
elif (iiSplit[1] == "REACTOME"):
delList += [jjName]
elif (jjSplit[1] == "REACTOME"):
delList += [iiName]
elif (iiSplit[1] == "WIKIPATHWAYS"):
delList += [jjName]
elif (jjSplit[1] == "WIKIPATHWAYS"):
delList += [iiName]
elif (iiSplit[1] == "WIKIPW"):
delList += [jjName]
elif (jjSplit[1] == "WIKIPW"):
delList += [iiName]
elif (iiSplit[1] == "SMPDB"):
delList += [jjName]
elif (jjSplit[1] == "SMPDB"):
delList += [iiName]
elif (iiSplit[1] == "HUMANCYC"):
delList += [jjName]
elif (jjSplit[1] == "HUMANCYC"):
delList += [iiName]
else:
sys.exit(-1)
for aName in delList:
try:
del pwDict[aName]
except:
            pass
print " "
print " returning pathway dictionary with %d pathways " % len(pwDict)
print " "
for aKey in pairDict.keys():
print aKey, pairDict[aKey]
print " "
print " "
return (pwDict)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def setFeatBits(rowLabels, featPrefix, doesContainList, notContainList):
numSet = 0
numRow = len(rowLabels)
bitVec = numpy.zeros(numRow, dtype=numpy.bool)
for iR in range(numRow):
if (featPrefix != ""):
if (not rowLabels[iR].startswith(featPrefix)): continue
if (len(doesContainList) > 0):
skipFlag = 1
for aStr in doesContainList:
if (rowLabels[iR].find(aStr) >= 0): skipFlag = 0
if (len(notContainList) > 0):
skipFlag = 0
for aStr in notContainList:
if (rowLabels[iR].find(aStr) >= 0): skipFlag = 1
if (skipFlag): continue
## set bit if we get here ...
bitVec[iR] = 1
numSet += 1
print featPrefix, doesContainList, notContainList, numRow, numSet
if (numSet == 0):
print " numSet=0 ... this is probably a problem ... "
# sys.exit(-1)
return (bitVec)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# B:GNAB:ADAM7:chr8:24298509:24384483:+:y_n_somatic y_n y_del
# --> B:GNAB:ADAM7:chr8:24298509:24384483:+:y_del_somatic
def makeNewFeatureName(curFeatName, oldStringList, newStringList):
for jj in range(len(oldStringList)):
oldStr = oldStringList[jj]
newStr = newStringList[jj]
i1 = curFeatName.find(oldStr)
if ( i1 >= 0 ):
i2 = i1 + len(oldStr)
newFeatName = curFeatName[:i1] + newStr + curFeatName[i2:]
return ( newFeatName )
print " ERROR in makeNewFeatureName ???? ", curFeatName, oldStringList, newStringList
sys.exit(-1)
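# Illustration of the substitution performed above (matches the example in the
# comment preceding this function):
#     makeNewFeatureName("B:GNAB:ADAM7:chr8:24298509:24384483:+:y_n_somatic",
#                        [":y_n"], [":y_del"])
#     --> "B:GNAB:ADAM7:chr8:24298509:24384483:+:y_del_somatic"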
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def chooseCountThreshold(dataD):
rowLabels = dataD['rowLabels']
dMat = dataD['dataMatrix']
numBits = 0
for ii in range(len(rowLabels)):
if (numBits > 0):
continue
if (rowLabels[ii].find("B:GNAB:TP53:") >= 0):
for jj in range(len(dMat[ii])):
if (dMat[ii][jj] == 0):
numBits += 1
elif (dMat[ii][jj] == 1):
numBits += 1
print " number of bits found for TP53 mutation feature: ", numBits
countThreshold = int(numBits / 11) - 1
return (countThreshold)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def findFeature ( rowLabels, s1, s2 ):
for iR in range(len(rowLabels)):
if ( rowLabels[iR].find(s1) >= 0 ):
if ( rowLabels[iR].find(s2) >= 0 ):
return ( iR )
return ( -1 )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def pathwayGnab(dataD, pathways={}):
print " "
print " ************************************************************* "
print " ************************************************************* "
print " "
print " in pathwayGnab ... "
# check that the input feature matrix looks ok ...
try:
numRow = len(dataD['rowLabels'])
numCol = len(dataD['colLabels'])
rowLabels = dataD['rowLabels']
print " %d rows x %d columns " % (numRow, numCol)
# print rowLabels[:5]
# print rowLabels[-5:]
except:
print " ERROR in pathwayGnab ??? bad data ??? "
return (dataD)
if (len(pathways) == 0):
print " "
print " WARNING: no pathway information found ... using a few hard-coded pathways for now "
print " "
pathways = {}
pathways[
"TP53_pathway"] = ["E2F1", "TP53", "RB1", "CDK4", "TIMP3", "CDK2", "ATM",
"CCNE1", "CCND1", "CDKN1A", "BCL2", "BAX", "PCNA", "MDM2",
"APAF1", "GADD45A"]
pathways[
"PI3K_AKT_pathway"] = ["FRAP1", "LST8", "PDPK1", "NGF", "NR4A1", "FOXO1", "CHUK",
"THEM4", "PTEN", "CREB1", "BAD", "RHOA", "TRIB3", "PHLPP",
"CASP9", "AKT1S1", "MDM2", "RPS6KB2"]
pathways[
"Wnt_pathway"] = ["PPP2R5B", "PPP2R5A", "PPP2R5D", "BTRC", "WNT3A",
"PPP2R5C", "MMP7", "PRKX", "CTNNB1", "WNT2", "CSNK2A2", "MAP3K7", "PRKACG",
"WNT1", "WNT4", "WNT3", "CSNK2A1", "PRKACA", "PRKACB", "WNT6", "CUL1",
"WNT10A", "WNT10B", "VANGL1", "ROCK1", "ROCK2", "VANGL2", "CHP2", "SKP1",
"EP300", "JUN", "MAPK9", "PPP2R5E", "MAPK8", "LOC728622", "WNT5A", "WNT5B",
"CXXC4", "DAAM1", "DAAM2", "RBX1", "RAC2", "RAC3", "RAC1", "CACYBP",
"AXIN2", "AXIN1", "DVL2", "DVL3", "TCF7", "CREBBP", "SMAD4", "SMAD3",
"SMAD2", "PORCN", "DVL1", "SFRP5", "SFRP1", "PRICKLE1", "SFRP2", "SFRP4",
"PRICKLE2", "WIF1", "PPARD", "PLCB3", "PLCB4", "FRAT1", "RHOA", "FRAT2",
"SOX17", "PLCB1", "FOSL1", "MYC", "PLCB2", "PPP2R1B", "PRKCA", "PPP2R1A",
"TBL1XR1", "CTBP1", "CTBP2", "TP53", "LEF1", "PRKCG", "PRKCB", "CTNNBIP1",
"SENP2", "CCND1", "PSEN1", "CCND3", "CCND2", "WNT9B", "WNT11", "SIAH1",
"RUVBL1", "WNT9A", "CER1", "NKD1", "WNT16", "NKD2", "APC2", "CAMK2G",
"PPP3R1", "PPP3R2", "TCF7L2", "TCF7L1", "CHD8", "PPP2CA", "PPP2CB",
"PPP3CB", "NFAT5", "CAMK2D", "PPP3CC", "NFATC4", "CAMK2B", "CHP",
"PPP3CA", "NFATC2", "NFATC3", "FBXW11", "CAMK2A", "WNT8A", "WNT8B",
"APC", "NFATC1", "CSNK1A1", "FZD9", "FZD8", "NLK", "FZD1", "CSNK2B",
"CSNK1A1L", "FZD3", "FZD2", "MAPK10", "FZD5", "FZD4", "FZD7", "DKK4",
"WNT2B", "FZD6", "DKK2", "FZD10", "WNT7B", "DKK1", "CSNK1E", "GSK3B",
"LRP6", "TBL1X", "WNT7A", "LRP5", "TBL1Y"]
print " "
print " total number of pathways : ", len(pathways)
print " "
mutationTypes = [":y_n_somatic", ":code_potential_somatic",
":missense_somatic",
":y_del_somatic", ":y_amp_somatic"]
numTypes = len(mutationTypes)
pathwayList = pathways.keys()
pathwayList.sort()
numPW = len(pathways)
newNameVec = [0] * (numPW * numTypes)
newDataMat = [0] * (numPW * numTypes)
dMat = dataD['dataMatrix']
min_numON = chooseCountThreshold(dataD)
if (min_numON < (numCol / 100)):
min_numON = int(numCol / 100)
print " minimum count threshold : ", min_numON
kFeat = 0
max_numON = 0
max_fracON = 0.
## outer loop is over pathways ...
for aPathway in pathwayList:
print " "
print " outer loop over pathways ... ", aPathway
## next loop is over mutation types
for aMutType in mutationTypes:
numON = 0
newFeatName = "B:GNAB:" + aPathway + "::::" + aMutType
print " new feature name : ", newFeatName
# first make sure we don't already have a feature with this name
stopNow = 0
for iRow in range(numRow):
if (newFeatName == rowLabels[iRow]):
print " STOPPING ... this feature already exists ??? ", newFeatName
stopNow = 1
if (stopNow): continue
print " tentative new feature #%d ... <%s> " % (kFeat, newFeatName)
newNameVec[kFeat] = newFeatName
newDataMat[kFeat] = numpy.zeros(numCol)
if (0):
print " "
print " "
print aPathway, newFeatName
print len(pathways[aPathway]), pathways[aPathway]
## and now we can loop over the genes in the pathway
for gnabGene in pathways[aPathway]:
print " looking for pathway gene ", gnabGene
## and look for the desired feature
iR = findFeature ( rowLabels, "B:GNAB:"+gnabGene+":", aMutType )
## if we don't find anything, and we are working on y_del or y_amp
## then we can use y_n instead
if ( iR < 0 ):
print " --> failed to find desired feature ", gnabGene, aMutType
if ( (aMutType==":y_del_somatic") or (aMutType==":y_amp_somatic") ):
iR = findFeature ( rowLabels, "B:GNAB:"+gnabGene+":", ":y_n_somatic" )
if ( iR >= 0 ):
print " --> will use this feature instead ", iR, rowLabels[iR]
else:
print " --> failed to find even a backup feature "
else:
print " --> FOUND desired feature ", gnabGene, aMutType, iR, rowLabels[iR]
if ( iR < 0 ): continue
gnabLabel = rowLabels[iR]
for iCol in range(numCol):
if (dMat[iR][iCol] == 1):
print " %d using mutation bit from gene %s, column %d (%s) [%d] " % \
(newDataMat[kFeat][iCol], gnabGene, iCol, gnabLabel, numON)
if (newDataMat[kFeat][iCol] == 0):
numON += 1
newDataMat[kFeat][iCol] = 1
if (numON > min_numON):
kFeat += 1
print " --> keeping this feature ... ", kFeat, newFeatName, numON, min_numON
# keep track of which pathways are the MOST mutated ...
if (max_numON <= numON):
max_numON = numON
max_pathway = newFeatName
print " MOST mutated so far (1) ... ", max_pathway, max_numON, len(pathways[aPathway])
# note that this is not the fraction of the genes in the pathway that are
# mutated, but just a count normalized by the # of genes in the
# pathway
numGenes = len(pathways[aPathway])
fracON = float(numON) / float(len(pathways[aPathway]))
if (numGenes >= 10):
if (max_fracON <= fracON):
max_fracON = fracON
max_pathway2 = newFeatName
print " MOST mutated so far (2) ... ", max_pathway2, max_fracON, len(pathways[aPathway])
else:
print " --> NOT keeping this feature ... ", newFeatName, numON, min_numON
numNewFeat = kFeat
print " "
print " --> number of new features : ", numNewFeat
print len(newDataMat), len(newDataMat[0])
# now we need to append these new features to the input data matrix
newRowLabels = [0] * (numRow + numNewFeat)
newMatrix = [0] * (numRow + numNewFeat)
for iR in range(numRow):
newRowLabels[iR] = rowLabels[iR]
newMatrix[iR] = dMat[iR]
for iR in range(numNewFeat):
newRowLabels[iR + numRow] = newNameVec[iR]
newMatrix[iR + numRow] = newDataMat[iR]
dataD['rowLabels'] = newRowLabels
dataD['dataMatrix'] = newMatrix
print " "
print " --> finished with pathwayGnab ... "
print " "
return (dataD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def driverGnab(dataD, driverList):
print " "
print " ************************************************************* "
print " ************************************************************* "
print " "
print " in driverGnab ... "
# check that the input feature matrix looks ok ...
try:
numRow = len(dataD['rowLabels'])
numCol = len(dataD['colLabels'])
rowLabels = dataD['rowLabels']
print " %d rows x %d columns " % (numRow, numCol)
# print rowLabels[:5]
# print rowLabels[-5:]
except:
print " ERROR in driverGnab ??? bad data ??? "
return (dataD)
mutationTypes = [":y_n_somatic", ":code_potential_somatic",
":missense_somatic",
":y_del_somatic", ":y_amp_somatic"]
numTypes = len(mutationTypes)
numK = 1
newNameVec = [0] * (numK * numTypes)
newDataMat = [0] * (numK * numTypes)
dMat = dataD['dataMatrix']
kFeat = 0
if (1):
for aMutType in mutationTypes:
newFeatName = "B:GNAB:driverMut" + "::::" + aMutType
# first make sure we don't already have a feature with this name
# ...
stopNow = 0
for iRow in range(numRow):
if (newFeatName == rowLabels[iRow]):
stopNow = 1
if (stopNow):
continue
print " tentative new feature #%d ... <%s> " % (kFeat, newFeatName)
newNameVec[kFeat] = newFeatName
newDataMat[kFeat] = numpy.zeros(numCol)
for iR in range(numRow):
if (iR % 1000 == 0):
print iR, numRow
if (1):
gnabLabel = rowLabels[iR]
if (not gnabLabel.startswith("B:GNAB:")):
continue
if (gnabLabel.find(aMutType) < 0):
continue
try:
gnabTokens = gnabLabel.split(':')
gnabGene = gnabTokens[2].upper()
except:
print " FAILED to parse GNAB feature name ??? ", gnabLabel
continue
print " considering ", iR, gnabTokens, gnabGene
if (gnabGene in driverList):
for iCol in range(numCol):
if (dMat[iR][iCol] == 1):
print " yes! setting bit at ", kFeat, iCol
newDataMat[kFeat][iCol] = 1
if (1):
print " --> keeping this feature ... ", kFeat, newFeatName
kFeat += 1
numNewFeat = kFeat
print " "
print " --> number of new features : ", numNewFeat
print len(newDataMat), len(newDataMat[0])
# now we need to append these new features to the input data matrix
newRowLabels = [0] * (numRow + numNewFeat)
newMatrix = [0] * (numRow + numNewFeat)
for iR in range(numRow):
newRowLabels[iR] = rowLabels[iR]
newMatrix[iR] = dMat[iR]
for iR in range(numNewFeat):
newRowLabels[iR + numRow] = newNameVec[iR]
newMatrix[iR + numRow] = newDataMat[iR]
dataD['rowLabels'] = newRowLabels
dataD['dataMatrix'] = newMatrix
print " "
print " --> finished with driverGnab ... "
print " "
return (dataD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def combineGnabCnvr(dataD):
print " "
print " ************************************************************* "
print " ************************************************************* "
print " "
print " in combineGnabCnvr ... "
# check that the input feature matrix looks ok ...
try:
numRow = len(dataD['rowLabels'])
numCol = len(dataD['colLabels'])
rowLabels = dataD['rowLabels']
colLabels = dataD['colLabels']
print " %d rows x %d columns " % (numRow, numCol)
# print rowLabels[:5]
# print rowLabels[-5:]
except:
print " ERROR in combineGnabCnvr ??? bad data ??? "
return (dataD)
# next, we need to find all of the GNAB features and all of the CNVR
# features
print " --> assigning gnab / cnvr flags ... "
gnabFeatIncSubstrings = [ ":y_n", ":code_potential" ]
gnabFeatAmpSubstrings = [ ":y_amp", ":cp_amp" ]
gnabFeatDelSubstrings = [ ":y_del", ":cp_del" ]
cnvrFeatExcSubstrings = [ "Gistic" ]
isGnab = setFeatBits(rowLabels, "B:GNAB:", gnabFeatIncSubstrings, [])
isCnvr = setFeatBits(rowLabels, "N:CNVR:", [], cnvrFeatExcSubstrings)
print len(isGnab), max(isGnab)
print len(isCnvr), max(isCnvr)
    if ((not max(isGnab)) or (not max(isCnvr))):
print " missing either GNAB or CNVR features ... "
return (dataD)
# now we need to map each of the GNAB features to one or more CNVR features
mapVec = [0] * numRow
for iR in range(numRow):
if (iR % 1000 == 0):
print iR, numRow
if (isGnab[iR]):
mapVec[iR] = []
gnabLabel = rowLabels[iR]
try:
gnabTokens = gnabLabel.split(':')
gnabChrName = gnabTokens[3].upper()
gnabStart = int(gnabTokens[4])
gnabStop = int(gnabTokens[5])
except:
print " FAILED to parse GNAB feature name ??? ", gnabLabel
continue
# avoid X and Y chromosome genes ...
if (gnabChrName.endswith("X")):
continue
if (gnabChrName.endswith("Y")):
continue
for jR in range(numRow):
if (isCnvr[jR]):
cnvrLabel = rowLabels[jR]
cnvrTokens = cnvrLabel.split(':')
cnvrChrName = cnvrTokens[3].upper()
if (gnabChrName != cnvrChrName):
continue
# print " comparing ... ", gnabLabel, cnvrLabel
cnvrStart = int(cnvrTokens[4])
if (gnabStop < cnvrStart):
continue
cnvrStop = int(cnvrTokens[5])
if (gnabStart > cnvrStop):
continue
mapVec[iR] += [jR]
# print " found match! ", gnabLabel, cnvrLabel, iR, jR,
# mapVec[iR]
if (0):
if (len(mapVec[iR]) > 5):
print iR, gnabLabel, len(mapVec[iR])
for kR in mapVec[iR]:
print " ", kR, rowLabels[kR]
# sys.exit(-1)
# now we need to actually loop over the data ...
dMat = dataD['dataMatrix']
# -------------------------------------------------------------------------
if (0):
# FIRST we want to check for any "adjacent normal" samples and set those to 0 ...
# --> ACTUALLY, deciding NOT to do this anymore ( 31 oct 2012 ) NEW CHANGE
numGNABfeat = 0
numCNVRfeat = 0
for iRow in range(numRow):
curFeature = rowLabels[iRow]
if (curFeature.startswith("B:GNAB:")):
numGNABfeat += 1
elif (curFeature.startswith("N:CNVR:")):
numCNVRfeat += 1
print " number of GNAB features : %d " % (numGNABfeat)
print " number of CNVR features : %d " % (numCNVRfeat)
print " "
numGNABset = 0
numCNVRset = 0
numGNABfeat = 0
numCNVRfeat = 0
numNormalCol = 0
for iCol in range(numCol):
curLabel = colLabels[iCol]
if (curLabel.startswith("TCGA-")):
if (len(curLabel) >= 15):
sampleType = curLabel[13:15]
if (sampleType == '11'):
numNormalCol += 1
# print iCol, curLabel
for iRow in range(numRow):
curFeature = rowLabels[iRow]
if (curFeature.startswith("B:GNAB:")):
# print curFeature, dMat[iRow][iCol]
if (dMat[iRow][iCol] == "NA" or dMat[iRow][iCol] == NA_VALUE):
dMat[iRow][iCol] = 0
numGNABset += 1
elif (curFeature.startswith("N:CNVR:")):
if (curFeature.find(":chrX:") > 0):
continue
if (curFeature.find(":chrY:") > 0):
continue
# print curFeature, dMat[iRow][iCol]
if (dMat[iRow][iCol] == "NA" or dMat[iRow][iCol] == NA_VALUE):
dMat[iRow][iCol] = 0.
numCNVRset += 1
# -------------------------------------------------------------------------
## cnvrThreshold = 2.
## cnvrThreshold = 1.
cnvrAmpThresh = 0.30
cnvrDelThresh = -0.46
print " --> now checking for deletions and amplifications ... ", cnvrAmpThresh, cnvrDelThresh
print " and creating new y_del and y_amp features "
numNewFeat = 0
newNameVec = []
newDataMat = []
for iR in range(numRow):
if (iR % 1000 == 0):
print iR, numRow
if (isGnab[iR]):
print " "
print " having a look at this feature: "
print iR, rowLabels[iR], len(mapVec[iR])
print mapVec[iR]
# how often is this gene mutated?
numYes = 0
numDel = 0
numAmp = 0
numYesDel = 0
numYesAmp = 0
maxCN = -999.
minCN = 999.
for iCol in range(numCol):
mutFlag = 0
ampFlag = 0
delFlag = 0
for jR in mapVec[iR]:
if (dMat[iR][iCol] == 1):
mutFlag = 1
if (dMat[jR][iCol] == NA_VALUE):
continue
if (dMat[jR][iCol] > maxCN):
maxCN = dMat[jR][iCol]
if (dMat[jR][iCol] < minCN):
minCN = dMat[jR][iCol]
if (dMat[jR][iCol] < cnvrDelThresh):
delFlag = 1
if (dMat[jR][iCol] > cnvrAmpThresh):
ampFlag = 1
numYes += mutFlag
numDel += delFlag
numAmp += ampFlag
if (mutFlag or delFlag): numYesDel += 1
if (mutFlag or ampFlag): numYesAmp += 1
addDelFeat = 0
addAmpFeat = 0
fThresh = 0.025
if (numYes + numAmp + numDel > 0):
print " --> %3d mutations (%3d mut or del, %3d mut or amp) " % \
( numYes, numYesDel, numYesAmp )
print " %3d deletions " % numDel, minCN
print " %3d amplifications " % numAmp, maxCN
if (numYesDel > 0):
delFrac1 = float(numYesDel-numYes)/float(numCol)
delFrac2 = float(numYesDel-numDel)/float(numCol)
delFrac3 = 0
                if ( numYes > 0 ): delFrac3 += float(numYesDel)/float(numYes)
                if ( numDel > 0 ): delFrac3 += float(numYesDel)/float(numDel)
if ( delFrac1>fThresh and delFrac2>fThresh ):
print " deletion looks significant ", numYesDel, numYes, numDel, numCol, delFrac1, delFrac2, delFrac3
addDelFeat = 1
else:
print " deletion does not seem significant (?) ", numYesDel, numYes, numDel, numCol, delFrac1, delFrac2, delFrac3
if (numYesAmp > 0):
ampFrac1 = float(numYesAmp-numYes)/float(numCol)
ampFrac2 = float(numYesAmp-numAmp)/float(numCol)
ampFrac3 = 0
                if ( numYes > 0 ): ampFrac3 += float(numYesAmp)/float(numYes)
                if ( numAmp > 0 ): ampFrac3 += float(numYesAmp)/float(numAmp)
if ( ampFrac1>fThresh and ampFrac2>fThresh ):
print " amplification looks significant ", numYesAmp, numYes, numAmp, numCol, ampFrac1, ampFrac2, ampFrac3
addAmpFeat = 1
else:
print " amplification does not seem significant (?) ", numYesAmp, numYes, numAmp, numCol, ampFrac1, ampFrac2, ampFrac3
## add the "DEL" feature if appropriate ...
if ( addDelFeat ):
numNewFeat += 1
curFeatName = rowLabels[iR]
newFeatName = makeNewFeatureName(curFeatName, gnabFeatIncSubstrings, gnabFeatDelSubstrings)
print " newFeatName <%s> " % newFeatName
# make sure that there is not already a feature by this name!!!
addFeat = 1
for aLabel in rowLabels:
if (aLabel == newFeatName):
addFeat = 0
print " oops ??? <%s> already exists ??? " % aLabel
if (addFeat):
print " --> adding this new feature: ", newFeatName
newNameVec += [newFeatName]
newDataMat += [numpy.zeros(numCol)]
numBitsOn = 0
for iCol in range(numCol):
# we need to start with NA
newDataMat[-1][iCol] = NA_VALUE
# if we already have a 'yes' for the mutation, that's
# all we need ...
if (dMat[iR][iCol] == 1):
newDataMat[-1][iCol] = 1
numBitsOn += 1
continue
# if not, then check for deletions ...
for jR in mapVec[iR]:
if (dMat[jR][iCol] == NA_VALUE): continue
if (newDataMat[-1][iCol] == 1): continue
if (dMat[jR][iCol] < cnvrDelThresh):
newDataMat[-1][iCol] = 1
numBitsOn += 1
# if we have set this bit we are done ...
if (newDataMat[-1][iCol] == 1): continue
# and otherwise if we have no mutation, set it to 0
if (dMat[iR][iCol] == 0): newDataMat[-1][iCol] = 0
print " number of bits set: ", numBitsOn
## add the "AMP" feature if appropriate ...
if ( addAmpFeat ):
numNewFeat += 1
curFeatName = rowLabels[iR]
newFeatName = makeNewFeatureName(curFeatName, gnabFeatIncSubstrings, gnabFeatAmpSubstrings)
print " newFeatName <%s> " % newFeatName
# make sure that there is not already a feature by this name!!!
addFeat = 1
for aLabel in rowLabels:
if (aLabel == newFeatName):
addFeat = 0
print " oops ??? <%s> already exists ??? " % aLabel
if (addFeat):
print " --> adding this new feature: ", newFeatName
newNameVec += [newFeatName]
newDataMat += [numpy.zeros(numCol)]
numBitsOn = 0
for iCol in range(numCol):
# we need to start with NA
newDataMat[-1][iCol] = NA_VALUE
# if we already have a 'yes' for the mutation, that's
# all we need ...
if (dMat[iR][iCol] == 1):
newDataMat[-1][iCol] = 1
numBitsOn += 1
continue
# if not, then check for amplifications ...
for jR in mapVec[iR]:
if (dMat[jR][iCol] == NA_VALUE): continue
if (newDataMat[-1][iCol] == 1): continue
if (dMat[jR][iCol] > cnvrAmpThresh):
newDataMat[-1][iCol] = 1
numBitsOn += 1
# if we have set this bit we are done ...
if (newDataMat[-1][iCol] == 1): continue
# and otherwise if we have no mutation, set it to 0
if (dMat[iR][iCol] == 0): newDataMat[-1][iCol] = 0
print " number of bits set: ", numBitsOn
# if ( numNewFeat == 0 ):
# print " --> NO new features "
# print " --> finished with combineGnabCnvr ... "
# return ( dataD )
print " "
print " --> number of new features : ", numNewFeat
if ( 0 ):
if (numNewFeat > 0):
print len(newNameVec)
print len(newDataMat), len(newDataMat[0])
for ii in range(numNewFeat):
if (newNameVec[ii].find("CSMD1") > 0):
print newNameVec[ii]
print newDataMat[ii]
print " "
# now we need to append these new features to the input data matrix
newRowLabels = [0] * (numRow + numNewFeat)
newMatrix = [0] * (numRow + numNewFeat)
for iR in range(numRow):
newRowLabels[iR] = rowLabels[iR]
newMatrix[iR] = dMat[iR]
for iR in range(numNewFeat):
newRowLabels[iR + numRow] = newNameVec[iR]
newMatrix[iR + numRow] = newDataMat[iR]
dataD['rowLabels'] = newRowLabels
dataD['dataMatrix'] = newMatrix
print " "
print " --> finished with combineGnabCnvr ... "
print " "
return (dataD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (1):
if (len(sys.argv) == 3):
inFile = sys.argv[1]
outFile = sys.argv[2]
## do_combineGnabCnvr = 1
do_combineGnabCnvr = 0
do_pathwayGnab = 0
do_driverGnab = 0
driverList = ["TP53", "KRAS", "PIK3CA", "PTEN"]
else:
print " "
print " Usage: %s <input TSV file> <output TSV file> "
print " "
print " ERROR -- bad command line arguments "
sys.exit(-1)
print " "
print " Running : %s %s %s " % (sys.argv[0], sys.argv[1], sys.argv[2])
print " "
# read in the input feature matrix first, just in case there
# actually isn't one yet available ...
print " --> reading in feature matrix ... "
testD = tsvIO.readTSV(inFile)
try:
print len(testD['rowLabels']), len(testD['colLabels'])
except:
print " --> invalid / missing input feature matrix "
sys.exit(-1)
# we want to come up with a 'merged' mutation OR deletion feature
if (do_combineGnabCnvr):
print " calling combineGnabCnvr ... "
newD = combineGnabCnvr(testD)
testD = newD
# and then pathway level mutation features
if (do_pathwayGnab):
print " calling pathwayGnab ... "
pwDict = readPathways()
newD = pathwayGnab(testD, pwDict)
testD = newD
# and then a 'driverMut' feature based on the driverList above
# (which is just 4 hardcoded genes for now)
if (do_driverGnab):
print " calling driverGnab ... "
newD = driverGnab(testD, driverList)
testD = newD
# and finally write it out ...
print " --> writing out output feature matrix "
tsvIO.writeTSV_dataMatrix(testD, 0, 0, outFile)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
cancerregulome/gidget
|
commands/feature_matrix_construction/main/addGnabFeatures.py
|
Python
|
mit
| 38,773
|
from PETScMatOps import *
|
wathen/PhD
|
MHD/FEniCS/MyPackage/PackageName/PETScFunc/__init__.py
|
Python
|
mit
| 26
|
from . import claim, util
from .attr_dict import AttrDict
class Statement(AttrDict):
@classmethod
def from_json(cls, statement_doc):
return normalize(statement_doc)
def normalize(statement_doc):
statement_doc = util.ensure_decoded_json(statement_doc)
references = {}
for item in statement_doc.get('references', []):
for pid, ref_docs in item['snaks'].items():
references[pid] = [claim.normalize(ref_doc)
for ref_doc in ref_docs]
return Statement({
'id': statement_doc.get('id'),
'hash': statement_doc.get('hash'),
'claim': claim.normalize(statement_doc['mainsnak']),
'rank': statement_doc.get('rank', None),
'references': references,
'qualifiers': {
prop: [claim.normalize(qualifier_doc)
for qualifier_doc in statement_doc['qualifiers'][prop]]
for prop in statement_doc.get('qualifiers-order', [])}
})
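# Usage sketch (abridged; property ids and values are illustrative only):
# Statement.from_json expects a Wikibase statement document such as
#
#     {
#         "id": "Q42$<uuid>",
#         "rank": "normal",
#         "mainsnak": {...},
#         "qualifiers": {"P580": [{...}]},
#         "qualifiers-order": ["P580"],
#         "references": [{"snaks": {"P143": [{...}]}}]
#     }
#
# and returns an AttrDict with normalized 'claim', 'qualifiers' and 'references'.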
|
mediawiki-utilities/python-mwbase
|
mwbase/statement.py
|
Python
|
mit
| 987
|
from sqlalchemy import Column, ForeignKey, Integer, String, Text, DateTime, Table
from sqlalchemy.orm import relationship, backref
from models import DecBase
from models.document import Document
from models.keyword import Keyword
from jsonschema import *
from json_schemas import *
from models.collection_version import CollectionVersion
from timestamp.timestampfile import TimestampFile
import time
import json
# Define foreign keys required for joining defined tables together
collection_keywords = Table('collection_keywords', DecBase.metadata,
Column('keyword_id', Integer, ForeignKey('keyword.id')),
Column('collection_address', String, ForeignKey('collection.address'))
)
collection_docs = Table('collection_docs', DecBase.metadata,
Column('document_address', String, ForeignKey('document.hash')),
Column('collection_address_docs', String, ForeignKey('collection.address'))
)
hash_association = Table('collection_hashes', DecBase.metadata,
Column('hash', String, ForeignKey('collection_version.root_hash')),
Column('collection_address', String, ForeignKey('collection.address'))
)
class Collection(DecBase):
""" A Collection is the fundamental unit of organization in the FreeJournal network.
A Collection is a uniquely identifiable set of documents. Each collection is associated
with and signed by a BitMessage broadcast channel address. Each collection contains
a list of documents, a Bitcoin address for ranking, and a version. Messages on the network
called DocIndex messages share the state of a collection at a given version.
This class stores the latest version of each collection the FreeJournal node decides to mirror.
It also stores old timestamps and Merkle trees for bookkeeping purposes (@todo).
Attributes:
title: Title of collection (as in message spec)
description: Collection description (as in message spec)
address: Bitmessage address uniquely ID'ing collection (as in message spec)
btc: Bitcoin address for rating documents (as in message spec)
keywords: Keywords as list of Keyword class for searching (as in message spec)
documents: List of document classes included in the collection (as in message spec)
latest_broadcast_date: The date that this collection was last seen broadcasted in the Main Channel
creation_date: Earliest known timestamp of collection, or if none earliest approximation of creation date of
current version of collection
oldest_date: Earliest known timestamp of collection, or if none earliest approximation of creation date of
any version of collection
        latest_btc_tx: Latest Bitcoin transaction timestamping a Merkle root belonging to this collection
        oldest_btc_tx: Oldest Bitcoin transaction timestamping a Merkle root belonging to this collection
accesses: Number of times this collection is accessed by a user of this node (for cache pruning)
votes: Latest vote count from the Bitcoin network, used to rank collection
votes_last_checked: Latest poll of Bitcoin network for collection votes, to coordinate internal repolling
"""
__tablename__ = 'collection'
title = Column(Text, nullable=False)
description = Column(String)
address = Column(String, primary_key=True)
btc = Column(String)
keywords = relationship("Keyword", secondary=collection_keywords, backref='collection')
documents = relationship("Document", secondary=collection_docs, backref='collection')
latest_broadcast_date = Column(DateTime, nullable=False)
creation_date = Column(DateTime, nullable=False)
oldest_date = Column(DateTime, nullable=False)
latest_btc_tx = Column(String)
oldest_btc_tx = Column(String)
accesses = Column(Integer, nullable=False, default=0)
votes = Column(Integer, nullable=False, default=0)
votes_last_checked = Column(DateTime)
version_list = relationship(CollectionVersion, backref="collection", lazy='dynamic', secondary=hash_association)
def to_json(self):
"""
Encodes a Collection as a json representation so it can be sent through the bitmessage network
:return: the json representation of the given Collection
"""
json_docs = []
for doc in self.documents:
json_docs.append({"address": doc.collection_address, "description": doc.description, "title": doc.title,
"hash": doc.hash, "filename": doc.filename, "accesses": doc.accesses})
json_keywords = []
for key in self.keywords:
json_keywords.append({"id": key.id, "name": key.name})
json_representation = {"type_id": 1,
"title": self.title,
"description": self.description,
"keywords": json_keywords,
"address": self.address,
"documents": json_docs,
"btc": self.btc,
"latest_broadcast_date": self.latest_broadcast_date.strftime("%A, %d. %B %Y %I:%M%p"),
"creation_date": self.creation_date.strftime("%A, %d. %B %Y %I:%M%p"),
"oldest_date": self.oldest_date.strftime("%A, %d. %B %Y %I:%M%p"),
"latest_btc_tx": self.latest_btc_tx,
"oldest_btc_tx": self.oldest_btc_tx,
"accesses": self.accesses,
"votes": self.votes,
"votes_last_checked": self.votes_last_checked.strftime("%A, %d. %B %Y %I:%M%p")}
try:
validate(json_representation, coll_schema)
return json.dumps(json_representation, sort_keys=True)
except ValidationError as m:
return None
def get_latest_version(self):
latest_version = self.version_list.order_by(CollectionVersion.collection_version.desc()).first()
if latest_version is None:
return 0
else:
return latest_version.collection_version
def get_latest_collection_version(self):
latest_version = self.version_list.order_by(CollectionVersion.collection_version.desc()).first()
return latest_version
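# Usage sketch (illustrative values; assumes a configured SQLAlchemy session and
# that datetime is imported by the caller):
#
#     now = datetime.datetime.utcnow()
#     coll = Collection(title="Example collection",
#                       description="Demo",
#                       address="BM-ExampleBroadcastAddress",
#                       btc="1ExampleBitcoinAddress",
#                       latest_broadcast_date=now,
#                       creation_date=now,
#                       oldest_date=now)
#     session.add(coll)
#     session.commit()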
|
FreeJournal/freejournal
|
models/collection.py
|
Python
|
mit
| 6,736
|
#-*-coding: utf-8-*-
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPRequest
from tornado.options import options
from functools import wraps
from tornado import escape
import tornado.ioloop
import base64
import time
import datetime
import json
from math import exp
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
formula = lambda x: 2 ** 10 / (1 + pow(exp(1), -(x - 2 ** 7) / 2 ** 5))
def loop_call(delta=60 * 1000):
def wrap_loop(func):
@wraps(func)
def wrap_func(*args, **kwargs):
func(*args, **kwargs)
tornado.ioloop.IOLoop.instance().add_timeout(
                datetime.timedelta(milliseconds=delta),
wrap_func)
return wrap_func
return wrap_loop
def sync_loop_call(delta=60 * 1000):
"""
    Wait for func to finish before scheduling the next add_timeout.
"""
def wrap_loop(func):
@wraps(func)
@gen.coroutine
def wrap_func(*args, **kwargs):
options.logger.info("function %r start at %d" %
(func.__name__, int(time.time())))
try:
yield func(*args, **kwargs)
except Exception, e:
options.logger.error("function %r error: %s" %
(func.__name__, e))
options.logger.info("function %r end at %d" %
(func.__name__, int(time.time())))
tornado.ioloop.IOLoop.instance().add_timeout(
datetime.timedelta(milliseconds=delta),
wrap_func)
return wrap_func
return wrap_loop
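# Usage sketch (illustrative): poll a URL once a minute, only scheduling the
# next run after the current one has finished.
#
#     @sync_loop_call(delta=60 * 1000)
#     def poll_rate_limit():
#         resp = yield GetPage("https://api.github.com/rate_limit")
#         options.logger.info("rate_limit status: %d" % resp.code)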
class TornadoDataRequest(HTTPRequest):
def __init__(self, url, **kwargs):
super(TornadoDataRequest, self).__init__(url, **kwargs)
self.auth_username = options.username
self.auth_password = options.password
self.user_agent = "Tornado-data"
@gen.coroutine
def GetPage(url):
client = AsyncHTTPClient()
request = TornadoDataRequest(url, method='GET')
try:
response = yield client.fetch(request)
except HTTPError, e:
response = e
raise gen.Return(response)
@gen.coroutine
def PutPage(url, body):
client = AsyncHTTPClient()
request = TornadoDataRequest(url, method='PUT', body=body)
try:
response = yield client.fetch(request)
except HTTPError, e:
response = e
raise gen.Return(response)
@gen.coroutine
def PatchPage(url, body):
client = AsyncHTTPClient.configurable_default()()
request = TornadoDataRequest(url, method="PATCH", body=body)
try:
response = yield client.fetch(request)
except HTTPError, e:
response = e
raise gen.Return(response)
@gen.coroutine
def commit(url, message, data):
resp = yield GetPage(url)
if resp.code == 200:
resp = escape.json_decode(resp.body)
sha = resp["sha"]
body = json.dumps({
"message": message,
"content": base64.b64encode(json.dumps(data)),
"committer": {"name": "cloudaice", "email": "cloudaice@163.com"},
"sha": sha
})
resp = yield PutPage(url, body)
raise gen.Return(resp)
else:
raise gen.Return(resp)
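# Usage sketch (illustrative): update a JSON file through the GitHub contents
# API, where `url` has the form
# "https://api.github.com/repos/<owner>/<repo>/contents/<path>".
#
#     resp = yield commit(url, "update data file", {"key": "value"})
#     if resp.code not in (200, 201):
#         options.logger.error("commit failed: %s" % resp.code)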
@gen.coroutine
def update_file(gist_url, filename, data):
try:
body = json.dumps({
"description": "update file at utctime %s" %
datetime.datetime.utcfromtimestamp(time.time()),
"files": {
filename: {
"content": json.dumps(data, indent=4, separators=(',', ': '))
}
}
})
except Exception, e:
options.logger.error("Error: %s" % e)
resp = yield PatchPage(gist_url, body)
raise gen.Return(resp)
|
cloudaice/simple-data
|
github/libs/client.py
|
Python
|
mit
| 3,814
|
#!/usr/bin/env python3
# vim:fileencoding=utf-8:ts=8:et:sw=4:sts=4:tw=79
"""
papatcher.py: simple python PA patcher
Copyright (c) 2014 Pyrus <pyrus@coffee-break.at>
See the file LICENSE for copying permission.
"""
from argparse import ArgumentParser
from concurrent import futures
from contextlib import contextmanager
from getpass import getpass
from gzip import decompress
from hashlib import sha1
from http.client import OK as HTTP_OK, HTTPSConnection
from json import dumps, loads
from operator import itemgetter
from os import cpu_count, environ
from pathlib import Path
from ssl import create_default_context
from signal import signal, SIGINT
from stat import S_IEXEC
from urllib.error import URLError
from urllib.request import urlopen
import atexit
import sys
import pycurl
CPU_COUNT = cpu_count()
UBERNET_HOST = "uberent.com"
# set up paths according to XDG basedir spec
if "XDG_DATA_HOME" in environ:
DATA_HOME = Path(environ["XDG_DATA_HOME"])
else:
DATA_HOME = Path(environ["HOME"], ".local", "share")
if "XDG_CACHE_HOME" in environ:
CACHE_HOME = Path(environ["XDG_CACHE_HOME"])
else:
CACHE_HOME = Path(environ["HOME"], ".cache")
GAME_ROOT = DATA_HOME / "Planetary Annihilation"
CACHE_DIR = CACHE_HOME / "Planetary Annihilation"
class Cursor(object):
@staticmethod
def hide():
"""Hide the cursor using ANSI escape codes."""
sys.stdout.write("\033[?25l")
sys.stdout.flush()
@staticmethod
def show():
"""Show the cursor using ANSI escape codes."""
sys.stdout.write("\033[?25h")
sys.stdout.flush()
@contextmanager
def shown():
"""Show the cursor within a context."""
Cursor.show()
yield
Cursor.hide()
class ProgressMeter(object):
def __init__(self):
self.last_fraction = None
def display_progress(self, download_total, downloaded,
upload_total, uploaded):
if not int(download_total):
return
fraction = (downloaded / download_total) if downloaded else 0
# display progress only if it has advanced by at least 1 percent
if self.last_fraction and abs(self.last_fraction - fraction) < 0.01:
return
self.last_fraction = fraction
print("* Progress: {0: >4.0%} of {1} bytes.".format(
fraction, int(download_total)), end="\r")
class PAPatcher(object):
"""
PA Patcher class.
Logs in to UberNet, retrieves stream information and downloads patches.
"""
def __init__(self, ubername, password, threads, ratelimit):
"""
Initialize the patcher with UberNet credentials. They will be used to
login, check for and retrieve patches.
"""
self.credentials = dumps({"TitleId": 4,
"AuthMethod": "UberCredentials",
"UberName": ubername,
"Password": password})
ssl_context = create_default_context()
self.connection = HTTPSConnection(UBERNET_HOST,
context=ssl_context)
self.threads = threads
self.ratelimit = ratelimit
def login(self):
"""
Login to UberNet and store a session ticket if successful.
"""
# return immediately if we already have a session ticket
if hasattr(self, "_session"):
return True
# otherwise request a new one
headers = {"Content-Type": "application/json;charset=utf-8"}
self.connection.request("POST", "/GC/Authenticate", headers=headers,
body=self.credentials)
response = self.connection.getresponse()
        if response.status != HTTP_OK:
print("! Encountered an error: {0} {1}.".format(response.status,
response.reason))
return False
# get and parse response data
raw_data = response.read()
result = loads(raw_data.decode("utf-8"))
if "SessionTicket" not in result:
print("! Result doesn't contain a session ticket.")
return False
self._session = result["SessionTicket"]
print("* Got Session Ticket: {0}.".format(self._session))
return True
def get_streams(self):
"""
Request and return a list of streams we can download from UberNet.
"""
# we can't continue without a session ticket
if not hasattr(self, "_session"):
return None
headers = {"X-Authorization": self._session}
# we no longer need the session ticket
del self._session
self.connection.request("GET", "/Launcher/ListStreams?Platform=Linux",
headers=headers)
response = self.connection.getresponse()
        if response.status != HTTP_OK:
print("! Encountered an error: {0} {1}.".format(response.status,
response.reason))
return None
# get and parse response data
raw_data = response.read()
result = loads(raw_data.decode("utf-8"))
self._streams = {stream["StreamName"]: stream
for stream in result["Streams"]}
return self._streams
def get_manifest(self, stream, full):
if not hasattr(self, "_streams") or stream not in self._streams:
return False
self._stream = self._streams[stream]
# we no longer need all streams
del self._streams
print("* Downloading manifest from {0}/{1}/{2}.".format(
self._stream["DownloadUrl"],
self._stream["TitleFolder"],
self._stream["ManifestName"]))
# we still need to add the AuthSuffix for the download to work
manifest_url = "{0}/{1}/{2}{3}".format(
self._stream["DownloadUrl"],
self._stream["TitleFolder"],
self._stream["ManifestName"],
self._stream["AuthSuffix"])
try:
with urlopen(manifest_url) as response:
manifest_raw = decompress(response.read())
self._manifest = loads(manifest_raw.decode("utf-8"))
return self._verify_manifest(full)
except URLError as err:
print("! Could not retrieve manifest: {0}.".format(err.reason))
return False
def _verify_manifest(self, full):
if not hasattr(self, "_stream") or not hasattr(self, "_manifest"):
return False
# clean up cache in the process
cache_dir = CACHE_DIR / self._stream["StreamName"]
print("* Verifying contents of cache folder {0}.".format(
str(cache_dir)))
if cache_dir.exists():
bundle_names = [bundle["checksum"]
for bundle in self._manifest["bundles"]]
old_bundles = 0
for cache_file in cache_dir.iterdir():
if full or cache_file.name not in bundle_names:
cache_file.unlink()
old_bundles += 1
if old_bundles:
print("* Purged {0} old bundle(s).".format(old_bundles))
# verify bundles in parallel
with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
# this list will contain the bundles we actually need to download
self._bundles = list()
bundle_futures = [executor.submit(self._verify_bundle, bundle)
for bundle in self._manifest["bundles"]]
for completed in futures.as_completed(bundle_futures):
if not completed.result():
# cancel waiting futures
for future in bundle_futures:
future.cancel()
return False
print("* Need to get {0} bundle(s).".format(len(self._bundles)))
# if we get here there, all bundles were verified
# we no longer need the manifest
del self._manifest
return True
def _verify_bundle(self, bundle):
if not hasattr(self, "_stream") or not hasattr(self, "_bundles"):
return False
bundle_checksum = bundle["checksum"]
cache_file = CACHE_DIR / self._stream["StreamName"] / bundle_checksum
# if we don't have that file we need to download it
if not cache_file.exists():
self._bundles.append(bundle)
return True
# if we have it, make sure the checksum is correct
with cache_file.open("rb") as cache_fp:
sha = sha1()
sha.update(cache_fp.read())
checksum = sha.hexdigest()
if checksum != bundle_checksum:
self._bundles.append(bundle)
return True
# we have that file and checksums match, nothing to do
return True
def patch(self):
if not hasattr(self, "_bundles"):
return False
with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
bundle_futures = list()
# download bundles sorted by size
self._bundles.sort(key=lambda bundle: int(bundle["size"]),
reverse=True)
for bundle in self._bundles:
bundle_checksum = bundle["checksum"]
print("* Downloading bundle {0}.".format(bundle_checksum))
if not self._download_bundle(bundle):
return False
# bundle was downloaded, start extraction in parallel
print("* Extracting bundle {0}.".format(bundle_checksum))
bundle_future = executor.submit(self._extract_bundle, bundle)
bundle_futures.append(bundle_future)
for completed in futures.as_completed(bundle_futures):
if not completed.result():
# cancel waiting futures
for future in bundle_futures:
future.cancel()
return False
# if we're here everything has been downloaded and extracted
return True
def _download_bundle(self, bundle):
if not hasattr(self, "_stream"):
return False
bundle_checksum = bundle["checksum"]
cache_base = CACHE_DIR / self._stream["StreamName"]
# make sure that path exists
if not cache_base.exists():
cache_base.mkdir(parents=True)
cache_file = cache_base / bundle_checksum
# remove the file first if it already exists
if cache_file.exists():
cache_file.unlink()
bundle_url = "{0}/{1}/hashed/{2}{3}".format(
self._stream["DownloadUrl"],
self._stream["TitleFolder"],
bundle_checksum,
self._stream["AuthSuffix"])
with cache_file.open("x+b") as cache_fp:
curl = pycurl.Curl()
curl.setopt(pycurl.URL, bundle_url)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.MAXREDIRS, 5)
curl.setopt(pycurl.CONNECTTIMEOUT, 30)
curl.setopt(pycurl.NOSIGNAL, 1)
curl.setopt(pycurl.MAX_RECV_SPEED_LARGE, self.ratelimit)
curl.setopt(pycurl.WRITEDATA, cache_fp)
curl.setopt(pycurl.NOPROGRESS, 0)
progress_meter = ProgressMeter()
curl.setopt(pycurl.PROGRESSFUNCTION,
progress_meter.display_progress)
try:
curl.perform()
except:
print("! Downloading bundle {0} failed!".format(
bundle_checksum))
return False
finally:
curl.close()
# verify checksum
cache_fp.seek(0)
sha = sha1()
sha.update(cache_fp.read())
checksum = sha.hexdigest()
if checksum != bundle_checksum:
print("! Checksums don't match. Expected {0}, got {1}.".format(
bundle_checksum, checksum))
return False
# everything worked out OK
return True
    def _extract_bundle(self, bundle):
if not hasattr(self, "_stream"):
return False
bundle_checksum = bundle["checksum"]
cache_file = CACHE_DIR / self._stream["StreamName"] / bundle_checksum
# open cache file with gzip
with cache_file.open("rb") as cache_fp:
game_base = GAME_ROOT / self._stream["StreamName"]
# get entries sorted by offset
entries = sorted(bundle["entries"], key=itemgetter("offset"))
for entry in entries:
entry_file = game_base / entry["filename"][1:]
# make sure that path exists
if not entry_file.parent.exists():
entry_file.parent.mkdir(parents=True)
entry_offset = int(entry["offset"])
cache_fp.seek(entry_offset)
# remove the file first if it already exists
if entry_file.exists():
entry_file.unlink()
with entry_file.open("xb") as entry_fp:
# data might be compressed further, check sizeZ for that
if entry["sizeZ"] != "0":
entry_size = int(entry["sizeZ"])
raw_data = cache_fp.read(entry_size)
entry_fp.write(decompress(raw_data))
else:
entry_size = int(entry["size"])
entry_fp.write(cache_fp.read(entry_size))
# set executable
if "executable" in entry:
entry_file.chmod(entry_file.stat().st_mode | S_IEXEC)
return True
if __name__ == "__main__":
Cursor.hide()
atexit.register(Cursor.show)
signal(SIGINT, lambda sig, frame: sys.exit(SIGINT))
print("Python PA Patcher\n"
"=================")
arg_parser = ArgumentParser()
arg_parser.add_argument("-u", "--ubername",
action="store", type=str,
help="UberName used for login.")
arg_parser.add_argument("-p", "--password",
action="store", type=str,
help="Password used for login.")
arg_parser.add_argument("-s", "--stream",
action="store", type=str,
help="Stream being downloaded.")
arg_parser.add_argument("-f", "--full",
action="store_true",
help="Patch even unchanged files.")
arg_parser.add_argument("-t", "--threads",
action="store", type=int,
default=CPU_COUNT,
help="Number of threads used.")
arg_parser.add_argument("-r", "--ratelimit",
action="store", type=int,
default=0,
help="Limit downloads to bytes/sec.")
arg_parser.add_argument("--unattended",
action="store_true",
help="Don't ask any questions. If you use this "
"option, --ubername, --password and --stream "
"are mandatory")
arguments = arg_parser.parse_args()
unattended = arguments.unattended
if (unattended and not (arguments.ubername and
arguments.password and
arguments.stream)):
print("! For unattended mode you need to use "
"--ubername, --password and --stream. "
"Exiting...")
sys.exit(-1)
with Cursor.shown():
ubername = arguments.ubername or input("? UberName: ")
password = arguments.password or getpass("? Password: ")
print("* Creating patcher...")
patcher = PAPatcher(ubername, password,
arguments.threads, arguments.ratelimit)
print("* Logging in to UberNet...")
if not patcher.login():
print("! Login failed. Exiting...")
sys.exit(-1)
print("* Requesting streams...")
streams = patcher.get_streams()
if not streams:
print("! Could not acquire streams. Exiting...")
sys.exit(-1)
stream = arguments.stream
if not stream or stream not in streams:
if unattended:
print("! Invalid Stream. "
"For a selection of streams use interactive mode. "
"Exiting...")
sys.exit(-1)
with Cursor.shown():
while True:
print("* Available streams: {0}.".format(
", ".join(streams.keys())))
stream = input("? Select stream: ")
if stream in streams:
break
print("! Invalid Stream.")
print("* Downloading manifest for stream '{0}'...".format(stream))
if not patcher.get_manifest(stream, arguments.full):
print("! Could not download manifest. Exiting...")
sys.exit(-1)
print("* Patching installation for stream '{0}'...".format(stream))
if not patcher.patch():
print("! Could not patch stream. Exiting...")
sys.exit(-1)
print("* Successfully updated stream '{0}'.".format(stream))
sys.exit(0)
|
pa-pyrus/papatcher
|
papatcher.py
|
Python
|
mit
| 17,605
|
"""
Module for testing the Blacklist model
"""
import unittest
from app.models.shopping import BlacklistToken, ShoppingList
try:
from .common_functions import BaseModelTestClass
except (ImportError, SystemError):
from common_functions import BaseModelTestClass
class BlacklistModelTest(BaseModelTestClass):
"""
Handles the testing for the Blacklist model class
"""
def blacklist_token(self, token=None):
"""
Helper method to blacklist a token
"""
with self.app.app_context():
if not token:
# create token from default user
token = self.user.generate_token(self.user.id)
            # blacklist the token; save() is left to the caller because
            # save() commits the session and closes it
            return BlacklistToken(token=token)
def test_token_can_be_blacklisted(self):
"""
A token can be blacklisted
"""
with self.app.app_context():
blacklisted_token = self.blacklist_token()
blacklisted_token.save() # save in the same session
from_database = BlacklistToken.query.get(blacklisted_token.id)
self.assertEqual(blacklisted_token.token, from_database.token)
def test_token_is_string_or_bytes(self):
"""
        The token must be a string or bytes
"""
with self.app.app_context():
            # try blacklisting values that are not strings or bytes
self.assertRaises(TypeError, self.blacklist_token, token=76)
self.assertRaises(TypeError, self.blacklist_token, token=True)
self.assertRaises(TypeError, self.blacklist_token, token=
{'token':'should be string or bytes'})
def test_token_can_be_searched_for(self):
"""
Blacklisted token can be searched for and found
"""
with self.app.app_context():
blacklisted_token = self.blacklist_token()
blacklisted_token.save() # save in the same session
self.assertTrue(BlacklistToken.check_blacklist(blacklisted_token.token))
def test_only_valid_tokens_allowed(self):
"""
Only valid tokens should be blacklisted
"""
with self.app.app_context():
            # try blacklisting strings that are not valid tokens
self.assertRaises(ValueError, self.blacklist_token,
token='some random string')
self.assertRaises(ValueError, self.blacklist_token,
token='some random string to be converted to bytes'.encode('utf-8'))
|
Tinitto/ShoppingListAPI
|
api/test/test_blacklist_model.py
|
Python
|
mit
| 2,727
|
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#The MIT License (MIT)
#Copyright (c) 2016 Blockstack
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#pylint: skip-file
class InvalidLineException(Exception):
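    """Raised when a zone file line cannot be parsed."""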
pass
|
BurtBiel/azure-cli
|
src/command_modules/azure-cli-network/azure/cli/command_modules/network/zone_file/exceptions.py
|
Python
|
mit
| 1,506
|
from pypov.pov import Texture, Pigment, Intersection, Cylinder
from pypov.pov import Union, Difference, Object, Box, Sphere
from pypov.common import grey, white
from pypov.colors import Colors
from lib.base import five_by_ten_edge
from lib.textures import cross_hatch, cross_hatch_2, wall_texture_1
from lib.metadata import Metadata
def edge_5x10_003_info():
return Metadata("Non connected edge passages", "e3",
description="Non connected edge passages",
block_type="edge",
bottom=0, top=20,
size="5x10",
repeatable=True,
fully_connected=False,
dead_ends=False,
entrance=False,
has_rooms=False,
passage_type="hewn",
wet=False,
multi_level=False,
keywords=['passages', 'boring', 'basic'])
def edge_5x10_003(rotate=(0, 0, 0), translate=(0, 0, 0), detail_level=1,
                  cross_hatch_texture=cross_hatch_2):
    """Build the 5x10 edge geomorph with non-connected edge passages."""
geomorph = Union(
Difference(
Union(
Object(five_by_ten_edge(), cross_hatch_texture),
),
Union(
# Halls
Box(( -22.5, 10.0001, -10), ( -27.5, 21, -26)),
Box(( 22.5, 10.0002, -10), ( 27.5, 21, -26)),
Box(( -51, 10, -2.5), (51, 21, 2.5)),
Box(( -27.5, 10, -10.0), (27.5, 21, -5.0)),
wall_texture_1
),
),
translate=translate,
rotate=rotate
)
return geomorph
|
autowitch/pypov
|
scenes/geomorphs/lib/geomorphs/edge_5x10_003.py
|
Python
|
mit
| 1,577
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import getpass
import requests
import ConfigParser
from feedgen.feed import FeedGenerator
BASE_DIR = os.path.dirname(__file__)
class Auth:
"""GitHub API Auth"""
def __init__(self):
self.auth_url = 'https://api.github.com'
auth_conf = os.path.join(BASE_DIR, 'auth.conf')
if os.path.exists(auth_conf):
cf = ConfigParser.ConfigParser()
cf.read(auth_conf)
self.user = cf.get("auth", "user")
self.passwd = cf.get("auth", "passwd")
else:
self.user = ''
self.passwd = ''
def get_session(self):
s = requests.Session()
s.auth = (self.user, self.passwd)
r = s.get(self.auth_url)
if r.status_code != 200:
            print("authentication failed. status_code: " + str(r.status_code))
return requests.Session()
else:
print("authentication succeed")
return s
class Repo:
"""GitHub Repository"""
def __init__(self):
self.auth_url = 'https://api.github.com'
def get_commits(self, s, owner, repo, nums=30):
url = '/'.join([self.auth_url, 'repos', owner, repo, 'commits'])
commits = s.get(url)
if commits.status_code == 200:
return commits.json()
def get_commit_info(self, s, commit_json):
commit_info = {}
# url for get_commit_diff
commit_info['diff_url'] = commit_json['url']
commit_info['diff'] = self.get_commit_diff(s, commit_info['diff_url'])
commit_info['html_url'] = commit_json['html_url']
commit_info['sha'] = commit_json['sha']
commit = commit_json['commit']
commit_info['url'] = commit['url']
author = {}
author['name'] = commit['author']['name']
author['email'] = commit['author']['email']
commit_info['author'] = author
commit_info['updated'] = commit['author']['date']
commit_info['message'] = commit['message']
return commit_info
def get_commit_diff(self, s, commit_url):
diff_headers = {'Accept': 'application/vnd.github.diff'}
commit_diff = s.get(commit_url, headers=diff_headers)
if commit_diff.status_code == 200:
commit_diff_txt = commit_diff.text
return commit_diff_txt
else:
return ''
def get_repo_info(self, s, owner, repo):
url = '/'.join([self.auth_url, 'repos', owner, repo])
repo_json = s.get(url).json()
repo_info = {}
repo_info['description'] = repo_json['description']
repo_info['full_name'] = repo_json['full_name']
repo_info['html_url'] = repo_json['html_url']
repo_info['updated_at'] = repo_json['updated_at']
repo_info['author'] = self.get_author(s, owner)
return repo_info
def get_author(self, s, owner):
url = '/'.join([self.auth_url, 'users', owner])
author_raw = s.get(url)
if author_raw.status_code != 200:
return None
author_json = author_raw.json()
author_info = {}
author_info['name'] = owner
author_info['email'] = author_json['email']
return author_info
def get_commits_info(self, s, owner, repo):
commits_json = self.get_commits(s, owner, repo)
commits_info = []
for commit_json in commits_json:
commit_info = self.get_commit_info(s, commit_json)
commits_info.append(commit_info)
return commits_info
class GitHubRSS:
"""GitHub RSS"""
def __init__(self):
self.atom = True
def init_fg(self, repo_info):
fg = FeedGenerator()
title = 'Recent commits to ' + repo_info['full_name']
fg.title(title)
fg.link(href=repo_info['html_url'])
fg.updated(repo_info['updated_at'])
fg.id(repo_info['html_url'])
fg.author(repo_info['author'])
return fg
def add_entry(self, fg, commit_info):
fe = fg.add_entry()
fe.title(commit_info['message'])
fe.link(href=commit_info['html_url'])
id_prefix = 'tag:github.com,2008:Grit::Commit/'
entry_id = id_prefix + commit_info['sha']
fe.id(entry_id)
fe.author(commit_info['author'])
fe.published(commit_info['updated'])
fe.updated(commit_info['updated'])
fe.content(commit_info['diff'])
return fg
def gen_atom(self, fg, atom_fn='atom.xml'):
fg.atom_file(atom_fn)
if __name__ == "__main__":
    # auth with GitHub username and password
    user = raw_input('Enter your GitHub username: ')
    passwd = getpass.getpass()
    gh_repo = Repo()
    s = requests.Session()
    s.auth = (user, passwd)
    r = s.get(gh_repo.auth_url)
    if r.status_code == 401:
        print("Unauthorized. Wrong username or password!")
        sys.exit("Exit for Unauthorized status")
    owner = 'billryan'
    repo = 'algorithm-exercise'
    repo_info = gh_repo.get_repo_info(s, owner, repo)
    commits_json = gh_repo.get_commits(s, owner, repo)
    commits_info = []
    for commit_json in commits_json:
        commit_info = gh_repo.get_commit_info(s, commit_json)
        commits_info.append(commit_info)
# generate rss
rss = GitHubRSS()
fg_repo = rss.init_fg(repo_info)
for commit_info in commits_info:
rss.add_entry(fg_repo, commit_info)
rss.gen_atom(fg_repo, '/tmp/test/atom_test.xml')
|
billryan/github-rss
|
github/gh.py
|
Python
|
mit
| 5,484
|
"""
DotStar_Emulator
config.py in current working directory will be automatically read and loaded.
Author: Christopher Ross
License: MIT Something Rather
"""
from DotStar_Emulator.manage import manage
if __name__ == "__main__":
manage()
|
chrisrossx/DotStar_Emulator
|
DotStar_Emulator/emulator/init/manage.py
|
Python
|
mit
| 245
|
import datetime
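# Plain data classes for the userbase API: credentials, profiles, tokens, users, groups, and request/response payloads.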
class AuthenticationInfo:
def __init__(self, password, email):
self.Password = password
self.Email = email
class ProfileInfo:
def __init__(self, display_name):
self.DisplayName = display_name
class Token:
def __init__(self, id_token, valid_until):
self.Id = id_token
self.ValidUntil = valid_until
class User:
def __init__(self, id_user, username, display_name, groups):
self.IdUser = id_user
self.Username = username
self.DisplayName = display_name
self.Groups = groups
class Group:
def __init__(self, id_group, name):
self.Id = id_group
self.Name = name
class CreateUserRequest:
def __init__(self, username, authentication, profile):
self.Username = username
self.Authentication = authentication
self.Profile = profile
class ModifyCredentialsRequest:
def __init__(self, username, token, authentication):
self.Username = username
self.Token = token
self.Authentication = authentication
class ModifyProfileRequest:
def __init__(self, username, token, profile):
self.Username = username
self.Token = token
self.Profile = profile
class AddUserToGroupRequest:
def __init__(self, username, token, user_to_add, id_group):
self.Username = username
self.Token = token
self.UserToAdd = user_to_add
self.IdGroup = id_group
class TokenSuccessResponse:
def __init__(self, success, token):
self.Success = success
self.Token = token
@staticmethod
def invalid():
return TokenSuccessResponse(
False,
Token("", datetime.datetime.now()))
class ConnectUserResponse:
def __init__(self, success, token, id_user):
self.Success = success
self.Token = token
self.IdUser = id_user
@staticmethod
def invalid():
return ConnectUserResponse(
False,
Token("", datetime.datetime.now()),
0)
class UserSummaryResponse:
def __init__(self, success, token, display_name, groups):
self.Success = success
self.Token = token
self.DisplayName = display_name
self.Groups = groups
@staticmethod
def invalid():
return UserSummaryResponse(
False,
Token("", datetime.datetime.now()),
"", [])
class UserListResponse:
def __init__(self, success, token, users):
self.Success = success
self.Token = token
self.Users = users
@staticmethod
def invalid():
return UserListResponse(
False,
Token("", datetime.datetime.now()),
[])
|
Com-Ericmas001/py-userbase
|
py_userbase/userbase_models.py
|
Python
|
mit
| 2,746
|
"""
Django settings for MapaAsentamientosTecho project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l4eg9uqh!s0w&6@2t+xdedd-7m=$1z13s7ylzi_mc^-w2m@jsk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # Django API REST FRAMEWORK
    'crispy_forms', # nicer-looking forms
'main',
'constance',
'constance.backends.database',
'import_export',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MapaAsentamientosTecho.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'constance.context_processors.config',
],
},
},
]
WSGI_APPLICATION = 'MapaAsentamientosTecho.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
}
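# Runtime-editable settings exposed through django-constance, stored with the database backend configured below.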
CONSTANCE_CONFIG = {
'dominio': ('localhost:8000', 'Dominio de la plataforma' , str),
'Analytics': ('Analytics ID', 'Google Analytics' , str),
}
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
|
Girolabs/MapaAsentamientosTecho
|
MapaAsentamientosTecho/settings.py
|
Python
|
mit
| 3,830
|
import os
import codecs
try:
from setuptools import (setup, find_packages)
except ImportError:
from distutils.core import (setup, find_packages)
VERSION = (0, 2, 0)
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pelican-readtime'
__description__ = 'Plugin for Pelican that computes average read time.'
__contact_names__ = 'David Jenkins, Deepak Bhalla, Jonathan Dektiar'
__contact_emails__ = 'djenkinsdev@gmail.com, contact@deepakrb.com, contact@jonathandekhtiar.eu'
__homepage__ = 'https://github.com/JenkinsDev/pelican-readtime'
__repository_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__download_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__docformat__ = 'markdown'
__license__ = 'MIT'
__keywords__ = 'pelican blogging blog static webdevelopment plugin pelican-plugin readtime python python3 python2'
here = os.path.abspath(os.path.dirname(__file__))
if os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(here, 'README.rst'), 'r', 'utf-8').read()
else:
long_description = 'See ' + __homepage__
setup(
name=__package_name__,
version=__version__,
description=__description__,
long_description=long_description,
url=__repository_url__,
download_url=__download_url__,
license='MIT',
author=__contact_names__,
author_email=__contact_emails__,
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=__keywords__,
packages=[''],
install_requires=['pelican>=3.6'],
zip_safe=True,
include_package_data=True
)
|
JenkinsDev/pelican-readtime
|
setup.py
|
Python
|
mit
| 2,230
|
from shutit_module import ShutItModule
class test16(ShutItModule):
def build(self, shutit):
shutit.login()
shutit.login(command='bash')
shutit.send('ls',note='We are listing files')
shutit.logout()
shutit.logout()
return True
def module():
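    # module id, run order, and dependency on the core ShutIt setup module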
return test16(
'shutit.test16.test16.test16', 210790650.002938762,
description='',
maintainer='',
depends=['shutit.tk.setup']
)
|
ianmiell/shutit-test
|
test/docker_tests/16/test16.py
|
Python
|
mit
| 395
|
#-- GAUDI jobOptions generated on Wed Jun 10 17:31:51 2015
#-- Contains event types :
#-- 11104041 - 117 files - 2010995 events - 432.61 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125577
#-- StepId : 125577
#-- StepName : Sim08a - 2012 - MD - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r3
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-md100
#-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r11
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000001_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000002_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000003_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000004_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000005_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000006_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000007_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000008_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000009_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000010_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000011_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000012_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000013_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000014_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000015_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000016_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000017_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000018_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000019_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000020_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000021_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000022_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000023_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000024_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000025_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000026_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000027_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000028_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000029_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000030_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000031_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000032_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000033_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000034_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000035_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000036_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000037_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000038_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000039_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000040_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000041_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000042_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000043_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000044_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000045_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000046_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000047_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000048_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000049_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000050_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000051_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000052_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000053_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000054_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000055_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000056_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000057_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000058_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000059_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000060_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000061_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000062_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000063_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000064_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000065_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000066_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000067_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000068_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000069_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000070_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000071_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000072_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000073_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000074_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000075_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000076_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000077_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000078_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000079_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000080_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000081_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000082_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000083_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000084_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000085_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000086_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000087_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000088_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000089_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000090_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000091_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000092_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000093_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000094_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000095_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000096_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000097_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000098_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000099_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000100_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000101_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000102_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000103_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000104_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000105_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000106_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000108_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000109_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000110_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000111_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000112_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000113_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000114_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000115_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000116_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000117_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000118_1.allstreams.dst'
], clear=True)
|
Williams224/davinci-scripts
|
kstaretappipig/MC_12_MagDown_kstar_rho_kpipipi.py
|
Python
|
mit
| 12,721
|
# muddersOnRails()
# Sara McAllister November 17, 2017
# Last updated: 11-17-2017
# delete all data from database and remove generated graphs (this is super sketch)
import os
import dbCalls
summary_file = 'app/assets/images/summary.png'
overall_file = 'app/assets/images/overall.png'
def main():
dbCalls.remove_all()
# remove both summary and overall picture
try:
os.remove(summary_file)
os.remove(overall_file)
except OSError:
pass
if __name__ == "__main__":
main()
|
nathanljustin/teamwork-analysis
|
teamworkApp/lib/delete_data.py
|
Python
|
mit
| 520
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def f(k, n):
pass
assert f(1, 2) == 2
assert f(2, 6) == 3
assert f(3, 14) == 14
|
uxlsl/uxlsl.github.io
|
demo/code/2021-11-15/f.py
|
Python
|
mit
| 135
|
import os
import time
from abc import abstractmethod, ABC
from typing import Dict, Tuple, List
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS
ACCEL_MAX = 2.0
ACCEL_MIN = -3.5
# generic car and radar interfaces
class CarInterfaceBase(ABC):
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.steering_unpressed = 0
self.low_speed_alert = False
self.silent_steer_warning = True
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.cp_loopback = self.CS.get_loopback_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
@staticmethod
def get_pid_accel_limits(CP, current_speed, cruise_speed):
return ACCEL_MIN, ACCEL_MAX
@staticmethod
@abstractmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
pass
@staticmethod
def init(CP, logcan, sendcan):
pass
@staticmethod
def get_steer_feedforward_default(desired_angle, v_ego):
# Proportional to realigning tire momentum: lateral acceleration.
# TODO: something with lateralPlan.curvatureRates
return desired_angle * (v_ego**2)
@classmethod
def get_steer_feedforward_function(cls):
return cls.get_steer_feedforward_default
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.unsafeMode = 0 # see panda/board/safety_declarations.h for allowed values
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
ret.wheelSpeedFactor = 1.0
ret.pcmCruise = True # openpilot's state is tied to the PCM's cruise state on most cars
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
    ret.steerRatioRear = 0. # no rear steering, at least on the listed cars above
ret.openpilotLongitudinalControl = False
ret.stopAccel = -2.0
ret.stoppingDecelRate = 0.8 # brake_travel/s while trying to stop
ret.vEgoStopping = 0.5
ret.vEgoStarting = 0.5
ret.stoppingControl = True
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kf = 1.
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
# TODO estimate car specific lag, use .15s for now
ret.longitudinalActuatorDelayLowerBound = 0.15
ret.longitudinalActuatorDelayUpperBound = 0.15
ret.steerLimitTimer = 1.0
return ret
@abstractmethod
def update(self, c: car.CarControl, can_strings: List[bytes]) -> car.CarState:
pass
@abstractmethod
def apply(self, c: car.CarControl) -> Tuple[car.CarControl.Actuators, List[bytes]]:
pass
def create_common_events(self, cs_out, extra_gears=None, pcm_enable=True):
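    # build the driver-facing events (doors, seatbelt, gear, ESP, pedals, steering faults) shared by all car ports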
events = Events()
if cs_out.doorOpen:
events.add(EventName.doorOpen)
if cs_out.seatbeltUnlatched:
events.add(EventName.seatbeltNotLatched)
if cs_out.gearShifter != GearShifter.drive and (extra_gears is None or
cs_out.gearShifter not in extra_gears):
events.add(EventName.wrongGear)
if cs_out.gearShifter == GearShifter.reverse:
events.add(EventName.reverseGear)
if not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
if cs_out.gasPressed:
events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > MAX_CTRL_SPEED:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
if cs_out.brakeHoldActive and self.CP.openpilotLongitudinalControl:
events.add(EventName.brakeHold)
# Handle permanent and temporary steering faults
self.steering_unpressed = 0 if cs_out.steeringPressed else self.steering_unpressed + 1
if cs_out.steerFaultTemporary:
# if the user overrode recently, show a less harsh alert
if self.silent_steer_warning or cs_out.standstill or self.steering_unpressed < int(1.5 / DT_CTRL):
self.silent_steer_warning = True
events.add(EventName.steerTempUnavailableSilent)
else:
events.add(EventName.steerTempUnavailable)
else:
self.silent_steer_warning = False
if cs_out.steerFaultPermanent:
events.add(EventName.steerUnavailable)
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
if (cs_out.gasPressed and not self.CS.out.gasPressed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase(ABC):
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase(ABC):
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
self.left_blinker_prev = False
self.right_blinker_prev = False
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def get_wheel_speeds(self, fl, fr, rl, rr, unit=CV.KPH_TO_MS):
factor = unit * self.CP.wheelSpeedFactor
wheelSpeeds = car.CarState.WheelSpeeds.new_message()
wheelSpeeds.fl = fl * factor
wheelSpeeds.fr = fr * factor
wheelSpeeds.rl = rl * factor
wheelSpeeds.rr = rr * factor
return wheelSpeeds
def update_blinker_from_lamp(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
"""Update blinkers from lights. Enable output when light was seen within the last `blinker_time`
iterations"""
# TODO: Handle case when switching direction. Now both blinkers can be on at the same time
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
def update_blinker_from_stalk(self, blinker_time: int, left_blinker_stalk: bool, right_blinker_stalk: bool):
"""Update blinkers from stalk position. When stalk is seen the blinker will be on for at least blinker_time,
or until the stalk is turned off, whichever is longer. If the opposite stalk direction is seen the blinker
is forced to the other side. On a rising edge of the stalk the timeout is reset."""
if left_blinker_stalk:
self.right_blinker_cnt = 0
if not self.left_blinker_prev:
self.left_blinker_cnt = blinker_time
if right_blinker_stalk:
self.left_blinker_cnt = 0
if not self.right_blinker_prev:
self.right_blinker_cnt = blinker_time
self.left_blinker_cnt = max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = max(self.right_blinker_cnt - 1, 0)
self.left_blinker_prev = left_blinker_stalk
self.right_blinker_prev = right_blinker_stalk
return bool(left_blinker_stalk or self.left_blinker_cnt > 0), bool(right_blinker_stalk or self.right_blinker_cnt > 0)
@staticmethod
def parse_gear_shifter(gear: str) -> car.CarState.GearShifter:
d: Dict[str, car.CarState.GearShifter] = {
'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive,
'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake
}
return d.get(gear, GearShifter.unknown)
@staticmethod
def get_cam_can_parser(CP):
return None
@staticmethod
def get_body_can_parser(CP):
return None
@staticmethod
def get_loopback_can_parser(CP):
return None
|
commaai/openpilot
|
selfdrive/car/interfaces.py
|
Python
|
mit
| 9,765
|
from _external import *
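# dependency check: verifies that libfontconfig and the fontconfig/fontconfig.h header are usable from C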
fontconfig = LibWithHeaderChecker('fontconfig', 'fontconfig/fontconfig.h', 'c')
|
tuttleofx/sconsProject
|
autoconf/fontconfig.py
|
Python
|
mit
| 105
|
#
# adapters/tensorflow/imagenet/__init__.py - a service adapter for the tensorflow ImageNet pre-trained graph
#
# Copyright (c) 2018 SingularityNET
#
# Distributed under the MIT software license, see LICENSE file.
#
import base64
import logging
import os
from pathlib import Path
from typing import List
import tensorflow as tf
from adapters.tensorflow.imagenet.node_lookup import NodeLookup
from sn_agent.job.job_descriptor import JobDescriptor
from sn_agent.ontology import Service
from sn_agent.service_adapter import ServiceManager, ServiceAdapterABC
IMAGENET_CLASSIFIER_ID = 'deadbeef-aaaa-bbbb-cccc-111111111102'
logger = logging.getLogger(__name__)
FLAGS = None
AGENT_DIRECTORY = Path(__file__).parent
CHECK_ACCURACY = False
MINIMUM_SCORE = 0.20
class TensorflowImageNet(ServiceAdapterABC):
type_name = "TensorflowImageNet"
def __init__(self, app, service: Service, required_services: List[Service] = None):
super().__init__(app, service, required_services)
if not service.node_id == IMAGENET_CLASSIFIER_ID:
raise RuntimeError("TensorflowImageNet cannot perform service %s", service.node_id)
def example_job(self):
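        # a sample job with two base64-encoded JPEG images (a bucket and a cup) attached inline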
bucket_image_64 = '/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxAQEBUQEhAQFRUQEg8SEA8VEBUQFRAPFRUWFhUVFRUYHSggGBolGxUVITEhJSkr' \
+ 'Li4uFx8zODMtNygtLisBCgoKDg0OFxAQGi0lHR0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t' \
+ 'LS0tLf/AABEIAOEA4QMBEQACEQEDEQH/xAAbAAEAAgMBAQAAAAAAAAAAAAAAAgMBBQYEB//EAEEQAAIBAgIHBAgEBAMJAAAAAAAB' \
+ 'AgMRBCEFBhIxQVFhInGBkRMyUmKhscHRFEJykkOC4fAHU/EVFhcjM1RjotL/xAAaAQEBAQEBAQEAAAAAAAAAAAAAAQIDBQQG/8QA' \
+ 'LBEBAQABAwMEAQMDBQAAAAAAAAECAwQREiExBRNBUSIyYaEVcZEjUoGx8P/aAAwDAQACEQMRAD8A+4gAAAAAAAAAAAAAAAAAAAAA' \
+ 'AAAAAAAAAAAAAAAAAAAAAAAGGwK54iC3zgu+SQZuUnyqlpGgt9an++P3Ce5h9xB6Xw6/jU/3Icp72n9xH/bOG/zqfmOU97T+4ktL' \
+ 'Yd/xqf7kOV97D7iyOPovdVp/vQX3MftdGtF7pRfc0w1zEwoAAAAAAAAAAAAAAAAAGB4cTpahT9apG/Jdp/AcuWWthj5rUYvXGhD1' \
+ 'Yyl1bUUTl8+e+wx8NHjNfJ7o7Ee5bT+Ji6kj5M/Uvpp8RrhiJfxZ+D2fkZurHy5eoZ35a+tpytPfOT75NmLquN3Wd+Xnljqj4k9y' \
+ 'se7lUPxM+bJ11OvI9NLmx106qyq0uZeunVU44qfTyLNStddWLGy5R8mX3KvuVNaSa/L5SaNTUX37HqpadnHdOqu6dy+5HWbrKfNb' \
+ 'DDa211urS/njf7l647Y7/P7bWhrtUXrRpyXFrJ/34GuX0Y+oWeY22E1woT9aMo9cpL7l5fRhvtPLy3GF0pQq+pUi3yvZ+TK+nHVw' \
+ 'y8V7A6AAAAAAAAACutWjCLlKSilvbdglsk5rntJa1Rjf0aT9+WS8FvZOXx6m8k/S43S+tk5ZObl03R8kc8tSR5etv7e3LncRpepP' \
+ 'jZHK6tvh5+e5zyeV1JPe2c7bXHqt81mMQ1IsiiNyJpBuRJIKzYqslVkDJRkDDRSwsE4EgSAVJTZeqi+li5R4m5m3jqWN5ozWatTy' \
+ 'VR29mXaXk/odJlK+zS3mePy6zR2tlOeVWOy/bXaj4rejXL0dPeY5fqdDSqRklKLTT3NO6ZX1yy94mFAAAAB4tK6Rhh4bUs2/Vjxk' \
+ '/sHPV1ZpzmuC0zpac71Kksluj+WPcjNvDyNfXt/LKuN0hpOVR5OyPmz1LfDydTWyz/s8KObinFERZFBqRZFB0iaDcSRVZQVkqpAZ' \
+ 'KoBkoACgACAAAB6cNinF2e7mdMc/tvDPh0miNLVKDvB3i/Wpt5SXTk+p1eho6+WHjw77AYyFamqkHk+HGL4p9TT1sM5nOY9IbAAA' \
+ 'DgNN4x1a0pcItxiuUVkZeTr59WdcbrLXe0ocErvx3HDWvw8nd5flMWlSOD5OVkYkE0inCaDcTQbiSDSSCsoqshWSjIVlFADJQAAA' \
+ 'AAoBGANto6V4dzt4HfC9n06V5xdVqXi3GtKlwnFyt70ePk/gbj0dln+VxdqV6QAAwwPmld9uX6n8zLxc/wBVaHWLCOVqqz2Vsz7u' \
+ 'DOOrjz3fBu9Pn840iifM+LhJILwykGuEkF4SQaSTCs3Cs3KMoKyVWbgCjJVZuAAFAIBWQAGYQcnZK7e5Fk5JLe0bmhS2IqPLe+b4' \
+ 'n0YziPqxnTOG31OntY1JflhUb8rfUsvd9uynOdr6GaeoAAAHz3WvASoVnNLsVM4vk+MSeHj7zTuGfVPFamlXUvqh5fNMpWuxmiE8' \
+ '6dl7j3fyv6Hz56PPeOGe3+cf8NVUpOLtJNPk1Y+e42eXzXGztUSIAZuFZuFLgSTKrNwrNyqzcDJeVZuUAMlAAAuBko9NHBSlv7K6' \
+ '7/I3MLXTHTte6nCFNcube9naSYusmOEavHaYTexTzu7bS+S5kuXPhjrud4xfSNQ9Ayw1L0tVWqVUuy98Ib7Pq978DWM4e5tdH28O' \
+ '7qjT6gAAApxeGhVg6c4qUZb0/wC8mGc8JlOK+eaf1Vq4dupSvUp78vXguq4rqjnZZ4eLuNlnp/lh3jS0cY1vLMvt8uOpXpvGas0p' \
+ 'Lz/0LZK6czKd3kq6JhL1W49PWXxzOWWjjfDnlt8b4eKromot2zLudvgzldDKeHHLQyjy1MPOO+El4HO4ZTzHO45TzFW0ZY5EwvLK' \
+ 'YXlJMKkmGmUUZKrJRkBcqrIUpPdF+RqY2rxaujgpvku9m5p5NTCr4YCK9aXgsjc0p8tTTnzU51qNL2V14/c1+OK9WGLWYzWOK9RN' \
+ '9dyMXV+mbq2+I8GDo4vSFRUqcXLaf6YR6tmPyya09DPVvEfVtUNRaWCtVqtVay3St2Kb9xPj1fwO+OPD2dts8dLve9dgbfaAAAAA' \
+ 'Bhq+QHznTujYbcuzZpvNZXMWPJ3Whjzzw0FTDOLyf0Jw864WeF2D9NUqRpRW1KbtFO2fiWWtYdeWUxnmul/3XxKhtP0V0rumpv5t' \
+ 'WNd3oTZanHPZop4iKbjJ7Mou0oyi04vk7GeY+TLKS8XtUJOnLe6T77fVDtWfxv0rlg6T/hx8GvoydGN+Gbp4X4QejqXsyXc2S6WH' \
+ '0ntYfSD0bS9/zZn2cE9rBB4Cnzl5/wBB7OKe3ix+Dpe1Lz/oPaxTow+2fw9Dm/Me3gvTgbGHX+rL06cT/THXw8eEfmPwh16cQlpW' \
+ 'jHdbwRfcxie/hPDz1dPxW5Nmbqxn378R4q2sEuCS78zN1anuZ14aukq89zfgrE6sqcW+arhhJzzlJLx2n8CdNaxwjY4HRlNO7W1+' \
+ 'rd5I1MY+jTwj6XqHhO05WSUI2StazfLwud8Y9jbY8R2xp9QAAAAAAAByms1Dtt81czXybnHlyWKgHlZx7dUpxjio34xmovlJoR12' \
+ 'Vk1py7v0hp7jgdddF1FV/FRjeEk4VWt8WvVk1y4XOWU4vLyPUtC3jUx/5cVV0nTU3G0nsuzkkmr9M8znbHk3BNY+m+L8mZ6oxeyf' \
+ '4te38y9X7s9V+0ZYz3/ix1funVkreNXtrzJz+7POorljF7a8xynGorli17a8yL0ZqpYqPtfML7eSqWJjzfkRqaVQ9OuUvgg3NNly' \
+ 'V7KL8X9jcxi8RJdy8i8SKupRvnfeWNeXrpQK1G1wUBH1aUfStS7KEo8XsvwzR2j2dGcYulK6gAAAAAAAGl1kpXipcrolcdac4uJx' \
+ 'sM2R5GpGuldO6bTWafJh895l5jf6L1nltRhWtZ5elW9Pg5fcSvQ0N/eZjqf5dWlFx2Wk4yWaeakmbep2sfK9dtVPwsvS0l/ypPL/' \
+ 'AMbfB/RnzamHDwt9tbpXrx/Tf4crFWODzbeXogWOVZcCpMlM6Rmx0map0yNzJBwDXKLiVeWNkcrynShdmse9S1ZTjmdWeV+zcWKu' \
+ 'oQaQxnZqPZCJXad22wECx9ejHdatVLVUuacfhf6HSeXr4eHWmmwAAAAAAADx6Xp7VKXTMVnOcxwWOhmZeTqzu1NVB8mTzzRHKt/q' \
+ '3p30dqNV9jdCb/I+T6fIsr0NnvOn8M/DrqtONSLpzSlGSs4vNNM3e717JlOL4r5ZrZqxLCS24XlRk+zLe4P2ZfRny6mnw/Ob3ZXQ' \
+ 'vVj+m/w51HJ59WRYjFjNioqnAljcqqUTLpKg0GpUWiryspqyOuMS1ZSiWEWRWaJle8bxeqKOjT10Vcjtj3brRsc78s/I1i9DRjpN' \
+ 'AVO3CXvr4v8AqXGvR0/Dujo2AAAAAAAARqw2otc00BwGk6dm+hl5evj3aWsg+HJ5ZkcqpkiMV0eren9m1Gs+zup1G/V92T5dTUye' \
+ 'ls95x+Gfj4rqa0Yzi4TSlGSs4vNNGr3evZMpxfD5xrTqrLDt1aV5Ut7W+VLv5rqfNnp8d4/P730+6f56ffH/AKcucXlpKReU4ZNI' \
+ 'rnEzY3KqaI2wo3yEnKrZRtY7xEopIvDUTptN9UZuPLeL0QefmbbnPL24ZB3wjpNB4dTlsvds1JS/TGLb+hqPS0I2eiE1GHfH5oYe' \
+ 'H26c7O/OjYAAAAAAAAA43WCjapJdb+eZmvh3OLmcQg83J46gcclMjLnVTDLf6C1gdO1Ks24boz3uHR818izJ6O033R+Gp4+3Vqom' \
+ 'uDTW/emmbe3LLOzkNY9UFO9XDJJ75Udyf6OT6HHPT+Y8nd+mzLnPS8/ThatOUW4yTTi7OLVmn1R89nDw8sbjeL5RUhyzwncqcITj' \
+ 'yHDUqdKnbM3jjw0lNXRpVdN8GaIzh09pkbx8vXDj3FdI2OFW4Pr046LRTtRrz5whRj+qpLP/ANYsvPavQ0vFbnA07OCW/aj80akf' \
+ 'bjOI7c2oAAAAAAAAA53Welmpc1byJXz7idnG4qJHk5x4agcMlEiVzqqRlmq2GWx0RpueH7L7VPjDjHrF8O4sy4fVtt7lo9r3x/8A' \
+ 'eHX4TGwqx26crrjwcXya4M3Ly9/S1sNXHqwrx6Y0PQxS7atNerVjlJd/NGcsJk57ja6evPynf7cFprV2vhrtrbhwqxzVveX5T58s' \
+ 'Li8HcbHU0e/mfbUJmI+PhJ3NeGWVOxZViamjcrcqFWPFGotidGpffv8AmWxrHJ6oR3+BHaYvbTqpfcza+rGyOj1cTqUllaPpJz/U' \
+ '7KMX3JJ+bNYd3o7Wc48us0NQ2qyfCF5Pv3L++h1j7XTFQAAAAAAAAAavWGlelf2X8H/aJXPVnOLhcZEjyNSNbUQfNk88kRyqqSIz' \
+ 'VciM1XIjFZw2KqUpbcJOL+DXJriizsunrZ6WXVheK6fR2sVOpaNS0Jc/ySffwNzLl7u29Swz7Z9r/DcqbXVPxTRXp9q0ukNXMNWb' \
+ 'lFeim+MV2W+sfsYuE8vh1/T9LU7ztf2c7pDVvEUs9j0kfah2suq3ozca8nW9P1dPxOZ+zR1cO1/8vJnO4/T4rjZ5UNNGfDLO0WZN' \
+ 'cl0amZ2WxqpL+2Lk6zLjw6PQur1as1OrFwp79lq0p+HBFmNr0tvtM871Z9p9O2pUowiopJJKySO0nD2ZJJxHS6Hwvo6d2u1PN9Fw' \
+ 'Roe8AAAAAAAAAAox1PapyXuvzWYS+Hz7HQzMvJ1se7VVUHx5PNNEc7FUkRiqpEYquRGaqkg51XJBHrwOlq1HKMrx9iWcfDl4FmVj' \
+ '69Deauj+m9vqt/hNY6M8pp03z9aPnvRrqj2NH1LTy7Zdr/DbUayktqE1Jc07mnoY545TnG8o4mjTqf8AUpQl1az8ycSs56OGf6py' \
+ '1tbVvCT3bcPHaXxM3CPkz9N0cvHZ4p6l0nurtfyoz7T576Tj8ZUhqRS415PuSRPaJ6Tj85VtMBq5hKLUlHaks1KT2rPotyNTTkfX' \
+ 'pbHS07zJ3bb06TUVvbSS4tvodH19o3+i9FbHbqWcuEeEPuyq2oQAAAAAAAAAAAHCaYo7M5Lk2jLztxj3aGsg8/J5ZkcqokRiqZkY' \
+ 'quQYquRGKrkGVbCosKzTqyi7xk4vmnYOuOeWN/G8NhQ1gxMPzqS96KfxLMq+zDf62Pzz/d7aWtUvzUovuk18y9T6sfU8vnFctaY/' \
+ '5L/ei9Tr/Up/tRlrM/y0l4y+xOtb6h9YqpaZrTyuor3Vb4vMnVWLutTJvdT6DniYN52bm289y+9jWLvoTm930g2+0AAAAAAAAAAA' \
+ 'ADldZ6Nql/aSf0M18m4xcniFmHl5PFUI4V55krnVUiMVXIM1CRGKrYRWwItBpFoNRGwWFg3Ekg6RbBEdI9mGiV203fag0O1OfsxU' \
+ 'fFu/0OmL1NvO3LtDT6QAAAAAAAAAAAANJrNRvCMuV0SuOvOcXEYuOZHkak7tfVQfPk80iOVVMyygwxVciM1BhlWwqLQWItEa+WA0' \
+ 'FaiaQbi2miOuL3YZFj6dOPp2peH2cNte3JvwWS+TOserozjFvyuoAAAAAAAAAAAAHi0xS2qMuma8CVnOc4uAx0cyPH1p3auqg+TK' \
+ 'PLMjlVUiMq5EZqEgxVciIgwiLDTFgsYsG4ykGolEjcW00V1xjYYSOZY+vSnd9e0VQ9HQpw9mEb99rv4nV6+M4kj1hQAAAAAAAAAA' \
+ 'AAI1I3TXNNAfPtK0XGTT4Noy8rcY8VpKyD4co8s0RxqmRGKrkRmq5BmoMjKLC8otBUbBYxYNMoNROKI6RbTQdcY3ur2EdavTppb5' \
+ 'JvpFZt+SN4vv2+POUfWUdHpsgAAAAAAAAAAAAAAc5rVo5temity7a6e0Svk3OnzOqOIxCI8nJ46hHCqJEYqtkYVsMoMjKDQGGg0i' \
+ 'GoMNCI1EkHSL6Yd8Y+nakaFdGn6aorTqpWT3wp8F3vf5HXGPX2+n048105p9AAAAAAAAAAAAAAABhoDjtY9W5K9WirrfKmt8esea' \
+ '6E4ebudrf1Yf4cbVVjLy8o88iOdVyRGKraDKLRGUGgIhYiGoEagkGonTi20km23ZJZtvkkHbCc3iPoeqOqDg1XxKW0s6dF57L4Sn' \
+ '16cDpjj9vW2+26fyydubfcAAAAAAAAAAAAAAAAAADT6Y1doYntNbM/8AMjx71uZOHza21w1fPlxek9UsTSu4x9JHnDfbrHf5GbHl' \
+ '6ux1MfHdz1anKLtJNPk1Z+TMvhyxs8qmgxUGiM1BoIjYLGLBqRKlRlN7MYyk+CScm/BEdccMsvEdJonUfFVrOolRjzlnLwivrY1M' \
+ 'a+/S2GeXfLs7zQerWHwmcI7U+NWWcvD2fA6THh6elt8NPxO7cldwAAAAAAAAAAAAAAAAAAAAACjEYSnUVp04S/VFMM5YY5eY02J1' \
+ 'Nwc81CUP0zaXk7memPly2Ojl8NfU/wAP6D3Vqq8Iv6Dpcb6Zp/dU/wDDyn/3E/2L7k6Gf6Xh9raX+HuHXrVar7tmP0L0xvH03Tnm' \
+ '1sMNqZgYb6Tn+ubfwVkOmO2Oy0cfhusLg6VJWp04QXuxUfkafTjhjj4i8NAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' \
+ 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//2Q==';
cup_image_64 = '/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxAPDw0PDQ0PDg0PDQ0PDQ0PDQ8ODQ0NFRIWFhUSExUYHyghGB4lJxMWITEhJSor' \
+ 'Li4uGB8zODMsNygtLisBCgoKDQ0NDxAPFSsdFRktLSs4LCsrKysrKysrKzcrLSsrKy0tKzcrKysrKysrKystNy0rKysrKysrKysr' \
+ 'KysrK//AABEIAOEA4QMBIgACEQEDEQH/xAAcAAEAAQUBAQAAAAAAAAAAAAAAAQIDBAUGBwj/xABAEAEAAgECAgcEBAoLAQAAAAAA' \
+ 'AQIDBBEFIQYHEhMxQWFRcZGxMkKBghQiI3KDkqGissEIF1NUYpOjwtHh8BX/xAAWAQEBAQAAAAAAAAAAAAAAAAAAAQL/xAAWEQEB' \
+ 'AQAAAAAAAAAAAAAAAAAAEQH/2gAMAwEAAhEDEQA/APcQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' \
+ 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' \
+ 'AEJavjHGKabsRe1K3vv2O3aKVnbbfn9scgbRG7jtV0g1XjSKxWfCa0i1fjzYF+OaqfHPMfmxWPkD0Dc3ee//AEs0/SzXn70q6ay0' \
+ '+N7frSlHf7m7iseaZ+tPxlk4r+s/GVHWG7n8NLz4Tf8AWmGTSuSNvyto+9MiVuBqq58sfW329tYU243XHMRmmkbzFYmLecztHKRW' \
+ '3EQkAAAAAAAAAAAAAABby56UibXvWlY8bWmKxH2y13SPjWPRae+fJtO3KlZtFe3f2b+UcpmZ8oiXzv0g41rOOavutPGfUT2prWkX' \
+ 'tGGN5+rjjlWvrbefOdvAH0Nk6T6Cv0tdp4/TUl551q8c0mpppq6fU481/wAtE1pbteVZ5uMp1McVmsTP4LW08+xOptvHpvFWB0g6' \
+ 'DcQ4Vp41GpjT1xfhGPfusk5LTNpiu2+0bRyhUYmk4jk0/b7jJOObREb18vWPZ4s7F0y1lYiLXrl287d5Fp98xZps/jLGmVSOjv03' \
+ '1EzMzHYjs8opett7b+M9uszt6Ndbp7xKsztfBNd+W+Cm+3q02SWLkNV0tesjicf3f/Jj/lcjrO4p7dNH6H/tyMoQdl/Wjxfy1GGv' \
+ 'l+LhhTbrH4raPxtZbf8Aw1rVx6uAdFm6Wa/J9PWZ538fykx8mz6LZb5dXpu8vbJPfYtptabfXj2uQxO06vcXb12jr7dRh393bjcH' \
+ '0qECKAAAAAAAAAAAAAA8O/pBcbvXLp9JWZikaeMt9vC1r3tG3+nHxbrqI4TSmnz55rE5N6Ui3nG9Ztaft3j4Of8A6RXDLxl0urrE' \
+ '93fDGG0+UXpe1o+MX/Y3HUXxuk0y6e1oi2SKZMUT52rE1vX38o+Erg9ecV1xaLvuC62IjnSMeSPSa3iXaxLXdI9H3+j1WKY37eDJ' \
+ 'G3rtvHyQfLVrdqtbR9albfGIY9pXcNdsVYnxpNqT762mv8li0tIoySxrr1pWLoKJQlAEKoUK4BfxPSeqLS95r8M+VO1f3bVnb+Tz' \
+ 'XD4va+ozQ721OaY5Ux0pE+tpmZ/hQevwAKAAAAAAAAAAAAAA03Szo7i4lpMulz8otzx5Ij8bFlj6N4/94TL5u4jwniHANVtel+xF' \
+ '+1iy07Vcd4ieVsd48J9PH5vqpj63RYs9Jx58VMuO0bWpkrF6z9kg8e4P14460rXV6bJa8RtN68pn37bxLbf138PmOeDUe6YiGx4t' \
+ '1P8AC802tipl0lp/sctrUj3UvvEfY4/inUXkjedJrcWSee1NRinHM/epv8io4HUWre2ovjiYx31WovjiY2mMd8k2pvHumGtyOy4l' \
+ '0O1ugwXnXY6xvetaXpkjJS0RTaNp8fKPGHH6iNpUY9lmy5ZasCiUJlAJhMKd1UAyNNG9ofS/VTwzuOG4rTG1s8zln29nwr8v2vn7' \
+ 'olwy2q1enwUjnkyRX3R5z9kRMvqzSYK4sePHSNqY6UpWPZWsbQir4jc3BIjdIAAAAAAAAAAAAAAAANV0m4VGs0ubBPjakzjn2ZI5' \
+ '1l8x8b0VsOTJS9drVtatonymJ2fWEw8f65OjG141uGm9cnLP2a8q5IjlafZv84XDXit1qWTnpsxpEUShMqQSrqt7tl0f4Xk1eoxY' \
+ 'MUb3yWiPbFY352n0jxQeudRnAezGXX5K7bTOHTzPtmI7do+O3xevd40PCNPTS4MOnxRtjxUiseU2nztPrM82dXNuK2PeJ7xg1uuR' \
+ 'YGZFlcSxK2X6SC8IhIAAAAAAAAAAAAAACm9ItExaImJ8YmN4mFQDlOK9XfCtVM2y6GlbT9fDfJgn9yYhodR1M8Jn6P4VT0jUzaP3' \
+ 'ol6SpsDyfN1K6D6up1cfexz/ALWPHUvoonnqdVMe/HH8nrlqLU4geZYOqDhlfpVz5PztRav8OzpODdEtJo940unri35WtG83mPW0' \
+ 'zM/tdT3SYxA11NIvV07N7tVFAY1MK53a92U9kFqtF2tVUQkBKEgAAAAAAAAAAAAAAAAI2SApmDZUAp2TskBGxskBAkBAkAAAAAAA' \
+ 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB//2Q=='
return [
{
"input_type": "attached",
"input_data": {
"images": [bucket_image_64, cup_image_64],
"image_types": ['jpg', 'jpg']
},
"output_type": "attached"
}
]
def post_load_initialize(self, service_manager: ServiceManager):
# Load the graph model.
graph_path = os.path.join(AGENT_DIRECTORY, 'model_data', 'classify_image_graph_def.pb')
with tf.gfile.FastGFile(graph_path, 'rb') as f:
self.graph_def = tf.GraphDef()
self.graph_def.ParseFromString(f.read())
tf.import_graph_def(self.graph_def, name='')
# Create our long-running Tensorflow session
self.session = tf.Session()
# Save the softmax tensor which will run the model.
self.softmax_tensor = self.session.graph.get_tensor_by_name('softmax:0')
# Creates node ID --> English string lookup.
self.node_lookup = NodeLookup()
def perform(self, job: JobDescriptor):
# Process the items in the job. A single job may include a request to classify
# many different images. Each item, in turn, may include an array of images.
results = []
for job_item in job:
# Make sure the input type is one we can handle...
input_type = job_item['input_type']
if input_type != 'attached':
logger.error("BAD input dict %s", str(job_item))
raise RuntimeError("TensorflowImageNet - job item 'input_type' must be 'attached'.")
# Get the images to classify, while making sure our job item dict is of the appropriate format.
input_data = job_item['input_data']
if input_data is None:
raise RuntimeError("TensorflowImageNet - job item 'input_data' must be defined.")
images_to_classify = input_data.get('images')
if images_to_classify is None:
raise RuntimeError("TensorflowImageNet - job item 'input_data' missing 'images'")
image_types = input_data.get('image_types')
if image_types is None:
raise RuntimeError("TensorflowImageNet - job item 'input_data' missing 'image_types'")
# Clear the predictions for the new job item.
predictions = []
prediction_confidences = []
# Classify all the images for this job item.
for image, image_type in zip(images_to_classify, image_types):
binary_image = base64.b64decode(image)
if (image_type == 'jpeg' or image_type == 'jpg'):
decoder_key = 'DecodeJpeg/contents:0'
elif (image_type == 'png'):
decoder_key = 'DecodeJpeg/contents:0'
elif (image_type == 'gif'):
decoder_key = 'DecodeGif/contents:0'
raise RuntimeError("TensorflowImageNet - cannot decode gif images")
elif (image_type == 'bmp'):
decoder_key = 'DecodeBmp/contents:0'
raise RuntimeError("TensorflowImageNet - cannot decode bmp images")
else:
decoder_key = 'DecodeJpeg/contents:0'
logger.warn("Missing image type {0}".format(image_type))
raw_predictions = self.session.run(self.softmax_tensor, {decoder_key: binary_image})
logger.debug("classifying '{0}' image".format(image_type))
# Pull the predicted scores out of the raw predictions.
predicted_scores = raw_predictions[0]
# Sort and strip off the top 5 predictions.
top_predictions = predicted_scores.argsort()[-5:][::-1]
image_predictions = []
image_scores = []
for predicted_node_id in top_predictions:
# Get a text description for the top predicted node.
description = self.node_lookup.id_to_string(predicted_node_id)
# Cast to a float so JSON can serialize it. Normal Tensorflow float32 are not serializable.
score = float(predicted_scores[predicted_node_id])
logger.debug(" prediction = '{0}', score = {1}".format(description, score))
# Add only those that exceed our minimum score to the predictions and scores lists.
if (score > MINIMUM_SCORE):
image_predictions.append(description)
image_scores.append(score)
# Append the filtered predictions and scores for this image.
predictions.append(image_predictions)
prediction_confidences.append(image_scores)
# Add the job results to our combined results array for all job items.
single_job_result = {
'predictions': predictions,
'confidences': prediction_confidences,
}
results.append(single_job_result)
return results
|
singnet/singnet
|
agent/adapters/tensorflow/imagenet/__init__.py
|
Python
|
mit
| 19,222
|
""" The aomi "seed" loop """
from __future__ import print_function
import os
import difflib
import logging
from shutil import rmtree
import tempfile
from termcolor import colored
import yaml
from future.utils import iteritems # pylint: disable=E0401
from aomi.helpers import dict_unicodeize
from aomi.filez import thaw
from aomi.model import Context
from aomi.template import get_secretfile, render_secretfile
from aomi.model.resource import Resource
from aomi.model.backend import CHANGED, ADD, DEL, OVERWRITE, NOOP, \
CONFLICT, VaultBackend
from aomi.model.auth import Policy
from aomi.model.aws import AWSRole
from aomi.validation import is_unicode
import aomi.error
import aomi.exceptions
LOG = logging.getLogger(__name__)
def auto_thaw(vault_client, opt):
"""Will thaw into a temporary location"""
icefile = opt.thaw_from
if not os.path.exists(icefile):
raise aomi.exceptions.IceFile("%s missing" % icefile)
thaw(vault_client, icefile, opt)
return opt
def seed(vault_client, opt):
"""Will provision vault based on the definition within a Secretfile"""
if opt.thaw_from:
opt.secrets = tempfile.mkdtemp('aomi-thaw')
auto_thaw(vault_client, opt)
Context.load(get_secretfile(opt), opt) \
.fetch(vault_client) \
.sync(vault_client, opt)
if opt.thaw_from:
rmtree(opt.secrets)
def render(directory, opt):
"""Render any provided template. This includes the Secretfile,
Vault policies, and inline AWS roles"""
if not os.path.exists(directory) and not os.path.isdir(directory):
os.mkdir(directory)
a_secretfile = render_secretfile(opt)
s_path = "%s/Secretfile" % directory
LOG.debug("writing Secretfile to %s", s_path)
open(s_path, 'w').write(a_secretfile)
ctx = Context.load(yaml.safe_load(a_secretfile), opt)
for resource in ctx.resources():
if not resource.present:
continue
if issubclass(type(resource), Policy):
if not os.path.isdir("%s/policy" % directory):
os.mkdir("%s/policy" % directory)
filename = "%s/policy/%s" % (directory, resource.path)
open(filename, 'w').write(resource.obj())
LOG.debug("writing %s to %s", resource, filename)
elif issubclass(type(resource), AWSRole):
if not os.path.isdir("%s/aws" % directory):
os.mkdir("%s/aws" % directory)
if 'policy' in resource.obj():
filename = "%s/aws/%s" % (directory,
os.path.basename(resource.path))
r_obj = resource.obj()
if 'policy' in r_obj:
LOG.debug("writing %s to %s", resource, filename)
open(filename, 'w').write(r_obj['policy'])
def export(vault_client, opt):
"""Export contents of a Secretfile from the Vault server
into a specified directory."""
ctx = Context.load(get_secretfile(opt), opt) \
.fetch(vault_client)
for resource in ctx.resources():
resource.export(opt.directory)
def maybe_colored(msg, color, opt):
"""Maybe it will render in color maybe it will not!"""
if opt.monochrome:
return msg
return colored(msg, color)
def normalize_val(val):
"""Normalize JSON/YAML derived values as they pertain
to Vault resources and comparison operations """
if is_unicode(val) and val.isdigit():
return int(val)
elif isinstance(val, list):
return ','.join(val)
elif val is None:
return ''
return val
def details_dict(obj, existing, ignore_missing, opt):
"""Output the changes, if any, for a dict"""
existing = dict_unicodeize(existing)
obj = dict_unicodeize(obj)
for ex_k, ex_v in iteritems(existing):
new_value = normalize_val(obj.get(ex_k))
og_value = normalize_val(ex_v)
if ex_k in obj and og_value != new_value:
print(maybe_colored("-- %s: %s" % (ex_k, og_value),
'red', opt))
print(maybe_colored("++ %s: %s" % (ex_k, new_value),
'green', opt))
if (not ignore_missing) and (ex_k not in obj):
print(maybe_colored("-- %s: %s" % (ex_k, og_value),
'red', opt))
for ob_k, ob_v in iteritems(obj):
val = normalize_val(ob_v)
if ob_k not in existing:
print(maybe_colored("++ %s: %s" % (ob_k, val),
'green', opt))
return
def maybe_details(resource, opt):
"""At the first level of verbosity this will print out detailed
change information on for the specified Vault resource"""
if opt.verbose == 0:
return
if not resource.present:
return
obj = None
existing = None
if isinstance(resource, Resource):
obj = resource.obj()
existing = resource.existing
elif isinstance(resource, VaultBackend):
obj = resource.config
existing = resource.existing
if not obj:
return
if is_unicode(existing) and is_unicode(obj):
a_diff = difflib.unified_diff(existing.splitlines(),
obj.splitlines(),
lineterm='')
for line in a_diff:
if line.startswith('+++') or line.startswith('---'):
continue
if line[0] == '+':
print(maybe_colored("++ %s" % line[1:], 'green', opt))
elif line[0] == '-':
print(maybe_colored("-- %s" % line[1:], 'red', opt))
else:
print(line)
elif isinstance(existing, dict):
ignore_missing = isinstance(resource, VaultBackend)
details_dict(obj, existing, ignore_missing, opt)
def diff_a_thing(thing, opt):
"""Handle the diff action for a single thing. It may be a Vault backend
implementation or it may be a Vault data resource"""
changed = thing.diff()
if changed == ADD:
print("%s %s" % (maybe_colored("+", "green", opt), str(thing)))
elif changed == DEL:
print("%s %s" % (maybe_colored("-", "red", opt), str(thing)))
elif changed == CHANGED:
print("%s %s" % (maybe_colored("~", "yellow", opt), str(thing)))
elif changed == OVERWRITE:
print("%s %s" % (maybe_colored("+", "yellow", opt), str(thing)))
elif changed == CONFLICT:
print("%s %s" % (maybe_colored("!", "red", opt), str(thing)))
if changed != OVERWRITE and changed != NOOP:
maybe_details(thing, opt)
def diff(vault_client, opt):
"""Derive a comparison between what is represented in the Secretfile
and what is actually live on a Vault instance"""
if opt.thaw_from:
opt.secrets = tempfile.mkdtemp('aomi-thaw')
auto_thaw(vault_client, opt)
ctx = Context.load(get_secretfile(opt), opt) \
.fetch(vault_client)
for backend in ctx.mounts():
diff_a_thing(backend, opt)
for resource in ctx.resources():
diff_a_thing(resource, opt)
if opt.thaw_from:
rmtree(opt.secrets)
|
otakup0pe/aomi
|
aomi/seed_action.py
|
Python
|
mit
| 7,172
|
import numpy
from wiki_scraper import (
parse_html_simple,
crawl_page)
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import os.path
from parallel_webscrape import scrape_wikipedia
PATH = 'wikimodel/'
class WikiModel():
def __init__(self):
self.vocabulary = set()
self.stop_words = set()
self.english_words = set()
self.label_map = {}
self.reverse_label_map = {}
self.count_data = []
self.labels = []
self.vectorizer = None
self.classifier = None
self.load_training_data()
def load_training_data(self):
# make some dictionaries to preprocess the words
english_words = set()
with open(PATH + "american-english.txt") as english_dictionary:
english_words = set(
word.strip().lower() for word in english_dictionary)
stop_words = set()
with open(PATH + "english_stopwords.txt") as stopwords:
stop_words = set(word.strip().lower() for word in stopwords)
self.english_words = english_words
self.stop_words = stop_words
if not os.path.isfile(PATH + 'categories.pickle'):
scrape_wikipedia()
categories = pickle.load(open(PATH + 'categories.pickle', 'rb'))
# parse the html, turning it into a list of words
# and removing stop words and non-dictionary words
# we'll also collect all of the words so that we can make a map of
# words to numbers
all_words = set()
# the category level
for k, v in categories.iteritems():
# the document level
for inner_k, inner_document in v.iteritems():
# parse the html to get lists of words per document
words = parse_html_simple(inner_document)
parsed = []
for word in words:
if word in english_words and word not in stop_words:
all_words.add(word)
parsed.append(word)
categories[k][inner_k] = parsed
# aggregate all of the documents into one big data set while
# transforming them into counts
self.vocabulary = set(all_words)
self.vectorizer = CountVectorizer(vocabulary=self.vocabulary)
count_data = []
string_data = []
labels = []
# the category level
for k, v in categories.iteritems():
# the document level
for inner_k, inner_document in v.iteritems():
# oops, we actually need this in string format
string_data.append(' '.join(inner_document))
labels.append(k)
# transform the string data into count data
count_data = self.vectorizer.transform(string_data).todense()
# transform count_data and labels into numpy arrays for easy indexing
count_data = numpy.array(count_data)
labels = numpy.array(labels).squeeze()
# make a map from the string label to a number and vice versa
self.label_map = {}
self.reverse_label_map = {}
i = 0
for label in sorted(set(labels)):
self.reverse_label_map[i] = label
self.label_map[label] = i
i += 1
# fit the model
self.classifier = MultinomialNB()
self.classifier.fit(count_data, labels)
def classify_url(self, domain, page, depth=0):
"""
Classify the documents after crawling them.
args:
domain - the domain part of the url
page - the other part of the url
depth - how deep to crawl
returns:
a list of predicted probabilities for each instance belonging to
each class
"""
# get the documents
documents, _ = crawl_page(domain, page, depth=depth)
# parse the documents
string_data = []
for page, doc in documents.iteritems():
words = parse_html_simple(doc)
parsed = []
for word in words:
if (word in self.english_words
and word not in self.stop_words
and word in self.vocabulary):
parsed.append(word)
string_data.append(' '.join(parsed))
count_data = self.vectorizer.transform(string_data)
# classify the documents
probs = self.classifier.predict_proba(count_data)
return probs
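# Usage sketch (illustrative only; the domain and page path below are example
# values, not part of the original module):
#
#     model = WikiModel()
#     probs = model.classify_url('en.wikipedia.org', '/wiki/Machine_learning', depth=0)
#     # probs holds per-class probabilities, one row per crawled document;
#     # map column indices back to category names via model.reverse_label_map.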
|
arider/wikimodel
|
wiki_model.py
|
Python
|
mit
| 4,572
|
# -*- coding: utf-8 -*-
# Copyright 2015 Tim Santor
#
# This file is part of proprietary software and use of this file
# is strictly prohibited without written consent.
#
# @author Tim Santor <tsantor@xstudios.agency>
"""Generates HTML for HTML5 banner ads."""
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import logging
import os
import re
import shlex
import shutil
import sys
import time
from subprocess import PIPE, Popen
import pkg_resources
import six
import six.moves.configparser as configparser
from bashutils import logmsg
from .adkit import AdKitBase
# -----------------------------------------------------------------------------
class Main(AdKitBase):
"""Generates HTML for HTML5 banner ads."""
def __init__(self):
self.logger = logging.getLogger(__name__)
super(Main, self).__init__()
# def copy_files(self):
# """Copy files."""
# dest = os.path.join(self.input_dir, 'js')
# if not os.path.isdir(dest):
# if self.verbose:
# logmsg.info('Creating "js" directory...')
# shutil.copytree(self.get_data('js'), dest)
# else:
# if self.verbose:
# logmsg.warning('"js" directory already exists')
@staticmethod
def replace_all(text, dict):
"""Replace all."""
for src, target in six.iteritems(dict):
text = text.replace(src, target)
return text
def create_divs(self, dirpath):
jpg_files = self.get_files_matching(dirpath, '*.jpg')
png_files = self.get_files_matching(dirpath, '*.png')
all_files = jpg_files + png_files
output = ''
for f in all_files:
basename = os.path.basename(f)
name = os.path.splitext(basename)[0]
if basename in self.ignore_list:
continue
output += '<div id="{0}"></div>\n'.format(name)
# soup=BeautifulSoup(output, "html.parser")
# pretty_html=soup.prettify()
return output
def create_html(self, filename):
"""
Create an HTML file for an ad.
:param str filename: path of the index.html file to write; the ad size
(eg - 300x250) is derived from the containing directory name.
"""
# get filename and extension
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
dirpath = os.path.dirname(filename)
# get size
# size = self.get_size_from_filename(name)
size = self.get_size_from_dirname(filename)
# get width height based on size string (eg - 300x250)
width, height = size.split('x')
# create divs
divs = self.create_divs(dirpath)
# open the template and open a new file for writing
html = pkg_resources.resource_string(__name__, 'templates/' + self.type + '/index.html').decode("utf-8")
#print(html)
outfile = open(filename, 'w')
# replace the variables with the correct value
replacements = {
# '{{filename}}': name,
# '{{size}}': size,
'{{width}}': width,
'{{height}}': height,
'{{divs}}': divs,
}
html = Main.replace_all(html, replacements)
outfile.write(html)
outfile.close()
logmsg.success('"{0}" generated successfully'.format(filename))
def generate_html(self, dirs):
"""
Loop through all folders in the input directory and create an HTML page.
"""
num_files = 0
for d in dirs:
filepath = os.path.join(d, 'index.html')
if not os.path.exists(filepath):
self.create_html(filepath)
num_files+=1
else:
logmsg.warning('"{0}" already exists'.format(filepath))
logmsg.success('Generated {0} HTML files'.format(num_files))
def get_parser(self):
"""Return the parsed command line arguments."""
parser = argparse.ArgumentParser(
description='Generate HTML for banners..')
parser.add_argument('type', choices=['doubleclick', 'sizemek', 'adwords', 'dcm'], help='Ad type')
parser.add_argument('-l', '--log', help='Enable logging',
action='store_true')
return parser.parse_args()
def run(self):
"""Run script."""
config = self.get_config()
args = self.get_parser()
if args.log:
self.create_logger()
self.logger.debug('-' * 10)
self.type = args.type
self.input_dir = config.get('html5', 'input')
self.ignore_list = self.create_list(config.get('html5', 'exclude_list'))
# Check if the input dir exists
if not os.path.isdir(self.input_dir):
logmsg.error('"{0}" does not exist'.format(self.input_dir))
sys.exit()
# Do the stuff we came here to do
dirs = self.find_ad_dirs()
self.generate_html(dirs)
logmsg.success('HTML Generated')
# -----------------------------------------------------------------------------
def main():
"""Main script."""
script = Main()
script.run()
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
tsantor/banner-ad-toolkit
|
adkit/generate_html.py
|
Python
|
mit
| 5,427
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-08 19:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('invoice', '0007_profile_invoice_logo'),
]
operations = [
migrations.AddField(
model_name='invoiceitem',
name='quantity',
field=models.DecimalField(decimal_places=1, default=1, max_digits=5),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceitem',
name='total',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10),
preserve_default=False,
),
]
|
pickleshb/PyInvoice
|
invoice/migrations/0008_auto_20170908_1914.py
|
Python
|
mit
| 760
|
#!/usr/bin/env python
import socket, struct
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 9123))
try:
while True:
line = raw_input( ">" )
parts = line.split()
if parts[0] == "SET":
if parts[1] == "A":
value = int( parts[2] )
s.send( struct.pack( "<BBBBB", 0, value, 0, 0, 0 ) )
if parts[1] == "B":
value = int( parts[2] )
s.send( struct.pack( "<BBBBB", 1, value, 0, 0, 0 ) )
if parts[0] == "GET":
if parts[1] == "A":
s.send( struct.pack( "<BBBBB", 2, 0, 0, 0, 0 ) )
data = s.recv( 5 )
print struct.unpack( "<BBBBB", data )
if parts[1] == "B":
s.send( struct.pack( "<BBBBB", 3, 0, 0, 0, 0 ) )
data = s.recv( 5 )
print struct.unpack( "<BBBBB", data )
except KeyboardInterrupt:
pass
s.close()
|
mgronhol/tamandua-framework
|
scripts/emulator-cli.py
|
Python
|
mit
| 798
|
# CS4243: Computer Vision and Pattern Recognition
# Zhou Bin
# 29th, Oct, 2014
import numpy as np
from Vertex import Vertex
class Polygon:
def __init__(self, newVertexList, newTexelList):
# Create list to store all vertex
self.Vertex = []
for i in newVertexList:
self.Vertex.append(i)
# Create list to store all texel value
self.Texel = []
for i in newTexelList:
self.Texel.append(i)
|
WuPei/cv_reconstructor
|
Polygon.py
|
Python
|
mit
| 486
|
import re
import json
import urlparse
from holster.enum import Enum
from unidecode import unidecode
from disco.types.base import cached_property
from disco.types.channel import ChannelType
from disco.util.sanitize import S
from disco.api.http import APIException
from rowboat.redis import rdb
from rowboat.util.stats import timed
from rowboat.util.zalgo import ZALGO_RE
from rowboat.plugins import RowboatPlugin as Plugin
from rowboat.types import SlottedModel, Field, ListField, DictField, ChannelField, snowflake, lower
from rowboat.types.plugin import PluginConfig
from rowboat.models.message import Message
from rowboat.plugins.modlog import Actions
from rowboat.constants import INVITE_LINK_RE, URL_RE
CensorReason = Enum(
'INVITE',
'DOMAIN',
'WORD',
'ZALGO',
)
class CensorSubConfig(SlottedModel):
filter_zalgo = Field(bool, default=True)
filter_invites = Field(bool, default=True)
invites_guild_whitelist = ListField(snowflake, default=[])
invites_whitelist = ListField(lower, default=[])
invites_blacklist = ListField(lower, default=[])
filter_domains = Field(bool, default=True)
domains_whitelist = ListField(lower, default=[])
domains_blacklist = ListField(lower, default=[])
blocked_words = ListField(lower, default=[])
blocked_tokens = ListField(lower, default=[])
unidecode_tokens = Field(bool, default=False)
channel = Field(snowflake, default=None)
bypass_channel = Field(snowflake, default=None)
@cached_property
def blocked_re(self):
return re.compile(u'({})'.format(u'|'.join(
map(re.escape, self.blocked_tokens) +
map(lambda k: u'\\b{}\\b'.format(re.escape(k)), self.blocked_words)
)), re.I + re.U)
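# Example of the pattern built above (illustrative config values, not from any real guild):
# with blocked_tokens = ['bar'] and blocked_words = ['foo'], the compiled regex is
# (bar|\bfoo\b) with re.I | re.U, so 'bar' matches even inside 'rebarbative',
# while 'foo' only matches as a whole word.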
class CensorConfig(PluginConfig):
levels = DictField(int, CensorSubConfig)
channels = DictField(ChannelField, CensorSubConfig)
# It's bad kids!
class Censorship(Exception):
def __init__(self, reason, event, ctx):
self.reason = reason
self.event = event
self.ctx = ctx
self.content = S(event.content, escape_codeblocks=True)
@property
def details(self):
if self.reason is CensorReason.INVITE:
if self.ctx['guild']:
return u'invite `{}` to {}'.format(
self.ctx['invite'],
S(self.ctx['guild']['name'], escape_codeblocks=True)
)
else:
return u'invite `{}`'.format(self.ctx['invite'])
elif self.reason is CensorReason.DOMAIN:
if self.ctx['hit'] == 'whitelist':
return u'domain `{}` is not in whitelist'.format(S(self.ctx['domain'], escape_codeblocks=True))
else:
return u'domain `{}` is in blacklist'.format(S(self.ctx['domain'], escape_codeblocks=True))
elif self.reason is CensorReason.WORD:
return u'found blacklisted words `{}`'.format(
u', '.join([S(i, escape_codeblocks=True) for i in self.ctx['words']]))
elif self.reason is CensorReason.ZALGO:
return u'found zalgo at position `{}` in text'.format(
self.ctx['position']
)
@Plugin.with_config(CensorConfig)
class CensorPlugin(Plugin):
def compute_relevant_configs(self, event, author):
if event.channel_id in event.config.channels:
yield event.config.channels[event.channel.id]
if event.config.levels:
user_level = int(self.bot.plugins.get('CorePlugin').get_level(event.guild, author))
for level, config in event.config.levels.items():
if user_level <= level:
yield config
def get_invite_info(self, code):
if rdb.exists('inv:{}'.format(code)):
return json.loads(rdb.get('inv:{}'.format(code)))
try:
obj = self.client.api.invites_get(code)
except:
return
if obj.channel and obj.channel.type == ChannelType.GROUP_DM:
obj = {
'id': obj.channel.id,
'name': obj.channel.name
}
else:
obj = {
'id': obj.guild.id,
'name': obj.guild.name,
'icon': obj.guild.icon
}
# Cache for 12 hours
rdb.setex('inv:{}'.format(code), json.dumps(obj), 43200)
return obj
@Plugin.listen('MessageUpdate')
def on_message_update(self, event):
try:
msg = Message.get(id=event.id)
except Message.DoesNotExist:
self.log.warning('Not censoring MessageUpdate for id %s, %s, no stored message', event.channel_id, event.id)
return
if not event.content:
return
return self.on_message_create(
event,
author=event.guild.get_member(msg.author_id))
@Plugin.listen('MessageCreate')
def on_message_create(self, event, author=None):
author = author or event.author
if author.id == self.state.me.id:
return
if event.webhook_id:
return
configs = list(self.compute_relevant_configs(event, author))
if not configs:
return
tags = {'guild_id': event.guild.id, 'channel_id': event.channel.id}
with timed('rowboat.plugin.censor.duration', tags=tags):
try:
# TODO: perhaps imap here? how to raise exception then?
for config in configs:
if config.channel:
if event.channel_id != config.channel:
continue
if config.bypass_channel:
if event.channel_id == config.bypass_channel:
continue
if config.filter_zalgo:
self.filter_zalgo(event, config)
if config.filter_invites:
self.filter_invites(event, config)
if config.filter_domains:
self.filter_domains(event, config)
if config.blocked_words or config.blocked_tokens:
self.filter_blocked_words(event, config)
except Censorship as c:
self.call(
'ModLogPlugin.create_debounce',
event,
['MessageDelete'],
message_id=event.message.id,
)
try:
event.delete()
self.call(
'ModLogPlugin.log_action_ext',
Actions.CENSORED,
event.guild.id,
e=event,
c=c)
except APIException:
self.log.exception('Failed to delete censored message: ')
def filter_zalgo(self, event, config):
s = ZALGO_RE.search(event.content)
if s:
raise Censorship(CensorReason.ZALGO, event, ctx={
'position': s.start()
})
def filter_invites(self, event, config):
invites = INVITE_LINK_RE.findall(event.content)
for _, invite in invites:
invite_info = self.get_invite_info(invite)
need_whitelist = (
config.invites_guild_whitelist or
(config.invites_whitelist or not config.invites_blacklist)
)
whitelisted = False
if invite_info and invite_info.get('id') in config.invites_guild_whitelist:
whitelisted = True
if invite.lower() in config.invites_whitelist:
whitelisted = True
if need_whitelist and not whitelisted:
raise Censorship(CensorReason.INVITE, event, ctx={
'hit': 'whitelist',
'invite': invite,
'guild': invite_info,
})
elif config.invites_blacklist and invite.lower() in config.invites_blacklist:
raise Censorship(CensorReason.INVITE, event, ctx={
'hit': 'blacklist',
'invite': invite,
'guild': invite_info,
})
def filter_domains(self, event, config):
urls = URL_RE.findall(INVITE_LINK_RE.sub('', event.content))
for url in urls:
try:
parsed = urlparse.urlparse(url)
except:
continue
if (config.domains_whitelist or not config.domains_blacklist)\
and parsed.netloc.lower() not in config.domains_whitelist:
raise Censorship(CensorReason.DOMAIN, event, ctx={
'hit': 'whitelist',
'url': url,
'domain': parsed.netloc,
})
elif config.domains_blacklist and parsed.netloc.lower() in config.domains_blacklist:
raise Censorship(CensorReason.DOMAIN, event, ctx={
'hit': 'blacklist',
'url': url,
'domain': parsed.netloc
})
def filter_blocked_words(self, event, config):
content = event.content
if config.unidecode_tokens:
content = unidecode(content)
blocked_words = config.blocked_re.findall(content)
if blocked_words:
raise Censorship(CensorReason.WORD, event, ctx={
'words': blocked_words,
})
|
ThaTiemsz/jetski
|
rowboat/plugins/censor.py
|
Python
|
mit
| 9,546
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-29 17:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('billing', 'Billing'), ('shipping', 'Shipping')], max_length=120)),
('street', models.CharField(max_length=120)),
('city', models.CharField(max_length=120)),
('state', models.CharField(max_length=120)),
('zipcode', models.CharField(max_length=120)),
('user_checkout', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.UserCheckout')),
],
),
]
|
loafbaker/django_ecommerce2
|
orders/migrations/0002_useraddress.py
|
Python
|
mit
| 1,035
|
import numpy
import six
from chainer.dataset import dataset_mixin
class SubDataset(dataset_mixin.DatasetMixin):
"""Subset of a base dataset.
SubDataset defines a subset of a given base dataset. The subset is defined
as an interval of indexes, optionally with a given permutation.
If ``order`` is given, then the ``i``-th example of this dataset is the
``order[start + i]``-th example of the base dataset, where ``i`` is a
non-negative integer. If ``order`` is not given, then the ``i``-th example
of this dataset is the ``start + i``-th example of the base dataset.
Negative indexing is also allowed: in this case, the term ``start + i`` is
replaced by ``finish + i``.
SubDataset is often used to split a dataset into training and validation
subsets. The training set is used for training, while the validation set is
used to track the generalization performance, i.e. how well the learned model
works on unseen data. We can tune hyperparameters (e.g. number of
hidden units, weight initializers, learning rate, etc.) by comparing the
validation performance. Note that we often use another set called test set
to measure the quality of the tuned hyperparameter, which can be made by
nesting multiple SubDatasets.
There are two ways to make training-validation splits. One is a single
split, where the dataset is split just into two subsets. It can be done by
:func:`split_dataset` or :func:`split_dataset_random`. The other one is a
:math:`k`-fold cross validation, in which the dataset is divided into
:math:`k` subsets, and :math:`k` different splits are generated using each
of the :math:`k` subsets as a validation set and the rest as a training
set. It can be done by :func:`get_cross_validation_datasets`.
Args:
dataset: Base dataset.
start (int): The first index in the interval.
finish (int): The next-to-the-last index in the interval.
order (sequence of ints): Permutation of indexes in the base dataset.
If this is ``None``, then the ascending order of indexes is used.
"""
def __init__(self, dataset, start, finish, order=None):
if start < 0 or finish > len(dataset):
raise ValueError('subset overruns the base dataset.')
self._dataset = dataset
self._start = start
self._finish = finish
self._size = finish - start
if order is not None and len(order) != len(dataset):
msg = ('order option must have the same length as the base '
'dataset: len(order) = {} while len(dataset) = {}'.format(
len(order), len(dataset)))
raise ValueError(msg)
self._order = order
def __len__(self):
return self._size
def get_example(self, i):
if i >= 0:
if i >= self._size:
raise IndexError('dataset index out of range')
index = self._start + i
else:
if i < -self._size:
raise IndexError('dataset index out of range')
index = self._finish + i
if self._order is not None:
index = self._order[index]
return self._dataset[index]
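# Usage sketch (names are illustrative):
#
#     sub = SubDataset(base_dataset, 10, 20)   # examples 10..19 of base_dataset
#     sub[0]     # -> base_dataset[10]
#     sub[-1]    # -> base_dataset[19] (negative indexes count back from `finish`)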
def split_dataset(dataset, split_at, order=None):
"""Splits a dataset into two subsets.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset.
Args:
dataset: Dataset to split.
split_at (int): Position at which the base dataset is split.
order (sequence of ints): Permutation of indexes in the base dataset.
See the document of :class:`SubDataset` for details.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset represents the
examples of indexes ``order[:split_at]`` while the second subset
represents the examples of indexes ``order[split_at:]``.
"""
n_examples = len(dataset)
if split_at < 0:
raise ValueError('split_at must be non-negative')
if split_at >= n_examples:
raise ValueError('split_at exceeds the dataset size')
subset1 = SubDataset(dataset, 0, split_at, order)
subset2 = SubDataset(dataset, split_at, n_examples, order)
return subset1, subset2
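# Usage sketch (illustrative; assumes len(dataset) > 8000):
#
#     train, valid = split_dataset(dataset, 8000)
#     # len(train) == 8000, len(valid) == len(dataset) - 8000;
#     # pass an `order` permutation to shuffle before splitting.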
def split_dataset_random(dataset, first_size):
"""Splits a dataset into two subsets randomly.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset. The split is automatically done randomly.
Args:
dataset: Dataset to split.
first_size (int): Size of the first subset.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset contains
``first_size`` examples randomly chosen from the dataset without
replacement, and the second subset contains the rest of the
dataset.
"""
order = numpy.random.permutation(len(dataset))
return split_dataset(dataset, first_size, order)
def get_cross_validation_datasets(dataset, n_fold, order=None):
"""Creates a set of training/test splits for cross validation.
This function generates ``n_fold`` splits of the given dataset. The first
part of each split corresponds to the training dataset, while the second
part to the test dataset. No pairs of test datasets share any examples, and
all test datasets together cover the whole base dataset. Each test dataset
contains almost the same number of examples (the numbers may differ by up to 1).
Args:
dataset: Dataset to split.
n_fold (int): Number of splits for cross validation.
order (sequence of ints): Order of indexes with which each split is
determined. If it is ``None``, then no permutation is used.
Returns:
list of tuples: List of dataset splits.
"""
if order is None:
order = numpy.arange(len(dataset))
else:
order = numpy.array(order) # copy
whole_size = len(dataset)
borders = [whole_size * i // n_fold for i in six.moves.range(n_fold + 1)]
test_sizes = [borders[i + 1] - borders[i] for i in six.moves.range(n_fold)]
splits = []
for test_size in reversed(test_sizes):
size = whole_size - test_size
splits.append(split_dataset(dataset, size, order))
new_order = numpy.empty_like(order)
new_order[:test_size] = order[-test_size:]
new_order[test_size:] = order[:-test_size]
order = new_order
return splits
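# Usage sketch (illustrative; run_training is a hypothetical helper):
#
#     for train, test in get_cross_validation_datasets(dataset, n_fold=5):
#         # each `test` covers a distinct ~1/5 of the dataset; `train` is the rest
#         run_training(train, test)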
def get_cross_validation_datasets_random(dataset, n_fold):
"""Creates a set of training/test splits for cross validation randomly.
This function acts almost the same as :func:`get_cross_validation_datasets`,
except that it automatically generates a random permutation.
Args:
dataset: Dataset to split.
n_fold (int): Number of splits for cross validation.
Returns:
list of tuples: List of dataset splits.
"""
order = numpy.random.permutation(len(dataset))
return get_cross_validation_datasets(dataset, n_fold, order)
|
kikusu/chainer
|
chainer/datasets/sub_dataset.py
|
Python
|
mit
| 7,241
|
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from database.event_query import EventListQuery
from helpers.award_helper import AwardHelper
from helpers.district_helper import DistrictHelper
from helpers.event_insights_helper import EventInsightsHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventController, self).__init__(*args, **kw)
self.event_key = self.request.route_kwargs["event_key"]
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
@property
def _validators(self):
return [("event_id_validator", self.event_key)]
def _set_event(self, event_key):
self.event = Event.get_by_id(event_key)
if self.event is None:
self._errors = json.dumps({"404": "%s event not found" % self.event_key})
self.abort(404)
def _track_call(self, event_key):
self._track_call_defer('event', event_key)
def _render(self, event_key):
self._set_event(event_key)
event_dict = ModelToDict.eventConverter(self.event)
return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventTeamsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/teams', event_key)
def _render(self, event_key):
self._set_event(event_key)
teams = filter(None, self.event.teams)
team_dicts = [ModelToDict.teamConverter(team) for team in teams]
return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventMatchesController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/matches', event_key)
def _render(self, event_key):
self._set_event(event_key)
matches = self.event.matches
match_dicts = [ModelToDict.matchConverter(match) for match in matches]
return json.dumps(match_dicts, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}" # (event_key)
CACHE_VERSION = 5
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventStatsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/stats', event_key)
def _render(self, event_key):
self._set_event(event_key)
stats = {}
matchstats = self.event.matchstats
if matchstats:
stats.update(matchstats)
year_specific = EventInsightsHelper.calculate_event_insights(self.event.matches, self.event.year)
if year_specific:
stats['year_specific'] = year_specific
return json.dumps(stats)
class ApiEventRankingsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventRankingsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/rankings', event_key)
def _render(self, event_key):
self._set_event(event_key)
ranks = json.dumps(Event.get_by_id(event_key).rankings)
if ranks is None or ranks == 'null':
return '[]'
else:
return ranks
class ApiEventAwardsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventAwardsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/awards', event_key)
def _render(self,event_key):
self._set_event(event_key)
award_dicts = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/district_points', event_key)
def _render(self, event_key):
self._set_event(event_key)
points = DistrictHelper.calculate_event_points(self.event)
return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}" # (year)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
@property
def _validators(self):
return []
def _track_call(self, *args, **kw):
self._track_call_defer('event/list', self.year)
def _render(self, year=None):
if self.year < 1992 or self.year > datetime.now().year + 1:
self._errors = json.dumps({"404": "No events found for %s" % self.year})
self.abort(404)
events = EventListQuery(self.year).fetch()
event_list = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(event_list, ensure_ascii=True)
|
synth3tk/the-blue-alliance
|
controllers/api/api_event_controller.py
|
Python
|
mit
| 6,883
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify
class CareerManager(models.Manager):
def active(self, *args, **kwargs):
return super(CareerManager, self).filter(draft = False).filter(published_at__lte = timezone.now())
@python_2_unicode_compatible
class Career(models.Model):
FULLTIME = 'Full-time'
PARTTIME = 'Part-time'
INTERNSHIP = 'Internship'
RESEARCH = 'Research'
ROLE_CATEGORY_CHOICES = (
(FULLTIME, 'Full-time'),
(PARTTIME, 'Part-time'),
(INTERNSHIP, 'Internship'),
(RESEARCH, 'Research'),
)
role_category = models.CharField(
max_length=12,
choices=ROLE_CATEGORY_CHOICES,
default=FULLTIME,
)
# Role
role = models.CharField(max_length = 120)
# Location
city = models.CharField(max_length=255)
# Plain text and urlify slug
career_slug = models.SlugField(unique = True)
career_offer_title = models.CharField(max_length=255, default="")
career_offer_description = models.TextField(default="")
career_experience = models.TextField(default="")
career_terms = models.TextField(default="")
# Time and meta staff
draft = models.BooleanField(default = False)
published_at = models.DateField(auto_now = False, auto_now_add = False)
updated = models.DateTimeField(auto_now = True, auto_now_add = False)
timestamp = models.DateTimeField(auto_now = False, auto_now_add = True)
objects = CareerManager()
def __unicode__(self):
return self.role
def __str__(self):
return self.role
def get_absolute_url(self):
return reverse('careers:detail', kwargs = {'slug':self.career_slug})
class Meta:
ordering = ["-timestamp", "-updated"]
def create_slug(instance, new_slug = None):
career_slug = slugify(instance.role)
if new_slug is not None:
career_slug = new_slug
qs = Career.objects.filter(career_slug = career_slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" %(career_slug, qs.first().id)
return create_slug(instance, new_slug = new_slug)
return career_slug
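# Example of the slug logic above (illustrative): a Career with role "Data Engineer"
# gets career_slug "data-engineer"; if that slug is already taken, the id of the newest
# clashing row is appended, e.g. "data-engineer-7".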
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.career_slug:
instance.career_slug = create_slug(instance)
pre_save.connect(pre_save_post_receiver, sender = Career)
|
neldom/qessera
|
careers/models.py
|
Python
|
mit
| 2,634
|
import re
import os.path
import datetime
import base64
import aql
# ==============================================================================
info = aql.get_aql_info()
HEADER = """#!/usr/bin/env python
#
# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT!
#
# Copyright (c) 2011-{year} of the {name} project, site: {url}
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""".format(year=datetime.date.today().year,
name=info.name, url=info.url)
# ==============================================================================
AQL_DATE = '_AQL_VERSION_INFO.date = "{date}"'.format(
date=datetime.date.today().isoformat())
# ==============================================================================
MAIN = """
if __name__ == '__main__':
aql_module_globals = globals().copy()
aql_module_name = "aql"
aql_module = imp.new_module(aql_module_name)
aql_module_globals.update( aql_module.__dict__)
aql_module.__dict__.update(aql_module_globals)
sys.modules[aql_module_name] = aql_module
{embedded_tools}
sys.exit(main())
"""
# ==============================================================================
EMBEDDED_TOOLS = '\n _EMBEDDED_TOOLS.append(b"""\n%s""")\n'
# ==============================================================================
class AqlPreprocess (aql.FileBuilder):
split = aql.FileBuilder.split_single
# ----------------------------------------------------------
def get_trace_name(self, source_entities, brief):
return "Preprocess file"
# ----------------------------------------------------------
def get_trace_targets(self, target_entities, brief):
return None
# -----------------------------------------------------------
def build(self, source_entities, targets):
src_file = source_entities[0].get()
empty_re = re.compile(r'^\s*\r*\n', re.MULTILINE)
slash_re = re.compile(r'\\\r*\n', re.MULTILINE)
comments_re = re.compile(r"^\s*#.*$", re.MULTILINE)
all_stmt_re = re.compile(
r"^__all__\s*=\s*\(.+?\)", re.MULTILINE | re.DOTALL)
content = aql.read_text_file(src_file)
content = slash_re.sub("", content)
content = comments_re.sub("", content)
content = all_stmt_re.sub("", content)
# -----------------------------------------------------------
import_re = re.compile(r"^import\s+(.+)$", re.MULTILINE)
std_imports = set()
def import_handler(match, _std_imports=std_imports):
module_name = match.group(1)
_std_imports.add(module_name)
return ""
content = import_re.sub(import_handler, content)
# -----------------------------------------------------------
aql_import_re = re.compile(r"^\s*from\s+(\.?aql.+)\s+import\s+.+$",
re.MULTILINE)
aql_imports = set()
def aql_import_handler(match, _aql_imports=aql_imports):
module_name = match.group(1)
if module_name.startswith('.'):
module_name = os.sep + module_name[1:] + '.py'
else:
module_name = os.sep + \
module_name.replace('.', os.sep) + os.sep
_aql_imports.add(module_name)
return ""
content = aql_import_re.sub(aql_import_handler, content)
# -----------------------------------------------------------
content = empty_re.sub("", content)
target = aql.SimpleEntity(name=src_file,
data=(std_imports, aql_imports, content))
targets.add_target_entity(target)
# ==============================================================================
class AqlLinkCore (aql.FileBuilder):
def __init__(self, options, target):
self.target = self.get_target_path(target, ext='.py')
def get_trace_name(self, source_entities, brief):
return "Link AQL Module"
# ----------------------------------------------------------
def get_target_entities(self, source_entities):
return self.target
# ----------------------------------------------------------
def get_trace_sources(self, source_entities, brief):
return (os.path.basename(src.name) for src in source_entities)
# -----------------------------------------------------------
def replace(self, options, source_entities):
finder = aql.FindFilesBuilder(options,
mask='*.py',
exclude_mask="__init__.py")
core_files = aql.Node(finder, source_entities)
return aql.Node(AqlPreprocess(options), core_files)
# -----------------------------------------------------------
@staticmethod
def _mod_to_files(file2deps, modules):
mod2files = {}
for mod in modules:
files = set()
for file in file2deps:
if file.find(mod) != -1:
files.add(file)
mod2files[mod] = files
return mod2files
# -----------------------------------------------------------
@staticmethod
def _get_dep_to_files(file2deps, mod2files):
dep2files = {}
tmp_file2deps = {}
for file, mods in file2deps.items():
for mod in mods:
files = mod2files[mod]
tmp_file2deps.setdefault(file, set()).update(files)
for f in files:
dep2files.setdefault(f, set()).add(file)
return dep2files, tmp_file2deps
# -----------------------------------------------------------
@staticmethod
def _get_content(files_content, dep2files, file2deps, tails):
content = ""
while tails:
tail = tails.pop(0)
content += files_content[tail]
files = dep2files.pop(tail, [])
for file in files:
deps = file2deps[file]
deps.remove(tail)
if not deps:
tails.append(file)
del file2deps[file]
return content
# -----------------------------------------------------------
def build(self, source_entities, targets):
file2deps = {}
files_content = {}
modules = set()
tails = []
std_modules = set()
for entity in source_entities:
file_name = entity.name
mod_std_imports, mod_deps, mod_content = entity.data
if not mod_content:
continue
if not mod_deps:
tails.append(file_name)
files_content[file_name] = mod_content
file2deps[file_name] = mod_deps
std_modules.update(mod_std_imports)
modules.update(mod_deps)
mod2files = self._mod_to_files(file2deps, modules)
dep2files, file2deps = self._get_dep_to_files(file2deps, mod2files)
content = self._get_content(files_content, dep2files, file2deps, tails)
imports_content = '\n'.join(
"import %s" % module for module in sorted(std_modules))
content = '\n'.join([HEADER, imports_content, content, AQL_DATE])
aql.write_text_file(self.target, data=content)
targets.add_target_files(self.target)
# ==============================================================================
class AqlPackTools (aql.FileBuilder):
NAME_ATTRS = ['target']
def __init__(self, options, target):
self.target = target
self.build_target = self.get_target_path(target, ext='.b64')
# ----------------------------------------------------------
def get_trace_name(self, source_entities, brief):
return "Pack Tools"
# ----------------------------------------------------------
def get_target_entities(self, source_values):
return self.build_target
# ----------------------------------------------------------
def replace(self, options, source_entities):
tools_path = [source.get() for source in source_entities]
if not tools_path:
return None
finder = aql.FindFilesBuilder(options, '*.py')
zipper = aql.ZipFilesBuilder(options,
target=self.target,
basedir=tools_path)
tool_files = aql.Node(finder, source_entities)
zip = aql.Node(zipper, tool_files)
return zip
# -----------------------------------------------------------
def build(self, source_entities, targets):
target = self.build_target
with aql.open_file(target, write=True,
binary=True, truncate=True) as output:
for source in source_entities:
zip_file = source.get()
with aql.open_file(zip_file, read=True, binary=True) as input:
base64.encode(input, output)
targets.add_target_files(target, tags="embedded_tools")
# ==============================================================================
class AqlLinkStandalone (aql.FileBuilder):
def __init__(self, options, target):
self.target = self.get_target_path(target)
# -----------------------------------------------------------
def get_trace_name(self, source_entities, brief):
return "Link AQL standalone script"
# ----------------------------------------------------------
def get_target_entities(self, source_values):
return self.target
# ----------------------------------------------------------
def build(self, source_entities, targets):
content = []
embedded_tools = ""
for source in source_entities:
data = aql.read_text_file(source.get())
if not data:
continue
if "embedded_tools" in source.tags:
embedded_tools = EMBEDDED_TOOLS % data
else:
content.append(data)
content.append(MAIN.format(embedded_tools=embedded_tools))
content = '\n'.join(content)
aql.write_text_file(self.target, content)
targets.add_target_files(self.target)
# ==============================================================================
class AqlBuildTool(aql.Tool):
def pack_tools(self, options, target):
return AqlPackTools(options, target)
def link_module(self, options, target):
return AqlLinkCore(options, target)
def link_standalone(self, options, target):
return AqlLinkStandalone(options, target)
PackTools = pack_tools
LinkModule = link_module
LinkStandalone = link_standalone
|
aqualid/aqualid
|
make/aql_linker.py
|
Python
|
mit
| 11,736
|
from birdseye.server import main
if __name__ == '__main__':
main()
|
alexmojaki/birdseye
|
birdseye/__main__.py
|
Python
|
mit
| 72
|
from ..GenericInstrument import GenericInstrument
from .helper import SignalGenerator, amplitudelimiter
class Wiltron360SS69(GenericInstrument, SignalGenerator):
"""Wiltron 360SS69 10e6, 40e9.
.. figure:: images/SignalGenerator/Wiltron360SS69.jpg
"""
def __init__(self, instrument):
"""."""
super().__init__(instrument)
# self.log = logging.getLogger(__name__)
# self.log.info('Creating an instance of\t' + str(__class__))
self.amps = [-140, 17]
self.freqs = [10e6, 40e9]
# self.siggen.write("*CLS") # clear error status
# self.frequency = min(self.freqs)
@property
def frequency(self):
"""."""
return(self.query("OF0"))
@frequency.setter
def frequency(self, frequency):
self.write(f"F0{frequency:.2f}GH")
@property
def amplitude(self):
"""."""
return(self.query("OL0"))
@amplitude.setter
@amplitudelimiter
def amplitude(self, amplitude):
self.write(f"L0{amplitude:.2f}DM")
'''@property
def output(self):
if self.query("OUTPut:STATe?") == "1":
return(True)
else:
return(False)
@output.setter
def output(self, boolean=False):
self.write("OUTPut:STATe {:d}".format(boolean))
'''
|
DavidLutton/EngineeringProject
|
labtoolkit/SignalGenerator/Wiltron360SS69.py
|
Python
|
mit
| 1,322
|
default_app_config = 'journeys.apps.JourneyConfig'
|
vishalsahu5/carpool
|
journeys/__init__.py
|
Python
|
mit
| 51
|
import lassie
from .base import LassieBaseTestCase
class LassieOpenGraphTestCase(LassieBaseTestCase):
def test_open_graph_all_properties(self):
url = 'http://lassie.it/open_graph/all_properties.html'
data = lassie.fetch(url)
self.assertEqual(data['url'], url)
self.assertEqual(data['title'], 'Lassie Open Graph All Properies Test')
self.assertEqual(data['description'], 'Just a test template with OG data!')
self.assertEqual(data['locale'], 'en_US')
self.assertEqual(data['site_name'], 'Lassie')
self.assertEqual(len(data['images']), 1)
image = data['images'][0]
self.assertEqual(image['src'], 'http://i.imgur.com/cvoR7zv.jpg')
self.assertEqual(image['width'], 550)
self.assertEqual(image['height'], 365)
self.assertEqual(image['type'], 'og:image')
self.assertEqual(len(data['videos']), 1)
video = data['videos'][0]
self.assertEqual(video['src'], 'http://www.youtube.com/v/dQw4w9WgXcQ?version=3&autohide=1')
self.assertEqual(video['width'], 640)
self.assertEqual(video['height'], 480)
self.assertEqual(video['type'], 'application/x-shockwave-flash')
def test_open_graph_no_og_title_no_og_url(self):
url = 'http://lassie.it/open_graph/no_og_title_no_og_url.html'
data = lassie.fetch(url)
self.assertEqual(data['url'], url)
self.assertEqual(data['title'], 'Lassie Open Graph Test | No og:title, No og:url')
def test_open_graph_og_image_plus_two_body_images(self):
url = 'http://lassie.it/open_graph/og_image_plus_two_body_images.html'
data = lassie.fetch(url)
# Try without passing "all_images", then pass it
self.assertEqual(len(data['images']), 1)
data = lassie.fetch(url, all_images=True)
self.assertEqual(len(data['images']), 3)
image_0 = data['images'][0]
image_1 = data['images'][1]
image_2 = data['images'][2]
self.assertEqual(image_0['type'], 'og:image')
self.assertEqual(image_1['type'], 'body_image')
self.assertEqual(image_2['type'], 'body_image')
def test_open_graph_og_image_relative_url(self):
url = 'http://lassie.it/open_graph/og_image_relative_url.html'
data = lassie.fetch(url)
self.assertEqual(
data['images'][0]['src'], 'http://lassie.it/open_graph/name.jpg')
|
michaelhelmick/lassie
|
tests/test_open_graph.py
|
Python
|
mit
| 2,426
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
README = readme_file.read()
with open('HISTORY.rst') as history_file:
HISTORY = history_file.read()
REQUIREMENTS = [
'gitpython',
'requests',
'tqdm',
'requests_cache',
]
TEST_REQUIREMENTS = [
'pytest',
'mock',
]
setup(
name='packyou',
version='0.1.6',
description="Downloads or clones a python project from github and allows to import it from anywhere. Very useful when the repo is not a package",
long_description=README + '\n\n' + HISTORY,
author="Leonardo Lazzaro",
author_email='llazzaro@dc.uba.ar',
url='https://github.com/llazzaro/packyou',
packages=find_packages(),
package_dir={'packyou':
'packyou'},
include_package_data=True,
install_requires=REQUIREMENTS,
license="MIT license",
zip_safe=False,
keywords='packyou',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=TEST_REQUIREMENTS,
)
|
llazzaro/packyou
|
setup.py
|
Python
|
mit
| 1,573
|
from django.db import models
from .bleachfield import BleachField
class BleachCharField(BleachField, models.CharField):
def pre_save(self, model_instance, add):
new_value = getattr(model_instance, self.attname)
clean_value = self.clean_text(new_value)
setattr(model_instance, self.attname, clean_value)
return super(BleachCharField, self).pre_save(model_instance, add)
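# --- Editor's note: a hedged usage sketch, not part of the original module. ---
# A model could declare the field like any other CharField; the model and field
# names below are purely illustrative:
#
#     class Comment(models.Model):
#         title = BleachCharField(max_length=200)
#
# On save, pre_save() above runs clean_text() on the value before it is stored.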
|
BetterWorks/django-bleachfields
|
bleachfields/bleachchar.py
|
Python
|
mit
| 409
|
import astropy.cosmology as co
aa=co.Planck15
import astropy.io.fits as fits
import matplotlib
matplotlib.rcParams['agg.path.chunksize'] = 2000000
matplotlib.rcParams.update({'font.size': 12})
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
# global cosmo quantities
z_min = float(sys.argv[1])
z_max = float(sys.argv[2])
#imf = 'kroupa'
lO2_min = float(sys.argv[3])
SNlimit = 5
out_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'results')
#previous catalogs
ll_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'literature')
cosmos_dir = os.path.join(os.environ['OBS_REPO'], 'COSMOS', 'catalogs' )
path_2_cosmos_cat = os.path.join( cosmos_dir, "photoz-2.0", "photoz_vers2.0_010312.fits")
#path_2_cosmos_cat = os.path.join( cosmos_dir, "COSMOS2015_Laigle+_v1.1.fits.gz")
# FIREFLY CATALOGS
# SDSS data and catalogs
sdss_dir = os.path.join(os.environ['OBS_REPO'], 'SDSS', 'dr14')
path_2_spall_sdss_dr14_cat = os.path.join( sdss_dir, "specObj-SDSS-dr14.fits" )
path_2_spall_boss_dr14_cat = os.path.join( sdss_dir, "specObj-BOSS-dr14.fits" )
path_2_sdss_cat = os.path.join( sdss_dir, "FireflyGalaxySdss26.fits" )
path_2_eboss_cat = os.path.join( sdss_dir, "FireflyGalaxyEbossDR14.fits" )
# DEEP SURVEYS
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.v2.fits" )
vipers_dir = os.path.join(os.environ['OBS_REPO'], 'VIPERS')
path_2_vipers_cat = os.path.join( vipers_dir, "VIPERS_W14_summary_v2.1.linesFitted.spm.fits" )
vvds_dir = os.path.join(os.environ['OBS_REPO'], 'VVDS')
path_2_vvdsW_cat = os.path.join( vvds_dir, "catalogs", "VVDS_WIDE_summary.v1.spm.fits" )
path_2_vvdsD_cat = os.path.join( vvds_dir, "catalogs", "VVDS_DEEP_summary.v1.spm.fits" )
# path_2_F16_cat = os.path.join( sdss_dir, "RA_DEC_z_w_fluxOII_Mstar_grcol_Mr_lumOII.dat" )
# OPENS THE CATALOGS
deep2 = fits.open(path_2_deep2_cat)[1].data
#vvdsD = fits.open(path_2_vvdsD_cat)[1].data
#vvdsW = fits.open(path_2_vvdsW_cat)[1].data
#vipers = fits.open(path_2_vipers_cat)[1].data
#sdss = fits.open(path_2_sdss_cat)[1].data
#boss = fits.open(path_2_eboss_cat)[1].data
cosmos = fits.open(path_2_cosmos_cat)[1].data
lineSelection = lambda catalog, lineName : (catalog[lineName+'_flux']>0.)& (catalog[lineName+'_fluxErr'] >0.) & (catalog[lineName+'_flux'] > SNlimit * catalog[lineName+'_fluxErr']) # & (catalog[lineName+'_luminosity']>0)& (catalog[lineName+'_luminosity']<1e50)
out_dir = os.path.join('/data42s/comparat/firefly/v1_1_0/figures')
smf_ilbert13 = lambda M, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s : ( phi_1s * (M/M_star) ** alpha_1s + phi_2s * (M/M_star) ** alpha_2s ) * n.e ** (-M/M_star) * (M/ M_star)
path_ilbert13_SMF = os.path.join(ll_dir, "ilbert_2013_mass_function_params.txt")
zmin, zmax, N, M_comp, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s, log_rho_s = n.loadtxt(os.path.join( ll_dir, "ilbert_2013_mass_function_params.txt"), unpack=True)
#smfs_ilbert13 = n.array([lambda mass : smf_ilbert13( mass , 10**M_star[ii], phi_1s[ii]*10**(-3), alpha_1s[ii], phi_2s[ii]*10**(-3), alpha_2s[ii] ) for ii in range(len(M_star)) ])
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0] )
#print 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0]
smf08 = lambda mass : smf_ilbert13( mass , 10**M_star[2], phi_1s[2]*10**(-3), alpha_1s[2], phi_2s[2]*10**(-3), alpha_2s[2] )
#print 10**M_star[2], phi_1s[2]*10**(-3), alpha_1s[2], phi_2s[2]*10**(-3), alpha_2s[2]
volume_per_deg2 = ( aa.comoving_volume(z_max) - aa.comoving_volume(z_min) ) * n.pi / 129600.
volume_per_deg2_val = volume_per_deg2.value
# global spm quantities
# stat functions
ld = lambda selection : len(selection.nonzero()[0])
# stats about DEEP2 run
area1=0.60
area2=0.62
area3=0.90
area4=0.66
if z_min>=0.7:
area_deep2 = area1+area2+area3+area4
else :
area_deep2 = 0.6
#area_vvdsD = 0.6
#area_vvdsW = 5.785
#area_vipers = 24.
#area_cosmos = 1.52
def get_basic_stat(catalog, z_name, z_flg, name, zflg_min, prefix):
catalog_zOk = (catalog[z_name] > z_min) & (catalog[z_flg]>=zflg_min)
catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max) & (catalog['SSR']>0) & (catalog['TSR']>0) & (catalog['SSR']<=1.0001) & (catalog['TSR']<=1.0001)
catalog_sel = (catalog_stat) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] >= 10**5. ) & (catalog[prefix+'stellar_mass'] <= catalog[prefix+'stellar_mass_up'] ) & (catalog[prefix+'stellar_mass'] >= catalog[prefix+'stellar_mass_low'] ) & (-n.log10(catalog[prefix+'stellar_mass_low']) + n.log10(catalog[prefix+'stellar_mass_up']) < 0.6 )
l_o2 = lineSelection(catalog, "O2_3728") & catalog_stat
l_o3 = lineSelection(catalog, "O3_5007") & catalog_stat
l_hb = lineSelection(catalog, "H1_4862") & catalog_stat
m_catalog = n.log10(catalog[prefix+'stellar_mass'])
w_catalog = 1. / (catalog['TSR'] * catalog['SSR'])
#print name, '& $',len(catalog), "$ & $", ld(catalog_zOk),"$ & $", ld(catalog_stat), "\\;(", ld(catalog_sel),")$ & $", ld(l_o2), "\\;(", ld(catalog_sel & l_o2),")$ & $", ld(l_o3), "\\;(", ld(catalog_sel & l_o3),")$ & $", ld(l_hb), "\\;(", ld(catalog_sel & l_hb),")$ \\\\"
return catalog_sel, m_catalog, w_catalog, l_o2, l_o3, l_hb
def get_hist(masses, weights, mbins):
NN = n.histogram(masses, mbins)[0]
NW = n.histogram(masses, mbins, weights = weights)[0]
xx = (mbins[1:] + mbins[:-1])/2.
return xx, NW, NN**(-0.5)*NW
def plotMF_raw(prefix="Chabrier_ELODIE_"):
deep2_sel, deep2_m, deep2_w, deep2_o2, deep2_o3, deep2_hb = get_basic_stat(deep2, 'ZBEST', 'ZQUALITY', 'DEEP2', 3., prefix)
#vvdsD_sel, vvdsD_m, vvdsD_w, vvdsD_o2, vvdsD_o3, vvdsD_hb = get_basic_stat(vvdsD, 'Z', 'ZFLAGS', 'VVDS Deep', 2., prefix)
#vvdsW_sel, vvdsW_m, vvdsW_w, vvdsW_o2, vvdsW_o3, vvdsW_hb = get_basic_stat(vvdsW, 'Z', 'ZFLAGS', 'VVDS Wide', 2., prefix)
#vipers_sel, vipers_m, vipers_w, vipers_o2, vipers_o3, vipers_hb = get_basic_stat(vipers, 'zspec', 'zflg', 'VIPERS', 1., prefix)
lbins = n.arange(40.5,44,0.25)
x_lum = (lbins[1:] + lbins[:-1])/2.
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
N_O2_all = n.histogram(deep2['O2_3728_luminosity'][deep2_o2], bins = 10**lbins)[0]
N_O2_mass = n.histogram(deep2['O2_3728_luminosity'][deep2_sel & deep2_o2], bins = 10**lbins)[0]
N_O2_all_normed = n.histogram(n.log10(deep2['O2_3728_luminosity'][deep2_o2]), bins = lbins, normed = True)[0]
#print N_O2_all_normed
ok_o2 = (N_O2_all>0)
p.plot(x_lum, N_O2_all_normed/2., label = 'normed hist')
p.plot(x_lum[ok_o2], 1. * N_O2_mass[ok_o2] / N_O2_all[ok_o2], label = 'DEEP2')
p.axvline(lO2_min)
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel('[OII] luminosity')
p.ylabel('[OII] with mass measurement / all [OII] detections')
#p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((-0.01, 1.01))
p.xlim((40.5, 43.5))
p.grid()
p.savefig(os.path.join(out_dir, "SMF_"+prefix+"line_detection_raw_"+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
dlog10m = 0.25
mbins = n.arange(8,12.5,dlog10m)
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
p.plot(mbins, smf08(10**mbins), label='Ilbert 13, 0.8<z<1.1', ls='dashed')
x, y, ye = get_hist(deep2_m[deep2_sel], weights = deep2_w[deep2_sel]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='DEEP2', lw=1)
x, y, ye = get_hist(deep2_m[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)], weights = deep2_w[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='DEEP2 L([OII])>'+str(lO2_min), lw=1)
#x, y, ye = get_hist(vvdsD_m, weights = vvdsD_w/(dlog10m*n.log(10)*area_vvdsD*volume_per_deg2_val), mbins = mbins)
#p.errorbar(x, y, yerr = ye, label='VVDSDEEP', lw=1)
#x, y, ye = get_hist(vipers_m, weights = vipers_w/(dlog10m*n.log(10)*area_vipers*volume_per_deg2_val), mbins = mbins)
#p.errorbar(x, y, yerr = ye, label='VIPERS', lw=0.5)
#x, y, ye = get_hist(vvdsW_m, weights = vvdsW_w/(dlog10m*n.log(10)*area_vvdsW*volume_per_deg2_val), mbins = mbins)
#p.errorbar(x, y, yerr = ye, label='VVDSWide', lw=0.5)
#cosmos_sel = (cosmos['flag_maskI']==0) &( cosmos['K'] < 24.) & ( cosmos['photoz'] > z_min) & (cosmos['photoz'] < z_max )
#cosmos_w = n.ones_like(cosmos['photoz'][cosmos_sel])
#p.hist(cosmos['mass_med'][cosmos_sel], weights = cosmos_w/(dlog10m*n.log(10)*area_cosmos*volume_per_deg2_val), bins = mbins, label='COSMOS K<24', histtype='step')
#cosmos_sel = (cosmos['flag_maskI']==0) & ( cosmos['R'] < 24.1) & ( cosmos['photoz'] > z_min) & (cosmos['photoz'] < z_max )
#cosmos_w = n.ones_like(cosmos['photoz'][cosmos_sel])
#p.hist(cosmos['mass_med'][cosmos_sel], weights = cosmos_w/(dlog10m*n.log(10)*area_cosmos*volume_per_deg2_val), bins = mbins, label='COSMOS R<24.1', histtype='step')
#for smfEq in smfs_ilbert13:
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel(r'$\log_{10}$ (stellar mass '+r" / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "SMF_"+prefix+"SMF_"+prefix+"SMF_raw_"+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
x, y, ye = get_hist(deep2_m[deep2_sel], weights = deep2_w[deep2_sel]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y/smf08(10**x), yerr = ye/smf08(10**x), label='DEEP2', lw=1)
x, y, ye = get_hist(deep2_m[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)], weights = deep2_w[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y/smf08(10**x), yerr = ye/smf08(10**x), label='DEEP2 L([OII])>'+str(lO2_min), lw=1)
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel(r'$\log_{10}$ (stellar mass '+r" / $M_\odot$ )")
p.ylabel(r'$\Phi_{[OII]} / \Phi_{all}(M)$')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-4, 2.))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "SMF_"+prefix+"ratio_SMF_"+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
def plotMF_raw_many(prefixs=["Chabrier_ELODIE_"]):
dlog10m = 0.2
mbins = n.arange(8,12.5,dlog10m)
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
ys_u, yso2_u = [], []
ys_l, yso2_l = [], []
yso2P_u, yso2P_l = [], []
yso2D_u, yso2D_l = [], []
for prefix in prefixs :
deep2_sel, deep2_m, deep2_w, deep2_o2, deep2_o3, deep2_hb = get_basic_stat(deep2, 'ZBEST', 'ZQUALITY', 'DEEP2', 2., prefix)
x, y, ye = get_hist(deep2_m[deep2_sel], weights = deep2_w[deep2_sel]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
ys_u.append(y+ye)
ys_l.append(y-ye)
x, y, ye = get_hist(deep2_m[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)], weights = deep2_w[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
yso2_u.append(y+ye)
yso2_l.append(y-ye)
x, y, ye = get_hist(deep2_m[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**(lO2_min+0.2))], weights = deep2_w[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**(lO2_min+0.2))]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
yso2P_u.append(y+ye)
yso2P_l.append(y-ye)
x, y, ye = get_hist(deep2_m[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**(lO2_min+0.4))], weights = deep2_w[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**(lO2_min+0.4))]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
yso2D_u.append(y+ye)
yso2D_l.append(y-ye)
#print n.array(ys_l).shape, n.min(n.array(ys_l), axis=0).shape
#p.fill_between(x, y1=n.min(n.array(ys_l), axis=0), y2=n.max(n.array(ys_u), axis=0), alpha=0.5, color='r')
p.plot(x, (n.median(n.array(ys_l), axis=0) + n.median(n.array(ys_u), axis=0))/2., color='r', label='DEEP2')
#p.plot(x, n.median(n.array(ys_l), axis=0), ls='dashed', alpha=0.5, color='r')
#p.plot(x, n.median(n.array(ys_l), axis=0), ls='dashed', alpha=0.5, color='r')
#p.fill_between(x, y1=n.min(n.array(yso2_l), axis=0), y2=n.max(n.array(yso2_u), axis=0), alpha=0.5, color='b')
p.plot(x, (n.median(n.array(yso2_l), axis=0)+n.median(n.array(yso2_u), axis=0))/2., color='b', label='L[OII]>'+str(n.round(lO2_min,1)) )
#p.plot(x, n.median(n.array(yso2_l), axis=0), ls='dashed', color='b' )
#p.plot(x, n.median(n.array(yso2_u), axis=0), ls='dashed', color='b' )
#p.fill_between(x, y1=n.min(n.array(yso2_l), axis=0), y2=n.max(n.array(yso2_u), axis=0), alpha=0.5, color='b')
p.plot(x, (n.median(n.array(yso2P_l), axis=0)+n.median(n.array(yso2P_u), axis=0))/2., color='g', label='L[OII]>'+str(n.round(lO2_min+0.2,1)) )
#p.plot(x, n.median(n.array(yso2P_l), axis=0), ls='dashed', color='b' )
#p.plot(x, n.median(n.array(yso2P_u), axis=0), ls='dashed', color='b' )
#p.fill_between(x, y1=n.min(n.array(yso2_l), axis=0), y2=n.max(n.array(yso2_u), axis=0), alpha=0.5, color='b')
p.plot(x, (n.median(n.array(yso2D_l), axis=0)+n.median(n.array(yso2D_u), axis=0))/2., color='m', label='L[OII]>'+str(n.round(lO2_min+0.4,1)) )
#p.plot(x, n.median(n.array(yso2P_l), axis=0), ls='dashed', color='b' )
#p.plot(x, n.median(n.array(yso2P_u), axis=0), ls='dashed', color='b' )
#p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
p.plot(mbins, smf08(10**mbins), label='Ilbert 13, 0.8<z<1.1', color='k')
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel(r'$\log_{10}$ (stellar mass '+r" / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False, fontsize=12)
p.ylim((1e-8, 1e-2))
p.xlim((8.5, 12.))
p.grid()
p.savefig(os.path.join(out_dir, "all_contour_SMF_raw_"+str(lO2_min) +"_"+str(z_min)+'_z_'+str(z_max)+".png" ))
p.clf()
#plotMF_raw_many(["Chabrier_ELODIE_"])
plotMF_raw_many(["Chabrier_ELODIE_" ,"Chabrier_MILES_", "Chabrier_STELIB_" ])#,"Kroupa_ELODIE_","Kroupa_MILES_", "Kroupa_STELIB_","Salpeter_ELODIE_" ,"Salpeter_MILES_","Salpeter_STELIB_"])
#plotMF_raw_many(["Chabrier_ELODIE_" ,"Chabrier_MILES_","Chabrier_STELIB_" ,"Kroupa_ELODIE_","Kroupa_MILES_", "Kroupa_STELIB_","Salpeter_ELODIE_" ,"Salpeter_MILES_","Salpeter_STELIB_"])
sys.exit()
plotMF_raw("Chabrier_ELODIE_")
plotMF_raw("Chabrier_MILES_")
plotMF_raw("Chabrier_STELIB_")
plotMF_raw("Kroupa_ELODIE_")
plotMF_raw("Kroupa_MILES_")
plotMF_raw("Kroupa_STELIB_")
plotMF_raw("Salpeter_ELODIE_")
plotMF_raw("Salpeter_MILES_")
plotMF_raw("Salpeter_STELIB_")
|
JohanComparat/pySU
|
spm/bin_SMF/smf_plot.py
|
Python
|
cc0-1.0
| 15,194
|
import html5lib
import traceback
def build_html_dom_from_str(html_str):
return html5lib.parse(html_str, 'dom')
def find_html_element_list_for_tag(element, tag, class_style = None):
elements = element.getElementsByTagName(tag)
if not class_style: return elements
result = []
for e in elements:
e_class_style = e.getAttribute('class')
if e_class_style == class_style:
result.append(e)
return result
def find_element_content(element):
try:
content = None
for content_element_text_child in element.childNodes:
content = content_element_text_child.nodeValue
if content is not None:
content = content.strip()
if content is not None and content != '' : return content
if not hasattr(content_element_text_child, 'childNodes') : continue
if (content is None or content == '') and (content_element_text_child.childNodes is None or len(content_element_text_child.childNodes) == 0):
continue
content = find_element_content(content_element_text_child)
if content is not None and content != '' : return content
return content
except Exception, e:
exstr = traceback.format_exc()
print exstr
|
Bargetor/chestnut
|
bargetor/common/HTMLUtil.py
|
Python
|
gpl-2.0
| 1,310
|
# coding=utf-8
from __future__ import unicode_literals
"""
Name: MyArgparse
Author: Andy Liu
Email : andy.liu.ud@hotmail.com
Created: 3/26/2015
Copyright: All rights reserved.
Licence: This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
def parse_command_line():
parser = argparse.ArgumentParser(prog='PROG', description='%(prog)s can ...')
parser.add_argument('NoPre', action="store", help='help information')
parser.add_argument('-t', action="store_true", dest='boolean_switch', default=False, help='Set a switch to true')
parser.add_argument('-f', action="store_false", dest='boolean_switch', default=True, help='Set a switch to false')
parser.add_argument('-s', action="store", dest='simple_value', help="Store a simple value")
parser.add_argument('-st', action="store", dest="simple_value", type=int,
help='Store a simple value and define type')
parser.add_argument('-c', action='store_const', dest='constant_value', const='value-to-store',
help='Store a constant value')
parser.add_argument('-a', action='append', dest='collection', default=[], help='Add repeated values to a list')
parser.add_argument('-A', action='append_const', dest='const_collection', const='value-1-to-append', default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection', const='value-2-to-append',
help='Add different values to list')
args = parser.parse_args()
logging.debug('NoPre = %r' % args.NoPre)
logging.debug('simple_value = %r' % args.simple_value)
logging.debug('constant_value = %r' % args.constant_value)
logging.debug('boolean_switch = %r' % args.boolean_switch)
logging.debug('collection = %r' % args.collection)
logging.debug('const_collection = %r' % args.const_collection)
return args
if __name__ == '__main__':
from MyLog import init_logger
logger = init_logger()
parse_command_line()
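# --- Editor's note: a hedged usage sketch, not part of the original module. ---
# Invoking the script as, for example,
#     python MyArgparse.py myvalue -t -s simple -a one -a two -A
# should parse to NoPre='myvalue', boolean_switch=True, simple_value='simple',
# collection=['one', 'two'] and const_collection=['value-1-to-append'];
# the option values shown here are illustrative only.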
|
asiroliu/MyTools
|
MyArgparse.py
|
Python
|
gpl-2.0
| 2,738
|
import pytest
from cfme.containers.provider import ContainersProvider
from utils import testgen, version
from cfme.web_ui import toolbar as tb
from utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.uncollectif(
lambda: version.current_version() < "5.6"),
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(2)]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')
@pytest.mark.polarion('CMP-9878')
def test_reload_button_provider(provider):
""" This test verifies the data integrity of the fields in
the Relationships table after clicking the "reload"
    button. Fields that are verified as part of provider.validate_stats():
Projects, Routes, Container Services, Replicators, Pods, Containers, and Nodes.
Images are being validated separately, since the total
number of images in CFME 5.7 includes all images from the OSE registry as well
as the images that are being created from the running pods. The images are searched
according to the @sha. Image Registries are also validated separately.
"""
navigate_to(provider, 'Details')
tb.select('Reload Current Display')
provider.validate_stats(ui=True)
list_img_from_registry = provider.mgmt.list_image()
list_img_from_registry_splitted = [i.id.split(
'@sha256:')[-1] for i in list_img_from_registry]
list_img_from_openshift = provider.mgmt.list_image_openshift()
list_img_from_openshift_splitted = [d['name']
for d in list_img_from_openshift]
list_img_from_openshift_parsed = [i[7:].split(
'@sha256:')[-1] for i in list_img_from_openshift_splitted]
list_img_from_registry_splitted_new = set(list_img_from_registry_splitted)
list_img_from_openshift_parsed_new = set(list_img_from_openshift_parsed)
list_img_from_openshift_parsed_new.update(list_img_from_registry_splitted_new)
num_img_in_cfme = provider.num_image()
# TODO Fix num_image_ui()
num_img_cfme_56 = len(provider.mgmt.list_image())
num_img_cfme_57 = len(list_img_from_openshift_parsed_new)
assert num_img_in_cfme == version.pick({version.LOWEST: num_img_cfme_56,
'5.7': num_img_cfme_57})
# validate the number of image registries
list_all_rgstr = provider.mgmt.list_image_registry()
list_all_rgstr_revised = [i.host for i in list_all_rgstr]
list_all_rgstr_new = filter(lambda ch: 'openshift3' not in ch, list_all_rgstr_revised)
num_rgstr_in_cfme = provider.summary.relationships.image_registries.value
assert len(list_all_rgstr_new) == num_rgstr_in_cfme
|
rananda/cfme_tests
|
cfme/tests/containers/test_reload_button_provider.py
|
Python
|
gpl-2.0
| 2,719
|
#!/usr/bin/env python
"""
================================================
ABElectronics IO Pi Tests | test get_bus_pullups function
Requires python smbus to be installed
For Python 2 install with: sudo apt-get install python-smbus
For Python 3 install with: sudo apt-get install python3-smbus
run with: python3 get_bus_pullups.py
================================================
This test validates the get_bus_pullups function in the IOPi class.
=== Expected Result ============================
> Console Output:
Test Passed
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
try:
import sys
sys.path.append("..")
from IOPi import IOPi
except ImportError:
raise ImportError("Failed to import IOPi library")
def main():
"""
Main program function
"""
passed = True
iopi = IOPi(0x20, False) # new iopi object without initialisation
for a in range(1, 65536):
iopi.set_bus_pullups(a)
x = iopi.get_bus_pullups()
if x != a:
passed = False
break
iopi.set_bus_pullups(a)
x = iopi.get_bus_pullups()
if x != a:
passed = False
break
if passed is False:
print("Test Failed")
else:
print("Test Passed")
if __name__ == "__main__":
main()
|
abelectronicsuk/ABElectronics_Python_Libraries
|
IOPi/tests/get_bus_pullups.py
|
Python
|
gpl-2.0
| 1,397
|
# Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Sliplink"""
class Sliplink(object):
"""Sliplink is a link between two nodes in the slipnet.
Attributes:
from_node: The node this link starts at.
to_node: The node this link ends at.
label: The node that labels this link.
        fixed_length: A static length, used if the link has no label."""
def __init__(self, from_node, to_node, label, fixed_length):
"""Initializes Sliplink."""
self.from_node = from_node
self.to_node = to_node
self.label = label
self.fixed_length = fixed_length
def intrinsic_degree_of_association(self):
"""Return the intrinsic degree of association of the link."""
        if self.fixed_length is not None:
return 100 - self.fixed_length
else:
return self.label.intrinsic_degree_of_association()
def degree_of_association(self):
"""Return the degree of association of the link."""
        if self.fixed_length is not None:
return 100 - self.fixed_length
else:
return self.label.degree_of_association()
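# --- Editor's note: a minimal, hedged usage sketch; not part of the original
# Copycat module. Placeholder nodes are sufficient here because a link with a
# fixed length never consults its label.
if __name__ == '__main__':
    example_link = Sliplink(object(), object(), label=None, fixed_length=40)
    assert example_link.intrinsic_degree_of_association() == 60  # 100 - fixed_length
    assert example_link.degree_of_association() == 60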
|
ajhager/copycat
|
copycat/slipnet/sliplink.py
|
Python
|
gpl-2.0
| 1,760
|
from shutil import copy
from os import remove
import sys
from I_Data_Degradation_Block.Code.Degradation import main as blockI
from II_Cryptography_Block.Code.encrypt import main as blockII
from III_Shamirs_Block.Code.SecretSharing import main as blockIII
from IV_DHT_Block.Code.Parted_Keys_to_OWI_Input import main as blockIV
def removeE(path):
try:
remove(path)
except:
pass
def checkentryInt(var):
try:
return int(var)
except:
print "Incorrect entry, please retype it."
var = raw_input()
var = checkentryInt(var)
return var
def checkentryFloat(var):
try:
return float(var)
except:
print "Incorrect entry, please retype it."
var = raw_input()
var = checkentryFloat(var)
return var
def checkentryDuration(var):
try:
int(var[0])
int(var[1])
int(var[2])
return var
except:
print "Incorrect entry, please retype it. Syntax Hours/Minutes/Seconds"
var = raw_input()
var = var.split("/")
var = checkentryDuration(var)
return var
def main(n,churn,std):
n= int(n)
churn=int(churn)
std=int(std)
print "Hello"
print "Please enter your current latitude:"
latitude = raw_input()
latitude = checkentryFloat(latitude)
print "Please enter your current longitude:"
longitude = raw_input()
longitude = checkentryFloat(longitude)
print "How many degradation levels do you want ?"
degradationLevels = raw_input()
degradationLevels = checkentryInt(degradationLevels)
################################################################
# BLOCK 1
################################################################
blockIinput = open('./I_Data_Degradation_Block/Inputs/Data to Degrade/Data', 'w')
blockIinput.write(str(latitude)+" ; "+str(longitude))
blockIinput.close()
blockI('./I_Data_Degradation_Block/Inputs/Data to Degrade/Data','./I_Data_Degradation_Block/Outputs/Degraded Data/Degraded Data',degradationLevels)
copy('./I_Data_Degradation_Block/Outputs/Degraded Data/Degraded Data' , './II_Cryptography_Block/Inputs/Degraded Data')
################################################################
# BLOCK 2
################################################################
blockII('./II_Cryptography_Block/Inputs/Degraded Data/Degraded Data','./II_Cryptography_Block/Outputs/Encrypted Data/Encrypted Data','./II_Cryptography_Block/Outputs/Encryption Keys/Keys','./II_Cryptography_Block/Outputs/Encryption Keys/Record')
copy('./II_Cryptography_Block/Outputs/Encryption Keys/Keys' , './III_Shamirs_Block/Inputs/Encryption Keys')
################################################################
# BLOCK 3
################################################################
dataduration = []
for a in range(0,degradationLevels):
i = a+1
print "How long do you want the data of privacy level "+str(i)+" to be available ? (1 is the most private level, the syntax is hours/minutes/seconds)"
currentduration = raw_input()
currentduration = currentduration.split("/")
currentduration = checkentryDuration(currentduration)
dataduration.append(currentduration)
blockIII('./III_Shamirs_Block/Inputs/Encryption Keys/Keys', './III_Shamirs_Block/Outputs/Parted Keys/KeyParts',churn,dataduration,n,std,'./PointRecord')
copy('./III_Shamirs_Block/Outputs/Parted Keys/KeyParts' , './IV_DHT_Block/Inputs/Parted Keys')
copy('./II_Cryptography_Block/Outputs/Encryption Keys/Record' , '.')
################################################################
# BLOCK 4
################################################################
blockIV('./IV_DHT_Block/Inputs/Parted Keys/KeyParts',"./LocalizationsInstruction","127.0.0.1","10000")
FinalOutputFile = open("./FinalOutput","w")
with open("./Record") as RecordFile:
PointFile = open("./PointRecord")
for line in RecordFile:
if(not "*************" in line):
dataHash = line.split(";")[0]
i=0
for i in range(0,n):
pointline = PointFile.readline()
pointline = pointline.split(" ")
FinalOutputFile.write(dataHash +" ; "+ pointline[1])
PointFile.close()
FinalOutputFile.close()
################################################################
# Cleaning up
################################################################
removeE('./I_Data_Degradation_Block/Inputs/Data to Degrade/Data')
removeE('./I_Data_Degradation_Block/Outputs/Degraded Data/Degraded Data')
removeE('./II_Cryptography_Block/Outputs/Encryption Keys/Keys')
removeE('./II_Cryptography_Block/Outputs/Encryption Keys/Record')
removeE('./II_Cryptography_Block/Inputs/Degraded Data/Degraded Data')
removeE('./III_Shamirs_Block/Inputs/Encryption Keys/Keys')
removeE('./III_Shamirs_Block/Outputs/Parted Keys/KeyParts')
removeE('./IV_DHT_Block/Inputs/Parted Keys')
removeE('./Record')
removeE('./PointRecord')
removeE('./tempfile.owi')
if __name__ == "__main__":
sys.exit(main(sys.argv[1], sys.argv[2], sys.argv[3]))
|
ioniATX/DHT_oblivion
|
Blocs/InputDataClient.py
|
Python
|
gpl-2.0
| 5,237
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import time
import shutil
import signal
import subprocess
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
from lib.util.mysql_methods import execute_cmd
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
pamcfg = '/etc/pam.d/mysqld'
class basicTest(mysqlBaseTestCase):
def test_pam_basic(self):
percent_string = '%'
opt_matrix_req = ['pam_plugin_dir']
self.servers = servers
logging = test_executor.logging
master_server = servers[0]
output_path = os.path.join(master_server.vardir, 'pam.out')
test_executor.matrix_manager.matrix_check_req(opt_matrix_req)
# This is a master
if (test_executor.matrix_manager.option_matrix['pam_user']):
pam_user = test_executor.matrix_manager.option_matrix['pam_user']
else:
pam_user = 'pamuser'
# Create UNIX system account
if (test_executor.system_manager.user_exists(pam_user)):
pass
else:
subprocess.call(["useradd", pam_user])
# Create PAM config
if (os.path.isfile(pamcfg)):
os.remove(pamcfg)
pamcfg_fh = open("/etc/pam.d/mysqld", "wb")
pamcfg_fh.write("auth\trequired\tpam_permit.so\n")
pamcfg_fh.close();
# Stop server
master_server.stop()
# Specify mysql plugin dir
master_server.server_options.append('--plugin-dir=%s' %(test_executor.matrix_manager.option_matrix['pam_plugin_dir']))
# Start server with new options
master_server.start()
self.assertEqual( master_server.status, 1, msg = 'Server failed to restart')
# Install plugin
query = "INSTALL PLUGIN auth_pam SONAME \'auth_pam.so\'"
expected_result = ''
cmd = "%s --protocol=tcp --port=%d -uroot -e \"%s\"" %(master_server.mysql_client
, master_server.master_port
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = cmd)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
# Create user
query = "CREATE USER \'%s\'@\'%s\' IDENTIFIED WITH auth_pam;" %(pam_user, percent_string)
expected_result = ''
cmd = "%s --protocol=tcp --port=%d -uroot -e \"%s\"" %(master_server.mysql_client
, master_server.master_port
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = output)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
# Grant permissions
query = "GRANT ALL ON test.* TO \'%s\'@\'%s\';" %(pam_user, percent_string)
expected_result = ''
cmd = "%s --protocol=tcp --port=%d --user=root -e \"%s\"" %(master_server.mysql_client
, master_server.master_port
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = output)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
# Test user login
query = "SHOW TABLES;"
expected_result = ''
cmd = "%s --plugin-dir=/usr/lib/mysql/plugin/ --protocol=tcp --port=%d --user=%s --password=\'\' -e \"%s\" test" %(master_server.mysql_client
, master_server.master_port
, pam_user
, query )
retcode, output = execute_cmd(cmd, output_path, None, True)
self.assertEqual(retcode, 0, msg = output)
self.assertEqual(output, expected_result, msg = "%s || %s" %(output, expected_result))
|
vladistan/percona-pam-plugin
|
test/dbqp/percona_tests/percona_pam/pam_basic_test.py
|
Python
|
gpl-2.0
| 4,568
|
import sys
from fsgamesys.plugins.pluginmanager import PluginManager
"""
DOSBox-FS launcher script used for testing.
"""
def app_main():
executable = PluginManager.instance().find_executable("dosbox-fs")
process = executable.popen(sys.argv[1:])
process.wait()
|
FrodeSolheim/fs-uae-launcher
|
launcher/apps/dosbox_fs.py
|
Python
|
gpl-2.0
| 276
|
"""waybacktrack.py
Use this to extract Way Back Machine's
url-archives of any given domain!
TODO: reiterate entire design!
"""
import time
import os
import urllib2
import random
from math import ceil
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from lxml import html
from lxml.html import clean
ARCHIVE_DOMAIN = "http://web.archive.org"
CURR_DIR = os.path.dirname(__file__)
DATASET_DIR = os.path.join(CURR_DIR, '../../dataset/')
def archive_domain(domain, year, dir_path=DATASET_DIR,
percent=0, debug=False, throttle=1):
"""
domain
@type domain: string
@param domain: the domain of the website ie. www.nytimes.com
@type year: int
@param year: the year to extract archives from
@type dir_path: string
@param dir_path: the directory path to store archive, if
empty, directory will automatically be created
TODO: Think of better solution to storing
downloaded archives
@type percent: int
@param percent: the percentage of Way Back archives to crawl
@rtype:
@return: Returns a list of archived sites
"""
# TODO: Improve this for module portability
# WARNING: Module will likely break if used outside of
# crawl-to-the-future project
# automatically find or eventually create directory
# based off domain name
    # Found a way to check if the file is being run in crawl-to-the-future,
    # super "hacky" though
    # TODO: Find a better way to check if the module is being run
    # in the crawl-to-the-future project
if os.path.split(
os.path.abspath(os.path.join(__file__, os.pardir)))[1] != "Way-Back":
raise Exception("Please manually specify 'dir_name' value")
if dir_path is DATASET_DIR:
dir_path = os.path.join(dir_path, domain + '/')
if not os.path.exists(dir_path):
#raise IOError("[Errno 2] No such file or directory: '" + dir_path + "'")
# this part is shady
os.makedirs(dir_path)
if not isinstance(dir_path, basestring):
raise Exception("Directory - third arg. - path must be a string.")
ia_year_url = ARCHIVE_DOMAIN + "/web/" + str(year) + \
"*/http://" + domain + "/"
ia_parsed = html.parse(ia_year_url)
domain_snapshots = list(set(ia_parsed.xpath('//*[starts-with(@id,"' +
str(year) + '-")]//a/@href')))
#snapshot_age_span is a percentage of total snapshots to process from
#the given year
#ie. if percent is 100, and there are a total of 50 snapshots for
#www.cnn.com, we will crawl (to a depth of 1 atm) all 50 snapshots
snapshot_age_span = 1 if percent <= 0 \
else len(domain_snapshots) - 1 \
if percent >= 100 \
else int(percent*len(domain_snapshots)/100)
if debug:
print "Extracting links from: ", domain
# http://margerytech.blogspot.com/2011/06/python-get-last-directory-name-in-path.html
print "Current directory: ", os.path.split(
os.path.abspath(os.path.join(__file__, os.pardir)))[1]
print "Storing files in: ", os.path.abspath(dir_path)
print "Number of domain snapshots: ", len(domain_snapshots)
print "Number of domain snapshots to process: ", snapshot_age_span + 1
random.shuffle(domain_snapshots)
forward_links = []
#for snapshot in domain_snapshots[:snapshot_age_span]:
for snapshot in domain_snapshots[:3]:
curr_snapshot_flinks = get_forwardlink_snapshots(snapshot)
forward_links.extend(curr_snapshot_flinks)
if debug:
print "snapshot url: ", snapshot
print "forward link count: ", len(curr_snapshot_flinks)
random.shuffle(forward_links)
if debug:
print "total number of foward links to download: ", len(forward_links)
random.shuffle(forward_links)
# archive forward links
archived_links = []
duds = []
for forwardlink in forward_links:
if archive(forwardlink, year, dir_path, debug, throttle):
archived_links.append(forwardlink)
else:
duds.append(forwardlink)
if debug:
print "Number of archived forward links: ", len(archived_links)
print "Number of duds: ", len(duds)
return archived_links, duds
# I know I'm breaking so many rules by not separating concerns
def archive(page, year, dir_path, debug=False, throttle=1):
"""
Check to see if downloaded forward link
satisfies the archival year specification
ie. (2000, 2005, 2010)
"""
#files = [f for f in os.listdir(dir_path) if os.path.isfile(f)]
if debug:
print "requesting ", page
page_file = page.rsplit('/web/')[1].replace('http://', '').replace('-','_')
page_file = page_file.replace('/', '_').replace(':', '_').replace('&','_')
page_file = page_file.replace('?', '_').replace('*','_').replace('=','_')
file_path = dir_path + page_file
if os.path.isfile(file_path):
if debug:
print "Already saved: ", page_file
print
return False
try:
html_file = urllib2.urlopen(ARCHIVE_DOMAIN + page)
except IOError:
if debug:
print "Failed to open request for ", ARCHIVE_DOMAIN + page
print
return False
if html_file.getcode() == 302:
if debug:
print "Got HTTP 302 response for ", ARCHIVE_DOMAIN + page
print
return False
html_string = str(html_file.read())
if html_string.find("HTTP 302 response") != -1:
if debug:
print "Got HTTP 302 response for ", ARCHIVE_DOMAIN + page
print
return False
archival_year_spec = ARCHIVE_DOMAIN + '/web/' + str(year)
page_url = html_file.geturl()
if page_url.startswith(archival_year_spec):
if debug:
print "saving ", page_url
print
try:
with open(file_path, 'wb') as f:
f.write(BytesIO(html_string).read())
time.sleep(throttle)
except IOError as e:
if debug:
print "Got error: ", e
return False
return True
else:
return False
def get_forwardlink_snapshots(parent_site):
"""
@type index: string
@param index: the index.html page from which to extract forward links
@type year: int
@param year: the year to extract archives from
"""
try:
parsed_parent_site = html.parse(ARCHIVE_DOMAIN+parent_site)
except IOError:
print "Did not get extract links in ", ARCHIVE_DOMAIN+parent_site
return []
#cleaner = html.clean.Cleaner(scripts=True, javascript=True,style=True, kill_tags = ["img"])
cleaner = clean.Cleaner(scripts=True, javascript=True, comments=True,
style=True, meta=True, processing_instructions=True, embedded=True,
frames=True, forms=True, kill_tags=["noscript", "iframe", "img"])
parsed_parent_site = cleaner.clean_html(parsed_parent_site)
# spec archival year
# check to see if the archival year of a forwark link
# is that of the parent (ie. 2000|2005|2010)
all_forwardlinks = parsed_parent_site.xpath('//a[starts-with(@href,"' +
parent_site[:9] +'")]/@href')
return all_forwardlinks
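# --- Editor's note: a hedged usage sketch, not part of the original module. ---
# Assuming the module is run from the Way-Back directory of a
# crawl-to-the-future checkout (as the checks in archive_domain() require),
# a call could look like the following; the domain, year and keyword values
# are illustrative only:
#
#     archived, duds = archive_domain("www.example.com", 2005,
#                                     percent=10, debug=True, throttle=2)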
|
rodricios/crawl-to-the-future
|
crawlers/Way-Back/waybacktrack.py
|
Python
|
gpl-2.0
| 7,577
|
"""
vUSBf: A KVM/QEMU based USB-fuzzing framework.
Copyright (C) 2015 Sergej Schumilo, OpenSource Security Ralf Spenneberg
This file is part of vUSBf.
See the file LICENSE for copying permission.
"""
__author__ = 'Sergej Schumilo'
from scapy.all import *
#####################################
####### SCAPY EXTENSION STUFF #######
#####################################
# XLEShortField
class XLEShortField(LEShortField, XShortField):
def i2repr(self, pkt, x):
return XShortField.i2repr(self, pkt, x)
# XLEIntField
class XLEIntField(LEIntField, XIntField):
def i2repr(self, pkt, x):
return XIntField.i2repr(self, pkt, x)
####################################
####### REDIR SPECIFIC STUFF #######
####################################
usbredir_type_enum = { # CONTROL PACKETS
0: "hello",
1: "device_connect",
2: "device_disconnect",
3: "reset",
4: "interface_info",
5: "ep_info",
6: "set_configuration",
7: "get_configuration",
8: "configuration_status",
9: "set_alt_setting",
10: "get_alt_setting",
11: "alt_setting_status",
12: "start_iso_stream",
13: "stop_iso_stream",
14: "iso_stream_status",
15: "start_interrupt_receiving",
16: "stop_interrupt_receiving",
17: "interrupt_receiving_status",
18: "alloc_bulk_streams",
19: "free_bulk_streams",
20: "bulk_streams_status",
21: "cancel_data_packet",
22: "filter_reject",
23: "filter_filter",
24: "device_disconnect_ack", # DATA PACKETS
100: "data_control_packet",
101: "data_bulk_packet",
102: "data_iso_packet",
103: "data_interrupt_packet"}
# DO NOT FUZZ THE FOLLOWING REDIR-SPECIFIC PACKETS! FUZZING THEM WILL CAUSE A QEMU CRASH!
class usbredirheader(Packet):
name = "UsbredirPacket"
fields_desc = [LEIntEnumField("Htype", -1, usbredir_type_enum),
LEIntField("HLength", 0),
LEIntField("Hid", -1)]
# Redir Packet No. 0 (redir hello)
class hello_redir_header(Packet):
name = "Hello_Packet"
fields_desc = [StrLenField("version", "", length_from=64), # StrLenField("caps", "", length_from=4)]
LEIntField("capabilites", 1)]
class hello_redir_header_host(Packet):
name = "Hello_Packet_Host"
fields_desc = [StrLenField("version", "", length_from=56)]
# Redir Packet No. 1 (redir connect)
class connect_redir_header(Packet):
name = "Connect_Packet"
fields_desc = [ByteField("speed", 0),
XByteField("device_class", 0),
XByteField("device_subclass", 0),
XByteField("device_protocol", 0),
XLEShortField("vendor_id", 0),
XLEShortField("product_id", 0),
XLEShortField("device_version_bcd", 0)]
# Redir Packet No. 4 (interface info) [SIZE 132 BYTES]
class if_info_redir_header(Packet):
name = "Interface Info Packet"
fields_desc = [LEIntField("interface_count", None),
FieldListField("interface", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_class", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_subclass", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_protocol", None, ByteField("Value", 0), length_from=lambda p: 32)]
# Redir Packet No. 5 (endpoint info) [SIZE 160 BYTES]
class ep_info_redir_header(Packet):
name = "Endpoint Info Packet"
fields_desc = [FieldListField("ep_type", None, ByteEnumField("type_value", 0, {0: "type_control",
1: "type_iso",
2: "type interrupt",
255: "type invalid", })
, length_from=lambda p: 32),
FieldListField("interval", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("max_packet_size", None, XLEShortField("Value", 0), length_from=lambda p: 32 * 2)]
# Redir Packet No. 100 (data control) [SIZE 10 BYTES]
class data_control_redir_header(Packet):
name = "Data_Control_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("request", 0),
ByteField("requesttype", 0),
ByteField("status", 0),
XLEShortField("value", 0),
LEShortField("index", 0),
LEShortField("length", 0)]
# Redir Packet No. 101 (data bulk) [SIZE 8 BYTES]
class data_bulk_redir_header(Packet):
name = "Data_Bulk_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", None),
LEIntField("stream_id", None),
LEShortField("length_high", None)]
# Redir Packet No. 102 (data iso) [SIZE 4 BYTES]
class data_iso_redir_header(Packet):
name = "Data_Iso_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", 0)]
# Redir Packet No. 103 (data interrupt) [SIZE 4 BYTES]
class data_interrupt_redir_header(Packet):
name = "Data_Interrupt_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", 0)]
redir_specific_type = [[0, hello_redir_header],
[1, connect_redir_header],
[100, data_control_redir_header],
[101, data_bulk_redir_header],
[102, data_iso_redir_header],
[103, data_interrupt_redir_header]]
##################################
####### USB SPECIFIC STUFF #######
####### ENUMARATION PHASE #######
##################################
# USB Header (URB - replaced by usbredirheader)
class usb_header(Packet):
name = "USB_Packet"
fields_desc = [XLongField("id", 0xffff88003720d540),
ByteField("type", 43),
ByteField("transfer type", 2),
ByteField("endpoint", 80),
ByteField("device", 0),
LEShortField("bus_id", 0),
ByteField("device_setup_request", 0),
ByteField("data_present", 0),
LELongField("urb_sec", 0),
LEIntField("urb_usec", 0),
LEIntField("urb_status", 0),
LEIntField("urb_length", 0),
LEIntField("data_length", 0)]
# Generic USB Descriptor Header
class usb_generic_descriptor_header(Packet):
name = "USB_GENERIC_DESCRIPTOR_HEADER"
fields_desc = [ByteField("bLength", 0),
XByteField("bDescriptorType", 0x1)]
# USB Device Descriptor Packet (DescriptorType 0x01)
class usb_device_descriptor(Packet):
name = "USB_Device_Descriptor"
fields_desc = [ByteField("bLength", 18),
XByteField("bDescriptorType", 0x01),
XLEShortField("bcdUSB", 0x0),
XByteField("bDeviceClass", 0x1),
ByteField("bDeviceSubClass", 0),
ByteField("bDeviceProtocol", 0),
ByteField("bMaxPacketSize", 0),
XLEShortField("isVendor", 0x0),
XLEShortField("idProduct", 0x0),
XLEShortField("bcdDevice", 0x0),
ByteField("iManufacturer", 0),
ByteField("iProduct", 0),
ByteField("iSerialNumber", 0),
ByteField("bNumConfigurations", 1)]
# USB Configuration Descriptor
class usb_configuration_descriptor(Packet):
name = "USB_Configuration_Descriptor"
fields_desc = [ByteField("bLength", 9), # Size of Descriptor in Bytes
XByteField("bDescriptorType", 0x02), # Configuration Descriptor (0x02)
XLEShortField("wTotalLength", 0), # Total length in bytes of data returned
ByteField("bNumInterfaces", None), # Number of Interfaces
ByteField("bConfigurationValue", None), # Value to use as an argument to select this configuration
ByteField("iConfiguration", None), # Index of String Descriptor describing this configuration
FlagsField("bmAttributes", 0b11100000, 8, [
"Reserved_D0", # Reserved Bit
"Reserved_D1", # Reserved Bit
"Reserved_D2", # Reserved Bit
"Reserved_D3", # Reserved Bit
"Reserved_D4", # Reserved Bit
"Remote_Wakeup", # D5 Remote Wakeup
"Self_Powered", # D6 Self Powered
"Reserved_D7", # D7 Reserved: Must be 1 for USB1.1 and higher
]),
ByteField("bMaxPower", None) # Maximum Power consumption in 2mA units
]
# USB Interface_Descriptor
class usb_interface_descriptor(Packet):
name = "USB_Interface_Descriptor"
fields_desc = [ByteField("bLength", 9), # Size of Descriptor in Bytes (9 Bytes)
XByteField("bDescriptorType", 0x04), # Configuration Descriptor (0x04)
ByteField("bInterfaceNumber", None), # Number of Interface
ByteField("bAlternateSetting", None), # Value used to select alternative setting
ByteField("bNumEndpoints", None), # Number of Endpoints used for this interface
XByteField("bInterfaceClass", None), # Class Code [0x08: MASSSTORAGE, ...]
XByteField("bInterfaceSubClass", None), # Subclass Code
XByteField("bInterfaceProtocol", None), # Protocol Code
ByteField("iInterface", None) # Index of String Descriptor describing this interface
]
# USB Endpoint Descriptors
class usb_endpoint_descriptor(Packet):
name = "USB_Endpoint_Descriptor"
fields_desc = [ByteField("bLength", 7), # Size of Descriptor in Bytes (7 Bytes)
XByteField("bDescriptorType", 0x05), # Configuration Descriptor (0x05)
XByteField("bEndpointAddress", None), # Endpoint Adress TODO!
XByteField("bmAttribut", None), # TODO
LEShortField("wMaxPacketSize", None),
                   # Maximum Packet Size this endpoint is capable of sending or receiving
ByteField("bInterval", None) # Interval for polling endpoint data transfer. Value in frame counts
]
class usb_string_descriptor_langid(Packet):
name = "USB_String_Descriptor_LangID"
fields_desc = [ByteField("bLength", 0),
ByteField("bDescriptorType", 0),
FieldListField("wLANGID", 0x00, XLEShortField("Value", 1), count_from=lambda p: p.bLength)
]
class usb_string_descriptor(Packet):
name = "USB_String_Descriptor"
fields_desc = [ByteField("bLength", 0),
ByteField("bDescriptorType", 0),
FieldListField("UnicodeData", 0x00, XLEShortField("Char", 1), count_from=lambda p: p.bLength)
]
class usb_hid_descriptor(Packet):
name = "USB_HID_Descriptor"
fields_desc = [ByteField("bLength", 0x9),
ByteField("bDescriptorType", 0x21),
XLEShortField("bcdHID", 0x0),
ByteField("bCountryCode", 0x00),
ByteField("bNumDescriptors", 0x00), # WIEDERHOLT SICH IN RELATION ZUR ANZAHL DER DESCRIPTOREN
XByteField("bDescriptorType2", 0x22), # 0x22 REPORT DESCRIPTOR # 0x23 PYSICAL DESCRIPTOR
LEShortField("wDescriptorLength", 0x00)
]
class usb_hid_report_extension(Packet):
name = "USB_HID_Report_Extension"
fields_desc = [XByteField("bDescriptorType2", 0x22), # 0x22 REPORT DESCRIPTOR # 0x23 PYSICAL DESCRIPTOR
LEShortField("wDescriptorLength", 0x00)
]
class usb_hid_report_descriptor(Packet):
name = "USB_HID_Report_Descriptor"
fields_desc = []
descriptor_types = { 0x01: usb_device_descriptor,
0x02: usb_configuration_descriptor,
0x03: usb_string_descriptor,
0x04: usb_interface_descriptor,
0x05: usb_endpoint_descriptor,
0x09: usb_hid_descriptor
}
## PROTOTYPE FOR USB_HUB_DESCRIPTOR ##
##
## typedef struct _USB_HUB_DESCRIPTOR {
## UCHAR bDescriptorLength;
## UCHAR bDescriptorType;
## UCHAR bNumberOfPorts;
## USHORT wHubCharacteristics;
## UCHAR bPowerOnToPowerGood;
## UCHAR bHubControlCurrent;
## UCHAR bRemoveAndPowerMask[64];
## } USB_HUB_DESCRIPTOR, *PUSB_HUB_DESCRIPTOR;
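# --- Editor's note: a hedged sketch, not part of the original vUSBf code. ---
# A scapy rendering of the USB_HUB_DESCRIPTOR prototype quoted above; the field
# widths follow the C struct, while the class name and the 0x29 default
# descriptor type are illustrative assumptions.
class usb_hub_descriptor_sketch(Packet):
    name = "USB_Hub_Descriptor_Sketch"
    fields_desc = [ByteField("bDescriptorLength", 0),
                   XByteField("bDescriptorType", 0x29),
                   ByteField("bNumberOfPorts", 0),
                   LEShortField("wHubCharacteristics", 0),
                   ByteField("bPowerOnToPowerGood", 0),
                   ByteField("bHubControlCurrent", 0),
                   StrFixedLenField("bRemoveAndPowerMask", b"\x00" * 64, 64)]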
##############################################
####### USB MASSSTORAGE SPECIFIC STUFF #######
###### SCSI #######
##############################################
# dCBWSignature
dCBWSignature_magic_number = 0x43425355
# dCSWSignature
dCSWSignature_magic_number = 0x53425355
# Command Generic Header
class massstorage_generic(Packet):
name = "Massstorage_Generic"
fields_desc = [ XLEIntField("dSignature", 0)]
# Command Block Wrapper (CBW) [SIZE: 12 Bytes]
class massstorage_cbw(Packet):
name = "Massstorage_CBW"
fields_desc = [ XLEIntField("dCBWSignature", 0),
IntField("dCBWTag", None),
XLEIntField("dCBWDataTransferLength", None),
ByteField("bmCBWFlags", None),
ByteField("bCBWLUN", None),
ByteField("bCBWCBLength", None)
]
# Command Status Wrapper (CSW)
class massstorage_csw(Packet):
name = "Massstorage_CSW"
fields_desc = [ XLEIntField("dCSWSignature", 0),
IntField("dCSWTag", None),
XLEIntField("dCSWDataResidue", None),
ByteField("bCSWStatus", None)
]
###################################
####### SCSI SPECIFIC STUFF #######
###################################
# SCSI_INQUIRY STRING LENGTH
SCSI_INQUIRY_VENDOR_ID_LENGTH = 8
SCSI_INQUIRY_PRODUCT_ID_LENGTH = 16
SCSI_INQUIRY_PRODUCT_REVISION_LEVEL_LENGTH = 4
# INQUIRY SCSI (SIZE: 36 Bytes)
class scsi_inquiry(Packet):
name = "SCSI_Inquiry"
fields_desc = [ ByteField("peripheral", None),
ByteField("RMB", None),
ByteField("version", None),
ByteField("?", None),
ByteField("additional_length", None),
ByteField("??", None),
ByteField("???", None),
ByteField("????", None),
StrFixedLenField("vendor_id", None, SCSI_INQUIRY_VENDOR_ID_LENGTH),
StrFixedLenField("product_id", None, SCSI_INQUIRY_PRODUCT_ID_LENGTH),
StrFixedLenField("product_revision_level", None, SCSI_INQUIRY_PRODUCT_REVISION_LEVEL_LENGTH)
]
# Raw INQUIRY SCSI
class scsi_raw_inquiry(Packet):
name = "SCSI_Raw_Inquiry"
fields_desc = [ ByteField("peripheral", None),
ByteField("RMB", None),
ByteField("version", None),
ByteField("?", None),
ByteField("additional_length", None),
ByteField("??", None),
ByteField("???", None),
ByteField("????", None),
#PAYLOAD VENDOR ID[8] PRODUCT ID[16] PRODUCT REV[4]
]
# READ CAPACITY SCSI
#class scsi_read_capicity(Packet):
# name = "SCSI_READ_CAPICITY"
# fields_desc = [ ByteField("opcode", 0x25),
# ByteField("reserved", None),
# XLEIntField("logical_block_adress", None),
# ShortField("reserverd", None),
# ByteField("reserverd", None),
# XByteField("control", None)
# ]
# READ CAPACITY SCSI RESPONSE
class scsi_read_capicity(Packet):
name = "SCSI_READ_CAPICITY_RESPONSE"
fields_desc = [ XLEIntField("returned_logic_block_addr", None),
XLEIntField("block_length", None) ]
# MODE SELECT (6) SCSI RESPONSE
class scsi_mode_6(Packet):
name = "SCSI_MODE_SELECT_(6)_RESPONSE"
fields_desc = [ ByteField("mode_data_length", None),
ByteField("medium_field", None),
ByteField("dev-specific_parameter", None),
ByteField("block_desc_length", None) ]
# SCSI COMMAND LIST [OPCODE, NAME, SCAPYNAME]
SCSI_COMMAND_LIST = [ ['\x04', "FORMAT UNIT", None],
['\x12', "INQUIRY", scsi_inquiry],
['\x15', "MODE SELECT (6)", scsi_mode_6],
['\x55', "MODE SELECT (10)", None],
['\x1a', "MODE SENSE (6)", scsi_mode_6],
['\x5a', "MODE SENSE (10)", None],
['\x1e', "PREVENT ALLOW MEDIUM REMOVAL", None],
['\x08', "READ (6)", None],
['\x28', "READ (10)", None],
['\xa8', "READ (12)", None],
['\x25', "READ CAPACITY (10)", scsi_read_capicity],
['\x23', "READ FORMAT CAPACITY", None],
['\x43', "READ TOC/PMA/ATIP", None],
['\xa0', "REPORT LUNS", None],
['\x03', "REQUEST SENSE", None],
['\x1d', "SEND DIAGNOSITC", None],
['\x1b', "START STOP UNIT", None],
['\x35', "SYNCHRONIZE CACHE (10)", None],
['\x00', "TEST UNIT READY", None],
['\x2f', "VERIFY (10)", None],
['\x0a', "WRITE (6)", None],
['\x2a', "WRITE (10)", None],
['\xaa', "WRITE (12)", None]
]
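# --- Editor's note: a hedged usage sketch, not part of the original vUSBf code. ---
# Shows how the redir headers above are meant to be layered with scapy. The
# request values model a GET_DESCRIPTOR(DEVICE) control transfer and the Hid is
# arbitrary; none of these values is taken from the original project.
def _example_control_packet():
    payload = data_control_redir_header(endpoint=0x80, request=0x06,
                                        requesttype=0x80, status=0,
                                        value=0x0100, index=0, length=18)
    # Htype 100 is "data_control_packet"; the control header itself is 10 bytes.
    return usbredirheader(Htype=100, HLength=10, Hid=1) / payload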
|
schumilo/vUSBf
|
usbscapy.py
|
Python
|
gpl-2.0
| 17,727
|
def f2():
def f3():
print("f3")
f3()
f2()
|
pez2001/sVimPy
|
test_scripts/test_nested_function.py
|
Python
|
gpl-2.0
| 46
|
import pygame, sys, random, math
from pygame.locals import *
import organisms
import globalVars
class Graphics:
def __init__(self):
self.screen = pygame.display.set_mode((1080, 820))
pygame.display.set_caption('Ecosystem Simulator')
class GUI:
def __init__(self):
self.sliderX = 150
self.mouseX = pygame.mouse.get_pos()[0]
def render(self):
#Draw GUI pane
box = pygame.Rect(0, 720, 1080, 100)
pygame.draw.rect(globalVars.graphics.screen, (214,214,214), box, 0)
#Draw GUI text
font = pygame.font.Font(None, 20)
slider = font.render("Simulation Speed", 1, (10, 10, 10))
sliderpos = (50, 730)
globalVars.graphics.screen.blit(slider, sliderpos)
#Draw Slider
slider_bar = pygame.Rect(50, 770, 200, 10)
pygame.draw.rect(globalVars.graphics.screen, (255,255,255), slider_bar, 0)
slider = pygame.Rect(self.sliderX, 760, 15, 30)
pygame.draw.rect(globalVars.graphics.screen, (100,100,100), slider, 0)
def sliderDrag(self):
if pygame.mouse.get_pressed()[0] == True:
delta = pygame.mouse.get_pos()[0] - self.mouseX
if abs(pygame.mouse.get_pos()[0] - self.sliderX) <= 15 and abs(pygame.mouse.get_pos()[1] - 760) <= 30:
if self.sliderX + delta <= 250 and self.sliderX + delta >= 50:
self.sliderX += delta
globalVars.simSpeed = self.sliderX - 50
def act(self):
self.sliderDrag()
self.mouseX = pygame.mouse.get_pos()[0]
self.render()
class HUD:
def __init__(self, world):
#World
self.world = world
self.target = False
def getTarget(self):
#pygame.event.get()
if pygame.mouse.get_pressed()[0] == True:
for veg in self.world.vegetation:
if self.mouseClicked(veg):
self.target = veg
return
for prey in self.world.prey:
if self.mouseClicked(prey):
self.target = prey
return
def render(self):
font = pygame.font.Font(None, 20)
#Name
name = font.render("Name: " + str(self.target), 1, (10, 10, 10))
namepos = (25, 25)
globalVars.graphics.screen.blit(name, namepos)
#Position
pos = font.render("Position: (" + str(self.target.x) + "," + str(self.target.y) + ")", 1, (10, 10, 10))
pospos = (25, 45)
globalVars.graphics.screen.blit(pos, pospos)
#Age
age = font.render("Age: " + str(self.target.age), 1, (10, 10, 10))
agepos = (25, 85)
globalVars.graphics.screen.blit(age, agepos)
#Hunger/Food
if type(self.target) is organisms.Prey:
hunger = font.render("Hunger: " + str(self.target.hunger), 1, (10, 10, 10))
hungpos = (25, 65)
globalVars.graphics.screen.blit(hunger, hungpos)
if type(self.target) is organisms.Vegetation:
food = font.render("Food: " + str(self.target.food), 1, (10, 10, 10))
foodpos = (25, 65)
globalVars.graphics.screen.blit(food, foodpos)
#Status
if type(self.target) is organisms.Prey:
status = font.render("Status: " + str(self.target.status), 1, (10, 10, 10))
statuspos = (25, 105)
globalVars.graphics.screen.blit(status, statuspos)
#Target must be a Prey or Vegetation object
def mouseClicked(self, target):
dx = pygame.mouse.get_pos()[0] - target.x
dy = pygame.mouse.get_pos()[1] - target.y
distance = math.hypot(dx, dy)
if distance < target.circle_radius:
return True
else:
return False
def act(self):
self.getTarget()
if self.target != False:
self.render()
class World:
def __init__(self):
self.prey = []
self.predators = []
self.vegetation = []
self.water = []
self.shelter = []
self.time = 0
def addPrey(self, prey):
self.prey.append(prey)
def addVegetation(self, veg):
self.vegetation.append(veg)
def removePrey(self, prey):
self.prey.remove(prey)
def removeVegetation(self, veg):
self.vegetation.remove(veg)
def getDistance(self, element1, element2):
return math.hypot(element1.x - element2.x, element1.y - element2.y)
def collideByRadius(self, p1, p2):
dx = p1.x - p2.x
dy = p1.y - p2.y
distance = math.hypot(dx, dy)
if distance < p1.hunting_radius + p2.radius:
return True
else:
return False
def vegetationInRadius(self, element):
inRadius = []
for element2 in self.vegetation:
if self.collideByRadius(element, element2) == True:
inRadius.append(element2)
return inRadius
|
Liamc0950/EcosystemSimulator
|
utils.py
|
Python
|
gpl-2.0
| 5,066
|
#!/usr/bin/env python
import subprocess
short_name = 'Opt 3'
disp_name = 'Option 3 Submenu'
otype = 'Routine'
need = ['need 1: ', 'need 2: ', 'need 3: ']
answers = []
def run():
global answers
while True:
subprocess.call('clear')
i = 0
while i < len(need):
ans = input(need[i])
if validate(ans):
answers.append(ans)
i += 1
final = 'Doing something with '
for a in answers:
final = '{}, {}'.format(final, a)
print(final)
input()
return
def validate(char):
if char:
return True
return False
|
kbknapp/ConsoleMenu-py3x
|
examples/menu/opt3.py
|
Python
|
gpl-2.0
| 532
|
"""
Django settings for cbs project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '51ff&6zumcwpo8+60&5+dg5nqh6-ehdo@uk-xi$*paicy7b4e%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'p311',
'p365',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cbs.urls'
WSGI_APPLICATION = 'cbs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
|
max1k/cbs
|
cbs/settings.py
|
Python
|
gpl-2.0
| 2,196
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('special_role', models.CharField(max_length=255, null=True, blank=True)),
('pickled_permissions', models.TextField(null=True, blank=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
1905410/Misago
|
misago/acl/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 760
|
# -*- coding: utf-8 -*-
from .TransmitFeatures import TransmitFeatures
from .GetFeatures import GetFeatures
|
Hemisphere-Project/Telemir-DatabitMe
|
Telemir-EEG/TeleMir/analyses/__init__.py
|
Python
|
gpl-2.0
| 110
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 21 10:34:18 2014
@author: eegroopm
"""
import os, sys
import pandas as pd
import numpy as np
class common:
def __init__(self):
self.path = os.path.expanduser('~')
#\u0305 is unicode overline character
#self._overline_strings = [u'1\u0305', u'2\u0305' ,u'3\u0305', u'4\u0305', u'5\u0305', u'6\u0305', u'7\u0305',u'8\u0305',u'9\u0305']
#use matplotlib's mathtex rendering for overline strings
self._overline_strings = [r'\\bar{1}',r'\\bar{2}',r'\\bar{3}',
                                  r'\\bar{4}',r'\\bar{5}',r'\\bar{6}',
r'\\bar{7}',r'\\bar{8}',r'\\bar{9}']
        self.DSpaces = pd.DataFrame(columns = ['d-space','h','k','l']) #Msum is sum of absolute miller indices, needed for plotting pattern
self.Forbidden = pd.DataFrame(columns = ['d-space','h','k','l'])
self.u = 0
self.v = 0
self.w = 1
self.ZoneAxis = np.array([self.u,self.v,self.w])
self.beamenergy = 200 #keV
self.camlength = 100 #cm
self.camconst = 1.0
self.wavelength = self.Wavelength(self.beamenergy) #angstroms
self._x2 = False
self.a = 1
self.b = 1
self.c = 1
self.astar = 1
self.bstar = 1
self.cstar = 1
self.alpha = 90 #degrees
self.beta = 90
self.gamma = 90
self.alphastar = 90
self.betastar = 90
self.gammastar = 90
#SpaceGroup data
#DataFrame in the form SG Number, Patterson symbol, Geometry,Unit Cell Type, Unit Cell Conditions , Spacegroup conditions
#e.g.
#sg.loc[218] yields:
#Patterson P-43n
#Conditions (h==k and l == 2*n) or (h == 2*n and k==0 and ...
#Name: 218, dtype: object
if sys.version_info[0] == 3: #python3 and python2 pickle h5 files differently. GAH!!
self.sg = pd.read_hdf('resources/SpaceGroups.h5','table')
self.sghex = pd.read_hdf('resources/SpaceGroupsHex.h5','table') #for trigonal crystals with rhombohedral or hexagonal centering
self.mineraldb = pd.read_hdf('resources/MineralDatabase.h5','table')
elif sys.version_info[0] == 2:
self.sg = pd.read_hdf('resources/SpaceGroups_py2.h5','table')
self.sghex = pd.read_hdf('resources/SpaceGroupsHex_py2.h5','table')
self.mineraldb = pd.read_hdf('resources/MineralDatabase_py2.h5','table')
self.manualConds = [] #empty list of strings for manual conditions
def Wavelength(self,E):
        hbar = 6.626E-34 # Planck's constant h (not h-bar), in J*s (m^2 kg/s)
me = 9.109E-31 #kg
c = 3E8 #m/s
e = 1.602E-19 #Coulombs
E = E*1000 #turn to eV
wavelength = hbar/np.sqrt(2*me*e*E)/np.sqrt(1 + (e*E)/(2*me*c**2))*(10**10) #angstroms. relativistic formula
return(wavelength)
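    # Illustrative check (added comment, not in the original file): for the default
    # 200 keV beam, Wavelength(200) evaluates the relativistic de Broglie formula
    # lambda = h / sqrt(2*m*e*E) / sqrt(1 + e*E/(2*m*c^2)) and returns roughly
    # 0.0251 Angstroms, e.g.:
    #   cm = common(); print(cm.Wavelength(cm.beamenergy))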
|
eegroopm/pyLATTICE
|
resources/common.py
|
Python
|
gpl-2.0
| 2,942
|
# -*- coding: utf-8 -*-
# Third Party Stuff
from rest_framework.pagination import PageNumberPagination as DrfPageNumberPagination
class PageNumberPagination(DrfPageNumberPagination):
# Client can control the page using this query parameter.
page_query_param = 'page'
# Client can control the page size using this query parameter.
# Default is 'None'. Set to eg 'page_size' to enable usage.
page_size_query_param = 'per_page'
# Set to an integer to limit the maximum page size the client may request.
# Only relevant if 'page_size_query_param' has also been set.
max_page_size = 1000
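    # Usage sketch (added comment; endpoint name is illustrative): with this class
    # configured as the DRF pagination_class, a client could request e.g.
    #   GET /api/items/?page=2&per_page=50
    # and the requested 'per_page' value is capped at max_page_size (1000).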
|
aniketmaithani/kimani-adserver
|
Adserver/base/api/pagination.py
|
Python
|
gpl-2.0
| 640
|
# -*- coding: utf-8 -*-
"""
------
Urls
------
URL configuration file for the blog application
Authors:
* Alisson Barbosa Ferreira <alissonbf@hotmail.com>
Date:
============== ==================
Created        Updated
============== ==================
29/11/2014 29/11/2014
============== ==================
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('blog.views',
url(r'^cadastro-usuario/$', 'usuario', name='usuario'),
url(r'^cadastro-post/$', 'post', name='post'),
url(r'^api-all-posts', 'all_posts', name='all_posts'),
url(r'^api-get-post/(?P<pk>[0-9]+)/$', 'get_post', name='get_post'),
url(r'^api-auth', 'api_auth', name='api_auth'),
url(r'^api-token', 'api_token', name='api_token'),
url(r'^api-login', 'api_login', name='api_login'),
url(r'^enviar-email/$', 'enviar_email', name='enviar_email'),
url(r'^autorelacionamento/$', 'autorelacionamento', name='autorelacionamento'),
)
|
alissonbf/blog-teste
|
blog/urls.py
|
Python
|
gpl-2.0
| 989
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import re
import os, time, pprint  # used below; may also be re-exported by 'from lib import *'
import defaults
from lib import *
class MKGuitestFailed(MKException):
def __init__(self, errors):
self.errors = errors
MKException.__init__(self, _("GUI Test failed"))
class GUITester:
def __init__(self):
self.guitest = None
self.replayed_guitest_step = None
self.guitest_repair_step = None
def init_guitests(self):
if self.myfile == "guitest":
self.replay_guitest()
elif self.guitest_recording_active():
self.begin_guitest_recording()
def begin_guitest_recording(self):
self.guitest = {
"variables" : self.vars.copy(),
"filename" : self.myfile,
"output" : {},
}
# Fix transaction ID: We are just interested in whether it is valid or not
if "_transid" in self.vars:
if self.transaction_valid():
self.guitest["variables"]["_transid"] = "valid"
else:
self.guitest["variables"]["_transid"] = "invalid"
self.add_status_icon("guitest", _("GUI test recording is active"))
def end_guitest_recording(self):
if self.guitest != None:
self.guitest["user"] = self.user
self.guitest["elapsed_time"] = time.time() - self.start_time
self.save_guitest_step(self.guitest)
def save_guitest_step(self, step):
path = defaults.var_dir + "/guitests/RECORD"
if not os.path.exists(path):
test_steps = []
else:
test_steps = eval(file(path).read())
if self.guitest_repair_step != None:
mod_step = test_steps[self.guitest_repair_step]
mod_step["output"] = step["output"]
mod_step["user"] = step["user"]
mod_step["elapsed_time"] = step["elapsed_time"]
else:
test_steps.append(step)
file(path, "w").write("%s\n" % pprint.pformat(test_steps))
def load_guitest(self, name):
path = defaults.var_dir + "/guitests/" + name + ".mk"
try:
return eval(file(path).read())
except IOError, e:
raise MKGeneralException(_("Cannot load GUI test file %s: %s") % (self.attrencode(path), e))
def replay_guitest(self):
test_name = self.var("test")
if not test_name:
raise MKGuitestFailed([_("Missing the name of the GUI test to run (URL variable 'test')")])
guitest = self.load_guitest(test_name)
step_nr_text = self.var("step")
try:
step_nr = int(step_nr_text)
except:
raise MKGuitestFailed([_("Invalid or missing test step number (URL variable 'step')")])
if step_nr >= len(guitest) or step_nr < 0:
raise MKGuitestFailed([_("Invalid test step number %d (only 0...%d)") % (step_nr, len(guitest)-1)])
repair = self.var("repair") == "1"
if repair:
self.guitest_repair_step = step_nr
self.begin_guitest_recording()
self.replayed_guitest_step = guitest[step_nr]
self.replayed_guitest_step["replay"] = {}
self.myfile = self.replayed_guitest_step["filename"]
self.guitest_fake_login(self.replayed_guitest_step["user"])
self.vars = self.replayed_guitest_step["variables"]
if "_transid" in self.vars and self.vars["_transid"] == "valid":
self.vars["_transid"] = self.get_transid()
self.store_new_transids()
def guitest_recording_active(self):
        # Activated by symbolic link pointing to recording file
return os.path.lexists(defaults.var_dir + "/guitests/RECORD") and not \
self.myfile in self.guitest_ignored_pages()
def guitest_ignored_pages(self):
return [ "run_cron", "index", "side", "sidebar_snapin", "dashboard", "dashboard_dashlet", "login" ]
def guitest_record_output(self, key, value):
if self.guitest:
self.guitest["output"].setdefault(key, []).append(value)
elif self.replayed_guitest_step:
self.replayed_guitest_step["replay"].setdefault(key, []).append(value)
def finalize_guitests(self):
if self.guitest:
self.end_guitest_recording()
if self.replayed_guitest_step:
try:
self.end_guitest_replay()
except MKGuitestFailed, e:
self.write("\n[[[GUITEST FAILED]]]\n%s" % ("\n".join(e.errors)))
def end_guitest_replay(self):
if self.replayed_guitest_step and self.guitest_repair_step == None:
errors = []
for varname in self.replayed_guitest_step["output"].keys():
method = self.guitest_test_method(varname)
errors += [ "%s: %s" % (varname, error)
for error in method(
self.replayed_guitest_step["output"][varname],
self.replayed_guitest_step["replay"].get(varname, [])) ]
if errors:
raise MKGuitestFailed(errors)
def guitest_test_method(self, varname):
if varname == "data_tables":
return guitest_check_datatables
elif varname == "page_title":
return guitest_check_single_value
else:
return guitest_check_element_list
def guitest_check_single_value(reference, reality):
    errors = []
    if len(reference) > 1:
        errors.append("More than one reference value: %s" % ", ".join(reference))
    if len(reality) > 1:
        errors.append("More than one value: %s" % ", ".join(reality))
    diff_text = guitest_check_text(reference[0], reality[0])
    if diff_text:
        errors.append(diff_text)
    return errors
def guitest_check_element_list(reference, reality):
errors = []
one_missing = False
for entry in reference:
if not guitest_entry_in_reference_list(entry, reality):
errors.append("missing entry %r" % (entry,))
one_missing = True
if one_missing:
for entry in reality:
if not guitest_entry_in_reference_list(entry, reference):
errors.append("exceeding entry %r" % (entry,))
return errors
def guitest_entry_in_reference_list(entry, ref_list):
for ref_entry in ref_list:
if guitest_entries_match(ref_entry, entry):
return True
return False
def guitest_entries_match(ref, real):
if type(ref) in (list, tuple):
return len(ref) == len(real) and \
map(guitest_drop_dynamic_ids, ref) == map(guitest_drop_dynamic_ids, real)
else:
return guitest_drop_dynamic_ids(ref) == guitest_drop_dynamic_ids(real)
def guitest_check_datatables(reference, reality):
if len(reference) != len(reality):
return [ _("Expected %d data tables, but got %d") % (len(reference), len(reality)) ]
errors = []
for ref_table, real_table in zip(reference, reality):
errors += guitest_check_datatable(ref_table, real_table)
return errors
def guitest_check_datatable(ref_table, real_table):
if ref_table["id"] != real_table["id"]:
return [ "Table id %s expected, but got %s" % (ref_table["id"], real_table["id"]) ]
if len(ref_table["rows"]) != len(real_table["rows"]):
return [ "Table %s: expected %d rows, but got %d" % (
ref_table["id"], len(ref_table["rows"]), len(real_table["rows"])) ]
for row_nr, (ref_row, real_row) in enumerate(zip(ref_table["rows"], real_table["rows"])):
if len(ref_row) != len(real_row):
return [ "Table %s, row %d: expected %d columns, but got %d" % (
ref_table["id"], row_nr+1, len(ref_row), len(real_row)) ]
# Note: Rows are tuples. The first component is the list of cells
for cell_nr, (ref_cell, real_cell) in enumerate(zip(ref_row[0], real_row[0])):
# Note: cell is a triple. The first component contains the text
diff_text = guitest_check_text(ref_cell[0], real_cell[0])
if diff_text:
return [ "Row %d, Column %d: %s" % (row_nr, cell_nr, diff_text) ]
return []
def guitest_check_text(ref, real):
ref_clean = guitest_drop_dynamic_ids(ref)
real_clean = guitest_drop_dynamic_ids(real)
if ref_clean == real_clean:
return ""
prefix, ref_rest, real_rest = find_common_prefix(ref_clean, real_clean)
return "expected %s[[[%s]]], got %s[[[%s]]]" % (prefix, ref_rest, prefix, real_rest)
def find_common_prefix(a, b):
    # Returns (common prefix, remainder of a, remainder of b)
    if len(a) > len(b) and a.startswith(b):
        return b, a[len(b):], ""
    if len(b) > len(a) and b.startswith(a):
        return a, "", b[len(a):]
    for i in range(min(len(a), len(b))):
        if a[i] != b[i]:
            return a[:i], a[i:], b[i:]
    return a, "", ""
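# Example of the intended behaviour (added for clarity, not in the original):
#   find_common_prefix("abcXYZ", "abc123") -> ("abc", "XYZ", "123")
#   find_common_prefix("abc", "abcdef")    -> ("abc", "", "def")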
def guitest_drop_dynamic_ids(text):
return re.sub("selection(%3d|=)[a-f0-9---]{36}", "selection=*",
re.sub("_transid=1[4-6][0-9]{8}/[0-9]+", "_transid=TRANSID", text))
|
oposs/check_mk_mirror
|
web/htdocs/guitester.py
|
Python
|
gpl-2.0
| 10,454
|
import sqlite3
import RPi.GPIO as GPIO
import os, sys, time
conn = sqlite3.connect( os.path.join( os.path.dirname(os.path.realpath(sys.argv[0])), 'db/timelapsecontroller.db'))
conn.row_factory = sqlite3.Row
sleep=2
def set_pid(pid=None):
c = conn.cursor()
    try:
        # Store the PID in the config table (NULL when clearing it on shutdown)
        c.execute("UPDATE timelapseconfig SET pid=?", (int(pid) if pid is not None else None,))
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
# Save (commit) the changes
conn.commit()
print "Set the PID to be ", pid
def wakeup():
#Using Port 6 as Ground
#Port 7 is Live
#Sets up GPIO Pin 7 to Output
GPIO.setup(7, GPIO.OUT)
#Turns on GPIO Pin 7 - Enables Power to Pin 7 for focus / wake up.
GPIO.output(7, True)
time.sleep(2)
GPIO.output(7, False)
def running():
c = conn.cursor()
try:
c.execute('SELECT * FROM timelapseconfig')
config = c.fetchone()
if config['running'] and config['count'] < config['target']:
print "Running ({} of {})".format(config['count'], config['target'])
return True
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
return False
def getsleep():
c = conn.cursor()
try:
c.execute('SELECT * FROM timelapseconfig')
config = c.fetchone()
return config['sleep']
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
def shoot():
#Sets up GPIO Pin 11 to Output
GPIO.setup(11, GPIO.OUT)
#Pause for 2 Seconds (Hold Fire for 2 Seconds)
#Turns on GPIO Pin 11 - Enables Power to Pin 11 to Shoot
GPIO.output(11, True)
time.sleep(2)
GPIO.output(11, False)
def updatecounter():
c = conn.cursor()
try:
# Update the DB counter
c.execute("UPDATE timelapseconfig set count=count+1")
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
# Save (commit) the changes
conn.commit()
print "Incrementing counter"
if __name__ == "__main__":
#Set the Board Mode
GPIO.setmode(GPIO.BOARD)
#Write (set) PID to config
set_pid(os.getpid())
while True:
if ( running() ):
wakeup()
shoot()
updatecounter()
#Pause for configured # of seconds (default 2)
sleep = getsleep()
print "Sleeping for %r seconds.." % sleep
time.sleep(sleep)
#Write (unset) PID to config
set_pid(None)
# close the DB conn
conn.close()
#Stops the script and End of script clean up of the GPIO Port
GPIO.cleanup()
|
alanmcna/timelapsecontroller
|
focusandshoot.py
|
Python
|
gpl-2.0
| 2,409
|
import os
import re
import time
import xbmc
import xbmcvfs
import xbmcgui
import urllib2
import bjsonrpc
from bjsonrpc.handlers import BaseHandler
from quasar.addon import ADDON, ADDON_PATH
from quasar.logger import log
from quasar.config import JSONRPC_EXT_PORT, QUASARD_HOST
from quasar.osarch import PLATFORM
from quasar.util import toUtf8, notify, getLocalizedString, getLocalizedLabel, system_information
from quasar.overlay import OverlayText
from quasar.dialog_select import DialogSelect
from quasar.dialog_insert import DialogInsert
XBMC_PLAYER = xbmc.Player()
class QuasarRPCServer(BaseHandler):
public_methods_pattern = r'^[A-Za-z]\w+$'
_objects = {}
_failures = {}
def Reset(self):
for i in self._objects:
try:
self._objects[i].hide()
except:
pass
log.info("Resetting RPC objects...")
self._objects = {}
def Refresh(self):
return xbmc.executebuiltin("Container.Refresh")
def UpdateAddonRepos(self):
return xbmc.executebuiltin("UpdateAddonRepos")
def SystemInformation(self):
return system_information()
def Notify(self, header, message, image):
return notify(getLocalizedLabel(message), header, 3000, image)
def Keyboard(self, default="", heading="", hidden=False):
keyboard = xbmc.Keyboard(default, getLocalizedLabel(heading), hidden)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
def Dialog(self, title, message):
dialog = xbmcgui.Dialog()
return dialog.ok(getLocalizedLabel(title), getLocalizedLabel(message))
def Dialog_Confirm(self, title, message):
dialog = xbmcgui.Dialog()
return dialog.yesno(getLocalizedLabel(title), getLocalizedLabel(message))
def Dialog_Select(self, title, items):
dialog = xbmcgui.Dialog()
return dialog.select(getLocalizedLabel(title), items)
def Dialog_Select_Large(self, title, subject, items):
build = xbmc.getInfoLabel("System.BuildVersion")
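        # (Added note) BuildVersion strings look roughly like "16.1 Git:20160508-..."
        # (illustrative value), so the slice on the next line yields the major version, e.g. 16.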
kodi_version = int(build.split()[0][:2])
title_encoded = "%s %s" % (getLocalizedLabel(title), toUtf8(subject))
# For Kodi < 15
if kodi_version < 15:
log.warning("Kodi version below 15, using standard xbmc.Dialog()")
log.warning("See https://github.com/scakemyer/plugin.video.quasar/issues/75")
dialog = xbmcgui.Dialog()
for i, item in enumerate(items):
item = item.replace("\n", " - ")
items[i] = item
return dialog.select(title_encoded, items)
# For Kodi >= 15
else:
window = DialogSelect("DialogSelectLarge.xml",
ADDON_PATH,
"Default",
title=title_encoded,
items=items)
window.doModal()
retval = window.retval
del window
return retval
def Player_GetPlayingFile(self, *args, **kwargs):
return XBMC_PLAYER.getPlayingFile()
def Player_IsPlaying(self, *args, **kwargs):
return XBMC_PLAYER.isPlaying(*args, **kwargs)
def Player_IsPaused(self):
return xbmc.getCondVisibility("Player.Paused")
def Player_WatchTimes(self):
error = ""
watchedTime = "0"
videoDuration = "0"
try:
watchedTime = str(XBMC_PLAYER.getTime())
videoDuration = str(XBMC_PLAYER.getTotalTime())
log.debug("Watched: %s, duration: %s" % (watchedTime, videoDuration))
except Exception as e:
error = "Stopped playing: %s" % repr(e)
watchTimes = {
"watchedTime": watchedTime,
"videoDuration": videoDuration,
"error": error
}
return watchTimes
def ConvertLanguage(self, *args, **kwargs):
return xbmc.convertLanguage(*args, **kwargs)
def GetPlatform(self):
return PLATFORM
def GetAddonInfo(self):
info = {}
for key in ("author", "changelog", "description", "disclaimer",
"fanart", "icon", "id", "name", "path", "profile", "stars",
"summary", "type", "version"):
info[key] = ADDON.getAddonInfo(key)
return info
def AddonFailure(self, addonId):
if ADDON.getSetting("provider_disable_failing") == u"false":
return 0
if addonId in self._failures:
self._failures[addonId] += 1
else:
self._failures[addonId] = 1
log.warning("Recorded failure %d for %s" % (self._failures[addonId], addonId))
if self._failures[addonId] > int(ADDON.getSetting("provider_allowed_failures")):
try:
time.sleep(10)
notify(getLocalizedString(30111))
urllib2.urlopen("%s/provider/%s/disable" % (QUASARD_HOST, addonId))
except:
notify(getLocalizedString(30112))
return 0
return self._failures[addonId]
def AddonCheck(self, addonId):
if addonId in self._failures:
return self._failures[addonId]
return 0
def AddonSettings(self, addonId):
return xbmc.executebuiltin("Addon.OpenSettings(%s)" % addonId)
def GetLanguage(self, *args, **kwargs):
return xbmc.getLanguage(*args, **kwargs)
def GetLocalizedString(self, *args, **kwargs):
return ADDON.getLocalizedString(*args, **kwargs).encode('utf-8', 'ignore')
def GetSetting(self, *args, **kwargs):
return ADDON.getSetting(*args, **kwargs)
def GetAllSettings(self):
settings = []
settingsFile = os.path.join(ADDON.getAddonInfo("path"), "resources", "settings.xml")
with open(settingsFile, 'r') as settingsStr:
fileContent = settingsStr.read()
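        # (Added note) The regex below assumes setting definitions roughly of the form
        # (illustrative, not taken from the real settings.xml):
        #   <setting id="download_path" type="folder" ... />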
keyType = re.findall(r".*id=\"(\w+)\".*type=\"(\w+)\"", fileContent)
for key, _type in keyType:
settings.append({
"key": key,
"type": _type,
"value": ADDON.getSetting(key)
})
return settings
def SetSetting(self, *args, **kwargs):
return ADDON.setSetting(*args, **kwargs)
def GetCurrentView(self):
skinPath = xbmc.translatePath('special://skin/')
xml = os.path.join(skinPath, 'addon.xml')
f = xbmcvfs.File(xml)
read = f.read()
f.close()
try:
src = re.search('defaultresolution="([^"]+)', read, re.DOTALL).group(1)
except:
src = re.search('<res.+?folder="([^"]+)', read, re.DOTALL).group(1)
src = os.path.join(skinPath, src, 'MyVideoNav.xml')
f = xbmcvfs.File(src)
read = f.read()
f.close()
match = re.search('<views>([^<]+)', read, re.DOTALL)
if match:
views = match.group(1)
log.info("Skin's ViewModes: %s" % views)
for view in views.split(','):
if xbmc.getInfoLabel('Control.GetLabel(%s)' % view):
return view
def TranslatePath(self, *args, **kwargs):
return xbmc.translatePath(*args, **kwargs)
def Log(self, *args, **kwargs):
return xbmc.log(*args, **kwargs)
def Dialog_CloseAll(self, *args, **kwargs):
return xbmc.executebuiltin("Dialog.Close(all, true)")
def DialogInsert(self, *args, **kwargs):
window = DialogInsert("DialogInsert.xml", ADDON_PATH, "Default")
window.doModal()
retval = {"type": ("cancelled", "url", "file")[window.type], "path": window.retval}
del window
return retval
###########################################################################
# DialogProgress
###########################################################################
def DialogProgress_Create(self, *args, **kwargs):
dialog = xbmcgui.DialogProgress()
self._objects[id(dialog)] = dialog
dialog.create(*args, **kwargs)
return id(dialog)
def DialogProgress_IsCanceled(self, hwnd, *args, **kwargs):
return self._objects[hwnd].iscanceled(*args, **kwargs)
def DialogProgress_Update(self, hwnd, *args, **kwargs):
return self._objects[hwnd].update(*args, **kwargs)
def DialogProgress_Close(self, hwnd, *args, **kwargs):
dialog = self._objects.pop(hwnd)
dialog.close()
del dialog
# Background DialogProgress
def DialogProgressBG_Create(self, *args, **kwargs):
dialog = xbmcgui.DialogProgressBG()
self._objects[id(dialog)] = dialog
dialog.create(*args, **kwargs)
return id(dialog)
def DialogProgressBG_IsFinished(self, hwnd, *args, **kwargs):
return self._objects[hwnd].isFinished(*args, **kwargs)
def DialogProgressBG_Update(self, hwnd, *args, **kwargs):
return self._objects[hwnd].update(*args, **kwargs)
def DialogProgressBG_Close(self, hwnd, *args, **kwargs):
dialog = self._objects.pop(hwnd)
dialog.close()
del dialog
# Overlay status
def OverlayStatus_Create(self):
overlay = OverlayText()
overlayId = id(overlay)
self._objects[overlayId] = overlay
return overlayId
def OverlayStatus_Update(self, hwnd, percent, line1, line2, line3):
text = "\n".join([line1, line2, line3])
self._objects[hwnd].text = text
def OverlayStatus_Show(self, hwnd):
self._objects[hwnd].show()
def OverlayStatus_Hide(self, hwnd):
self._objects[hwnd].hide()
def OverlayStatus_Close(self, hwnd):
overlay = self._objects.pop(hwnd)
overlay.hide()
del overlay
def server_thread():
try:
s = bjsonrpc.createserver(port=JSONRPC_EXT_PORT, handler_factory=QuasarRPCServer)
log.info("quasar: starting jsonrpc service")
s.serve()
log.info("quasar: exiting jsonrpc service")
except Exception:
import traceback
map(log.error, traceback.format_exc().split("\n"))
raise
|
felipenaselva/felipe.repository
|
plugin.video.quasar/resources/site-packages/quasar/rpc.py
|
Python
|
gpl-2.0
| 10,162
|
from jinja2 import Environment, FileSystemLoader
import yaml
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.embed import server_document
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.themes import Theme
from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
env = Environment(loader=FileSystemLoader('templates'))
class IndexHandler(RequestHandler):
def get(self):
template = env.get_template('embed.html')
script = server_document('http://localhost:5006/bkapp')
self.write(template.render(script=script, template="Tornado"))
def modify_doc(doc):
df = sea_surface_temperature.copy()
source = ColumnDataSource(data=df)
plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',
title="Sea Surface Temperature at 43.18, -70.43")
plot.line('time', 'temperature', source=source)
def callback(attr, old, new):
if new == 0:
data = df
else:
data = df.rolling('{0}D'.format(new)).mean()
source.data = ColumnDataSource(data=data).data
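        # (Added note) e.g. a slider value of 7 replaces the raw series with a
        # 7-day rolling mean; a value of 0 restores the unsmoothed data.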
slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
slider.on_change('value', callback)
doc.add_root(column(slider, plot))
doc.theme = Theme(json=yaml.load("""
attrs:
Figure:
background_fill_color: "#DDDDDD"
outline_line_color: white
toolbar_location: above
height: 500
width: 800
Grid:
grid_line_dash: [6, 4]
grid_line_color: white
"""))
bokeh_app = Application(FunctionHandler(modify_doc))
io_loop = IOLoop.current()
server = Server({'/bkapp': bokeh_app}, io_loop=io_loop, extra_patterns=[('/', IndexHandler)])
server.start()
if __name__ == '__main__':
from bokeh.util.browser import view
print('Opening Tornado app with embedded Bokeh application on http://localhost:5006/')
io_loop.add_callback(view, "http://localhost:5006/")
io_loop.start()
|
Ziqi-Li/bknqgis
|
bokeh/examples/howto/server_embed/tornado_embed.py
|
Python
|
gpl-2.0
| 2,322
|
from setuptools import setup, find_packages
import os
version = '0.5'
setup(name='uwosh.emergency.master',
version=version,
description="",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Nathan Van Gheem',
author_email='vangheem@gmail.com',
url='http://svn.plone.org/svn/plone/plone.example',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['uwosh', 'uwosh.emergency'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'uwosh.simpleemergency>=1.1',
'rsa'
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
|
uwosh/uwosh.emergency.master
|
setup.py
|
Python
|
gpl-2.0
| 1,123
|
#!/usr/bin/env python
# *-* coding:utf-8 *-*
"""
Date :
Author : Vianney Gremmel loutre.a@gmail.com
"""
def memo(f):
class Memo(dict):
def __missing__(self, key):
r = self[key] = f(key)
return r
return Memo().__getitem__
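# Note (added comment): memo caches results by argument via dict.__missing__, so a
# repeated call to the decorated isprime below (e.g. isprime(7919) a second time)
# is just a dictionary lookup instead of a recomputation.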
@memo
def isprime(n):
    if n < 2:  # 0 and 1 are not prime
        return False
    for d in xrange(2, int(n**0.5) + 1):
        if n % d == 0:
            return False
    return True
def maxi_primes():
for a in xrange(-1000, 1001):
for b in xrange(-999, 1001, 2):
n = 0
while True:
if not isprime(abs(n*n + a*n + b)) and n:
yield (n, a, b)
break
n += 1
print 'please wait...'
max_score = max(score for score in maxi_primes())
print max_score
print max_score[1]*max_score[2]
|
vianney-g/python-exercices
|
eulerproject/pb0027.py
|
Python
|
gpl-2.0
| 793
|
# -*- encoding: utf-8 -*-
import io
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import urllib2
import urlparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import os.path
zenmatePath = "/home/hbc/.mozilla/firefox/yeyuaq0s.default/extensions/firefox@zenmate.com.xpi"
ffprofile = webdriver.FirefoxProfile()
# ffprofile.set_preference("javascript.enabled", False)
# ffprofile.set_preference('permissions.default.image', 2)
# ffprofile.set_preference('permissions.default.stylesheet', 2)
# ffprofile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
ffprofile.add_extension(zenmatePath)
ffprofile.add_extension('/home/hbc/Downloads/quickjava-2.0.6-fx.xpi')
ffprofile.set_preference("thatoneguydotnet.QuickJava.curVersion", "2.0.6.1") ## Prevents loading the 'thank you for installing screen'
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Images", 2) ## Turns images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.AnimatedImage", 2) ## Turns animated images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.CSS", 2) ## CSS
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Cookies", 2) ## Cookies
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Flash", 2) ## Flash
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Java", 2) ## Java
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.JavaScript", 2) ## JavaScript
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Silverlight", 2) ## Silverlight
driver = webdriver.Firefox(ffprofile)
def _remove_div_vdx(soup):
for div in soup.find_all('div', class_='vidx'):
div.extract()
return soup
def get_data(urlchuong_list, i):
filename = 'urlsach/data/bosung/sach' + str(i) + '.txt'
ftmp = io.open(filename, 'w', encoding='utf-8')
try:
# hdrs = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Connection': 'keep-alive',
# 'Cookie': 'ipq_lip=20376774; ipq_set=1453874029; __atuvc=2%7C4; __utma=126044488.676620502.1453787537.1453787537.1453787537.1; __utmz=126044488.1453787537.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); PHPSESSID=ed3f4874b92a29b6ed036adfa5ad6fb3; ipcountry=us',
# 'Host': 'www.transcripture.com',
# 'Referer': 'http://www.transcripture.com/vietnamese-spanish-genesis-1.html',
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0'
# }
count = 1
for urlchuong in urlchuong_list:
print('Dang get chuong %d, sach %d'%(count,i))
# urlchuong = 'http://www.transcripture.com/vietnamese-chinese-revelation-3.html'
# print urlchuong
# # create request
# req = urllib2.Request(urlchuong, headers=hdrs)
# # get response
# response = urllib2.urlopen(req)
# soup = BeautifulSoup(response.read())
# Load a page
driver.get(urlchuong)
# delay = 40 # seconds
# try:
# wait = WebDriverWait(driver, delay)
# path = '/html/body/center/div[1]/div[2]/div[4]/table/tbody/tr[2]/td[1]/div/div[1]/form[1]/select/option[66]'
# elem = driver.find_element_by_xpath(path)
# wait.until(EC.visibility_of(elem))
# print "Page is ready!"
# except TimeoutException:
# print "Loading took too much time!"
# #reload page
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.ESCAPE)
# body.send_keys(Keys.F5)
content = driver.page_source
soup = BeautifulSoup(content)
soup = _remove_div_vdx(soup)
# print soup
table_tag = soup.find_all('table', attrs={'width':'100%', 'cellspacing':'0'})[0]
tr_tags = table_tag.find_all('tr')
_len = len(tr_tags)
# in first tr tag:
h2_class = tr_tags[0].find_all('h2', class_='cphd')
ftmp.write(u'' + h2_class[0].get_text() + '|')
ftmp.write(u'' + h2_class[1].get_text() + '\n')
# print table_tag
for x in xrange(1,_len):
data = tr_tags[x].get_text('|')
# print data
# url_ec = url.encode('unicode','utf-8')
ftmp.write(u'' + data + '\n')
count = count + 1
# close file
ftmp.close()
except Exception, e:
print e
# close file
ftmp.close()
def check_numline(filename):
urlsach_list = []
urlsach_file = open(filename, 'r')
for line in urlsach_file:
urlsach_list.append(line.strip())
_len = len(urlsach_list)
return _len
def getsttchuongthieu(sachi):
list_stt = []
urlsach = 'urlsach/sach' + str(sachi) + '.txt'
    # check the number of lines in the book URL file, which corresponds to the number of chapters
numline = check_numline(urlsach)
fname = 'urlsach/data/partcomplete/sach' + str(sachi) + '.txt'
    # read data from the book's data file
data = open(fname).read()
    # check whether each chapter number already appears in the book's data file
for i in xrange(1,numline + 1):
key = str(i)
# print ('da chay den day')
if key not in data:
list_stt.append(i)
return list_stt
def getlisturlchuongthieu(sachi):
list_chuongthieu = []
list_stt = getsttchuongthieu(sachi)
fname = 'urlsach/sach' + str(sachi) + '.txt'
fp = open(fname)
lines=fp.readlines()
for stt in list_stt:
list_chuongthieu.append(lines[stt-1])
return list_chuongthieu
def main():
for x in xrange(1,67):
        # check whether the file exists in the partcomplete directory
f2name = 'urlsach/data/partcomplete/sach' + str(x) + '.txt'
if os.path.isfile(f2name):
list_urlchuongthieu = getlisturlchuongthieu(x)
get_data(list_urlchuongthieu, x)
if __name__ == '__main__':
# driver = webdriver.Firefox()
driver.get("about:blank")
# open new tab
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.CONTROL + 't')
# time.sleep(15)
print('Nhap vao mot ky tu bat ky de tiep tuc chuong trinh')
key = raw_input()
main()
# close the tab
driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 'w')
driver.close()
# urlchuong_list = ['http://www.transcripture.com/vietnamese-chinese-exodus-1.html']
# get_data(urlchuong_list, 2)
|
hoaibang07/Webscrap
|
transcripture/sources/crawler_chuongthieu.py
|
Python
|
gpl-2.0
| 7,017
|
import unittest
import os
import logging
import re
import shutil
import datetime
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
from oeqa.utils.decorators import testcase
from oeqa.utils.network import get_free_port
class BitbakePrTests(oeSelfTest):
@classmethod
def setUpClass(cls):
cls.pkgdata_dir = get_bb_var('PKGDATA_DIR')
def get_pr_version(self, package_name):
package_data_file = os.path.join(self.pkgdata_dir, 'runtime', package_name)
package_data = ftools.read_file(package_data_file)
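        # (Added note) The pkgdata file is expected to contain a line like "PKGR: r0.12"
        # (example value); the regex below extracts the numeric suffix (12) as the revision.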
find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data)
self.assertTrue(find_pr, "No PKG revision found in %s" % package_data_file)
return int(find_pr.group(1))
def get_task_stamp(self, package_name, recipe_task):
stampdata = get_bb_var('STAMP', target=package_name).split('/')
prefix = stampdata[-1]
package_stamps_path = "/".join(stampdata[:-1])
stamps = []
for stamp in os.listdir(package_stamps_path):
find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (re.escape(prefix), recipe_task), stamp)
if find_stamp:
stamps.append(find_stamp.group(1))
        self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name))
self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." % (recipe_task, package_name, package_stamps_path))
return str(stamps[0])
def increment_package_pr(self, package_name):
inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
self.write_recipeinc(package_name, inc_data)
res = bitbake(package_name, ignore_status=True)
self.delete_recipeinc(package_name)
self.assertEqual(res.status, 0, msg=res.output)
self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output)
def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'):
config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type
self.write_config(config_package_data)
config_server_data = 'PRSERV_HOST = "%s"' % pr_socket
self.append_config(config_server_data)
def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'):
self.config_pr_tests(package_name, package_type, pr_socket)
self.increment_package_pr(package_name)
pr_1 = self.get_pr_version(package_name)
stamp_1 = self.get_task_stamp(package_name, track_task)
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
stamp_2 = self.get_task_stamp(package_name, track_task)
self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1")
self.assertTrue(stamp_1 != stamp_2, "Different pkg rev. but same stamp: %s" % stamp_1)
def run_test_pr_export_import(self, package_name, replace_current_db=True):
self.config_pr_tests(package_name)
self.increment_package_pr(package_name)
pr_1 = self.get_pr_version(package_name)
exported_db_path = os.path.join(self.builddir, 'export.inc')
export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
if replace_current_db:
current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path)
os.remove(current_db_path)
import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True)
os.remove(exported_db_path)
self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output)
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1")
@testcase(930)
def test_import_export_replace_db(self):
self.run_test_pr_export_import('m4')
@testcase(931)
def test_import_export_override_db(self):
self.run_test_pr_export_import('m4', replace_current_db=False)
@testcase(932)
def test_pr_service_rpm_arch_dep(self):
self.run_test_pr_service('m4', 'rpm', 'do_package')
@testcase(934)
def test_pr_service_deb_arch_dep(self):
self.run_test_pr_service('m4', 'deb', 'do_package')
@testcase(933)
def test_pr_service_ipk_arch_dep(self):
self.run_test_pr_service('m4', 'ipk', 'do_package')
@testcase(935)
def test_pr_service_rpm_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package')
@testcase(937)
def test_pr_service_deb_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package')
@testcase(936)
def test_pr_service_ipk_arch_indep(self):
self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package')
@testcase(1419)
def test_stopping_prservice_message(self):
port = get_free_port()
runCmd('bitbake-prserv --host localhost --port %s --loglevel=DEBUG --start' % port)
ret = runCmd('bitbake-prserv --host localhost --port %s --loglevel=DEBUG --stop' % port)
self.assertEqual(ret.status, 0)
|
schleichdi2/OPENNFR-6.1-CORE
|
opennfr-openembedded-core/meta/lib/oeqa/selftest/prservice.py
|
Python
|
gpl-2.0
| 5,821
|
from omf import feeder
import omf.solvers.gridlabd
feed = feeder.parse('GC-12.47-1.glm')
maxKey = feeder.getMaxKey(feed)
print(feed[1])
feed[maxKey + 1] = {
'object': 'node', 'name': 'test_solar_node', 'phases': 'ABCN',
'nominal_voltage': '7200'
}
feed[maxKey + 2] = {
'object': 'underground_line', 'name': 'test_solar_line', 'phases': 'ABCN',
'from': 'test_solar_node', 'to': 'GC-12-47-1_node_26', 'length': '100',
'configuration': 'line_configuration:6'
}
feed[maxKey + 3] = {
'object': 'meter', 'name': 'test_solar_meter', 'parent': 'test_solar_node',
'phases': 'ABCN', 'nominal_voltage': '480'
}
feed[maxKey + 4] = {
'object': 'inverter', 'name': 'test_solar_inverter', 'parent': 'test_solar_meter',
'phases': 'AS', 'inverter_type': 'PWM', 'power_factor': '1.0',
'generator_status': 'ONLINE', 'generator_mode': 'CONSTANT_PF'
}
feed[maxKey + 5] = {
'object': 'solar', 'name': 'test_solar', 'parent': 'test_solar_inverter', 'area': '1000000 sf',
'generator_status': 'ONLINE', 'efficiency': '0.2', 'generator_mode': 'SUPPLY_DRIVEN',
'panel_type': 'SINGLE_CRYSTAL_SILICON'
}
feed[maxKey + 6] = {
'object': 'recorder', 'parent': 'test_solar_meter', 'property': 'voltage_A.real,voltage_A.imag,voltage_B.real,voltage_B.imag,voltage_C.real,voltage_C.imag',
'file': 'GC-addSolar-voltages.csv', 'interval': '60', 'limit': '1440'
}
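# (Added note) The objects above form the chain test_solar_node -> test_solar_meter ->
# test_solar_inverter -> test_solar, following GridLAB-D's meter/inverter/solar parent
# hierarchy; the recorder then samples the meter's phase voltages once per minute.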
omf.solvers.gridlabd.runInFilesystem(feed, keepFiles = True, workDir = '.', glmName = 'GC-solarAdd.glm')
'''
output = open('GC-solarAdd.glm', 'w')
output.write(feeder.write(feed))
output.close()
'''
|
dpinney/omf
|
omf/scratch/MPUPV/solarAdd.py
|
Python
|
gpl-2.0
| 1,552
|
import re
from gourmet.plugin import ExporterPlugin
from gourmet.convert import seconds_to_timestring, float_to_frac
from . import gxml2_exporter
from gettext import gettext as _
GXML = _('Gourmet XML File')
class GourmetExportChecker:
def check_rec (self, rec, file):
self.txt = file.read()
self.rec = rec
self.check_attrs()
def check_attrs (self):
for attr in ['title','cuisine',
'source','link']:
if getattr(self.rec,attr):
assert re.search(r'<%(attr)s>\s*%(val)s\s*</%(attr)s>'%{
'attr':attr,
'val':getattr(self.rec,attr)
},
self.txt), \
'Did not find %s value %s'%(attr,getattr(self.rec,attr))
if self.rec.yields:
assert re.search(r'<yields>\s*%s\s*%s\s*</yields>'%(
self.rec.yields,
self.rec.yield_unit),
self.txt) or \
re.search(r'<yields>\s*%s\s*%s\s*</yields>'%(
float_to_frac(self.rec.yields),
self.rec.yield_unit),
self.txt), \
'Did not find yields value %s %s'%(self.rec.yields,
self.rec.yield_unit)
for att in ['preptime','cooktime']:
if getattr(self.rec,att):
tstr = seconds_to_timestring(getattr(self.rec,att))
assert re.search(r'<%(att)s>\s*%(tstr)s\s*</%(att)s>'%locals(),self.txt),\
'Did not find %s value %s'%(att,tstr)
class GourmetExporterPlugin (ExporterPlugin):
label = _('Gourmet XML Export')
sublabel = _('Exporting recipes to Gourmet XML file %(file)s.')
    single_completed_string = _('Recipe saved in Gourmet XML file %(file)s.')
filetype_desc = GXML
saveas_filters = [GXML,['text/xml'],['*.grmt','*.xml','*.XML']]
saveas_single_filters = saveas_filters
def get_multiple_exporter (self, args):
return gxml2_exporter.recipe_table_to_xml(
args['rd'],
args['rv'],
args['file'],
)
def do_single_export (self, args) :
gxml2_exporter.recipe_table_to_xml(args['rd'],
[args['rec']],
args['out'],
change_units=args['change_units'],
mult=args['mult']
).run()
def run_extra_prefs_dialog (self):
pass
def check_export (self, rec, file):
gec = GourmetExportChecker()
gec.check_rec(rec,file)
|
thinkle/gourmet
|
gourmet/plugins/import_export/gxml_plugin/gxml_exporter_plugin.py
|
Python
|
gpl-2.0
| 2,889
|
__author__= "barun"
__date__ = "$20 May, 2011 12:25:36 PM$"
from metrics import Metrics
from wireless_fields import *
DATA_PKTS = ('tcp', 'udp', 'ack',)
def is_control_pkt(pkt_type=''):
return pkt_type not in DATA_PKTS
class TraceAnalyzer(object):
'''
Trace Analyzer
'''
def __init__(self, file_name=None):
print 'Trace Analyzer'
self._receiveEvents = []
self._sendEvents = []
self._dropEvents = []
self._otherEvents = []
self._data_pkts_rcvd = []
self._cntrl_pkts_rcvd = []
self._sourceNodes = []
self._destinationNodes = []
self.parse_events(file_name)
self.get_statistics()
def parse_events(self, file_name):
'''
Parse the send, receive and drop events, and store them in a list. This
method should get called only once (from inside __init__) at the
beginning of processing.
'''
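        # (Added note, illustrative) old-format ns-2 wireless trace lines start with the
        # event type, e.g.:
        #   r 10.000000000 _2_ AGT  --- 12 tcp 1040 [...]
        # so the first character distinguishes send ('s'), receive ('r') and drop ('d').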
print 'Parse events -- Use normal record scan to filter receive events'
if file_name:
trace_file = None
try:
trace_file = open(file_name, 'r')
for event in trace_file:
if event[0] == EVENT_RECEIVE:
self._receiveEvents.append(event)
elif event[0] == EVENT_SEND:
self._sendEvents.append(event)
elif event[0] == EVENT_DROP:
self._dropEvents.append(event)
else:
self._otherEvents.append(event)
except IOError, ioe:
print 'IOError:', str(ioe)
finally:
if trace_file:
trace_file.close()
for event in self._receiveEvents:
event = event.split()
try:
if event[I_PKT_TYPE_TOKEN] == S_PKT_TYPE_TOKEN and\
event[I_TRACE_LEVEL_TOKEN] == S_TRACE_LEVEL_TOKEN and\
event[I_PKT_TYPE] in DATA_PKTS:
self._data_pkts_rcvd.append(event)
else:
self._cntrl_pkts_rcvd.append(event)
except IndexError:
#print event
self._data_pkts_rcvd.append(event)
continue
# Determine sending and receiving nodes
for event in self._sendEvents:
try:
event = event.split()
if event[I_PKT_TYPE_TOKEN] == S_PKT_TYPE_TOKEN and \
event[I_PKT_TYPE] in DATA_PKTS:
if event[I_SRC_FIELD_TOKEN] == S_SRC_FIELD_TOKEN:
src = event[I_SRC_ADDR_PORT].split('.')[0]
if src not in self._sourceNodes and int(src) >= 0:
self._sourceNodes.append(src)
else:
continue
                # Is it required to have destination nodes???
# In case of TCP, source nodes themselves will become
# destination of acknowledgements
#
# if event[I_PKT_TYPE_TOKEN] == S_PKT_TYPE_TOKEN and \
# event[I_PKT_TYPE] in DATA_PKTS:
# if event[I_DST_FIELD_TOKEN] == S_DST_FIELD_TOKEN:
# dst = event[I_DST_ADDR_PORT].split('.')[0]
# if dst not in self._destinationNodes and int(dst) >= 0:
# self._destinationNodes.append(dst)
# else:
# continue
except IndexError:
# IndexError can occur because certain log entries from MAC
# layer may not have source and destination infos -- don't
# know exactly why
continue
# Compute simulation times
try:
self._simulationStartTime = float(self._sendEvents[0].split()[I_TIMESTAMP])
except IndexError:
self._simulationStartTime = 0
try:
self._simulationEndTime = float(self._sendEvents[len(self._sendEvents)-1].split()[I_TIMESTAMP])
except IndexError:
self._simulationEndTime = 0
self._simulationDuration = self._simulationEndTime - self._simulationStartTime
def get_statistics(self):
msg = '''
Simulation start: %f
Simulation end: %f
Duration: %f
Source nodes: %s
# of packets sent: %d
# of packets received: %d
# of data packets: %d
        # of control packets: %d
        # of packets dropped: %d
# of other events: %d
''' % (
self._simulationStartTime,
self._simulationEndTime,
self._simulationDuration,
self._sourceNodes,
len(self._sendEvents),
len(self._receiveEvents),
len(self._data_pkts_rcvd),
len(self._cntrl_pkts_rcvd),
len(self._dropEvents),
len(self._otherEvents),
)
print msg
def get_average_throughput(self):
Metrics.averageThroughput()
def get_instantaneous_throughput(self):
Metrics.instantaneousThroughput()
|
barun-saha/ns2web
|
ns2trace/trace_analyzer_old.py
|
Python
|
gpl-2.0
| 5,513
|
## begin license ##
#
# "Meresco Solr" is a set of components and tools
# to integrate Solr into "Meresco."
#
# Copyright (C) 2011-2013 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2012 SURF http://www.surf.nl
# Copyright (C) 2013 Stichting Kennisnet http://www.kennisnet.nl
#
# This file is part of "Meresco Solr"
#
# "Meresco Solr" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Solr" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Solr"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from solrinterface import SolrInterface, UNTOKENIZED_PREFIX, SORTED_PREFIX
from fields2solrdoc import Fields2SolrDoc
from cql2solrlucenequery import Cql2SolrLuceneQuery
|
seecr/meresco-solr
|
meresco/solr/__init__.py
|
Python
|
gpl-2.0
| 1,228
|
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
#This is a changed version of the clozeIdevice with selectboxes instead of input fields
"""
ScormDropDown Idevice.
"""
import logging
from exe.engine.idevice import Idevice
from exe.engine.path import Path
from exe.engine.field import ClozeField, TextAreaField
from exe.engine.persist import Persistable
from HTMLParser import HTMLParser
from exe.engine.mimetex import compile
from exe.engine.translate import lateTranslate
from exe import globals as G
#import re
import Image
from exe.engine.resource import Resource
#from exe.engine.flvreader import FLVReader
#from htmlentitydefs import name2codepoint
#from exe.engine.htmlToText import HtmlToText
#from twisted.persisted.styles import Versioned
#from exe import globals as G
#import os
import re
#import urllib
#import shutil
log = logging.getLogger(__name__)
#==============================================================================
# ===========================================================================
# ===========================================================================
class ScormDropDownIdevice(Idevice):
"""
Holds a paragraph with words missing that the student must fill in
"""
persistenceVersion = 4
def __init__(self, parentNode=None):
"""
Sets up the idevice title and instructions etc
"""
Idevice.__init__(self, x_(u"SCORM Test Dropdown"),
x_(u"University of Auckland"),
x_(u"<p>FillIn exercises are texts or "
"sentences where students must fill in "
"missing words. They are often used for the "
"following purposes:</p>"
"<ol>"
"<li>To check knowledge of core course "
"concepts (this could be a pre-check, "
"formative exercise, or summative check).</li>"
"<li>To check reading comprehension.</li>"
"<li>To check vocabulary knowledge.</li>"
"<li>To check word formation and/or grammatical "
"competence. </li></ol>"),
x_(u"<dl>"
" <dt>If your goal is to test understanding "
"of core concepts or reading comprehension"
" </dt>"
" <dd>"
" <p>"
" Write a summary of the concept or reading long "
" enough to adequately test the target's "
"knowledge, but short enough not to "
"induce fatigue. Less than one typed page is "
"probably adequate, but probably "
"considerably less for young students or "
"beginners."
" </p>"
" <p>"
"Select words in the text that"
"are key to understanding the concepts. These"
"will probably be verbs, nouns, and key adverbs."
"Choose alternatives with one clear answer."
" </p>"
" </dd>"
" <dt>"
"If your goal is to test vocabulary knowledge"
" </dt>"
" <dd>"
"<p>Write a text using the target vocabulary. This "
"text should be coherent and cohesive, and be of "
"an appropriate length. Highlight the target "
"words in the text. Choose alternatives with one "
"clear answer.</p>"
" </dd>"
" <dt>"
"If your goal is to test word "
"formation/grammar:"
" </dt>"
" <dd>"
" <p>"
"Write a text using the "
"target forms. This text should be coherent and "
"cohesive, and be of an appropriate length. "
"Remember that the goal is not vocabulary "
"knowledge, so the core meanings of the stem "
"words should be well known to the students."
" </p>"
" <p>"
"Highlight the target words in the text. Provide "
"alternatives with the same word stem, but "
"different affixes. It is a good idea to get a "
"colleague to test the test/exercise to make "
"sure there are no surprises!"
" </p>"
" </dd>"
"</dl>"),
u"question",
parentNode)
self.instructionsForLearners = TextAreaField(
x_(u'Instructions'),
x_(u"""Hier können Sie eine Aufgabenstellung eingeben oder die Standardanweisung übernehmen.
"""),
x_(u"""Wähle im folgenden Abschnitt die richtigen Antworten aus!"""))
self.instructionsForLearners.idevice = self
self._content = ClozeField(x_(u'Cloze'),
x_(u"""<p>Um eine Lücke mit Antwortmöglichkeiten zu erzeugen,
schreiben sie zuerst die richtige Antwort und dann getrennt
mit '|' die falschen Antworten, also folgendermaßen:
richtig|falsch|falsch|falsch...
Markieren Sie die gesamten Antworten und klicken sie
auf den Button 'Wort verbergen/anzeigen'.
Hinweise:<br>In Antworten können Leerzeichen enthalten
sein<br>Das Zeichen '|' erhalten Sie, indem Sie die
'Alt Gr'-Taste gedrückt halten und dann auf die Taste
mit dem Zeichen '|' tippen (auf deutschen Tastaturen meist
neben dem 'Y').
</p>"""))
self._content.idevice = self
self.feedback = TextAreaField(x_(u'Feedback'),
x_(u'Enter any feedback you wish to provide the learner '
'with-in the feedback field. This field can be left blank.'))
self.feedback.idevice = self
self.emphasis = Idevice.SomeEmphasis
self.systemResources += ["common.js"]
self.isCloze = True
# Properties
content = property(lambda self: self._content,
doc="Read only, use 'self.content.encodedContent = x' "
"instead")
def upgradeToVersion1(self):
"""
Upgrades exe to v0.10
"""
self._upgradeIdeviceToVersion1()
self.instructionsForLearners = TextAreaField(
x_(u'Instructions For Learners'),
x_(u'Put instructions for learners here'),
x_(u'Read the paragraph below and '
'fill in the missing words'))
self.instructionsForLearners.idevice = self
self.feedback = TextAreaField(x_(u'Feedback'))
self.feedback.idevice = self
def upgradeToVersion2(self):
"""
Upgrades exe to v0.11
"""
self.content.autoCompletion = True
self.content.autoCompletionInstruc = _(u"Allow auto completion when "
u"user filling the gaps.")
def upgradeToVersion3(self):
"""
Upgrades to v0.12
"""
self._upgradeIdeviceToVersion2()
self.systemResources += ["common.js"]
def upgradeToVersion4(self):
"""
Upgrades to v0.20.3
"""
self.isCloze = True
# ===========================================================================
|
kohnle-lernmodule/exeLearningPlus1_04
|
exe/engine/scormdropdownidevice_tmp.py
|
Python
|
gpl-2.0
| 9,281
|
# -*- coding: utf-8 -*-
#
# Whitespace Evaluation SofTware documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 9 13:12:12 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../west'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
    # 'sphinx.ext.pngmath',  # pngmath and mathjax are mutually exclusive math renderers; keep mathjax below
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.inheritance_diagram',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Whitespace Evaluation SofTware'
copyright = u'2014, Kate Harrison'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'WhitespaceEvaluationSofTwaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'WhitespaceEvaluationSofTware.tex', u'Whitespace Evaluation SofTware Documentation',
u'Kate Harrison', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'whitespaceevaluationsoftware', u'Whitespace Evaluation SofTware Documentation',
[u'Kate Harrison'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'WhitespaceEvaluationSofTware', u'Whitespace Evaluation SofTware Documentation',
u'Kate Harrison', 'WhitespaceEvaluationSofTware', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
#autodoc_default_flags = ['inherited-members', 'show-inheritance']
autodoc_default_flags = ['show-inheritance']
#graphviz_dot='neato'
# rankdir="TB"
inheritance_graph_attrs = dict(rankdir="LR", size='""',
fontsize=24)#, ratio='compress')
# nodesep="3", overlap="false",
#inheritance_node_attrs = dict(shape='rect', fontsize=24, height=1.75,
# color='dodgerblue1')
inheritance_node_attrs = dict(shape='rect', fontsize=14)
#intersphinx_mapping = {'python': ('http://docs.python.org/3.2', None)}
intersphinx_mapping = {'python': ('http://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None),
'shapely': ('http://toblerity.org/shapely/', None),
'simplekml': ('http://simplekml.readthedocs.org/en/latest/', None)}
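# Illustrative note (not part of the generated configuration): with the
# intersphinx mapping above, the reST sources can cross-reference the mapped
# projects, for example :class:`numpy.ndarray` or :func:`scipy.interpolate.interp1d`.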
|
kate-harrison/west
|
documentation/conf.py
|
Python
|
gpl-2.0
| 9,688
|
from property import *
# Neuron common parameters
iaf_neuronparams = {'E_L': -70.,
'V_th': -50.,
'V_reset': -67.,
'C_m': 2.,
't_ref': 2.,
'V_m': -60.,
'tau_syn_ex': 1.,
'tau_syn_in': 1.33}
# Synapse common parameters
STDP_synapseparams = {
'model': 'stdp_synapse',
    'tau_m': {'distribution': 'uniform', 'low': 15., 'high': 25.},  # note: NEST's stdp_synapse exposes tau_plus rather than tau_m
'alpha': {'distribution': 'normal_clipped', 'low': 0.5, 'mu': 5.0, 'sigma': 1.0},
'delay': {'distribution': 'uniform', 'low': 0.8, 'high': 2.5},
'lambda': 0.5
}
# Glutamate synapse
# note: in dict(d, **STDP_synapseparams) the keyword expansion wins on key
# conflicts, so the shared 'delay' range above replaces the per-synapse one given here
STDP_synparams_Glu = dict({'delay': {'distribution': 'uniform', 'low': 1, 'high': 1.3},
'weight': w_Glu,
'Wmax': 70.}, **STDP_synapseparams)
# GABA synapse
STDP_synparams_GABA = dict({'delay': {'distribution': 'uniform', 'low': 1., 'high': 1.3},
'weight': w_GABA,
'Wmax': -60.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': {'distribution': 'uniform', 'low': 1, 'high': 1.3},
'weight': w_ACh,
'Wmax': 70.}, **STDP_synapseparams)
# Dopamine synapse common parameter
NORA_synparams = {'delay': 1.}
# Dopamine excitatory synapse
NORA_synparams_ex = dict({'weight': w_NR_ex,
'Wmax': 100.,
'Wmin': 85.}, **NORA_synparams)
# Dopamine inhibitory synapse
NORA_synparams_in = dict({'weight': w_NR_in,
'Wmax': -100.,
'Wmin': -85.}, **NORA_synparams)
# Create volume transmitters
# Dictionary of synapses with keys and their parameters
types = {GABA: (STDP_synparams_GABA, w_GABA, 'GABA'),
ACh: (STDP_synparams_ACh, w_ACh, 'Ach'),
Glu: (STDP_synparams_Glu, w_Glu, 'Glu'),
DA_ex: (NORA_synparams_ex, w_NR_ex, 'DA_ex', nora_model_ex),
DA_in: (NORA_synparams_in, w_NR_in, 'DA_in', nora_model_in)}
# Parameters for generator links
static_syn = {
'model': 'static_synapse',
'weight': w_Glu * 5,
'delay': pg_delay
}
# Connection parameters
conn_dict = {'rule': 'all_to_all',
'multapses': True}
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True}
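# Hedged usage sketch (not from the original file): these dictionaries follow
# the shape NEST 2.x expects for nest.Create() and nest.Connect(). The neuron
# model name and population sizes below are illustrative assumptions; names
# such as w_Glu and pg_delay come from the star import of property.py.
#
#   import nest
#   pre = nest.Create('iaf_psc_exp', 10, params=iaf_neuronparams)
#   post = nest.Create('iaf_psc_exp', 10, params=iaf_neuronparams)
#   nest.Connect(pre, post, conn_spec=conn_dict, syn_spec=static_syn)
#   multimeter = nest.Create('multimeter', params=multimeter_param)
#   detector = nest.Create('spike_detector', params=detector_param)
#   nest.Connect(multimeter, post)
#   nest.Connect(post, detector)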
|
research-team/NEUCOGAR
|
NEST/cube/noradrenaline/scripts/parameters.py
|
Python
|
gpl-2.0
| 2,793
|
import sys
from timer import Timer
import os
import pandas as pd
import nipy
import numpy as np
import re
import argparse
def get_images_list(path, regexp, number_images=None):
im_list=[]
dir_list=os.listdir(path)
if regexp=="NO":
im_list=dir_list
return dir_list
reg=re.compile(regexp)
im_list=[i for i in dir_list for m in [reg.search(i)] if m]
    if number_images is not None:
        # only validate the count when an expected number of images was supplied
        if len(im_list)!=int(number_images):
            raise Exception("number of images found does not match the expected number of images!")
return im_list
def delete_arrays(path_4d, region_code):
'''delete temporal arrays '''
p=1
while True:
if os.path.isfile( os.path.join(path_4d, str(region_code) +'_'+str(p) + ".npy" ) ):
os.remove(os.path.join(path_4d, str(region_code) +'_'+str(p) + ".npy" ))
p+=1
else:
break
def convert_array_for_regression(path_4d, region_code, split_size=1000):
''' merge region array to one and split it in (number images in study) x (voxels split_size) '''
regression_data=[]
p=1
while True:
try:
regression_data.append(np.load( os.path.join(path_4d, str(region_code) +'_'+str(p) + ".npy" ) ) )
print str(region_code) +'_' +str(p) + ".npy"
p+=1
except:
break
regression_data=np.concatenate(regression_data)
print "Region {}, regression data size {}, will be split by {} voxels chunks ".format(region_code,regression_data.shape, split_size)
sample_size, number_voxels=regression_data.shape
d=number_voxels/split_size
r=number_voxels-d*split_size
if d!=0:
l=[range(split_size*i,split_size*(i+1)) for i in range(0,d) ]
for i,j in enumerate(l): # TODO start from 0, maybe change to 1
save_np=regression_data[:,j]
np.save(os.path.join(path_4d, 'reg' + str(region_code) + "_" + str(i)) , save_np )
if r!=0:
save_np=regression_data[:,d*split_size:d*split_size+r]
np.save(os.path.join(path_4d, 'reg' + str(region_code) + "_" + str(i+1)) , save_np )
else:
np.save(os.path.join(path_4d, 'reg' + str(region_code) + "_" + str(0)) , regression_data )
def save_4d_data(Hammer_atlas, image_path, path_4d, image_names):
'''produce nparrays (voxels in region) x (image in study)
only if number of images less then 1000
'''
region_codes=np.unique(Hammer_atlas._data)
region_codes=region_codes[region_codes!=0]
region_coodinates={i:np.where(Hammer_atlas._data==i) for i in region_codes}
data_4d={i:[] for i in region_codes}
for im in image_names:
print im
try:
images_data=nipy.load_image(os.path.join(image_path, im ))._data
for k in data_4d:
data_4d[k].append(images_data[region_coodinates[k]])
except:
raise ValueError("Error during reading image {}".format(str(im)))
for c in region_codes:
c=int(c)
np_4d=np.array(data_4d[c])
print np_4d.shape
np.save(os.path.join(path_4d, str(c) +"_" + str(1)) , np_4d )
convert_array_for_regression(path_4d, c)
delete_arrays(path_4d, c)
def save_4d_data_region(logs_dir, atlas, image_path, path_4d, region_code, regexp='NO'):
image_names=get_images_list(image_path,regexp)
df=pd.DataFrame(image_names)
df.to_csv(os.path.join(logs_dir, str(region_code)+ '.csv'))
if len(image_names)<1000:
if int(region_code)!=0:
print 'FORCE MULTI JOBS SUBMISSION ( NOT EFFICIENT)'
elif int(region_code)==0:
save_4d_data(atlas, image_path, path_4d, image_names)
return 0
data_4d=[]
part=1
coordinate=np.where(atlas._data==int(region_code) )
if coordinate[0].shape[0]==0:
raise ValueError('Region code {} does not exist'.format(region_code))
count=0
for im in image_names:
# reading all images and dump nparrays by voxels in region by 1000 images
try:
images_data=nipy.load_image(os.path.join(image_path, im ))._data
count+=1
data=images_data[coordinate]
data_4d.append(data)
if count==1000:
np_4d=np.array(data_4d)
np.save(os.path.join(path_4d, str(region_code) + "_" + str(part)) , np_4d )
data_4d=[]
np_4d=None
part+=1
count=0
except:
print ("Error during reading image {}".format(str(im)))
if count!=0:
np_4d=np.array(data_4d)
np.save(os.path.join(path_4d, str(region_code) +"_" + str(part)) , np_4d )
convert_array_for_regression(path_4d, region_code)
delete_arrays(path_4d, region_code)
def experiment_save_4d(logs_dir, atlas_path,image_path, path_4d, region_code , reg):
atlas=nipy.load_image(atlas_path)
save_4d_data_region(logs_dir, atlas, image_path, path_4d, region_code , regexp=reg)
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Convert nifti images to nparray files')
parser.add_argument("-o",required=True, type=str, help="path to save result folder")
parser.add_argument("-i",required=True, type=str, help="path to nifti images")
parser.add_argument("-atlas",required=True, type=str, help="path to Atlas images to use to define voxel chunks")
parser.add_argument("-code",required=True,type=int, help="Atlas chunk code")
parser.add_argument("-regexp",type=str,default='NO', help="REGEXP to select images")
parser.add_argument("-logs",type=str,required=True, help="path to save logs")
args = parser.parse_args()
print args
with Timer() as t:
experiment_save_4d(args.logs, args.atlas, args.i, args.o, args.code, args.regexp)
print "save data for analysis %s s" %(t.secs)
|
roshchupkin/VBM
|
scripts/python/nii2np.py
|
Python
|
gpl-2.0
| 6,005
|
from setuptools import setup, find_packages, Extension
recoil_module = Extension('_recoil', sources=['recoil_interface.c', 'recoil.c'])
def readme():
with open('README.rst') as f:
return f.read()
setup(
name="pyrecoil",
version="0.3.1",
packages=find_packages(),
ext_modules=[recoil_module],
include_package_data=True,
author="Matt Westcott",
author_email="matt@west.co.tt",
description="Python bindings for RECOIL, the Retro Computer Image Library",
long_description=readme(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Graphics Conversion",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
],
url="http://github.com/gasman/pyrecoil",
license="GPLv2+",
)
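# Hedged usage note (not part of the original file): the C extension declared
# above builds with the standard setuptools commands, e.g.
#   python setup.py build_ext --inplace
#   pip install .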
|
gasman/pyrecoil
|
setup.py
|
Python
|
gpl-2.0
| 1,164
|
from random import uniform as randfloat
class BankAccount:
'A simple class to store money.'
money = 0
owner = ""
def __init__(self, owner, money):
self.owner = owner
self.money = round(money, 2)
def getOwner(self):
return self.owner
def getMoney(self):
return self.money
def deposit(self, amount):
self.money = round(self.money + amount, 2)
return True
def withdraw(self, amount):
if amount > self.money:
print("Sorry, you do not have enough money to complete this transaction.")
return False
else:
self.money -= amount
return True
class ATM:
'A simple class to distribute money.'
def use(self, bankaccount):
        if not bankaccount.__class__.__name__ == "BankAccount":
            print("Not a BankAccount!")
            return
print(" Welcome %s" % bankaccount.getOwner())
choice = ""
while not (choice == "1" or choice == "2" or choice == "3"):
choice = raw_input('''
Choose an Option:
1: Get Amount of Money
2: Deposit Money
3: Withdraw
''')
if choice == "1":
print("You have $%2.2f." % bankaccount.getMoney())
else:
while True:
try:
amount = float(raw_input("How much money? $"))
break;
except ValueError:
print
if choice == "2":
bankaccount.deposit(amount)
else:
bankaccount.withdraw(amount)
print("Done!")
print("Have a nice day!")
account = BankAccount(raw_input("What is your name? => "), randfloat(0.00, 50.00))
while True:
ATM().use(account)
|
timtim17/IntroToGameProg
|
Experiments/Bank.py
|
Python
|
gpl-2.0
| 1,989
|
import woo.core, woo.dem
from woo.dem import *
import woo.utils
from minieigen import *
from math import *
from woo import utils
m=woo.utils.defaultMaterial()
zeroSphere=woo.utils.sphere((0,0,0),.4) # sphere which is entirely inside the thing
for p in [woo.utils.sphere((0,0,0),1,mat=m),woo.utils.ellipsoid((0,0,0),semiAxes=(.8,1,1.2),mat=m),woo.utils.ellipsoid((0,0,0),semiAxes=(1.,1.,1.),mat=m),woo.utils.capsule((0,0,0),radius=.8,shaft=.6,mat=m)]:
print 100*'#'
print p.shape
#S=woo.core.Scene(fields=[DemField()])
#S.dem.par.add(p)
sp=woo.dem.ShapePack()
sp.add([p.shape,zeroSphere.shape])
r=sp.raws[0]
if isinstance(r,SphereClumpGeom):
for i in range(len(r.radii)): print r.centers[i],r.radii[i]
else:
for rr in r.rawShapes: print rr,rr.className,rr.center,rr.radius,rr.raw
# print [i for i in r.rawShapes]
r.recompute(div=10)
print 'equivRad',r.equivRad,p.shape.equivRadius
print 'volume',r.volume,p.mass/m.density
print 'inertia',r.inertia,p.inertia/m.density
print 'pos',r.pos,p.pos
print 'ori',r.ori,p.ori
print 50*'='
ee=p.shape
print ee
print 'volume',ee.volume
print 'equivRadius',ee.equivRadius
rr=(ee.volume/((4/3.)*pi))**(1/3.)
print 'sphere radius of the same volume',rr
print 'sphere volume',(4/3.)*pi*rr**3
|
sjl767/woo
|
examples/shapepack.py
|
Python
|
gpl-2.0
| 1,342
|
from roam.api import RoamEvents
from PyQt4.QtGui import QLineEdit, QPlainTextEdit
from PyQt4.QtCore import QEvent
from roam.editorwidgets.core import EditorWidget, registerwidgets
class TextWidget(EditorWidget):
widgettype = 'Text'
def __init__(self, *args):
super(TextWidget, self).__init__(*args)
def createWidget(self, parent):
return QLineEdit(parent)
def initWidget(self, widget):
widget.textChanged.connect(self.emitvaluechanged)
widget.installEventFilter(self)
def eventFilter(self, object, event):
# Hack I really don't like this but there doesn't seem to be a better way at the
# moment
if event.type() == QEvent.FocusIn:
RoamEvents.openkeyboard.emit()
return False
    def validate(self, *args):
        return bool(self.value())
def setvalue(self, value):
# Not the best way but should cover most use cases
# for now
value = value or ''
value = unicode(value)
try:
self.widget.setPlainText(value)
except AttributeError:
self.widget.setText(value)
def value(self):
try:
return self.widget.toPlainText()
except AttributeError:
return self.widget.text()
class TextBlockWidget(TextWidget):
widgettype = 'TextBlock'
def __init__(self, *args):
super(TextBlockWidget, self).__init__(*args)
def createWidget(self, parent):
return QPlainTextEdit(parent)
|
HeatherHillers/RoamMac
|
src/roam/editorwidgets/textwidget.py
|
Python
|
gpl-2.0
| 1,572
|
from django.apps import apps as django_apps
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic.base import ContextMixin
class SubjectOffstudyViewMixinError(Exception):
pass
class SubjectOffstudyViewMixin(ContextMixin):
"""Adds subject offstudy to the context.
Declare with SubjectIdentifierViewMixin.
"""
offstudy_model_wrapper_cls = None
subject_offstudy_model = None
# def __init__(self, **kwargs):
# super().__init__(**kwargs)
# if not self.offstudy_model_wrapper_cls:
# raise SubjectOffstudyViewMixinError(
# 'subject_offstudy_model_wrapper_cls must be a valid ModelWrapper. Got None')
# if not self.subject_offstudy_model:
# raise SubjectOffstudyViewMixinError(
# 'subject_offstudy_model must be a model (label_lower). Got None')
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# wrapper = self.offstudy_model_wrapper_cls(
# model_obj=self.subject_offstudy)
# context.update(subject_offstudy=wrapper)
# return context
@property
def subject_offstudy_model_cls(self):
try:
model_cls = django_apps.get_model(self.subject_offstudy_model)
except LookupError as e:
raise SubjectOffstudyViewMixinError(
f'Unable to lookup subject offstudy model. '
f'model={self.subject_offstudy_model}. Got {e}')
return model_cls
@property
def subject_offstudy(self):
"""Returns a model instance either saved or unsaved.
        If a saved instance does not exist, returns a new unsaved instance.
"""
model_cls = self.subject_offstudy_model_cls
try:
subject_offstudy = model_cls.objects.get(
subject_identifier=self.subject_identifier)
except ObjectDoesNotExist:
subject_offstudy = model_cls(
subject_identifier=self.subject_identifier)
except AttributeError as e:
if 'subject_identifier' in str(e):
raise SubjectOffstudyViewMixinError(
f'Mixin must be declared together with SubjectIdentifierViewMixin. Got {e}')
raise SubjectOffstudyViewMixinError(e)
return subject_offstudy
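# Minimal usage sketch (not part of this module). The class docstring says to
# declare the mixin together with SubjectIdentifierViewMixin, which supplies
# self.subject_identifier; the view base class and the label_lower value below
# are illustrative assumptions only.
#
#   class SubjectDashboardView(SubjectOffstudyViewMixin,
#                              SubjectIdentifierViewMixin,
#                              TemplateView):
#       subject_offstudy_model = 'my_app.subjectoffstudy'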
|
botswana-harvard/edc-offstudy
|
edc_offstudy/view_mixins.py
|
Python
|
gpl-2.0
| 2,362
|
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
import django_filters
import django_filters.rest_framework  # ensure the rest_framework submodule is loaded for the backend reference below
from rest_framework import viewsets
from rest_framework.response import Response
from api.serializers import UserSerializer, AddressSerializer, DestinationSerializer, GuestSerializer, MessageSerializer, OpenHourSerializer
from portal.models import Guest, Message
from building.models import Address
from transportation.models import Destination, OpenHour
class UserViewSet(viewsets.ModelViewSet):
lookup_field = 'username'
serializer_class = UserSerializer
queryset = User.objects.none() # critical
def list(self, request):
queryset = User.objects.filter(username = request.user.username) # critical
serializer = UserSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, username=None):
queryset = User.objects.filter(username = request.user.username) # critical
guest = get_object_or_404(queryset, username=username)
serializer = UserSerializer(guest, context={'request': request})
return Response(serializer.data)
# portal models
class AddressFilter(django_filters.FilterSet):
street = django_filters.CharFilter(name="street",lookup_type="icontains")
city = django_filters.CharFilter(name="city",lookup_type="icontains")
class Meta:
model = Address
fields = ('street', 'city')
class AddressViewSet(viewsets.ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
filter_class = AddressFilter
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
search_fields = ('street', 'city')
class DestinationViewSet(viewsets.ModelViewSet):
queryset = Destination.objects.all()
serializer_class = DestinationSerializer
class GuestViewSet(viewsets.GenericViewSet):
queryset = Guest.objects.none() # critical
def list(self, request):
queryset = Guest.objects.filter(user = request.user) # critical
serializer = GuestSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Guest.objects.filter(user = request.user) # critical
guest = get_object_or_404(queryset, pk=pk)
serializer = GuestSerializer(guest, context={'request': request})
return Response(serializer.data)
class MessageViewSet(viewsets.GenericViewSet):
queryset = Message.objects.none() # critical
def list(self, request):
queryset = Message.objects.filter(user = request.user) # critical
serializer = MessageSerializer(queryset, many=True, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Message.objects.filter(user = request.user) # critical
message = get_object_or_404(queryset, pk=pk)
serializer = MessageSerializer(message, context={'request': request})
return Response(serializer.data)
class OpenHourViewSet(viewsets.ModelViewSet):
queryset = OpenHour.objects.all()
serializer_class = OpenHourSerializer
# other applications
from sensors.models import Sensor, SensorValue
from api.serializers import SensorSerializer, SensorValueSerializer
class SensorViewSet(viewsets.ModelViewSet):
queryset = Sensor.objects.all()
serializer_class = SensorSerializer
class SensorValueViewSet(viewsets.ModelViewSet):
queryset = SensorValue.objects.all()
serializer_class = SensorValueSerializer
# security application
from security.models import Camera, SafetyIncidentSource, SafetyIncident, SafetyIncidentAlert, SafetyIncidentAlertBoundary
from api.serializers import CameraSerializer, SafetyIncidentSourceSerializer, SafetyIncidentSerializer, SafetyIncidentAlertSerializer, SafetyIncidentAlertBoundarySerializer
class CameraViewSet(viewsets.ModelViewSet):
queryset = Camera.objects.all()
serializer_class = CameraSerializer
class SafetyIncidentSourceViewSet(viewsets.ModelViewSet):
lookup_field = 'name'
queryset = SafetyIncidentSource.objects.all()
serializer_class = SafetyIncidentSourceSerializer
class SafetyIncidentFilter(django_filters.FilterSet):
"""
source is a ForeignKey in SafetyIncident. We look it up by "name" in the query string.
"""
source = django_filters.CharFilter(method='filter_source')
location = django_filters.CharFilter(name="location",lookup_type="icontains")
type = django_filters.CharFilter(name="type",lookup_type="icontains")
    def filter_source(self, qs, name, value):
return qs.filter(source = SafetyIncidentSource.objects.filter(name = value))
class Meta:
model = SafetyIncident
fields = ('source', 'location', 'type')
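# Illustrative example (not from the original file): with SafetyIncidentFilter
# attached to SafetyIncidentViewSet below, a request such as
#   GET /safetyincidents/?source=police_dispatch&type=fire&location=main
# narrows the queryset by source name (via filter_source) and by
# case-insensitive substring matches on type and location. The URL prefix and
# parameter values are assumptions about how the router is registered.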
class SafetyIncidentViewSet(viewsets.ModelViewSet):
queryset = SafetyIncident.objects.all()
serializer_class = SafetyIncidentSerializer
filter_class = SafetyIncidentFilter
class SafetyIncidentAlertViewSet(viewsets.ModelViewSet):
queryset = SafetyIncidentAlert.objects.all()
serializer_class = SafetyIncidentAlertSerializer
class SafetyIncidentAlertBoundaryViewSet(viewsets.ModelViewSet):
queryset = SafetyIncidentAlertBoundary.objects.all()
serializer_class = SafetyIncidentAlertBoundarySerializer
from dataview.models import Attribute, Node
from api.serializers import AttributeSerializer, NodeSerializer
class AttributeFilter(django_filters.FilterSet):
node = django_filters.CharFilter(method='filter_node')
def filter_node(self, qs, name, value):
return qs.filter(node=Node.objects.filter(name=value))
class Meta:
model = Attribute
fields = ('node', 'name')
class NodeViewSet(viewsets.ModelViewSet):
queryset = Node.objects.all()
serializer_class = NodeSerializer
class AttributeViewSet(viewsets.ModelViewSet):
queryset = Attribute.objects.all()
serializer_class = AttributeSerializer
filter_class = AttributeFilter
|
wl-net/dataview
|
api/views.py
|
Python
|
gpl-2.0
| 6,096
|
from dnfpyUtils.stats.statistic import Statistic
import numpy as np
class Trajectory(Statistic):
"""
Abstract class for trajectory
"""
def __init__(self,name,dt=0.1,dim=0,**kwargs):
super().__init__(name=name,size=0,dim=dim,dt=dt,**kwargs)
self.trace = [] #save the trace
def getViewData(self):
return self._data#,self.getMean()
def reset(self):
super().reset()
self.trace = []
self._data = np.nan
def getMean(self):
return np.nanmean(self.trace)
    def getRMSE(self):
        # square root of the nan-mean of the stored trace; this is an RMSE only
        # if the trace already holds squared errors
        return np.sqrt(np.nanmean(self.trace))
def getCount(self):
return np.sum(~np.isnan(self.trace))
def getMax(self):
return np.max(self.trace)
def getPercentile(self,percent):
return np.nanpercentile(self.trace,percent)
def getMin(self):
return np.min(self.trace)
def getStd(self):
return np.std(self.trace)
def getTrace(self):
"""
Return the time trace of the statistic
"""
return self.trace
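# Hedged example (not part of the original module): the getters above reduce
# the accumulated self.trace with numpy, e.g. for a trace of squared errors
# [0.04, 0.09, float('nan')], getCount() returns 2 and getRMSE() returns
# sqrt(nanmean(...)), roughly 0.255.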
|
bchappet/dnfpy
|
src/dnfpyUtils/stats/trajectory.py
|
Python
|
gpl-2.0
| 1,105
|
# encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KIconDialog(__PyKDE4_kdeui.KDialog):
# no doc
def getIcon(self, *args, **kwargs): # real signature unknown
pass
def iconSize(self, *args, **kwargs): # real signature unknown
pass
def newIconName(self, *args, **kwargs): # real signature unknown
pass
def openDialog(self, *args, **kwargs): # real signature unknown
pass
def setCustomLocation(self, *args, **kwargs): # real signature unknown
pass
def setIconSize(self, *args, **kwargs): # real signature unknown
pass
def setStrictIconSize(self, *args, **kwargs): # real signature unknown
pass
def setup(self, *args, **kwargs): # real signature unknown
pass
def showDialog(self, *args, **kwargs): # real signature unknown
pass
def slotOk(self, *args, **kwargs): # real signature unknown
pass
def strictIconSize(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kio/KIconDialog.py
|
Python
|
gpl-2.0
| 1,319
|
#
# Copyright (C) 2001 Andrew T. Csillag <drew_csillag@geocities.com>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
import os
import DT
import sys
import time
import marshal
import stat
def phfunc(name, obj):
marshal.dump(obj, open(name,'w'))
if __name__=='__main__':
bt = time.time()
fname=sys.argv[1]
mtime=os.stat(fname)[stat.ST_MTIME]
cform=sys.argv[1]+'.dtcc'
try:
cmtime=os.stat(cform)[stat.ST_MTIME]
comp_form=marshal.load(open(cform))
except:
comp_form=None
cmtime=-1
d=DT.DT(open(fname).read(), fname, comp_form, mtime, cmtime,
lambda x, y=cform: phfunc(y, x))
class dumb: pass
ns=dumb()
text = d(ns)
et = time.time()
print text
print 'elapsed time:', et - bt
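# Illustrative usage (not from the original file): render a template file and
# print the elapsed time; the compiled form is cached next to it as <name>.dtcc.
#   python dtrun.py path/to/template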
|
drewcsillag/skunkweb
|
pylibs/DT/dtrun.py
|
Python
|
gpl-2.0
| 911
|
#!/usr/bin/python
#
# Script to send email notifications when a change in Galera cluster membership
# occurs.
#
# Complies with http://www.codership.com/wiki/doku.php?id=notification_command
#
# Author: Gabe Guillen <gabeguillen@outlook.com>
# Version: 1.5
# Release: 3/5/2015
# Use at your own risk. No warranties expressed or implied.
#
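# Hedged usage sketch (not part of the original script): Galera invokes the
# notification command with the flags parsed in main() below. The paths, UUIDs
# and member-list format shown here are illustrative assumptions.
#
#   In my.cnf:
#     wsrep_notify_cmd = /usr/local/bin/galeranotify.py
#
#   Example call as Galera might issue it:
#     galeranotify.py --status Synced --uuid 5f7d... --primary yes \
#         --members node1-uuid/node1/10.0.0.1:3306,node2-uuid/node2/10.0.0.2:3306 \
#         --index 0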
import os
import sys
import getopt
import smtplib
try: from email.mime.text import MIMEText
except ImportError:
# Python 2.4 (CentOS 5.x)
from email.MIMEText import MIMEText
import socket
import email.utils
# Change this to some value if you don't want your server hostname to show in
# the notification emails
THIS_SERVER = socket.gethostname()
# Server hostname or IP address
SMTP_SERVER = 'YOUR_SMTP_HERE'
SMTP_PORT = 25
# Set to True if you need SMTP over SSL
SMTP_SSL = False
# Set to True if you need to authenticate to your SMTP server
SMTP_AUTH = False
# Fill in authorization information here if True above
SMTP_USERNAME = ''
SMTP_PASSWORD = ''
# Takes a single sender
MAIL_FROM = 'YOUR_EMAIL_HERE'
# Takes a list of recipients
MAIL_TO = ['SOME_OTHER_EMAIL_HERE']
# Need Date in Header for SMTP RFC Compliance
DATE = email.utils.formatdate()
# Edit below at your own risk
################################################################################
def main(argv):
str_status = ''
str_uuid = ''
str_primary = ''
str_members = ''
str_index = ''
message = ''
usage = "Usage: " + os.path.basename(sys.argv[0]) + " --status <status str>"
usage += " --uuid <state UUID> --primary <yes/no> --members <comma-seperated"
usage += " list of the component member UUIDs> --index <n>"
try:
opts, args = getopt.getopt(argv, "h", ["status=","uuid=",'primary=','members=','index='])
except getopt.GetoptError:
print usage
sys.exit(2)
if(len(opts) > 0):
message_obj = GaleraStatus(THIS_SERVER)
for opt, arg in opts:
if opt == '-h':
print usage
sys.exit()
elif opt in ("--status"):
message_obj.set_status(arg)
elif opt in ("--uuid"):
message_obj.set_uuid(arg)
elif opt in ("--primary"):
message_obj.set_primary(arg)
elif opt in ("--members"):
message_obj.set_members(arg)
elif opt in ("--index"):
message_obj.set_index(arg)
try:
send_notification(MAIL_FROM, MAIL_TO, 'Galera Notification: ' + THIS_SERVER, DATE,
str(message_obj), SMTP_SERVER, SMTP_PORT, SMTP_SSL, SMTP_AUTH,
SMTP_USERNAME, SMTP_PASSWORD)
except Exception, e:
print "Unable to send notification: %s" % e
sys.exit(1)
else:
print usage
sys.exit(2)
sys.exit(0)
def send_notification(from_email, to_email, subject, date, message, smtp_server,
smtp_port, use_ssl, use_auth, smtp_user, smtp_pass):
msg = MIMEText(message)
msg['From'] = from_email
msg['To'] = ', '.join(to_email)
msg['Subject'] = subject
msg['Date'] = date
if(use_ssl):
mailer = smtplib.SMTP_SSL(smtp_server, smtp_port)
else:
mailer = smtplib.SMTP(smtp_server, smtp_port)
if(use_auth):
mailer.login(smtp_user, smtp_pass)
mailer.sendmail(from_email, to_email, msg.as_string())
mailer.close()
class GaleraStatus:
def __init__(self, server):
self._server = server
self._status = ""
self._uuid = ""
self._primary = ""
self._members = ""
self._index = ""
self._count = 0
def set_status(self, status):
self._status = status
self._count += 1
def set_uuid(self, uuid):
self._uuid = uuid
self._count += 1
def set_primary(self, primary):
self._primary = primary.capitalize()
self._count += 1
def set_members(self, members):
self._members = members.split(',')
self._count += 1
def set_index(self, index):
self._index = index
self._count += 1
def __str__(self):
message = "Galera running on " + self._server + " has reported the following"
message += " cluster membership change"
if(self._count > 1):
message += "s"
message += ":\n\n"
if(self._status):
message += "Status of this node: " + self._status + "\n\n"
if(self._uuid):
message += "Cluster state UUID: " + self._uuid + "\n\n"
if(self._primary):
message += "Current cluster component is primary: " + self._primary + "\n\n"
if(self._members):
message += "Current members of the component:\n"
if(self._index):
for i in range(len(self._members)):
if(i == int(self._index)):
message += "-> "
else:
message += "-- "
message += self._members[i] + "\n"
else:
message += "\n".join((" " + str(x)) for x in self._members)
message += "\n"
if(self._index):
message += "Index of this node in the member list: " + self._index + "\n"
return message
if __name__ == "__main__":
main(sys.argv[1:])
|
gguillen/galeranotify
|
galeranotify.py
|
Python
|
gpl-2.0
| 5,427
|