| repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, lengths 0-8.16k) | middle (string, lengths 3-512) | suffix (string, lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
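Each row below stores its source file split across three string columns (prefix, middle, suffix); concatenating them reproduces the original file, which is why several snippets appear to break mid-word at the column boundaries. A minimal sketch of that relationship, assuming a row is exposed as a plain Python dict keyed by the column names above (the exact loading API is not shown on this page):

```python
# Minimal sketch (not part of the dataset): how the three code columns relate.
# Assumption: a row is available as a plain dict keyed by the header columns.

def reassemble(row: dict) -> str:
    """Concatenate prefix + middle + suffix back into the full source file."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Example built from the first data row below (saltstack/salt).
example_row = {
    "repo_name": "saltstack/salt",
    "path": "tests/integration/files/file/base/_executors/arg.py",
    "language": "Python",
    "license": "apache-2.0",
    "size": 185,
    "score": 0.0,
    "prefix": 'def __virtual__():\n    return True\n\n\ndef execute(*args, **kwargs):\n'
              '    # we use the dunder to assert the loader is pro',
    "middle": 'vided minionmods\n    return __salt__["test.arg',
    "suffix": '"]("test.arg fired")\n',
}

print(reassemble(example_row))  # prints the reconstructed arg.py
```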
saltstack/salt | tests/integration/files/file/base/_executors/arg.py | Python | apache-2.0 | 185 | 0 |
def __virtual__():
return True
def execute(*args, **kwargs):
    # we use the dunder to assert the loader is provided minionmods
    return __salt__["test.arg"]("test.arg fired")
|
hirokiky/wraptools | wraptools/context.py | Python | mit | 630 | 0 |
from functools import wraps
def context(*context_funcs):
""" Decorator to inject additional arguments taken by :param context_funcs:
>>> data = {1: "user1", 2: "user2"}
>>> @context(
... lambda r, i: data.get(i),
... )
... def some_view(request, user_id, username):
... print(username)
...
    >>> some_view("request", 1)
    # says user1
"""
def dec(func):
@wraps(func)
def wrapped(*args, **kwargs):
            contexts = tuple(f(*args, **kwargs) for f in context_funcs)
return func(*(args + contexts), **kwargs)
return wrapped
return dec
|
arth-co/shoop | shoop/front/views/payment.py | Python | agpl-3.0 | 2,387 | 0.002933 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals, with_statement
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import DetailView
from shoop.core.models import Order
def get_payment_urls(request, order):
kwargs = dict(pk=order.pk, key=order.key)
return {
"payment": request.build_absolute_uri(reverse("shoop:or
|
der_process_payment", kwargs=kwargs)),
"return": request.build_absolute_uri(reverse("shoop:order_process_payment_return", kwargs=kwargs)),
"cancel": request.build_absolute_uri(reverse("shoop:order_payment_canceled", kwargs=kwargs))
}
class ProcessPaymentView(DetailView):
model = Order
context_object_name = "order"
def get_object(self, queryset=None):
return get_object_or_404(self.model, pk=self.kwargs["pk"], key=self.kwargs["key"])
def get_context_data(self, **kwargs):
context = super(ProcessPaymentView, self).get_context_data(**kwargs)
context["payment_urls"] = get_payment_urls(self.request, self.object)
return context
def dispatch(self, request, *args, **kwargs):
mode = self.kwargs["mode"]
order = self.object = self.get_object()
payment_method = (order.payment_method if order.payment_method_id else None)
if mode == "payment":
if not order.is_paid():
if payment_method:
return payment_method.get_payment_process_response(
order=order, urls=get_payment_urls(request, order))
elif mode == "return":
if payment_method:
payment_method.process_payment_return_request(order=order, request=request)
elif mode == "cancel":
self.template_name = "shoop/front/order/payment_canceled.jinja"
return self.render_to_response(self.get_context_data(object=order))
else:
raise ImproperlyConfigured("Unknown ProcessPaymentView mode: %s" % mode)
return redirect("shoop:order_complete", pk=order.pk, key=order.key)
|
spicyramen/sipLocator | tools/testSmS.py | Python | gpl-2.0 | 471 | 0.012739 |
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC433e7b0bec93dc5996e4fb80b1e56eec"
auth_token = "9cc9267fe09dab362d3be160f711a09d"
client = TwilioRestClient(account_sid, auth_token)
message = client.sms.messages.create(body="Jenny please?! I love you <3",
to="+14082186575", # Replace with your phone number
from_="++1415-795-2944") # Replace with your Twilio number
print message.sid
|
backupManager/pyflag | src/pyflag/TEXTUI.py | Python | gpl-2.0 | 8,684 | 0.019691 |
#!/usr/bin/env python
# ******************************************************
# Copyright 2004: Commonwealth of Australia.
#
# Developed by the Computer Network Vulnerability Team,
# Information Security Group.
# Department of Defence.
#
# Michael Cohen <scudette@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" This is an implementation of an ASCII Text UI suitable for producing simple automated reports.
"""
import re, types, textwrap, csv, sys
import pyflag.FlagFramework as FlagFramework
import pyflag.DB as DB
import pyflag.conf
import pyflag.UI as UI
config=pyflag.conf.ConfObject()
import pyflag.Registry as Registry
import cStringIO
class TextObject:
generator = None
class TEXTUI(UI.GenericUI):
""" A simple text UI """
def __init__(self, default= None, query=None):
self.result = ""
self.text_var = ''
self.current_table=None
self.generator = TextObject()
if query:
self.defaults=query
if default:
self.defaults = default.defaults
def display(self):
if self.current_table:
self.end_table()
return self.result
def __str__(self):
return self.result
def heading(self,string):
self.result+=string+"\r\n"+ "-" * len(string) + "\r\n\r\n"
def pre(self,string):
self.result+=string
def start_table(self,**options):
if self.current_table==None:
self.current_table_size=[0,0]
self.current_table=[]
def table(self,sql="select ",columns=[],names=[],links=[],table='',where='',groupby = None,case=None,callbacks={},**opts):
names=list(names)
## Establish the sorting order
try:
self.sort=[list(names).index(self.defaults['order']),'order']
        except KeyError:
try:
self.sort=[self.defaults['dorder'],'dorder']
except KeyError:
self.sort=[0,'order']
self.filter_conditions=[]
self.filter_text=[]
try:
            if not groupby:
groupby=self.defaults['group_by']
except KeyError:
groupby=None
# Get a new SQL generator for building the table with.
generator,new_query,names,columns,links = self._make_sql(sql=sql,columns=columns,names=names,links=links,table=table,where=where,groupby = groupby,case=case,callbacks=callbacks, query=self.defaults)
output = cStringIO.StringIO()
writer=None
for row in generator:
if not writer:
## Print the headers in a comment field:
output.write("#%s\r\n" % ','.join(row.keys()))
writer=csv.DictWriter(output, row.keys())
writer.writerow(row)
output.seek(0)
self.result+=output.read()
def text(self,*cuts,**options):
self.text_var += "".join(cuts)
try:
if options['wrap']=='full':
for line in self.text_var.splitlines(True):
new_lines = textwrap.wrap(line, config.WRAP)
for i in range(len(new_lines)):
new_line = new_lines[i]
self.result+=new_line
if len(new_line)<len(line) and i<len(new_lines)-1:
self.result += " " * (config.WRAP - len(new_line)) + "\\"
return
except KeyError:
pass
self.result+=self.text_var
self.text_var = ''
def notebook(self,names=[],context="notebook",callbacks=[],descriptions=[]):
""" This text implementation of notebook will only show the page which is currently selected """
print "%r" % self.defaults
query=self.defaults.clone()
try:
context_str=query[context]
cbfunc=callbacks[names.index(context_str)]
except (ValueError,KeyError):
cbfunc=callbacks[0]
context_str=names[0]
result=self.__class__(self)
cbfunc(query,result)
self.result += result.display()
def end_table(self):
for row_index in range(len(self.current_table)):
row=self.current_table[row_index]
temp = []
max_height = 0
for item in row:
width=0
lines = item.splitlines()
if len(lines)>max_height: max_height=len(lines)
for line in lines:
if width<len(line): width=len(line)
#fill the line out to max width:
lines = [ line + " "*(width-len(line)) for line in lines]
temp.append(lines + ["\r\n"] * (max_height - len(lines)))
for i in range(0,max_height):
try:
self.result+="".join([c[i] for c in temp ]) + "\r\n"
except IndexError:
pass
def toolbar(self,cb=None,text=None,icon=None,popup=True,tooltip=None,link=None):
pass
def row(self, *columns, **options):
if self.current_table == None:
self.start_table()
## Add an extra row on the end
self.current_table_size[0]+=1
if self.current_table_size[1]<len(columns):
self.current_table_size[1]=len(columns)
column_widgets=[]
for i in range(len(columns)):
col=columns[i]
if isinstance(col,self.__class__):
col=col.display()
column_widgets.append(col)
##Attach the column to row at the end of the table:
self.current_table.append(column_widgets)
def tree(self,tree_cb = None, pane_cb=None, branch = ('/'), layout="horizontal"):
""" A Text tree implementation """
query = self.defaults
try:
## Get the right part:
branch=FlagFramework.splitpath(query['open_tree'])
except KeyError:
branch=['']
#Start building the tree using the branch.
def draw_branch(depth,tree_array):
#We search through all the items until we find the one
#that matches the branch for this depth, then recurse into
#it.
branch_array=branch[:depth]
path = FlagFramework.joinpath(branch[:depth])
for k,v,t in tree_cb(path):
if not k: continue
if not t: continue
tree_array.append((depth,k,v,t))
try:
if k == branch[depth]:
#Recurse into the next level in the tree
draw_branch(depth+1,tree_array)
except IndexError:
pass
tree_array = []
#The first item in the tree is the first one provided in branch
if not branch[0]:
tree_array.append((0,'/','/','branch'))
else:
tree_array.append((0,branch[0],branch[0],'branch'))
#Build the tree_array
draw_branch(1,tree_array)
left = self.__class__(self)
for depth,k,v,t in tree_array:
|
oourfali/cloud-init-fedora | cloudinit/netinfo.py | Python | gpl-3.0 | 3,816 | 0.000786 |
#!/usr/bin/python
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
def check_output(args):
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
out, err = proc.communicate()
ret = proc.returncode
if ret:
raise subprocess.CalledProcessError(ret, ' '.join(args))
return out
def netdev_info(empty=""):
fields = ("hwaddr", "addr", "bcast", "mask")
ifcfg_out = str(check_output(["ifconfig", "-a"]))
devs = {}
for line in ifcfg_out.splitlines():
if len(line) == 0:
continue
if line[0] not in ("\t", " "):
curdev = line.split()[0]
devs[curdev] = {"up": False}
for field in fields:
devs[curdev][field] = ""
toks = line.lower().strip().split()
if toks[0] == "up":
devs[curdev]['up'] = True
fieldpost = ""
if toks[0] == "inet6":
fieldpost = "6"
for i in range(len(toks)):
if toks[i] == "hwaddr":
try:
devs[curdev]["hwaddr"] = toks[i + 1]
except IndexError:
pass
for field in ("addr", "bcast", "mask"):
target = "%s%s" % (field, fieldpost)
if devs[curdev].get(target, ""):
continue
if toks[i] == "%s:" % field:
try:
devs[curdev][target] = toks[i + 1]
except IndexError:
pass
elif toks[i].startswith("%s:" % field):
devs[curdev][target] = toks[i][len(field) + 1:]
if empty != "":
for (_devname, dev) in devs.iteritems():
for field in dev:
if dev[field] == "":
dev[field] = empty
return(devs)
def route_info():
    route_out = str(check_output(["route", "-n"]))
routes = []
for line in route_out.splitlines()[1:]:
if not line:
continue
toks = line.split()
if toks[0] == "Kernel" or toks[0] == "Destination":
continue
routes.append(toks)
return(routes)
def getgateway():
for r in route_info():
if r[3].find("G") >= 0:
return("%s[%s]" % (r[1], r[7]))
return(None)
def debug_info(pre="ci-info: "):
lines = []
try:
netdev = netdev_info(empty=".")
except Exception:
lines.append("netdev_info failed!")
netdev = {}
for (dev, d) in netdev.iteritems():
lines.append("%s%-6s: %i %-15s %-15s %s" %
(pre, dev, d["up"], d["addr"], d["mask"], d["hwaddr"]))
try:
routes = route_info()
except Exception:
lines.append("route_info failed")
routes = []
n = 0
for r in routes:
lines.append("%sroute-%d: %-15s %-15s %-15s %-6s %s" %
(pre, n, r[0], r[1], r[2], r[7], r[3]))
n = n + 1
return('\n'.join(lines))
if __name__ == '__main__':
print debug_info()
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractMionline657939096WordpressCom.py | Python | bsd-3-clause | 574 | 0.033101 |
def extractMionline657939096WordpressCom(item):
'''
    Parser for 'mionline657939096.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
veragluscevic/dmdd | dmdd/tests/test.py | Python | mit | 8,718 | 0.016059 |
import os,os.path,shutil
import numpy as np
import pickle
import dmdd
import dmdd_efficiencies as eff
def check_min_mass(element='fluorine', Qmin=1., v_esc=544., v_lag=220., mx_guess=1.):
experiment = dmdd.Experiment('test',element,Qmin, 40.,100., eff.efficiency_unit)
res = experiment.find_min_mass(v_esc=v_esc, v_lag=v_lag, mx_guess=mx_guess)
print res,'GeV'
if res<0:
print 'Problem: try another mx_guess...'
def make_UVmodels(return_models=False):
SI_Higgs = dmdd.UV_Model('SI_Higgs', ['mass', 'sigma_si'], fixed_params={'fnfp_si': 1})
milicharge = dmdd.UV_Model('Milicharge', ['mass', 'sigma_si_massless'], fixed_params={'fnfp_si_massless': 0})
SD_flavoruniversal = dmdd.UV_Model('SD_fu', ['mass','sigma_sd'], fixed_params={'fnfp_sd': -1.1})
anapole = dmdd.UV_Model('Anapole', ['mass','sigma_anapole'])
magdip_heavy = dmdd.UV_Model('Mag.dip.heavy', ['mass','sigma_magdip'])
magdip_0 = dmdd.UV_Model('Mag.dip.light', ['mass','sigma_magdip_massless'])
elecdip_heavy = dmdd.UV_Model('Elec.dip.heavy', ['mass','sigma_elecdip'])
elecdip_0 = dmdd.UV_Model('Elec.dip.light', ['mass','sigma_elecdip_massless'])
f1 = dmdd.UV_Model('f1', ['mass','sigma_f1'], fixed_params={'fnfp_f1': 1.})
f2_Higgs = dmdd.UV_Model('f2_Higgs', ['mass','sigma_f2'], fixed_params={'fnfp_f2': -0.05})
#f2_flavoruniversal = dmdd.UV_Model('f2_flavor-universal', ['mass','sigma_f2'], fixed_params={'fnfp_f2': 1.})
f3_Higgs = dmdd.UV_Model('f3_Higgs', ['mass','sigma_f3'], fixed_params={'fnfp_f3': -0.05})
#f3_flavoruniversal = dmdd.UV_Model('f3_flavor-universal', ['mass','sigma_f3'], fixed_params={'fnfp_f3': 1.})
LS = dmdd.UV_Model('LS', ['mass','sigma_LS'], fixed_params={'fnfp_LS': 0.})
models = [SI_Higgs, milicharge, SD_flavoruniversal, anapole,
magdip_heavy, magdip_0, elecdip_heavy, elecdip_0,
f1, f2_Higgs, f3_Higgs, LS]
if return_models:
return models
def make_experiments(return_experiments=False):
xe = dmdd.Experiment('Xe','xenon',5., 40.,100., eff.efficiency_unit)
ge = dmdd.Experiment('Ge','germanium',0.4, 100.,100., eff.efficiency_unit)
if return_experiments:
return [xe,ge]
def test_MultinestRun(mass=50,test_fits=False):
SI_Higgs = dmdd.UV_Model('SI_Higgs', ['mass', 'sigma_si'], fixed_params={'fnfp_si': 1})
elecdip_heavy = dmdd.UV_Model('Elec.dip.heavy', ['mass','sigma_elecdip'])
experiment = make_experiments(return_experiments=True)
simmodel = SI_Higgs
fitmodel1 = SI_Higgs
fitmodel2 = elecdip_heavy
pardic = {'sigma_si': 70.,'mass': mass}
simname = 'simtest'
testrun1 = dmdd.MultinestRun(simname, experiment, simmodel, pardic,
fitmodel1, prior_ranges={'mass':(1,1000),
'sigma_si':(0.001,100000),
'sigma_elecdip':(0.001,100000)})
data1 = np.loadtxt(testrun1.simulations[0].datafile)
pardic = {'sigma_si': 70.0007,'mass': mass}
testrun2 = dmdd.MultinestRun(simname, experiment, simmodel, pardic,
fitmodel2, empty_run=False,
prior_ranges={'mass':(1,1000),
'sigma_si':(0.001,100000),
'sigma_elecdip':(0.001,100000)})
data2 = np.loadtxt(testrun1.simulations[0].datafile)
#simulation datafile should be created only for the first instance of MultinestRun:
assert np.allclose(data1, data2)
if test_fits:
testrun1.fit()
testrun1.visualize()
testrun2.fit()
testrun2.visualize()
if (not os.path.exists(testrun1.chains_file)) or (not os.path.exists(testrun1.pickle_file)) or (not os.path.exists(testrun1.stats_file)):
raise AssertionError('Stats or chains or pickle are not created or are erased.')
plotfile1 = testrun1.chainspath + '2d_posterior_mass_vs_sigma_si.pdf'
plotfile2 = testrun1.chainspath + '{}_theoryfitdata_Ge.pdf'.format(simname)
plotfile3 = testrun1.chainspath + '{}_theoryfitdata_Xe.pdf'.format(simname)
if (not os.path.exists(plotfile1)) or (not os.path.exists(plotfile2)) or (not os.path.exists(plotfile3)):
raise AssertionError('Plots are not created or are erased.')
if (not os.path.exists(testrun2.chains_file)) or (not os.path.exists(testrun2.pickle_file)) or (not os.path.exists(testrun2.stats_file)):
raise AssertionError('Stats or chains or pickle are not created.')
plotfile1 = testrun2.chainspath + '2d_posterior_mass_vs_sigma_elecdip.pdf'
plotfile2 = testrun2.chainspath + '{}_theoryfitdata_Ge.pdf'.format(simname)
plotfile3 = testrun2.chainspath + '{}_theoryfitdata_Xe.pdf'.format(simname)
if (not os.path.exists(plotfile1)) or (not os.path.exists(plotfile2)) or (not os.path.exists(plotfile3)):
raise AssertionError('Plots are not created.')
def test_UVrate():
experiment = dmdd.Experiment('Xe','xenon',5., 40.,10000., eff.efficiency_unit)
models = make_UVmodels(return_models=True)
mass = 40.
qs = np.array([15.])
v_lag = 200.
v_rms = 100.
v_esc = 600.
rho_x = 0.4
sigma_names = {}
fnfp_names = {}
fnfp_vals = {}
for m in models:
sigma_names[m.name] = m.param_names[1]
if len(m.fixed_params)>0:
fnfp_names[m.name] = m.fixed_params.keys()[0]
fnfp_vals[m.name] = m.fixed_params.values()[0]
else:
fnfp_names[m.name] = None
fnfp_vals[m.name] = None
dRdQs = np.zeros(len(models))
Rs = np.zeros(len(models))
for i,m in enumerate(models):
kwargs = {sigma_names[m.name]:1.}
if fnfp_names[m.name] is not None:
kwargs[fnfp_names[m.name]] = fnfp_vals[m.name]
dRdQs[i] = dmdd.rate_UV.dRdQ(qs, mass=mass, element=experiment.element,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x,
**kwargs)
Rs[i] = dmdd.rate_UV.R(eff.efficiency_unit, mass=mass, element=experiment.element,
Qmin=experiment.Qmin, Qmax=experiment.Qmax,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x,
**kwargs)
#print 'dRdQs = {}\n'.format(dRdQs)
#print 'Rs = {}\n'.format(Rs)
dRdQs_correct = [ 1.27974652e-12, 1.67031585e-13, 6.28936205e-13, 7.76864477e-13,
7.71724584e-13, 5.66164037e-13, 8.40579288e-13, 6.16678247e-13,
4.72480605e-13, 2.59857470e-16, 9.59390104e-16, 1.14295679e-13]
Rs_correct = [ 6.15358778e-11, 3.10857259e-11, 3.14982315e-11, 4.14119198e-11,
1.82181891e-11, 3.84877268e-11, 2.35638282e-11, 5.50063883e-11,
1.34702925e-11, 5.82472177e-15, 1.64213483e-14, 2.26028126e-12]
assert np.allclose(dRdQs_correct, dRdQs)
assert np.allclose(Rs_correct, Rs)
###
qs = np.array([8.3,15.7])
logtest1 = dmdd.rate_UV.loglikelihood(qs, eff.efficiency_unit, mass=mass,
sigma_si=1.,fnfp_si=1.,
element=experiment.element,
Qmin=experiment.Qmin, Qmax=experiment.Qmax,
exposure=experiment.exposure,energy_resolution=True,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x)
logtest2 = dmdd.rate_UV.loglikelihood(qs, eff.efficiency_unit, mass=mass,
sigma_si=1.,fnfp_si=1.,
element=experiment.element,
                                          Qmin=experiment.Qmin, Qmax=experiment.Qmax,
                                          exposure=experiment.exposure,en
|
scieloorg/Logger | tests/test_inspector.py | Python | bsd-2-clause | 2,902 | 0 |
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from logger.inspector import Inspector
from unittest import TestCase
class MockCollection(object):
def __init__(self, website_id, collection_id, website_acron_in_filename):
self.website_id = website_id
self.collection_id = collection_id
self.website_acron_in_filename = website_acron_in_filename
_COLLECTIONS = [
MockCollection("nbr", "scl", "nbr"),
MockCollection("scl", "scl", "br"),
MockCollection("spa", "spa", "sp"),
]
class TestInspectorTests(TestCase):
def test_is_valid_filename_node1(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.1.log.gz')
self.assertTrue(insp._is_valid_filename())
expected = {
'date': '2015-12-30',
'website_acron_in_filename': 'br'
}
self.assertEqual(expected, insp._parsed_fn.groupdict())
def test_is_valid_filename(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_filename())
expected = {
'date': '2015-12-30',
'website_acron_in_filename': 'br'
}
self.assertEqual(expected, insp._parsed_fn.groupdict())
def test_is_valid_filename_false(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scilo.br.log.gz')
self.assertFalse(insp._is_valid_filename())
def test_is_valid_date_in_filename(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_date())
def test_is_valid_date_in_filename_false(self):
insp = Inspector('/var/www/scielo.br/2015-31-12_scielo.br.log.gz')
self.assertFalse(insp._is_valid_date())
def test_is_valid_website_in_filename(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_website())
def test_nbr_is_valid_website_in_filename(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.nbr.log.gz')
self.assertTrue(insp._is_valid_website())
def test_is_invalid_collection_in_filename(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.xxx.log.gz')
self.assertFalse(insp._is_valid_website())
def test_is_valid_source_directory(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_source_directory())
    def test_is_valid_source_directory_false_1(self):
insp = Inspector('/var/www/scielo.br/2015-12-30_sciel.br.log.gz')
self.assertFalse(insp._is_valid_source_directory())
def test_is_valid_source_directory_false_2(self):
        insp = Inspector('/var/www/scielo.pepsic/2015-12-30_scielo.br.log.gz')
self.assertFalse(insp._is_valid_source_directory())
|
xray7224/CimCity | cim/items/civics.py | Python | gpl-3.0 | 1,199 | 0.000834 |
##
# Copyright (C) 2014, 2015 Matt Molyneaux
#
# This file is part of CimCity.
#
# CimCity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CimCity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CimCity. If not, see <http://www.gnu.org/licenses/>.
##
from __future__ import absolute_import, division, print_function, unicode_literals
import random
class CivicBuilding(object):
healthiness = 10
class PoliceStation(CivicBuilding):
@property
def healthiness(self):
return int(random.gauss(super(PoliceStation, self).healthiness, 10))
class FireStation(CivicBuilding):
pass
class Hospital(CivicBuilding):
@property
def healthiness(self):
return int(random.gauss(super(Hospital, self).healthiness, 1))
|
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/raw/GL/AMD/shader_stencil_export.py | Python | bsd-2-clause | 372 | 0.008065 |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_AMD_shader_stencil_export'
def glInitShaderStencilExportAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
|
JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/test/castep/castep_interface.py | Python | gpl-2.0 | 3,656 | 0.003829 |
#!/usr/bin/python
"""Simple shallow test of the CASTEP interface"""
import os
import shutil
import tempfile
import traceback
from ase.test import NotAvailable
# check if CASTEP_COMMAND is set a environment variable
if not os.environ.has_key('CASTEP_COMMAND'):
print("WARNING: Environment variable CASTEP_COMMAND is not set")
print("Will set CASTEP_COMMAND = castep for the sake of this test")
print("Please change it if this does not run castep in your environment")
os.environ['CASTEP_COMMAND'] = 'castep'
if not (os.system('which %s' % os.environ['CASTEP_COMMAND']) == 0):
raise NotAvailable("""Could not find CASTEP. If you have it
installed make sure, you set the CASTEP_COMMAND
environment variable correctly""")
# check if we can import everything
ase_castep_dir = "ase"
try:
castep_calc = __import__(ase_castep_dir + ".calculators.castep", globals(), locals(), ["Castep", "CastepParam", "create_castep_keywords"])
Castep = castep_calc.Castep
CastepParam = castep_calc.CastepParam
create_castep_keywords = castep_calc.create_castep_keywords
except Exception, e:
traceback.print_exc()
print(e)
assert False, 'Castep calculator module could not be loaded'
try:
__import__(ase_castep_dir + ".io.castep")
except Exception, e:
assert False, 'Castep io module could not be loaded'
tmp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
from ase.calculators.castep import Castep
try:
c = Castep(directory=tmp_dir, label='test_label')
except Exception, e:
traceback.print_exc()
print(e)
assert False, 'Could not instantiate castep calculator'
try:
c.xc_functional = 'PBE'
except Exception, e:
traceback.print_exc()
print(e)
assert False, 'Setting xc_functional failed'
import ase.lattice.cubic
lattice = ase.lattice.cubic.BodyCenteredCubic('Li' )
print('For the sake of evaluating this test, warnings')
print('about auto-generating pseudo-potentials are')
print('normal behavior and can be safely ignored')
try:
lattice.set_calculator(c)
except Exception, e:
traceback.print_exc()
print(e)
assert False, 'Setting the calculator %s failed' % c
try:
create_castep_keywords(
castep_command=os.environ['CASTEP_COMMAND'],
path=tmp_dir,
fetch_only=20)
except Exception, e:
traceback.print_exc()
print(e)
assert False, "Cannot create castep_keywords, this usually means a bug"\
+ " in the interface or the castep binary cannot be called"
param_fn = os.path.join(tmp_dir, 'myParam.param')
param = open(param_fn,'w')
param.write('XC_FUNCTIONAL : PBE #comment\n')
param.write('XC_FUNCTIONAL : PBE #comment\n')
param.write('#comment\n')
param.write('CUT_OFF_ENERGY : 450.\n')
param.close()
try:
c.merge_param(param_fn)
except Exception, e:
traceback.print_exc()
print(e)
assert False,"Error in
|
merge_param_filename, go figure"
# check if the CastepOpt, CastepCell comparison mechanism works
p1 = CastepParam()
p2 = CastepParam()
assert p1._options == p2._options, "Print two newly created CastepParams are not the same"
p1._options['xc_functional'].value = 'PBE'
p1.xc_functional = 'PBE'
assert not p1._options == p2._options, "Changed one CastepParam, but the still look the same"
assert c.calculation_required(lattice), 'Calculator does not fetch that a calculation is required'
if not c.dryrun_ok():
print(c._error)
assert False, "Dryrun_ok does not work, where it should"
else:
print("Dryrun is ok")
c.prepare_input_files(lattice)
os.chdir(cwd)
shutil.rmtree(tmp_dir)
print("Test finished without errors")
|
sburnett/seattle | seattlegeni/lockserver/tests/unit/ut_lockserverunit_user_and_node_locks.py | Python | mit | 6,231 | 0.002889 |
import unittest
import lockserver_daemon as lockserver
class TheTestCase(unittest.TestCase):
def setUp(self):
# Reset the lockserver's global variables between each test.
lockserver.init_globals()
def testUserAndNodeLockContention_one(self):
# Start three sessions.
sess = []
sess.append(lockserver.do_start_session())
sess.append(lockserver.do_start_session())
sess.append(lockserver.do_start_session())
# First session requests lock on the user 'bob'.
locks = {'user':['bob']}
lockserver.do_acquire_locks(sess[0], locks)
# First session requests locks on the nodes '123' and '456'.
locks = {'node':['123','456']}
lockserver.do_acquire_locks(sess[0], locks)
# Second session requests lock on the node '123'.
locks = {'node':['123']}
lockserver.do_acquire_locks(sess[1], locks)
# Third session requests lock on the user 'bob'.
locks = {'user':['bob']}
lockserver.do_acquire_locks(sess[2], locks)
expected_heldlockdict = {
'user': {'bob': {'locked_by_session': sess[0],
'queue': [sess[2]]}},
'node': {'123': {'locked_by_session': sess[0],
'queue': [sess[1]]},
'456': {'locked_by_session': sess[0],
'queue': []}}}
expected_sessiondict = {
sess[0]: {'heldlocks': {'user': ['bob'], 'node': ['123','456']},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True},
sess[1]: {'heldlocks': {'user': [], 'node': []},
'neededlocks': {'user': [], 'node': ['123']},
'acquirelocksproceedeventset': False},
sess[2]: {'heldlocks': {'user': [], 'node': []},
'neededlocks': {'user': ['bob'], 'node': []},
'acquirelocksproceedeventset': False}}
status = lockserver.do_get_status()
self.assertEqual(expected_heldlockdict, status["heldlockdict"])
self.assertEqual(expected_sessiondict, status["sessiondict"])
# First session releases user lock on 'bob'.
# Note: they still hold the node locks on '123' and '456'
locks = {'user':['bob']}
lockserver.do_release_locks(sess[0], locks)
expected_heldlockdict = {
'user': {'bob': {'locked_by_session': sess[2],
'queue': []}},
'node': {'123': {'locked_by_session': sess[0],
'queue': [sess[1]]},
'456': {'locked_by_session': sess[0],
'queue': []}}}
expected_sessiondict = {
sess[0]: {'heldlocks': {'user': [], 'node': ['123','456']},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True},
sess[1]: {'heldlocks': {'user': [], 'node': []},
'neededlocks': {'user': [], 'node': ['123']},
'acquirelocksproceedeventset': False},
sess[2]: {'heldlocks': {'user': ['bob'], 'node': []},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True}}
status = lockserver.do_get_status()
self.assertEqual(expected_heldlockdict, status["heldlockdict"])
self.assertEqual(expected_sessiondict, status["sessiondict"])
# First session releases node lock on '123'.
# Note: they still hold the node lock on '456'
locks = {'node':['123']}
lockserver.do_release_locks(sess[0], locks)
expected_heldlockdict = {
'user': {'bob': {'locked_by_session': sess[2],
'queue': []}},
'node': {'123': {'locked_by_session': sess[1],
'queue': []},
'456': {'locked_by_session': sess[0],
'queue': []}}}
expected_sessiondict = {
sess[0]: {'heldlocks': {'user': [], 'node': ['456']},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True},
sess[1]: {'heldlocks': {'user': [], 'node': ['123']},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True},
sess[2]: {'heldlocks': {'user': ['bob'], 'node': []},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True}}
status = lockserver.do_get_status()
self.assertEqual(expected_heldlockdict, status["heldlockdict"])
self.assertEqual(expected_sessiondict, status["sessiondict"])
# First session releases node lock on '456' and then requests locks on
# user 'bob' and nodes '123' and '456' again. It can't request the node
# locks, however, until it gets the node lock (in xmlrpc usage, the
# user lock request would have blocked). So, we have the session that is
# holding the lock on user 'bob' release that lock before the first session
# makes the node lock requests.
locks = {'node':['456']}
lockserver.do_release_locks(sess[0], locks)
locks = {'user':['bob']}
lockserver.do_acquire_locks(sess[0], locks)
locks = {'user':['bob']}
lockserver.do_release_locks(sess[2], locks)
locks = {'node':['123', '456']}
lockserver.do_acquire_locks(sess[0], locks)
expected_heldlockdict = {
'user': {'bob': {'locked_by_session': sess[0],
'queue': []}},
'node': {'123': {'locked_by_session': sess[1],
'queue': [sess[0]]},
'456': {'locked_by_session': sess[0],
'queue': []}}}
expected_sessiondict = {
sess[0]: {'heldlocks': {'user': ['bob'], 'node': ['456']},
'neededlocks': {'user': [], 'node': ['123']},
'acquirelocksproceedeventset': False},
sess[1]: {'heldlocks': {'user': [], 'node': ['123']},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True},
      sess[2]: {'heldlocks': {'user': [], 'node': []},
'neededlocks': {'user': [], 'node': []},
'acquirelocksproceedeventset': True}}
status = lockserver.do_get_status()
self.assertEqual(expected_heldlockdict, status["heldlockdict"])
self.assertEqual(expected_sessiondict, status["sessiondict"])
|
uclapi/uclapi | backend/uclapi/oauth/scoping.py | Python | mit | 4,517 | 0 |
# Storage of the scope map
# The purpose of this setup is that the OAuth scope of any app can be stored
# in a single field. This way, we can easily add more scopes later.
# We have a BigIntegerField to work with, which means 64 bits of storage.
# This translates into 64 types of scope, each of which can be checked with a
# bit mask.
# We do not have any OAuth scopes needed yet, but the current plan is:
# roombookings": (0, "Private room bookings data"),
# "timetable": (1, "Private timetable data"),
# "uclu": (2, "Private UCLU data"),
# "moodle": (3, "Private Moodle data")
# E.g. roombookings has scope 0, which is
# 0000000000000000000000000000000000000000000000000000000000000001b.
# This is because the 0th bit (LSB) is set to 1.
# roombookings + uclu = 101b, or a scope number of 2^2 + 2^0 = 4 + 1 = 5
class Scopes:
SCOPE_MAP = {
"timetable": (1, "Personal Timetable"),
"student_number": (2, "Student Number"),
"libcal_read": (3, " Read LibCal Bookings"),
"libcal_write": (4, " Write LibCal Bookings"),
}
def __init__(self, scope_map=None):
if scope_map:
self.SCOPE_MAP = scope_map
# Add a scope to the scope number given and return the new number
def add_scope(self, current, scope_name):
try:
scope_shift = self.SCOPE_MAP[scope_name][0]
except KeyError:
return current
return (current | (1 << scope_shift))
# Check whether a scope is present in the current scope number given
def check_scope(self, current, scope_name):
try:
scope_shift = self.SCOPE_MAP[scope_name][0]
except KeyError:
return False
return ((1 << scope_shift) & current) > 0
# Remove a scope from the current scope number
def remove_scope(self, current, scope_name):
try:
scope_shift = self.SCOPE_MAP[scope_name][0]
except KeyError:
return current
if current & 1 << scope_shift > 0:
return ~(~current + (1 << scope_shift))
else:
return current
    # Produce a dictionary with the scope information. Example:
# {
# "roombookings": True,
# "timetable": False,
# ...
# }
def scope_dict(self, current, pretty_print=True):
scopes = []
for x in self.SCOPE_MAP.keys():
if self.check_scope(current, x):
if pretty_print:
scope = {
"name": x,
"description": self.SCOPE_MAP[x][1]
}
else:
scope = {
"id": self.SCOPE_MAP[x][0],
"name": x
}
scopes.append(scope)
return scopes
# Same as above, but list all possible scopes along with whether they are
# included in the current state given.
# This is used by the dashboard.
def scope_dict_all(self, current, pretty_print=True):
scopes = []
for x in self.SCOPE_MAP.keys():
if pretty_print:
scope = {
"name": x,
"description": self.SCOPE_MAP[x][1],
"enabled": self.check_scope(current, x)
}
else:
scope = {
"id": self.SCOPE_MAP[x][0],
"name": x,
"enabled": self.check_scope(current, x)
}
scopes.append(scope)
return scopes
# Get available scopes for showing to the user
def get_all_scopes(self, pretty_print=True):
scopes = []
for x in self.SCOPE_MAP.keys():
if pretty_print:
scope = {
"name": x,
"description": self.SCOPE_MAP[x][1]
}
else:
scope = {
"id": self.SCOPE_MAP[x][0],
"name": x
}
scopes.append(scope)
return scopes
# Dump the scope map so that developers can track scopes with it
def get_scope_map(self):
scopes = []
for x in self.SCOPE_MAP.keys():
scope = {
"name": x,
"id": self.SCOPE_MAP[x][0],
"description": self.SCOPE_MAP[x][1]
}
scopes.append(scope)
scopes = sorted(scopes, key=lambda k: k["id"])
return scopes
|
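The comments at the top of the uclapi/uclapi row above describe packing OAuth scopes into a single integer, one bit per scope, and give the worked example roombookings + uclu = 101b = 5. A small self-contained sketch of that arithmetic, using the hypothetical scope-to-bit mapping from those comments rather than the class's actual SCOPE_MAP:

```python
# Illustration only (not part of the dataset row above): the bit-mask scope
# encoding described in the comments of backend/uclapi/oauth/scoping.py.
# Scope names and bit positions come from the example in those comments
# (roombookings -> bit 0, uclu -> bit 2), not from the live SCOPE_MAP.
SCOPES = {"roombookings": 0, "timetable": 1, "uclu": 2, "moodle": 3}

def add_scope(current: int, name: str) -> int:
    # Set the bit owned by this scope.
    return current | (1 << SCOPES[name])

def check_scope(current: int, name: str) -> bool:
    # Test whether the bit owned by this scope is set.
    return (current >> SCOPES[name]) & 1 == 1

state = add_scope(add_scope(0, "roombookings"), "uclu")
assert state == 0b101 == 5            # 2**2 + 2**0 = 5, as the comment says
assert check_scope(state, "uclu")
assert not check_scope(state, "timetable")
```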
jaredkerim/stompy | app/settings.py | Python | mit | 3,293 | 0.001215 |
"""
Django settings for stompy project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'pedals',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASS'],
'HOST': os.environ['DB_HOST'],
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
archf/ansible | test/runner/shippable.py | Python | gpl-3.0 | 2,577 | 0.000776 |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Verify the current Shippable run has the required number of jobs."""
from __future__ import absolute_import, print_function
# noinspection PyCompatibility
import argparse
import errno
import os
import sys
from lib.http import (
HttpClient,
)
from lib.util import (
display,
ApplicationError,
ApplicationWarning,
MissingEnvironmentVariable,
)
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
"""Main program function."""
try:
args = parse_args()
display.verbosity = args.verbosity
display.color = args.color
try:
run_id = os.environ['SHIPPABLE_BUILD_ID']
except KeyError as ex:
raise MissingEnvironmentVariable(ex.args[0])
client = HttpClient(args)
        response = client.get('https://api.shippable.com/jobs?runIds=%s' % run_id)
jobs = response.json()
if len(jobs) == 1:
            raise ApplicationError('Shippable run %s has only one job. Did you use the "Rebuild with SSH" option?' % run_id)
except ApplicationWarning as ex:
display.warning(str(ex))
exit(0)
except ApplicationError as ex:
display.error(str(ex))
exit(1)
except KeyboardInterrupt:
exit(2)
except IOError as ex:
if ex.errno == errno.EPIPE:
exit(3)
raise
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--explain',
action='store_true',
help='explain commands that would be executed')
parser.add_argument('-v', '--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output')
parser.add_argument('--color',
metavar='COLOR',
nargs='?',
help='generate color output: %(choices)s',
choices=('yes', 'no', 'auto'),
const='yes',
default='auto')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.color == 'yes':
args.color = True
elif args.color == 'no':
args.color = False
elif 'SHIPPABLE' in os.environ:
args.color = True
else:
args.color = sys.stdout.isatty()
return args
if __name__ == '__main__':
main()
|
ericyue/mooncake_utils | setup.py | Python | apache-2.0 | 646 | 0.041796 |
# -*- coding:utf-8 -*-
#
# Copyright (c) 2017 mooncake. All Rights Reserved
####
# @brief
# @author Eric Yue ( hi.moonlight@gmail.com )
# @version 0.0.1
from distutils.core import setup
V = "0.7"
setup(
name = 'mooncake_utils',
packages = ['mooncake_utils'],
version = V,
description = 'just a useful utils for mooncake personal project.',
author = 'mooncake',
author_email = 'hi.moonlight@gmail.com',
url = 'https://github.com/ericyue/mooncake_utils',
download_url = 'https://github.com/ericyue/mooncake_utils/archive/%s.zip' % V,
keywords = ['utils','data','machine-learning'], # arbitrary keywords
classifiers = [],
)
|
Distrotech/PyQt-x11 | examples/graphicsview/dragdroprobot/dragdroprobot_rc2.py | Python | gpl-2.0 | 62,689 | 0.00008 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Mar 20 13:39:06 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x3a\x7c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x84\x00\x00\x00\xb1\x08\x04\x00\x00\x00\xaf\xfa\xdd\x32\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd6\x03\
\x10\x0a\x31\x18\xc7\xac\x62\xef\x00\x00\x00\x1d\x74\x45\x58\x74\
\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x43\x72\x65\x61\x74\x65\x64\x20\
\x77\x69\x74\x68\x20\x54\x68\x65\x20\x47\x49\x4d\x50\xef\x64\x25\
\x6e\x00\x00\x00\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\
\x00\x39\xe4\x49\x44\x41\x54\x78\xda\xd5\xbd\xd9\xb2\x5d\xc7\xb5\
\x9e\xf9\x65\xe6\x6c\x57\xbb\xf7\x06\x36\x00\xf6\x24\xa8\xc6\x75\
\x8e\x1d\x61\x59\xf2\x45\xd5\x45\x5d\x54\xd1\xe1\x8b\xba\x26\x1f\
\x81\x7a\x04\xea\x11\xc8\x47\x10\x1f\x41\x7a\x04\x31\xea\xae\xae\
\x7c\x8e\xe3\xd4\x29\x47\x58\x16\x4d\x1c\x51\x02\x89\x6e\x77\xab\
\x9d\x7d\xa6\x2f\x72\xcc\x5c\x73\x2d\x6c\x10\x0d\x49\x85\x0b\x0c\
\x92\x00\x76\xb7\xd6\x98\x99\xa3\xf9\xc7\x3f\xfe\xa1\x56\xfc\xb8\
\xbf\xdc\xe0\xf7\x2a\xfc\x9d\x02\x2c\x4a\x7e\x3f\xfc\x53\x4b\x07\
\xc4\x18\x2a\xda\x8f\xdb\xdf\x72\x19\xfd\x26\xfe\x3c\x46\x63\x9f\
\xf1\x13\x2c\xa0\xe4\xbb\x70\xcd\x4f\x7b\xb1\x5f\xea\x6f\x67\x08\
\xb5\xf7\x37\x8a\x0e\x85\x1a\x18\xa2\xa2\xfd\xb4\xf9\xa4\x43\x61\
\x30\xbf\x57\xf7\x16\x9f\x14\x40\x4e\x0c\x8c\x3e\x9b\xfc\xa6\xa1\
\xc3\xde\x8d\xee\xc5\x68\xc0\xe1\x00\x15\xbe\x9b\xda\x33\xf9\xff\
\xd4\x86\xf0\x2f\xdb\xc9\x5b\x77\x68\x1c\x96\x4e\x8c\xd2\x7c\x7a\
\xf1\x49\x41\x4d\xca\x98\x88\x92\x35\x5b\x1c\x09\x19\x0a\x4b\x42\
\x4e\x8d\x05\x52\x26\xe4\x2a\x12\xf3\x29\xf9\x09\xea\xe0\x27\xbd\
\xbc\x21\x22\xfe\x66\xbf\xfa\x0b\x01\x1a\xb0\x68\x3a\xca\x8f\xb7\
\xbf\xad\xb1\x54\x94\x6c\x51\xa4\x28\x1a\x36\x5c\xb2\x24\x26\xa7\
\xe2\x8a\x0a\x03\xb4\x14\x68\x62\x72\xe6\x1c\xbb\x39\xb9\xca\xe4\
\xad\x2a\x39\x1d\xdf\xef\xd7\xdf\xd0\x10\x6a\x70\x84\x2d\x0d\xd5\
\xdd\xf2\xab\x4b\x2e\xa8\xe8\x68\xd8\x72\xcc\x31\x73\x1c\x0b\xce\
\xd8\x62\xd1\x94\x14\x3c\x62\x43\x06\x14\x18\x12\x32\xb6\x6c\x29\
\xe9\x98\x3b\xad\x22\x8c\x7c\xd7\xff\x5f\x19\xa2\x3f\x17\x16\x47\
\xc3\xd6\x2d\x58\x73\xc6\x25\x30\x22\x22\x26\x05\x0a\x0a\x9e\x70\
\x41\x45\xc4\x15\x0d\x25\x05\x1d\x25\x06\x47\x8c\xc2\x51\x51\x51\
\x53\xb1\xe0\xc4\xa5\x8c\x2f\x47\x27\x11\xfa\xda\xab\xc1\xff\xac\
\x3e\xc2\x7b\x85\x9a\x0e\x28\xdd\x8a\xfb\x6c\x38\xa3\x60\xc2\x29\
\x33\x34\x11\x5b\x56\xac\x28\xa9\x29\x69\x38\x63\x83\xe1\x98\x31\
\x2d\x96\x0c\x87\x01\x6a\x1a\x14\x19\x29\x77\x98\x73\xca\x09\xe3\
\xf7\xd3\x7b\xe6\x7b\x3b\xcb\xe8\xd5\xdf\x9c\x7a\xea\x8d\xf6\xb7\
\xbf\x43\xc9\x53\xe2\xc0\x55\xb6\x34\x1f\x17\xbf\xdd\xb2\xa1\xe4\
\x3e\x1d\x96\x11\x63\x12\x34\x86\x2b\x36\x5c\xb1\x66\xcb\x96\x25\
\x25\x57\x4c\x99\xd3\xf0\x98\x16\x4d\x4c\xd3\xc7\x16\x22\x66\x4c\
\x80\x05\x6b\xd6\xcc\xbf\x9a\x31\x57\x99\x44\x21\xff\x1a\xa0\xc5\
\x1c\x38\xd3\x1f\xe1\x44\xb8\x67\x58\xdc\x49\x36\xe0\x7f\xf5\x41\
\x4e\x0d\x4e\x44\xcd\x85\x5b\x52\x72\xce\x43\x2e\xb0\x8c\xb8\xc1\
\x18\x47\x83\x43\xb3\xe1\x82\x15\x1b\x4a\xb6\x54\x28\x12\x62\x34\
\x1d\x2d\x00\xe7\x00\xc4\xa4\xa4\xc4\x18\x6e\x62\xd1\x44\x64\x9c\
\xf0\x26\xb7\x78\x53\xc5\x98\xf0\xca\xdc\xde\xef\x86\xa1\x5b\xfd\
\x18\x3e\x62\xdf\x24\x3e\x18\x6a\x1c\x0d\xb5\xb3\xe8\xcb\xf4\xa4\
\xf7\xed\x0d\x06\x47\xf5\xe9\x39\xe7\x94\x3c\xe0\x4b\x22\x72\xc6\
\x28\xd6\x2c\x68\x48\x49\xb9\xe2\x21\x17\x94\x34\x58\x2c\x73\x3a\
\x3a\xac\x18\xb8\xa3\xa6\x45\xa1\x18\x13\x63\xd9\xb2\x92\x8f\x6b\
\xe6\x9c\xf1\x0e\xb8\x19\xe3\x5f\xeb\xcf\x15\x86\x18\xb5\xf7\x50\
\x86\x67\xf7\x7a\x63\x7c\xcf\x13\x31\xbc\x97\xed\x20\x2e\x94\x77\
\xb7\x5f\xad\x51\xc4\x8c\x2f\x27\x27\x11\x8e\x0e\x43\xc9\x63\xf7\
\x35\x8f\xd9\xf0\x98\x87\x24\xdc\xe6\x84\x96\x4b\x1a\x26\xcc\xf8\
\x86\x25\x17\xac\xb0\x44\x24\xc4\xa4\x28\x8c\x98\x01\xa0\xa2\xa2\
\x21\x66\x4c\x4c\x4b\xcd\x82\x88\x84\x08\x48\xb9\xcd\x1d\xde\xe1\
\x84\x13\x72\x12\x72\x95\x60\xb0\x72\x55\x86\x79\x86\x1b\xc4\xaf\
\x1f\x25\x6a\xa8\x70\x19\x2c\x0e\x4d\x7a\x4f\xab\x09\x6b\x77\xc1\
\xf9\xf1\xc8\x1d\x33\x55\x31\x9b\x0f\x1e\xfd\xe1\x6b\xce\xb9\xe2\
\x9c\x0d\x13\x40\xb1\xe0\x31\x35\xc7\x38\x9e\xf0\xdf\x68\xb1\x40\
\x42\x8c\x4f\xa9\x33\x72\xa0\xa2\x41\x11\x61\x69\xa9\xd9\xb0\xc1\
\x88\x07\xf2\x97\xa7\xa5\xe6\x9c\x86\x29\x2d\x1d\xc7\x1c\x01\x8e\
\x6e\xcf\x87\xb9\xe7\x86\xd8\xe8\x87\x88\x07\x4a\x6e\xaf\xa5\xb8\
\xbb\xf8\xaa\xc6\xa0\x68\xb9\xf1\x59\xc5\x9a\x05\x09\x25\x85\xcb\
\x78\xcc\x3d\xee\xd3\xb2\xe1\x09\x96\x39\x8e\x2b\xb6\x6c\x99\x10\
\x71\xce\x5f\x29\x31\x24\x68\x1c\x25\x0b\x5a\x26\x8c\xa8\xb1\x6c\
\x28\x81\x08\x47\x4d\x45\x41\x87\x26\x23\x46\xd1\xe1\x48\x71\x40\
\x89\xe1\x4b\x26\x9c\xf2\x3a\x11\x63\x0c\x4a\x2e\xd5\xbe\xb3\xfe\
\x51\x9c\xa5\x93\x3c\xb1\x2f\x86\x62\x2a\xce\xdd\x37\xac\x70\x54\
\x2c\xb9\x43\xc1\x8a\x12\x48\x48\x31\xac\x79\xc2\x0a\x45\xc1\x05\
\x35\x39\x19\x1d\x90\x12\x51\x70\xc9\x92\x11\x96\x96\x8a\x92\x9a\
\x0e\xcb\x1c\x83\xa6\xa5\xa4\x06\x14\x29\x96\x8e\x0e\x87\x26\xc2\
\xe0\x80\x84\x8c\x94\x88\x8c\x9c\x2d\x19\x47\xbc\xc5\xff\xc2\x4f\
\x39\x51\xd1\xc0\x47\xb8\xa7\x6a\x11\xf5\x43\x9e\x08\x9f\xda\xb6\
\x74\x1f\xb7\x9f\xda\x63\xc7\x44\x55\xff\xe0\x1d\x5d\xc7\x15\xdf\
\x72\xc1\x96\x05\x2b\x6a\x14\x31\x31\x13\x1c\xd0\x51\x61\xa9\xd8\
\x70\x8c\xe2\x84\x23\x9e\xf0\x67\x6a\x8e\x68\xd8\xb2\x64\x43\x43\
\x4c\xce\x88\x86\x8a\x8e\x16\x8b\x95\x24\xcc\x07\x68\x03\x34\x54\
\xf2\xd2\x63\x3a\x1c\x2d\x25\x13\xa0\xe4\x9c\xc7\xdc\x20\xfd\x87\
\xd1\xaf\x8c\x44\x2c\xf7\x7d\xf3\x88\xa7\x13\x94\x0e\x27\x5f\xe0\
\xd0\x40\x4d\x71\xd1\x1c\xb7\x94\x94\x74\x94\x6e\xc9\x23\x1e\x71\
\xc6\x86\x0a\xcb\x29\x96\x92\x92\x05\x1b\x6a\x1c\x37\x98\x32\xa1\
\x65\xc9\x96\x88\x11\x0d\xff\x9a\x9c\xff\x8f\x7f\x21\x62\x42\xc9\
\x5f\x68\xa8\x51\x64\x8c\xa4\xe6\xac\x68\x68\x48\xc8\xb0\x54\x74\
\xe4\x44\x38\x22\x32\xa0\x64\x89\xc2\xe2\xe8\x98\x30\x22\x61\x4c\
\x44\x46\xc4\x92\xc7\x8c\x7e\x19\xdd\xcd\xef\xa9\x67\x84\x4a\xf5\
\xaa\x3e\x42\x85\xcc\xc0\xed\xfd\x4d\x7b\x77\x73\x5c\x52\xb3\x61\
\x41\xc1\x92\x4b\x1e\xb3\xa2\x15\xff\xfc\x98\x96\x82\x35\x05\x9a\
\x31\x63\x46\x68\x36\xac\x59\xd1\x31\x22\xe5\x94\x6f\x59\x72\x81\
\xa6\x66\x49\x41\x81\xc3\x61\x29\xe9\xa8\x49\xd8\x62\xe9\xd0\x18\
\x71\x93\x71\x40\x2c\x2a\x14\x8e\x1b\xf2\xd1\x0c\x43\x4b\x87\x22\
\x27\x27\x21\xf1\xce\xf4\x9e\x1b\xbc\x61\xf5\x7d\x4e\x84\xba\xa6\
\x7a\x54\xc1\x03\xb7\x54\x77\x2f\xbf\x7a\xcc\x9a\x8a\x2b\xce\x58\
\xe0\xb8\x62\x41\x4b\x8c\xa2\xa4\xa0\xc0\xd2\xd0\x00\x29\x33\x66\
\x2c\xe9\xc4\x03\x24\xa4\xe4\x38\xfe\xca\x63\x14\x8a\x15\x6b\xc9\
\x0b\x15\x96\x9a\x96\x96\x94\x0c\x7c\x00\x26\xa1\x40\x01\x11\x29\
\xd0\x62\xe9\xb0\x92\xbe\xa5\x80\xc6\xd2\xb1\x65\x2a\xaf\x3b\x25\
\xa5\x71\x77\x54\x44\xf4\xc2\xb9\x65\xf4\x32\x2e\x52\x85\xff\xd6\
\xac\xdd\x43\x2e\x39\x67\x4b\xc1\x25\x67\x6c\x18\x51\xca\x5d\xae\
\x59\xb1\x21\x45\x33\x22\x21\x26\x42\xb1\x61\x25\x3f\xd0\x47\xff\
\x8e\x6f\x59\xd2\xe1\x28\x59\xe2\xc8\x05\xab\xf0\x7e\x40\x03\x39\
\xa0\x88\xc8\x18\x91\x92\xb0\xc0\x90\x12\x53\xb1\xa1\xc5\x52\
|
xd3\
\xa1\x70\x24\xc4\x40\x4b\xcd\x16\x45\xc1\x15\x57\x5c\x72\x83\xad\
\x9b\x32\x23\x57\x26\xa4\xdd\xdf\xdb\x10\xee\
|
xe0\x4f\x1d\x4b\xf7\
\x90\xaf\xd8\x52\x52\xb3\xe6\x92\x15\x0d\x1b\x6a\x1a\x1a\xc9\x03\
\x0d\xa7\x28\x62\x46\xc4\x94\x5c\xb0\x22\xc6\x04\xef\x5d\xd3\xb0\
\x25\x66\xcc\x86\x46\x70\x06\x45\x43\x85\x21\x26\xc6\xa0\x59\x89\
\xd9\xb7\x8c\x88\xe9\xe8\xd8\xd2\x90\xd1\x52\xe1\xe4\xcd\xbb\x41\
\x4d\x33\x25\x92\xef\xdc\xd0\x70\xc9\x82\x37\x78\x8f\xd3\xdf\xe5\
\x1f\x99\x6b\x11\xac\x97\x34\x84\x1b\xfc\xd7\x07\xca\x8e\xb5\x7b\
\xc8\x7d\x2e\xe4\x47\xae\x59\x51\x60\xb9\xa0\x91\x80\x38\x62\x44\
\xca\x2d\x2a\x1a\xf1\xf0\x15\x25\x11\x84\x97\xad\x31\x01\x6c\xcb\
\xc8\x50\x38\x52\xa0\xc3\x11\xcb\x19\x8a\x50\x72\x99\x5a\xc9\x1a\
\x7c\x44\x89\x80\x8c\x29\x5b\x79\x28\x2d\x56\xaa\x92\xfe\x12\x58\
\x36\xb4\x8c\x69\x31\xc4\x1f\x45\xa8\x83\x2a\xe8\x15\x0c\xe1\x06\
\xa6\xd0\x92\x39\x6c\x2e\x1e\xf1\x0d\x17\x74\x14\x6c\x28\xd9\xb0\
\xa1\xa0\xa5\xc4\x61\x18\x71\xcc\x9c\x1c\x43\x22\x61\xb2\xa4\xa6\
\x25\xa1\x23\x26\x23\x0e\x60\x9d\xa6\xc0\x91\x60\x80\x8e\x4e\xae\
\x81\xcf\x12\x22\x34\xb7\x48\x81\x15\x2b\x5a\x34\x8a\x86
|
Curlybear/Socrates | battle.py | Python | gpl-3.0 | 23,254 | 0.001978 |
import configparser
import json
from os.path import join
import discord
import requests
from discord.ext import commands
import ereputils
# Config reader
config = configparser.ConfigParser()
config.read("config.ini")
# API Key
apiKey = config["DEFAULT"]["api_key"]
apiVersion = config["DEFAULT"]["api_version"]
class Battle(commands.Cog, name="Battle"):
def __init__(self, bot):
self.bot = bot
self.utils = ereputils.ErepUtils()
async def cog_command_error(self, ctx, error):
if isinstance(error, (commands.ArgumentParsingError)):
await ctx.send(error)
@commands.command(pass_context=True, aliases=["RH"])
async def rh(self, ctx, *, in_country):
"""Returns the list of occupied regions of a given country"""
try:
if in_country != "World":
uid = self.utils.get_country_id(in_country)
country = self.utils.get_country_name(uid)
region_text = ""
time_text = ""
occupied_text = ""
count = 0
r = requests.get(
"https://api.erepublik.tools/"
+ apiVersion
+ "/region/list?key="
+ apiKey
)
obj = json.loads(r.text)
regions = obj["regions"]
picked_regions = list()
for region in regions:
if (
region["original_owner_country_id"] == uid
or region["current_owner_country_id"] == uid
) and region.get("under_occupation_since"):
picked_regions.append(region)
picked_regions.sort(
key=lambda region: region["under_occupation_since"]["date"],
reverse=True,
)
for region in picked_regions:
if count < 20:
region_text += (
self.utils.get_country_flag(region["original_owner_country_id"])
+ " **"
+ region["name"]
+ "**"
+ "\n"
)
time_text += (
":small_blue_diamond: "
+ region["under_occupation_since"]["date"][:-7]
+ "\n"
)
country_name = self.utils.get_country_name(
region["current_owner_country_id"]
)
country_name = (
(country_name[:17] + "..")
if len(country_name) > 17
else country_name
)
occupied_text += (
":small_orange_diamond: "
+ self.utils.get_country_flag(
region["current_owner_country_id"]
)
+ " "
+ country_name
+ "\n"
)
count = count + 1
if count:
region_text = (
region_text
+ "\n**Total occupied regions: **"
+ str(len(picked_regions))
)
if count < len(picked_regions):
region_text = region_text + " ({} not displayed)".format(
len(picked_regions) - count
)
embed = discord.Embed(colour=discord.Colour(0xCE2C19))
embed.set_author(
name=country + " RHs",
icon_url="https://static.erepublik.tools/assets/img/erepublik/country/"
+ str(uid)
+ ".gif",
)
embed.set_footer(
text="Powered by https://erepublik.tools",
icon_url="https://erepublik.tools/assets/img/icon76.png",
)
if region_text == "":
embed.add_field(
name="Regions",
value="No regions under occupation are held by " + country,
inline=True,
)
else:
embed.add_field(name="Regions", value=region_text, inline=True)
embed.add_field(
name="Under occupation since", value=time_text, inline=True
)
embed.add_field(name="Occupied by", value=occupied_text, inline=True)
await ctx.message.channel.send("", embed=embed)
except:
            raise commands.ArgumentParsingError(
"Country ***" + in_country + "*** not recognized"
)
@commands.command(pass_context=True, aliases=["SH"])
async def sh(self, ctx):
"""Returns the list of the upcoming air rounds as well as air rounds with limited damage done."""
r = requests.get("https://www.erepublik.com/en/military/campaignsJson/list")
data = json.loads(r.text)
battles = data["battles"]
picked_battles = list()
for battle_id in battles:
battle = battles[battle_id]
if battle["type"] == "aircraft":
battle["delay"] = battle["start"] - data["time"]
if battle["delay"] > 0:
picked_battles.append(battle)
else:
battle["started_since"] = data["time"] - battle["start"]
div_id = next(iter(battle["div"]))
if battle["div"][div_id]["stats"]["inv"]:
battle["inv_damage"] = battle["div"][div_id]["stats"]["inv"][
"damage"
]
else:
battle["inv_damage"] = 0
if battle["div"][div_id]["stats"]["def"]:
battle["def_damage"] = battle["div"][div_id]["stats"]["def"][
"damage"
]
else:
battle["def_damage"] = 0
if battle["inv_damage"] < 30000 or battle["def_damage"] < 30000:
picked_battles.append(battle)
if len(picked_battles) > 0:
picked_battles.sort(key=lambda battle: -battle["delay"])
embed = discord.Embed(colour=discord.Colour(0xCE2C19))
embed.set_author(name="SHs")
embed.set_footer(
text="Powered by https://erepublik.tools",
icon_url="https://erepublik.tools/assets/img/icon76.png",
)
battle_text = ""
damage_text = ""
time_text = ""
for battle in picked_battles:
if len(battle_text) > 900:
embed.add_field(name="Battle", value=battle_text, inline=True)
embed.add_field(name="Damage", value=damage_text, inline=True)
embed.add_field(name="Time", value=time_text, inline=True)
battle_text = ""
damage_text = ""
time_text = ""
if "started_since" in battle:
battle_text = "{}{}-{} [{}](https://www.erepublik.com/en/military/battlefield/{})\n".format(
battle_text,
self.utils.get_country_flag(battle["inv"]["id"]),
self.utils.get_country_flag(battle["def"]["id"]),
battle["region"]["name"],
battle["id"],
)
damage_text = "{}{:<6}-{:>6}\n".format(
damage_text, battle["inv_damage"], battle["def_damage"]
)
time_text = "{}+{}m{}s\n".format(
time_text,
battle["started_since"] // 60,
battle["started_since"] % 60,
)
else:
battle_text = "{}{}-{} [{}](https://www.erepublik.com/en/military/battlefield/{})\n".format(
battle_text,
|
ipwnponies/youtube-sort-playlist | playlist_updates.py | Python | unlicense | 13,622 | 0.002936 |
#! /usr/bin/env python
import argparse
import operator
import os
import sys
from collections import namedtuple
from functools import lru_cache
from functools import reduce
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
import addict
import arrow
import googleapiclient.errors
import httplib2
import oauth2client.client
import oauth2client.file
import oauth2client.tools
import yaml
from apiclient.discovery import build # pylint: disable=import-error
from isodate import parse_duration
from isodate import strftime
from tqdm import tqdm
from xdg import XDG_CACHE_HOME
print = tqdm.write # pylint: disable=invalid-name,redefined-builtin
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
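# Illustration only (not part of the original sample): a client_secrets.json
# for an installed application downloaded from the Cloud Console typically
# looks roughly like the sketch below; the placeholder values are yours to
# fill in.
#
#   {
#     "installed": {
#       "client_id": "<your-client-id>.apps.googleusercontent.com",
#       "client_secret": "<your-client-secret>",
#       "auth_uri": "https://accounts.google.com/o/oauth2/auth",
#       "token_uri": "https://oauth2.googleapis.com/token",
#       "redirect_uris": ["http://localhost"]
#     }
#   }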
CLIENT_SECRETS_FILE = 'client_secrets.json'
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(
os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)
)
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account.
YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube'
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
VideoInfo = namedtuple('VideoInfo', ['channel_id', 'published_date', 'duration'])
JsonType = Dict[str, Any]
class YoutubeManager:
def __init__(self, dry_run: bool, args: List[str]) -> None:
self.youtube = self.get_youtube(args)
self.dry_run = dry_run
@staticmethod
def get_creds(args: List[str]) -> oauth2client.client.Credentials:
'''Authorize client with OAuth2.'''
flow = oauth2client.client.flow_from_clientsecrets(
CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE
)
storage = oauth2client.file.Storage('{}-oauth2.json'.format(sys.argv[0]))
credentials = storage.get()
if credentials is None or credentials.invalid:
flags = oauth2client.tools.argparser.parse_args(args)
credentials = oauth2client.tools.run_flow(flow, storage, flags)
return credentials
def get_youtube(self, args: List[str]):
'''Get youtube data v3 object.'''
creds = self.get_creds(args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=creds.authorize(httplib2.Http()))
def get_watchlater_playlist(self) -> str:
'''Get the id of the 'Sort Watch Later' playlist.
The 'Sort Watch Later' playlist is regular playlist and is not the same as the magical one that all
youtube users have by default.
'''
playlists = self.youtube.playlists().list(part='snippet', mine=True).execute()
playlist_id = next(i['id'] for i in playlists['items'] if i['snippet']['title'] == 'Sort Watch Later')
return playlist_id
def get_playlist_videos(self, watchlater_id: str) -> List[JsonType]:
'''Returns list of playlistItems from Sort Watch Later playlist'''
result: List[Dict] = []
request = self.youtube.playlistItems().list(part='snippet', playlistId=watchlater_id, maxResults=50)
# Iterate through all results pages
while request:
response: Dict[str, Dict] = request.execute()
result.extend(response['items'])
# Prepare next results page
request = self.youtube.playlistItems().list_next(request, response)
return result
    def get_video_info(self, playlist_videos: List[JsonType]) -> Dict[str, VideoInfo]:
'''Returns a dict of VideoInfo for each video
The key is video id and the value is VideoInfo.
'''
result = {}
videos = [i['snippet']['resourceId']['videoId'] for i in playlist_videos]
# Partition videos due to max number of videos queryable with one api call
while videos:
to_query = videos[:50]
remaining = videos[50:]
response = (
self.youtube.videos()
.list(part='snippet,contentDetails', id=','.join(list(to_query)), maxResults=50)
.execute()
)
for i in response['items']:
video_id = i['id']
channel_id = i['snippet']['channelId']
published_date = i['snippet']['publishedAt']
duration = parse_duration(i['contentDetails']['duration'])
result[video_id] = VideoInfo(channel_id, published_date, duration)
videos = remaining
return result
def sort_playlist(self, playlist_videos: List[Dict], video_infos: JsonType) -> None:
'''Sorts a playlist and groups videos by channel.'''
def sort_key(playlist_item):
'''Groups together videos from the same channel, sorted by date in ascending order.'''
video_id = playlist_item['snippet']['resourceId']['videoId']
channel_name, published_date, _ = video_infos[video_id]
return '{}-{}'.format(channel_name, published_date)
sorted_playlist = sorted(playlist_videos, key=sort_key)
for index, i in enumerate(tqdm(sorted_playlist, unit='video')):
print('{} is being put in pos {}'.format(i['snippet']['title'], index))
if not self.dry_run:
i['snippet']['position'] = index
self.youtube.playlistItems().update(part='snippet', body=i).execute()
def get_subscribed_channels(self) -> List[Dict[str, str]]:
channels: List[Dict[str, str]] = []
next_page_token = None
request = self.youtube.subscriptions().list(part='snippet', mine=True, maxResults=50, pageToken=next_page_token)
while request:
response = request.execute()
response = addict.Dict(response)
channels.extend({'title': i.snippet.title, 'id': i.snippet.resourceId.channelId} for i in response['items'])
request = self.youtube.subscriptions().list_next(request, response)
return channels
def get_channel_details(self, channel_id: str) -> addict.Dict:
request = self.youtube.channels().list(part='contentDetails', id=channel_id)
# Only 1 item, since queried by id
channel_details = addict.Dict(request.execute()['items'][0])
return channel_details
def add_channel_videos_watch_later(self, channel: str, uploaded_after: arrow) -> None:
video_ids = []
channel_details = self.get_channel_details(channel)
uploaded_playlist = channel_details.contentDetails.relatedPlaylists.uploads
request = self.youtube.playlistItems().list(part='snippet', playlistId=uploaded_playlist, maxResults=50)
while request:
response = addict.Dict(request.execute())
recent_videos = [
{'id': i.snippet.resourceId.videoId, 'title': i.snippet.title}
for i in response['items']
if i.snippet.resourceId.kind == 'youtube#video' and arrow.get(i.snippet.publishedAt) >= uploaded_after
]
if not recent_videos:
break
|
gcq/pyth
|
server.py
|
Python
|
mit
| 2,516
| 0.006757
|
#!venv/bin/python
from flask import Flask, render_template, request, Response
import os
import time
import subprocess
app = Flask(__name__, template_folder='.', static_folder='.')
@app.route('/')
def root():
time_in_secs = os.path.getmtime('pyth.py')
time_in_python = time.gmtime(time_in_secs)
formatted_time = time.strftime("%d %b %Y", time_in_python)
return render_template('index.html',
formatted_time=formatted_time,
code=request.args.get('code', ''),
input=request.args.get('input', ''),
debug=int(request.args.get('debug', 0)),
test_suite=int(request.args.get('test_suite', 0)),
test_suite_input=request.args.get('test_suite_input', ''),
input_size=int(request.args.get('input_size', 1)))
def run_code(code_message, input_message, debug_on):
resp = ''
input_message += '\n'
    pyth_code = '\n'.join(code_message.split("\r\n"))
pyth_process = \
        subprocess.Popen(['/usr/bin/env',
'python3',
'pyth.py',
'-csd' if debug_on else '-cs',
pyth_code],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, errors = \
pyth_process.communicate(input=bytearray(input_message, 'utf-8'))
if code_message:
resp += output.decode() + (errors if errors else '')
return resp
@app.route('/submit', methods=['POST'])
def submit():
code_message = request.form.get('code', '')
input_message = request.form.get('input', '')
    debug_on = int(request.form.get('debug', 0))
return Response(run_code(code_message, input_message, debug_on))
@app.route('/submit_test_suite', methods=['POST'])
def submit_test_suite():
code_message = request.form.get('code', '')
input_size = int(request.form.get('input_size', '1'), 0)
inputs = ["\n".join(i) for i in zip(*[iter(request.form.get('input', '').split("\n"))]*input_size)]
    debug_on = int(request.form.get('debug', 0))
return Response("\n".join([run_code(code_message, inputs[0], debug_on)] + \
[run_code(code_message, i, False) for i in inputs[1:]]) if inputs else "")
@app.route('/<path>')
def other(path):
return app.send_static_file(path)
if __name__ == '__main__':
app.run(debug=True)
|
unt-libraries/catalog-api
|
django/sierra/shelflist/tests/test_api.py
|
Python
|
bsd-3-clause
| 37,869
| 0.003116
|
"""
Tests API features applicable to the `shelflist` app.
"""
import pytest
import ujson
import jsonpatch
from datetime import datetime
from shelflist.exporters import ItemsToSolr
from shelflist.search_indexes import ShelflistItemIndex
from shelflist.serializers import ShelflistItemSerializer
# FIXTURES AND TEST DATA
# Fixtures used in the below tests can be found in ...
# django/sierra/base/tests/conftest.py:
# API_ROOT: Base URL for the API we're testing.
API_ROOT = '/api/v1/'
REDIS_SHELFLIST_PREFIX = ItemsToSolr.redis_shelflist_prefix
# PARAMETERS__* constants contain parametrization data for certain
# tests. Each should be a tuple, where the first tuple member is a
# header string that describes the parametrization values (such as
# what you'd pass as the first arg to pytest.mark.parametrize); the
# others are single-entry dictionaries where the key is the parameter-
# list ID (such as what you'd pass to pytest.mark.parametrize via its
# `ids` kwarg) and the value is the list of parameters for that ID.
# PARAMETERS__FILTER_TESTS: Parameters for testing API filter
# behavior that works as intended. The provided `search` query string
# matches the `test_data` record(s) they're supposed to match.
# NOTE: Because the shelflistitems resource is used to support the
# inventory app, the tests (particularly filter tests) are aimed at
# testing features as used by that app; they don't test every edge case
# and every possibility.
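# Illustrative sketch (added here; not part of the original test module): a
# consuming test would typically unpack one of these constants roughly like
# this before handing it to pytest:
#
#   header, *param_dicts = PARAMETERS__FILTER_TESTS
#   ids = [key for d in param_dicts for key in d]
#   params = [d[key] for d in param_dicts for key in d]
#
#   @pytest.mark.parametrize(header, params, ids=ids)
#   def test_filter(test_data, search, expected, ...):
#       ...
#
# The helper actually used in this repo may differ; this only shows how the
# header string, parameter-list IDs and parameter lists map onto
# pytest.mark.parametrize.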
PARAMETERS__FILTER_TESTS = (
'test_data, search, expected',
# Filter by Call Number
{ 'exact call number | one match': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber=AB100 .A1 1', ['TEST1']),
}, { 'exact call number, truncated | no matches': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber=AB100 .A1', []),
}, { 'exact call number, normalized | one match': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber=ab100a11', ['TEST1']),
}, { 'exact call number, normalized and truncated | no matches': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber=ab100a1', []),
}, { 'startswith call number | no matches': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber[startswith]=AB102', []),
}, { 'startswith call number | multiple matches': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber[startswith]=AB1', ['TEST1', 'TEST2']),
}, { 'startswith call number, extra spaces | one match': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber[startswith]=AB 100 .A1', ['TEST1']),
},
# Filter by Call Number and Type
{ 'call number is correct; type is incorrect | no matches': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber=AB 100 .A1 1&callNumberType=other', []),
}, { 'call number is correct; type is correct | one match': ((
('TEST1', {'call_number': 'AB100 .A1 1', 'call_number_type': 'lc'}),
('TEST2', {'call_number': 'AB101 .A1 1', 'call_number_type': 'lc'}),
), 'callNumber=AB 100 .A1 1&callNumberType=lc', ['TEST1']),
},
# Filter by Barcode
{ 'exact barcode | one match': ((
('TEST1', {'barcode': '5555000001'}),
('TEST2', {'barcode': '5555000002'}),
), 'barcode=5555000001', ['TEST1']),
}, { 'exact barcode, truncated | no matches': ((
('TEST1', {'barcode': '5555000001'}),
('TEST2', {'barcode': '5555000002'}),
), 'barcode=555500000', []),
}, { 'startswith barcode, truncated | one match': ((
('TEST1', {'barcode': '5555000001'}),
('TEST2', {'barcode': '5554000001'}),
), 'barcode[startswith]=5555', ['TEST1']),
}, { 'startswith barcode, truncated | multiple matches': ((
('TEST1', {'barcode': '5555000001'}),
('TEST2', {'barcode': '5554000001'}),
), 'barcode[startswith]=555', ['TEST1', 'TEST2']),
},
# Filter by Item Status and Due Date
{ 'status CHECKED OUT | one match': ((
('TEST1', {'status_code': '-',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': '-', 'due_date': None}),
), 'status_code=-&dueDate[isnull]=false', ['TEST1']),
}, { 'status CHECKED OUT and status code a | one match': ((
('TEST1', {'status_code': 'a',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': '-', 'due_date': None}),
), 'status_code=a&dueDate[isnull]=false', ['TEST1']),
}, { 'status CHECKED OUT and status code a, b | multiple matches': ((
('TEST1', {'status_code': 'a',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': 'b', 'due_date': None}),
('TEST3', {'status_code': 'b',
'due_date': datetime(2019, 9, 30, 00, 00, 00)}),
('TEST4', {'status_code': '-', 'due_date': None}),
), 'status_code[in]=[a,b]&dueDate[isnull]=false', ['TEST1', 'TEST3']),
}, { 'status CHECKED OUT and status code a | no matches': ((
('TEST1', {'status_code': '-',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': '-', 'due_date': None}),
), 'status_code=a&dueDate[isnull]=false', []),
}, { 'status NOT CHECKED OUT and status code - | one match': ((
        ('TEST1', {'status_code': '-',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': '-', 'due_date': None}),
), 'status_code=-&dueDate[isnull]=true', ['TEST2']),
    }, { 'status NOT CHECKED OUT and status code a | one match': ((
('TEST1', {'status_code': '-',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': 'a', 'due_date': None}),
), 'status_code=a&dueDate[isnull]=true', ['TEST2']),
}, { 'status NOT CHECKED OUT and status code a, b | multiple matches': ((
('TEST1', {'status_code': 'b',
'due_date': datetime(2019, 6, 30, 00, 00, 00)}),
('TEST2', {'status_code': 'a', 'due_date': None}),
('TEST3', {'status_code': 'b', 'due_date': None}),
), 'status_code[in]=[a,b]&dueDate[isnull]=true', ['TEST2', 'TEST3']),
},
# Filter by Suppression
{ 'suppression | one match': ((
('TEST1', {'suppressed': True}),
('TEST2', {'suppressed': False}),
), 'suppressed=true', ['TEST1']),
}, { 'suppression | no matches': ((
('TEST1', {'suppressed': True}),
('TEST2', {'suppressed': True}),
), 'suppressed=false', []),
}, { 'suppression | multiple matches': ((
('TEST1', {'suppressed': True}),
('TEST2', {'suppressed': True}),
), 'suppressed=true', ['TEST1', 'TEST2']),
},
# Filter by Shelf / Inventory Status
{ 'shelf status, on shelf | one match': ((
('TEST1', {'shelf_status': 'onShelf'}),
('TEST2', {'shelf_status': 'unknown'}),
), 'shelfStatus=onShelf', ['TEST1']),
}, { 'shelf status, on shelf | no matches': ((
('TEST1', {'shelf_status': 'unknown'}),
('TEST2', {'shelf_status': 'unknown'}),
), 'shelfStatus=onShelf', []),
}, { 'shelf status, on shelf or unknown | multiple matches': ((
('TEST1', {'shelf_status': 'onShelf'}),
('TEST2', {'shelf_statu
|
jhgoebbert/cvl-fabric-launcher
|
wsgidav/server/run_reloading_server.py
|
Python
|
gpl-3.0
| 1,415
| 0.013428
|
# (c) 2009-2011 Martin Wendt and contributors; see WsgiDAV http://wsgidav.googlecode.com/
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Wrapper for ``run_server``, that restarts the server when source code is
modified.
"""
import os
import sys
from subprocess import Popen
def run():
args = sys.argv[1:]
if not "--reload" in args:
args.append("--reload")
print "run_reloading_server", args
try:
serverpath = os.path.join(os.path.dirname(__file__), "run_server.py")
while True:
p = Popen(["python", serverpath] + args,
# stdin=sys.stdin,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
#                       preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags
)
sys.stdout = p.stdout
sys.stderr = p.stderr
p.wait()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
if p.returncode == 3:
print "run_server returned 3: restarting..."
else:
print "run_server returned %s: terminating." % p.returncode
break
except Exception, e:
raise e
if __name__ == "__main__":
run()
|
DevinDewitt/pyqt5
|
examples/mainwindows/sdi/sdi_rc.py
|
Python
|
gpl-3.0
| 36,524
| 0.000137
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun May 12 18:04:51 2013
# by: The Resource Compiler for PyQt (Qt v5.0.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x03\x54\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xe6\x49\x44\x41\x54\x58\xc3\xd5\
\x97\xcd\x4e\x13\x61\x14\x86\xeb\x35\x94\x95\x7b\x71\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb1\x30\xea\x05\x18\x96\
\x26\x62\x58\xb8\xb0\x91\x58\x20\xd1\x9d\xbf\x89\xa4\x14\xb1\x52\
\xa4\x48\x45\x94\xfe\xd0\x02\x43\xff\xa6\x9d\x19\xa6\x65\x80\xe3\
\x79\x7b\xfa\x85\x51\x4a\x82\xc9\x21\x86\x49\xde\x9c\x33\xa7\xf3\
\xcd\xfb\x9c\xf3\x4d\x9b\x4e\x84\x88\x22\xff\x53\x91\x73\x01\xc0\
\xc7\xd5\x90\x6e\xff\xa5\xfb\xac\xc7\x3d\x3d\x64\x0d\xa9\x02\xf0\
\x31\x32\x3c\x3c\xbc\x6a\x34\x3a\x3a\xba\x19\x56\x3c\x1e\xaf\x26\
\x93\xc9\x56\x3a\x9d\x76\x13\x89\x44\x6b\x60\x60\x20\xcd\x6b\x6e\
\x68\x02\xa4\x38\xd2\xe1\xe1\x71\x99\xba\xef\xb7\xc9\xb2\x2c\xda\
\xdf\xdf\x27\x86\xf1\x78\xcd\x18\xeb\x8a\x1a\x40\x3f\xf3\xb0\x1c\
\xc7\xa5\x4c\x66\xb9\x0b\x14\x04\x01\xc5\x62\xb1\x3a\xaf\x7b\x70\
\x1a\x88\x53\x01\x1c\x1c\x10\x77\x77\xb2\x6c\xdb\xa1\xf9\xf9\xcf\
\x64\x0e\xd7\x75\xe9\xf9\xc4\x44\x17\x42\x05\x00\x26\x7b\xc1\xc9\
\xaa\x37\x1c\x4a\xce\xcd\x53\xf8\x70\x5d\x0f\x8b\x17\x54\x00\x82\
\x10\x40\x67\x4f\x14\xce\xed\xa6\x47\x1f\x67\x66\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9\x69\x7a\xf7\xfe\x03\x45\xa3\xd1\x65\x5e\
\x7f\x41\x05\xc0\xef\x10\xed\xb6\x25\x86\x85\x9a\xe3\x05\x94\x5d\
\xcd\xd1\xe4\xf4\x2b\x7a\x32\xfe\x94\x9e\xc5\x5e\xd0\x4c\x62\x0e\
\x8b\x17\x55\x00\xda\x81\x18\xf5\x13\x20\x3c\xff\x90\x6a\xcd\x36\
\x15\x37\xab\x94\x2f\x6e\x53\x89\x63\x8d\xb7\x85\xd7\x7e\x51\x01\
\xf0\x79\xcc\xcd\x5d\x1e\xb5\xc7\x7b\xdb\xee\x9f\x3b\xbe\xe4\x88\
\x5d\xb8\xbd\xee\xe2\x94\xca\x33\xe0\x75\xe4\xc6\x75\x57\x62\xd8\
\x10\x39\xea\xe6\x33\x44\xd4\x01\xa7\x06\xe0\xf4\x3a\xad\x39\x22\
\x98\x98\x68\x72\x80\x98\x6b\x50\x53\x9d\x00\x00\x2a\x2d\xb9\x31\
\xe2\x4e\x53\x8c\x10\x0d\x04\xf2\x6d\xfb\x28\xb6\x7c\x45\x00\x9b\
\x3b\xdb\x6a\xfc\x69\x8e\x3c\x6c\x88\x1a\xae\x39\x13\x80\x3a\x8f\
\xb7\x54\x23\x2a\xd7\xc5\x04\x06\x06\x00\x35\x28\x9c\x17\xab\xbc\
\x25\xbb\xca\x13\xc0\x4d\x61\x0e\x15\x2a\x72\x6e\xcc\x7e\x5a\x02\
\x68\x6a\xdd\xad\xf1\x94\x27\x00\x53\xdc\x1c\x71\x6d\x5b\x40\x60\
\x9a\xab\x1c\x75\x9e\xeb\x81\x41\x15\x47\x11\xc0\x6a\x89\x31\x0c\
\xd6\x77\x04\x20\x0c\x64\x26\x62\xb6\x69\x75\x8b\xa8\xaa\x09\x50\
\xb6\xc5\xbc\xd0\x03\xf8\xbe\x29\x63\x87\x29\x60\x0c\x18\x84\x1c\
\x00\x5b\x4d\x45\x00\x74\x03\x53\x98\xad\x94\xc5\x1c\xe7\x46\xe6\
\x1c\x00\xc8\x71\x5d\xa9\xa1\x08\x80\xfd\xfc\x56\x12\x73\x33\x01\
\x08\x35\x18\x42\xe8\xda\x7c\x8e\x29\xa8\x4e\x00\x5b\x00\x03\xc8\
\x98\x67\x36\x04\x00\x32\xe6\x85\xde\xf8\x17\x0b\xfc\x2c\xd8\x8a\
\x00\x18\x67\x3a\x4f\xb4\x54\x14\x23\x98\x02\x00\x02\x0c\x3e\xfb\
\xc5\x53\x28\xf0\x43\xb8\x66\x49\xf7\x6b\xf9\x52\x87\xd7\xbe\x54\
\x01\xc8\x55\x8f\xba\x4e\xad\x4b\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
\x3d\x4f\xa6\xb3\xde\xa3\xb1\x71\xeb\xda\xd0\xf5\x15\x98\xb3\x6e\
\xa9\x00\x6c\x34\xa4\x6b\x18\xff\xe0\x11\x7f\x5a\x17\x53\xd4\x13\
\x0b\x59\x6f\xe4\xee\xbd\xe2\xa5\xc1\xcb\x4b\x7c\x6d\x8c\x75\x87\
\x35\xa8\xfa\xb7\x1c\xdd\x65\xd9\x3c\x8f\x1f\x19\xfe\x9e\xcf\x1e\
\x37\xbd\xc9\xba\x78\x26\x6f\x46\x00\x68\xf2\xff\x81\x99\x94\x9e\
\xe9\x3f\xbf\x19\x01\x42\xd3\xf4\xfc\xbd\x9c\x9e\xa5\x7e\x03\x51\
\x6c\x25\xa1\x92\x95\x0a\x77\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\x6d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x06\x34\x49\x44\x41\x54\x78\x5e\xad\x97\x5b\x6c\x54\xc7\
\x1d\xc6\x7f\x73\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
\x69\x63\x24\x42\x4a\x21\x22\xa1\x2d\x95\x62\xa5\x2f\xee\x4b\x68\
\x2b\x95\xa6\x55\xa5\xc6\x60\x55\xaa\xda\xb4\xaa\xfa\x56\x09\x55\
\xca\x03\x94\x27\xda\x07\x84\x14\x29\xad\xc4\x8b\xa5\x52\x83\x79\
\x08\xc5\x18\x39\x0e\x69\xd3\x84\x9a\x9b\x63\x6a\xec\xb2\x04\x1b\
\x3b\xbb\xf6\x7a\x8f\xbd\xbb\xde\xb3\x67\xa6\xc3\x68\x85\xe5\x72\
\x6c\x88\xc9\x27\x7d\xfa\x9f\x9d\x87\xfd\x7e\xf3\x9f\x99\x73\x11\
\x4a\x29\x82\x24\x84\x78\x05\x78\x9e\xc7\x6b\x48\x29\xf5\x77\xd6\
\x28\x27\x20\xb8\x43\xbb\x01\x68\x97\x52\xbe\xc6\x63\x64\x59\xd6\
\x07\x1a\xf6\xbb\x40\xb7\x06\x39\xff\x14\x00\x26\xfc\xb7\xed\xf5\
\xe2\x60\x5d\x44\x44\x6e\xce\x89\x8a\x2b\x57\xae\x50\x5d\x53\x8d\
\x40\x00\xa0\x50\x08\x65\x28\x41\x29\x66\xd3\x69\x5e\xa9\x17\x2f\
\xbc\xb4\x4e\x6c\x3b\xf1\x1f\xb9\x47\x83\x7c\x5b\x43\x4c\x3c\x4d\
\x07\xf6\xff\x60\x8b\xdd\x2c\x25\xf8\x4a\x32\x3c\x3c\x4c\x65\x65\
\x25\x2b\xc9\x75\x5d\x1e\xc0\x6e\xa9\xb0\x22\x1b\xa2\x2a\x72\x3f\
\xa7\xea\x81\xb5\x03\x08\x2d\x05\x48\xa1\x0d\xf4\x5d\xbc\x48\x2e\
\x97\xc3\x2f\x16\x51\x4a\x91\xcf\xe7\x59\x5c\x5c\xa4\x50\x28\x50\
\xd4\x63\xb5\xb5\xb5\x94\x01\x58\x80\xf8\x82\xf6\x80\x01\x00\x36\
\x44\x05\x1f\x0f\xbc\x4b\x3e\x3b\x8f\x85\x44\x95\x32\xe2\xb6\xc4\
\xb6\x04\x21\x21\x70\x3e\x53\x6c\x8c\x3b\x80\x44\x2a\x04\xf0\x9c\
\x10\x02\xe0\xcb\x40\x05\x50\x0f\x34\x60\xc4\x48\x69\x9f\x24\x02\
\x01\x4e\x9c\x38\x21\x00\x81\x05\xd2\x87\x96\x96\x67\x09\x65\x6d\
\x14\xe5\x28\xa5\xb4\x41\x08\x58\x57\x19\x25\xe2\xd8\x44\x42\x16\
\xc3\x13\x73\x5c\xbc\x3d\x41\xf7\x58\x8e\x5c\x24\xbe\xa9\xbd\x7d\
\xf7\xef\x2d\xcb\x5a\xdc\xb1\x63\x47\x59\x55\x55\x95\xd3\xd8\xd8\
\x18\x7e\xe0\x86\x86\x86\xd0\xa5\x4b\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13\x44\x55\x2c\xc2\x73\xd5\x31\xf2\x9e\x4f\xa1\
\x28\x91\x4a\x61\x09\x41\xd8\xb1\x88\x86\x6c\xe6\x72\x05\x12\xa2\
\x8e\x3f\x9f\xff\x2b\x0d\x4d\x1b\x01\x22\xc0\x66\x96\x84\xef\xfb\
\x78\x9e\x47\x75\x75\xb5\x9e\x50\x4b\xf4\xea\xd5\xab\x87\x84\x10\
\x28\xa5\xde\x5a\x11\xc0\xb2\x41\x00\xb6\x2d\x90\xda\xb6\x14\x38\
\x08\xa4\x12\x58\xc2\x8c\x1b\x8f\x4c\xb9\xec\x7b\xf5\x3b\xd4\x37\
\x36\x11\x7c\x2f\xc1\x84\x67\x32\x19\xca\xcb\xcb\xcd\x66\x3e\x76\
\xec\xd8\x26\xbd\x7f\x0e\x2e\x41\x2c\x01\xd0\xd9\xd9\xa9\x0e\x1d\
\x3a\xa4\x6c\x21\x08\x59\x10\xb6\x2d\x1c\xc7\xc6\x42\x50\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x88\x38\x96\xae\x02\x60\x59\x78\x10\xc0\xdc\
\xdc\x1c\x35\x35\x35\x06\x20\x1a\x8d\x72\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7\x62\xb9\xb0\x6d\x74\x30\x66\x8d\xcb\x23\x36\
\xb1\xa8\xa3\xc7\x2c\x32\x8b\x1e\x93\x99\x1c\x63\xa9\x79\xee\xcc\
\x2e\xe8\xdf\x45\x72\xf9\x3c\xab\xc8\x2c\x41\x36\x9b\x35\xa7\x66\
\xe9\xff\x6d\x0e\x1c\x38\xb0\x1e\xe8\x00\x58\x06\xa0\xb4\x74\x16\
\x8e\x0d\xe1\x90\xc0\x53\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6\x7d\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb\x3a\x0d\x2f\xb4\x73\xfb\
\xce\x24\xfd\xfd\xfd\x24\x93\x49\x94\x52\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac\x33\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c\x3e\x7c\x98\xde\xde\
\x5e\x12\x89\x84\x04\x2c\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94\x4f\x6b\xc7\xcf\xf8\xe6\x2f\xdf\x26\xf6\xf5\x37\x99\x7c\xa6\
\x83\x6b\xfe\x2e\xae\xf1\x2d\x64\x6b\x17\xad\x7b\x7f\x4e\x5e\x56\
\x73\xfa\x6f\x67\xd1\x77\x4d\xee\xdc\x9d\xe2\x1b\xaf\x76\x72\xfd\
\xfa\x75\x03\xa0\x67\x6b\xd6\x3f\x16\x8b\x99\xeb\x78\x3c\x8e\xe3\
\x38\x25\x38\x04\xc0\x23\x00\x96\x25\x98\xca\x41\x3a\xde\xca\xfe\
\xdf\xbd\x4d\xd5\xae\xd7\x28\x84\x62\x08\xdb\x42\x59\x82\x6c\x41\
\x72\x7f\x66\x91\x4f\xee\x66\x18\xb8\xea\x72\xfa\x1f\x61\x64\xd5\
\x5e\xae\x8f\xdc\x67\x32\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19\x37\x35\x35\xa1\xa1\x14\x20\x83\x1f\
\x46\x16\xdc\x71\x15\xdf\xff\xe9\x6f\xa8\x6c\xd8\x48\xe2\xec\x3b\
\x4c\x8f\x5e\xc3\x89\x94\xb1\xb5\x79\x07\x9b\x5b\xb6\xf3\x49\x79\
\x25\x63\x09\x97\xcf\x66\xf2\xdc\x9d\xce\x32\xa1\xed\x88\x0d\x4c\
\x27\xe7\xd8\xb7\x2b\xca\xfa\x25\x00\x33\x7b\x3d\x6b\xea\xea\xea\
\x00\xcc\x75\x2a\x95\x32\x00\x4a\x2b\x10\xa0\xb9\x5a\x70\xe1\x9d\
\x63\x28\x2c\xca\xe6\xc6\xd9\x10\x8f\x52\x94\x92\x7b\xc3\x7d\x24\
\x65\x05\xdb\xda\x7f\x4c\x4d\xdb\xcb\x7c\x3c\x9c\x66\xd2\x5f\xc0\
\xcd\x78\x2c\xcc\x6b\x2f\x78\x20\x00\xb5\x74\x3a\x42\xa1\x90\x09\
\x2d\xdd\xea\x1f\
|
hiaselhans/OpenGlider
|
tests/test_patterns.py
|
Python
|
gpl-3.0
| 1,407
| 0.003554
|
import unittest
import tempfile
import os
import openglider
import openglider.plots
import openglider.plots.glider
from common import TestCase
TEMPDIR = tempfile.gettempdir()
class TestPlots(TestCase):
def setUp(self, complete=True):
        self.glider_2d = self.import_glider_2d()
self.glider_3d = self.glider_2d.get_glider_3d()
self.plotmaker = openglider.plots.PlotMaker(self.glider_3d)
@unittest.skip("not working")
def test_patterns_panels(self):
self.plotmaker.get_panels()
dwg = self.plotmaker.get_all_stacked()["panels"]
dwg.export_dxf(os.path.join(TEMPDIR, "test_panels.dxf"))
# Traceback (most recent call last):
# File "/home/travis/build/booya-at/OpenGlider/tests/test_patterns.py", line 22, in test_patterns_dribs
# dwg = self.plotmaker.get_all_stacked()["dribs"]
# AttributeError: 'PlotMaker' object has no attribute 'get_all_stacked'
@unittest.skip("not working")
def test_patterns_dribs(self):
self.plotmaker.get_dribs()
dwg = self.plotmaker.get_all_stacked()["dribs"]
dwg.export_dxf(os.path.join(TEMPDIR, "test_dribs.dxf"))
@unittest.skip("not working")
def test_patterns_ribs(self):
self.plotmaker.get_ribs()
dwg = self.plotmaker.get_all_stacked()["ribs"]
dwg.export_dxf(os.path.join(TEMPDIR, "test_ribs.dxf"))
if __name__ == "__main__":
unittest.main()
|
sgordon007/jcvi_062915
|
apps/gmap.py
|
Python
|
bsd-2-clause
| 5,206
| 0.000192
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Run GMAP/GSNAP commands. GMAP/GSNAP manual:
<http://research-pub.gene.com/gmap/src/README>
"""
import os.path as op
import sys
import logging
from jcvi.formats.sam import get_prefix
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh, \
get_abs_path
def main():
actions = (
('index', 'wraps gmap_build'),
('align', 'wraps gsnap'),
('gmap', 'wraps gmap'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def check_index(dbfile):
dbfile = get_abs_path(dbfile)
dbdir, filename = op.split(dbfile)
if not dbdir:
dbdir = "."
dbname = filename.rsplit(".", 1)[0]
safile = op.join(dbdir, "{0}/{0}.genomecomp".format(dbname))
if dbname == filename:
dbname = filename + ".db"
if need_update(dbfile, safile):
cmd = "gmap_build -D {0} -d {1} {2}".format(dbdir, dbname, filename)
sh(cmd)
else:
logging.error("`{0}` exists. `gmap_build` already run.".format(safile))
return dbdir, dbname
def index(args):
"""
    %prog index database.fasta

    Wrapper for `gmap_build`. Same interface.
"""
p = OptionParser(index.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
dbfile, = args
check_index(dbfile)
def gmap(args):
"""
%prog gmap database.fasta fastafile
Wrapper for `gmap`.
"""
p = OptionParser(gmap.__doc__)
p.add_option("--cross", default=False, action="store_true",
help="Cross-species alignment")
p.add_option("-
|
-npaths", default=0, type="int",
help="Maximum number of paths to show."
" If set to 0, prints two paths if chimera"
" detected, else one.")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
dbfile, fastafile = args
assert op.exists(dbfile) and op.exists(fastafile)
prefix = get_prefix(fastafile, dbfile)
logfile = prefix + ".log"
gmapfile = prefix + ".gmap.gff3"
    if not need_update((dbfile, fastafile), gmapfile):
logging.error("`{0}` exists. `gmap` already run.".format(gmapfile))
else:
dbdir, dbname = check_index(dbfile)
cmd = "gmap -D {0} -d {1}".format(dbdir, dbname)
cmd += " -f 2 --intronlength=100000" # Output format 2
cmd += " -t {0}".format(opts.cpus)
cmd += " --npaths {0}".format(opts.npaths)
if opts.cross:
cmd += " --cross-species"
cmd += " " + fastafile
sh(cmd, outfile=gmapfile, errfile=logfile)
return gmapfile, logfile
def align(args):
"""
%prog align database.fasta read1.fq read2.fq
Wrapper for `gsnap` single-end or paired-end, depending on the number of
args.
"""
from jcvi.formats.fasta import join
from jcvi.formats.fastq import guessoffset
p = OptionParser(align.__doc__)
p.add_option("--join", default=False, action="store_true",
help="Join sequences with padded 50Ns")
p.add_option("--rnaseq", default=False, action="store_true",
help="Input is RNA-seq reads, turn splicing on")
p.add_option("--snp", default=False, action="store_true",
help="Call SNPs after GSNAP")
p.set_home("eddyyeh")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) == 2:
logging.debug("Single-end alignment")
elif len(args) == 3:
logging.debug("Paired-end alignment")
else:
sys.exit(not p.print_help())
dbfile, readfile = args[0:2]
if opts.join:
dbfile = join([dbfile, "--gapsize=50", "--newid=chr1"])
assert op.exists(dbfile) and op.exists(readfile)
prefix = get_prefix(readfile, dbfile)
logfile = prefix + ".log"
gsnapfile = prefix + ".gsnap"
if not need_update((dbfile, readfile), gsnapfile):
logging.error("`{0}` exists. `gsnap` already run.".format(gsnapfile))
else:
dbdir, dbname = check_index(dbfile)
cmd = "gsnap -D {0} -d {1}".format(dbdir, dbname)
cmd += " -B 5 -m 0.1 -i 2 -n 3" # memory, mismatch, indel penalty, nhits
if opts.rnaseq:
cmd += " -N 1"
cmd += " -t {0}".format(opts.cpus)
cmd += " --gmap-mode none --nofails"
if readfile.endswith(".gz"):
cmd += " --gunzip"
try:
offset = "sanger" if guessoffset([readfile]) == 33 else "illumina"
cmd += " --quality-protocol {0}".format(offset)
except AssertionError:
pass
cmd += " " + " ".join(args[1:])
sh(cmd, outfile=gsnapfile, errfile=logfile)
if opts.snp:
EYHOME = opts.eddyyeh_home
pf = gsnapfile.rsplit(".", 1)[0]
nativefile = pf + ".unique.native"
if need_update(gsnapfile, nativefile):
cmd = op.join(EYHOME, "convert2native.pl")
cmd += " --gsnap {0} -o {1}".format(gsnapfile, nativefile)
cmd += " -proc {0}".format(opts.cpus)
sh(cmd)
return gsnapfile, logfile
if __name__ == '__main__':
main()
|
hogarthww/django-rest-test-data
|
rest_test_data/tests/test_base.py
|
Python
|
gpl-2.0
| 4,061
| 0
|
from django.core.serializers import json
from django.http import HttpResponseNotFound, HttpResponse
from django.views.generic import View
from rest_test_data.models import Simple
from rest_test_data.views import BaseTestDataRestView
from nose.tools import assert_equal, assert_is_instance
from mock import Mock, patch
def create_request(body=None):
request = Mock()
request.body = body
return request
def test_dispatch_model_not_found():
view = BaseTestDataRestView()
result = view.dispatch(None, app='something', model='notfoundmodel')
assert_is_instance(result, HttpResponseNotFound)
@patch.object(View, 'dispatch')
def test_dispatch_model_found(dispatch):
dispatch.return_value = ''
view = BaseTestDataRestView()
view.dispatch(create_request(), app='rest_test_data', model='simple')
assert_equal(view.model, Simple)
assert_equal(dispatch.call_count, 1)
@patch.object(BaseTestDataRestView, 'get_object')
@patch.object(View, 'dispatch')
def test_dispatch_get_object(dispatch, get_object):
dispatch.return_value = ''
view = BaseTestDataRestView()
result = view.dispatch(
create_request(),
app='rest_test_data',
model='simple',
pk='1'
)
get_object.assert_called_once_with(1, model=Simple)
    assert_is_instance(result, HttpResponse)
assert_equal(dispatch.call_count, 1)
@patch.object(BaseTestDataRestView, 'get_object')
def test_dispatch_get_object_failure(get_object):
get_object.side_effect = Exception
view = BaseTestDataRestView()
result = view.dispatch(None, app='rest_test_data', model='simple', pk='1')
get_object.assert_called_once_with(1, model=Simple)
assert_is_instance(result, HttpResponseNotFound)
def test_get_serializer():
view = BaseTestDataRestView()
assert_is_instance(view.serializer, json.Serializer)
@patch.object(View, 'dispatch')
def test_dispatch_wraps_string_result(dispatch):
dispatch.return_value = 'result!'
view = BaseTestDataRestView()
result = view.dispatch(
create_request(),
app='rest_test_data',
model='simple'
)
assert_is_instance(result, HttpResponse)
assert_equal(result['Content-Type'], 'application/json')
assert_equal(result.content, b'result!')
@patch.object(View, 'dispatch')
def test_dispatch_passes_http_response(dispatch):
dispatch.return_value = HttpResponse()
view = BaseTestDataRestView()
result = view.dispatch(
create_request(),
app='rest_test_data',
model='simple'
)
assert_equal(result, dispatch.return_value)
@patch.object(View, 'dispatch')
def test_dispatch_jsons_other(dispatch):
dispatch.return_value = {'test': 'data'}
view = BaseTestDataRestView()
result = view.dispatch(
create_request(),
app='rest_test_data',
model='simple'
)
assert_is_instance(result, HttpResponse)
assert_equal(result['Content-Type'], 'application/json')
assert_equal(result.content, b'{"test": "data"}')
def test_get_object_model():
model = Mock(**{'objects.get.return_value': 'object'})
assert_equal(BaseTestDataRestView.get_object(1, model), 'object')
model.objects.get.assert_called_once_with(pk=1)
@patch('rest_test_data.views.get_model')
def test_get_object_from_string(get_model):
BaseTestDataRestView.get_object('app.model:1')
get_model.assert_called_once_with('app', 'model')
get_model().objects.get.assert_called_once_with(pk=1)
@patch.object(BaseTestDataRestView, 'get_object')
def test_get_data(get_object):
result = BaseTestDataRestView.get_data({'data': {'test': 1},
'objects': {
'test_2': 'app.model:1',
'test_3': ['app.model:1'],
}})
get_object.assert_called_with('app.model:1')
assert_equal(result, {
'test': 1,
'test_2': get_object(),
'test_3': [get_object()]
})
|
foursquare/pants
|
tests/python/pants_test/backend/python/tasks/test_python_run_integration.py
|
Python
|
apache-2.0
| 8,737
| 0.008699
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from pex.pex_bootstrapper import get_pex_info
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_daemon
from pants_test.testutils.pexrc_util import setup_pexrc_with_pex_python_path
class PythonRunIntegrationTest(PantsRunIntegrationTest):
testproject = 'testprojects/src/python/interpreter_selection'
@ensure_daemon
def test_run_3(self):
self._maybe_run_version('3')
@ensure_daemon
def test_run_27(self):
self._maybe_run_version('2.7')
  def test_run_27_and_then_3(self):
if self.skip_if_no_python('2.7') or self.skip_if_no_python('3'):
      return
with temporary_dir() as interpreters_cache:
pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
pants_run_27 = self.run_pants(
command=['run', '{}:echo_interpreter_version_2.7'.format(self.testproject)],
config=pants_ini_config
)
self.assert_success(pants_run_27)
pants_run_3 = self.run_pants(
command=['run', '{}:echo_interpreter_version_3'.format(self.testproject),
'--python-setup-interpreter-constraints=CPython>=2.7,<3',
'--python-setup-interpreter-constraints=CPython>=3.3'],
config=pants_ini_config
)
self.assert_success(pants_run_3)
def test_die(self):
command = ['run',
'{}:die'.format(self.testproject),
'--python-setup-interpreter-constraints=CPython>=2.7,<3',
'--python-setup-interpreter-constraints=CPython>=3.3',
'--quiet']
pants_run = self.run_pants(command=command)
assert pants_run.returncode == 57
def test_get_env_var(self):
var_key = 'SOME_MAGICAL_VAR'
var_val = 'a value'
command = ['-q',
'run',
'testprojects/src/python/print_env',
'--',
var_key]
pants_run = self.run_pants(command=command, extra_env={var_key: var_val})
self.assert_success(pants_run)
self.assertEquals(var_val, pants_run.stdout_data.strip())
def test_pants_run_interpreter_selection_with_pexrc(self):
py27 = '2.7'
py3 = '3'
if self.skip_if_no_python(py27) or self.skip_if_no_python(py3):
return
py27_path, py3_path = self.python_interpreter_path(py27), self.python_interpreter_path(py3)
with setup_pexrc_with_pex_python_path(os.path.join(os.path.dirname(sys.argv[0]), '.pexrc'), [py27_path, py3_path]):
with temporary_dir() as interpreters_cache:
pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
pants_run_27 = self.run_pants(
command=['run', '{}:main_py2'.format(os.path.join(self.testproject, 'python_3_selection_testing'))],
config=pants_ini_config
)
self.assert_success(pants_run_27)
# Interpreter selection for Python 2 is problematic in CI due to multiple virtualenvs in play.
if not os.getenv('CI'):
self.assertIn(py27_path.split(py27)[0], pants_run_27.stdout_data)
pants_run_3 = self.run_pants(
command=['run', '{}:main_py3'.format(os.path.join(self.testproject, 'python_3_selection_testing'))],
config=pants_ini_config
)
self.assert_success(pants_run_3)
# Protection for when the sys.executable path underlies a symlink pointing to 'python' without '3'
# at the end of the basename.
self.assertIn(py3_path.split(py3)[0], pants_run_3.stdout_data)
def test_pants_binary_interpreter_selection_with_pexrc(self):
py27 = '2.7'
py3 = '3'
if self.skip_if_no_python(py27) or self.skip_if_no_python(py3):
return
py27_path, py3_path = self.python_interpreter_path(py27), self.python_interpreter_path(py3)
with setup_pexrc_with_pex_python_path(os.path.join(os.path.dirname(sys.argv[0]), '.pexrc') , [py27_path, py3_path]):
with temporary_dir() as interpreters_cache:
pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
pants_run_27 = self.run_pants(
command=['binary', '{}:main_py2'.format(os.path.join(self.testproject, 'python_3_selection_testing'))],
config=pants_ini_config
)
self.assert_success(pants_run_27)
pants_run_3 = self.run_pants(
command=['binary', '{}:main_py3'.format(os.path.join(self.testproject, 'python_3_selection_testing'))],
config=pants_ini_config
)
self.assert_success(pants_run_3)
# Ensure proper interpreter constraints were passed to built pexes.
py2_pex = os.path.join(os.getcwd(), 'dist', 'main_py2.pex')
py3_pex = os.path.join(os.getcwd(), 'dist', 'main_py3.pex')
py2_info = get_pex_info(py2_pex)
py3_info = get_pex_info(py3_pex)
self.assertIn('CPython>2.7.6,<3', py2_info.interpreter_constraints)
self.assertIn('CPython>3', py3_info.interpreter_constraints)
# Cleanup created pexes.
os.remove(py2_pex)
os.remove(py3_pex)
def test_target_constraints_with_no_sources(self):
with temporary_dir() as interpreters_cache:
# Run task.
py3 = '3'
if not self.skip_if_no_python(py3):
pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
pants_run_3 = self.run_pants(
command=['run', '{}:test_bin'.format(os.path.join(self.testproject, 'test_target_with_no_sources'))],
config=pants_ini_config
)
self.assert_success(pants_run_3)
self.assertIn('python3', pants_run_3.stdout_data)
# Binary task.
pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
pants_run_27 = self.run_pants(
command=['binary', '{}:test_bin'.format(os.path.join(self.testproject, 'test_target_with_no_sources'))],
config=pants_ini_config
)
self.assert_success(pants_run_27)
# Ensure proper interpreter constraints were passed to built pexes.
py2_pex = os.path.join(os.getcwd(), 'dist', 'test_bin.pex')
py2_info = get_pex_info(py2_pex)
self.assertIn('CPython>3', py2_info.interpreter_constraints)
# Cleanup.
os.remove(py2_pex)
def skip_if_no_python(self, version):
if not self.has_python_version(version):
msg = 'No python {} found. Skipping.'.format(version)
print(msg)
self.skipTest(msg)
return True
return False
def _maybe_run_version(self, version):
if self.skip_if_no_python(version):
return
echo = self._run_echo_version(version)
v = echo.split('.') # E.g., 2.7.13.
self.assertTrue(len(v) > 2, 'Not a valid version string: {}'.format(v))
expected_components = version.split('.')
self.assertEquals(expected_components, v[:len(expected_components,)])
def _run_echo_version(self, version):
binary_name = 'echo_interpreter_version_{}'.format(version)
binary_target = '{}:{}'.format(self.testproject, binary_name)
# Build a pex.
# Avoid some known-to-choke-on interpreters.
command = ['run',
binary_target,
'--python-setup-interpreter-constraints=CPython>=2.7,<3',
'--python-setup-interpreter-constraints=CPython>=3.3',
'--quiet']
pants_run = self.run_pants(command=command)
return pants_run.stdout_data.rstrip().split('\n')[-1]
def test_pex_resolver_blacklist_integration(self):
py3 = '3'
if self.skip_if_no_python(py3):
return
pex = os.path.join(os.getcwd(), 'dist', 'test_bin.pex')
try:
pants_ini_config = {'python-setup': {'resolver_blacklist': {'functools32': 'CPython>3'}}}
target_address_base = os.path.join(self.testproject, 'resolver_blacklist_testing')
# clean-all to ensure that Pants resolves requirements for each run.
pants_binary_36 = self.run_pants(
command=['clean-all', 'binary', '{}:test_bin'.format(target_addr
|
Og192/Python
|
theano/Loop/loopForShareVariable.py
|
Python
|
gpl-2.0
| 683
| 0.014641
|
import theano
from theano import tensor as T
W = theano.shared(W_values)
bvis = theano.shared(bvis_values)
bhid = theano.shared(bhid_values)
trng = T.shared_randomstreams.RandomStreams(1234)
def OneStep(vsample) :
hmean = T.nnet.sigmoid(theano.dot(vsample, W) + bhid)
hsample = trng.binomial(size=hmean.shape, n=1, p=hmean)
vmean = T.nnet.sigmoid(theano.dot(hsample, W.T) + bvis)
return trng.binomial(size=vsample.shape, n=1, p=vmean,
dtype=theano.config.floatX)
sample = theano.tensor.vector()
values, updates = theano.scan(OneStep, outputs_info = sample, n_steps = 10)
gibbs10 = theano.function([sample], values[-1], updates = updates)
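# Usage sketch (added for illustration; this fragment never defines W_values,
# bvis_values or bhid_values, so assume they are numpy arrays of compatible
# shapes):
#
#   import numpy
#   v0 = numpy.random.binomial(1, 0.5, size=bvis_values.shape).astype(theano.config.floatX)
#   v10 = gibbs10(v0)   # visible sample after 10 Gibbs steps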
|
sasha-gitg/python-aiplatform
|
.sample_configs/param_handlers/create_hyperparameter_tuning_job_python_package_sample.py
|
Python
|
apache-2.0
| 3,158
| 0.0019
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_parent(parent: str) -> str:
# Sample function parameter parent in create_hyperparameter_tuning_job_using_python_package_sample
parent = parent
return parent
def make_hyperparameter_tuning_job(
display_name: str, executor_image_uri: str, package_uri: str, python_module: str,
) -> google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job.HyperparameterTuningJob:
# study_spec
metric = {
"metric_id": "val_rmse",
"goal": aiplatform.gapic.StudySpec.MetricSpec.GoalType.MINIMIZE,
}
conditional_parameter_decay = {
"parameter_spec": {
"parameter_id": "decay",
"double_value_spec": {"min_value": 1e-07, "max_value": 1},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
"parent_discrete_values": {"values": [32, 64]},
}
conditional_parameter_learning_rate = {
"parameter_spec": {
"parameter_id": "learning_rate",
"double_value_spec": {"min_value": 1e-07, "max_value": 1},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
"paren
|
t_discrete_values": {"values": [4, 8, 16]},
}
parameter = {
"parameter_id": "batch_size",
"discrete_value_spec": {"values": [4, 8, 16, 32, 64, 128]},
"scale_type": aiplatform.gapic.StudySpec.Paramete
|
rSpec.ScaleType.UNIT_LINEAR_SCALE,
"conditional_parameter_specs": [
conditional_parameter_decay,
conditional_parameter_learning_rate,
],
}
# trial_job_spec
machine_spec = {
"machine_type": "n1-standard-4",
"accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
"accelerator_count": 1,
}
worker_pool_spec = {
"machine_spec": machine_spec,
"replica_count": 1,
"python_package_spec": {
"executor_image_uri": executor_image_uri,
"package_uris": [package_uri],
"python_module": python_module,
"args": [],
},
}
# hyperparameter_tuning_job
hyperparameter_tuning_job = {
"display_name": display_name,
"max_trial_count": 4,
"parallel_trial_count": 2,
"study_spec": {
"metrics": [metric],
"parameters": [parameter],
"algorithm": aiplatform.gapic.StudySpec.Algorithm.RANDOM_SEARCH,
},
"trial_job_spec": {"worker_pool_specs": [worker_pool_spec]},
}
return hyperparameter_tuning_job
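# Illustration only (not part of the original sample): the dict built above is
# what a caller would typically hand to the Vertex AI job service, roughly:
#
#   from google.cloud import aiplatform
#   client = aiplatform.gapic.JobServiceClient(
#       client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
#   )
#   client.create_hyperparameter_tuning_job(
#       parent=make_parent("projects/<project>/locations/us-central1"),
#       hyperparameter_tuning_job=make_hyperparameter_tuning_job(
#           display_name, executor_image_uri, package_uri, python_module
#       ),
#   )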
|
ARM-software/lisa
|
lisa/tests/base.py
|
Python
|
apache-2.0
| 77,167
| 0.001089
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import enum
import functools
import os
import os.path
import abc
import sys
import textwrap
import re
import inspect
import copy
import contextlib
import itertools
import types
import warnings
from operator import attrgetter
from datetime import datetime
from collections import OrderedDict, ChainMap
from collections.abc import Mapping
from inspect import signature
import IPython.display
from devlib.collector.dmesg import KernelLogEntry
from devlib import TargetStableError
from lisa.analysis.tasks import TasksAnalysis
from lisa.analysis.rta import RTAEventsAnalysis
from lisa.trace import requires_events, TraceEventCheckerBase, AndTraceEventChecker
from lisa.trace import Trace, TaskID
from lisa.wlgen.rta import RTA, PeriodicWload, RTAPhase, leaf_precedence
from lisa.target import Target
from lisa.utils import (
Serializable, memoized, lru_memoized, ArtifactPath, non_recursive_property,
update_wrapper_doc, ExekallTaggable, annotations_from_signature,
get_sphinx_name, optional_kwargs, group_by_value, kwargs_dispatcher,
dispatch_kwargs, Loggable, kwargs_forwarded_to, docstring_update,
is_running_ipython,
)
from lisa.datautils import df_filter_task_ids
from lisa.trace import FtraceCollector, FtraceConf, DmesgCollector, ComposedCollector
from lisa.conf import (
SimpleMultiSrcConf, KeyDesc, TopLevelKeyDesc,
)
from lisa._generic import TypedList
from lisa.pelt import pelt_settling_time
def _nested_formatter(multiline):
def sort_mapping(data):
if isinstance(data, Mapping):
# Ensure stable ordering of keys if possible
try:
data = OrderedDict(sorted(data.items()))
except TypeError:
data = data
return data
if multiline:
def format_data(data, level=0):
idt = '\n' + ' ' * 4 * level
def indent(s):
stripped = s.strip()
if '\n' in stripped:
return idt + stripped.replace('\n', idt)
else:
return stripped
if isinstance(data, TestMetric):
out = data.pretty_format(multiline=multiline)
out = indent(out) if '\n' in out else out
elif isinstance(data, Mapping):
data = sort_mapping(data)
body = '\n'.join(
f'{key}: {format_data(data, level + 1)}'
for key, data in data.items()
)
out = indent(body)
else:
                out = str(data)
return out
else:
def format_data(data):
# Handle recursive mappings, like metrics of AggregatedResultBundle
if isinstance(data, Mapping):
data = sort_mapping(data)
return '{' + ', '.join(
f'{key}={format_data(data)}'
for key, data in data.items()
) + '}'
else:
return str(data)
return format_data
class TestMetric:
"""
A storage class for metrics used by tests
:param data: The data to store. Can be any base type or dict(TestMetric)
:param units: The data units
:type units: str
"""
def __init__(self, data, units=None):
self.data = data
self.units = units
def __str__(self):
return self.pretty_format(multiline=False)
def pretty_format(self, multiline=True):
"""
Pretty print the metrics.
:param multiline: If ``True``, use a multiline format.
:type multiline: bool
"""
format_data = _nested_formatter(multiline=multiline)
result = format_data(self.data)
if self.units:
result += ' ' + self.units
return result
def __repr__(self):
return f'{type(self).__name__}({self.data}, {self.units})'
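# Minimal usage sketch (added for illustration; not in the original source):
# metrics can nest, and str() renders them through _nested_formatter.
#
#   m = TestMetric({'count': TestMetric(3), 'util': TestMetric(42, units='%')})
#   str(m)   # -> "{count=3, util=42 %}"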
@enum.unique
class Result(enum.Enum):
"""
A classification of a test result
"""
PASSED = 1
"""
The test has passed
"""
FAILED = 2
"""
The test has failed
"""
UNDECIDED = 3
"""
The test data could not be used to decide between :attr:`PASSED` or :attr:`FAILED`
"""
SKIPPED = 4
"""
The test does not make sense on this platform and should therefore be skipped.
.. note:: :attr:`UNDECIDED` should be used when the data are inconclusive
but the test still makes sense on the target.
"""
@property
def lower_name(self):
"""Return the name in lower case"""
return self.name.lower()
class ResultBundleBase(Exception):
"""
Base class for all result bundles.
.. note:: ``__init__`` is not provided as some classes uses properties to
provide some of the attributes.
"""
def __bool__(self):
"""
``True`` if the ``result`` is :attr:`Result.PASSED`, ``False``
otherwise.
"""
return self.result is Result.PASSED
def __str__(self):
return self.pretty_format(multiline=False)
def pretty_format(self, multiline=True):
format_data = _nested_formatter(multiline=multiline)
metrics_str = format_data(self.metrics)
if '\n' in metrics_str:
idt = '\n' + ' ' * 4
metrics_str = metrics_str.replace('\n', idt)
else:
metrics_str = ': ' + metrics_str
return self.result.name + metrics_str
def _repr_pretty_(self, p, cycle):
"Pretty print instances in Jupyter notebooks"
p.text(self.pretty_format())
def add_metric(self, name, data, units=None):
"""
Lets you append several test :class:`TestMetric` to the bundle.
:Parameters: :class:`TestMetric` parameters
"""
self.metrics[name] = TestMetric(data, units)
def display_and_exit(self) -> type(None):
print(f"Test result: {self}")
if self:
sys.exit(0)
else:
sys.exit(1)
class ResultBundle(ResultBundleBase):
"""
Bundle for storing test results
:param result: Indicates whether the associated test passed.
It will also be used as the truth-value of a ResultBundle.
:type result: :class:`Result`
:param utc_datetime: UTC time at which the result was collected, or
``None`` to record the current datetime.
:type utc_datetime: datetime.datetime
:param context: Contextual information to attach to the bundle.
Keep the content small, as size of :class:`ResultBundle` instances
matters a lot for storing long test sessions results.
:type context: dict(str, object)
:class:`TestMetric` can be added to an instance of this class. This can
make it easier for users of your tests to understand why a certain test
passed or failed. For instance::
def test_is_noon():
now = time.localtime().tm_hour
res = ResultBundle(Result.PASSED if now == 12 else Result.FAILED)
res.add_metric("current time", now)
return res
>>> res_bundle = test_is_noon()
>>> print(res_bundle.result.name)
FAILED
# At this point, the user can wonder why the test failed.
# Metrics are here to help, and are printed along with the result:
>>> print(res_bundle)
FAILED: current time=11
"""
def __init__(self, result, utc_datetime=None, context=None):
self.result = result
self.metrics = {}
self.utc_datetime = utc_datetime or da
|
botswana-harvard/bcvp
|
bcvp/bcvp_subject/admin/subject_locator_admin.py
|
Python
|
gpl-2.0
| 1,944
| 0.002058
|
from django.contrib import admin
from edc_registration.models import RegisteredSubject
from edc_locator.admin import BaseLocatorModelAdmin
from ..forms import SubjectLocatorForm
from ..models import SubjectLocator
class SubjectLocatorAdmin(BaseLocatorModelAdmin):
form = SubjectLocatorForm
fields = (
'registered_subject',
'report_datetime',
'date_signed',
'mail_address',
'home_visit_permission',
'physical_address',
'may_follow_up',
'subject_cell',
'subject_cell_alt',
'subject_phone',
'subject_phone_alt',
'may_call_work',
'subject_work_place',
'subject_work_phone',
'may_contact_someone',
'contact_name',
'contact_rel',
'contact_physical_address',
'contact_cell',
'contact_phone',
        'successful_mode_of_contact')
list_display = ('may_follow_up', 'may_call_work')
list_filter = ('may_follow_up', 'may_call_work')
search_fields = (
        'registered_subject__subject_identifier', 'subject_cell', 'subject_cell_alt',
'subject_phone', 'subject_phone_alt', 'subject_work_place', 'subject_work_phone')
radio_fields = {"home_visit_permission": admin.VERTICAL,
"may_follow_up": admin.VERTICAL,
"may_call_work": admin.VERTICAL,
"may_contact_someone": admin.VERTICAL,
'successful_mode_of_contact': admin.VERTICAL}
actions = [] # do not allow export to CSV
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "registered_subject":
kwargs["queryset"] = RegisteredSubject.objects.filter(id__exact=request.GET.get('registered_subject', 0))
return super(SubjectLocatorAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
admin.site.register(SubjectLocator, SubjectLocatorAdmin)
|
chenfengyuan/download-youku-video
|
main.py
|
Python
|
mit
| 2,621
| 0.002289
|
#!/usr/bin/env python3
# coding=utf-8
__author__ = 'chenfengyuan'
import tornado.gen
import re
import tornado.log
import tornado.ioloop
import tornado.options
import youku
import sys
import os
import utils
import tqdm
import math
import decimal
import tornado.httpclient
import shutil
import argparse
def main():
parser = argparse.ArgumentParser(description='download youku videos')
parser.add_argument('urls', type=str, nargs='+',
help='urls to download')
    parser.add_argument('--skip', type=int, help='skip first n urls', default=0)
args = parser.parse_args()
tornado.options.parse_config_file('/dev/null')
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
io = tornado.ioloop.IOLoop.instance()
@tornado.gen.coroutine
def dummy():
skipped = 0
        for raw_url in args.urls:
for url in (yield youku.Youku.get_videos(raw_url)):
print(url)
continue
skipped += 1
if skipped <= args.skip:
continue
data = yield youku.Youku.get_video_name_and_download_urls(url)
directory = data[0].replace('/', '_')
output_basename = directory
if os.path.exists(output_basename + '.flv') or os.path.exists(output_basename + '.mp4'):
continue
print('Downloading %s' % directory)
urls = data[1]
if not os.path.exists(directory):
os.mkdir(directory)
process = tqdm.tqdm(range(len(urls)), leave=True, mininterval=0)
template = '%%0%dd.%%s' % math.ceil(decimal.Decimal(len(urls)).log10())
video_files = []
for i, durl in enumerate(urls):
file_suffix = re.search(r'st/(\w+)/fileid', durl).group(1)
try:
next(process)
except StopIteration:
pass
path = os.path.join(directory, template % ((i + 1), file_suffix))
video_files.append(path)
yield utils.download_to_file(path, durl)
else:
try:
next(process)
except StopIteration:
pass
utils.merge_videos(video_files, output_basename)
shutil.rmtree(directory)
sys.stderr.write('\n')
io.run_sync(dummy)
if __name__ == '__main__':
main()
|
rkk09c/Flask_Boilerplate
|
db_create.py
|
Python
|
mit
| 484
| 0.008264
|
#!flask/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
|
aldebaran/qibuild
|
python/qibuild/actions/__init__.py
|
Python
|
bsd-3-clause
| 365
| 0.00274
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" This package contains the qibuild actions. """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
|
ccqpein/Arithmetic-Exercises
|
Add-Digits/add_digits.py
|
Python
|
apache-2.0
| 260
| 0.015385
|
#! /usr/bin/env python
# -*- coding=utf-8 -*-
def addDigits(num):
while(1):
sum= 0
for i in xrange(len(str(num))):
sum+= int(str(num)[i])
if len(str(sum))== 1:
return sum
else:
num= sum
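# Hedged usage sketch (added for illustration): addDigits computes the digital root by
# repeatedly summing decimal digits, e.g. 38 -> 3 + 8 = 11 -> 1 + 1 = 2.
if __name__ == '__main__':
    print addDigits(38)   # expected output: 2
    print addDigits(0)    # expected output: 0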
|
activityworkshop/Murmeli
|
murmeli/pages/messages.py
|
Python
|
gpl-2.0
| 4,728
| 0.005076
|
'''Module for the messages pageset'''
from murmeli.pages.base import PageSet
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.contactmgr import ContactManager
from murmeli.messageutils import MessageTree
from murmeli import inbox
class MessagesPageSet(PageSet):
'''Messages page set, for showing list of messages etc'''
def __init__(self, system):
PageSet.__init__(self, system, "messages")
self.messages_template = PageTemplate('messages')
def serve_page(self, view, url, params):
'''Serve a page to the given view'''
print("Messages serving page", url, "params:", params)
self.require_resources(['button-compose.png', 'default.css', 'avatar-none.jpg'])
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
dbutils.export_all_avatars(database, self.get_web_cache_dir())
self._process_command(url, params)
# Make dictionary to convert ids to names
        contact_names = {cont['torid']:cont['displayName'] for cont in database.get_profiles()}
unknown_sender = self.i18n("messages.sender.unknown")
unknown_recpt = self.i18n("messages.recpt.unknown")
message_list = database.get_inbox() if database else []
conreqs = []
conresps = []
mail_tree = MessageTree()
for msg in message_list:
if not msg or msg.get(inbox.FN_DELETED):
continue
timestamp = msg.get(inbox.FN_TIMESTAMP)
msg[inbox.FN_SENT_TIME_STR] = self.make_local_time_string(timestamp)
msg_type = msg.get(inbox.FN_MSG_TYPE)
# Lookup sender name for display
sender_id = msg.get(inbox.FN_FROM_ID)
if not msg.get(inbox.FN_FROM_NAME):
msg[inbox.FN_FROM_NAME] = contact_names.get(sender_id, unknown_sender)
if msg_type in ["contactrequest", "contactrefer"]:
conreqs.append(msg)
elif msg_type == "contactresponse":
msg[inbox.FN_MSG_BODY] = self.fix_conresp_body(msg.get(inbox.FN_MSG_BODY),
msg.get(inbox.FN_ACCEPTED))
conresps.append(msg)
elif msg_type == "normal":
recpts = msg.get(inbox.FN_RECIPIENTS)
if recpts:
reply_all = recpts.split(",")
recpt_name_list = [contact_names.get(i, unknown_recpt) for i in reply_all]
msg[inbox.FN_RECIPIENT_NAMES] = ", ".join(recpt_name_list)
reply_all.append(sender_id)
msg[inbox.FN_REPLY_ALL] = ",".join(reply_all)
mail_tree.add_msg(msg)
mails = mail_tree.build()
num_msgs = len(conreqs) + len(conresps) + len(mails)
bodytext = self.messages_template.get_html(self.get_all_i18n(),
{"contactrequests":conreqs,
"contactresponses":conresps,
"mails":mails, "nummessages":num_msgs,
"webcachedir":self.get_web_cache_dir()})
contents = self.build_page({'pageTitle':self.i18n("messages.title"),
'pageBody':bodytext,
'pageFooter':"<p>Footer</p>"})
view.set_html(contents)
def _process_command(self, url, params):
'''Process a command given by the url and params'''
database = self.system.get_component(self.system.COMPNAME_DATABASE)
if url == 'send':
if params.get('messageType') == "contactresponse":
if params.get('accept') == "1":
crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
ContactManager(database, crypto).handle_accept(params.get('sendTo'),
params.get('messageBody'))
else:
ContactManager(database, None).handle_deny(params.get('sendTo'))
elif url == 'delete':
msg_index = self.get_param_as_int(params, 'msgId')
if msg_index >= 0 and not database.delete_from_inbox(msg_index):
print("Delete of inbox message '%d' failed" % msg_index)
def fix_conresp_body(self, msg_body, accepted):
'''If a contact response message has a blank message body, replace it'''
if msg_body:
return msg_body
suffix = "acceptednomessage" if accepted else "refused"
return self.i18n("messages.contactrequest." + suffix)
|
jaggu303619/asylum
|
openerp/addons/l10n_in_hr_payroll/report/report_payslip_details.py
|
Python
|
agpl-3.0
| 1,644
| 0.00365
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.addons.hr_payroll import report
class payslip_details_report_in(report.report_payslip_details.payslip_details_report):
    def __init__(self, cr, uid, name, context):
super(payslip_details_report_in, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_details_by_rule_category': self.get_details_by_rule_category,
})
report_sxw.report_sxw('report.paylip.details.in', 'hr.payslip', 'l10n_in_hr_payroll/report/report_payslip_details.rml', parser=payslip_details_report_in)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
feredean/cs313
|
notes/7_puzzle.py
|
Python
|
mit
| 2,458
| 0.004882
|
"""
UNIT 2: Logic Puzzle
You will write code to solve the following logic puzzle:
1. The person who arrived on Wednesday bought the laptop.
2. The programmer is not Wilkes.
3. Of the programmer and the person who bought the droid,
one is Wilkes and the other is Hamming.
4. The writer is not Minsky.
5. Neither Knuth nor the person who bought the tablet is the manager.
6. Knuth arrived the day after Simon.
7. The person who arrived on Thursday is not the designer.
8. The person who arrived on Friday didn't buy the tablet.
9. The designer didn't buy the droid.
10. Knuth arrived the day after the manager.
11. Of the person who bought the laptop and Wilkes,
one arrived on Monday and the other is the writer.
12. Either the person who bought the iphone or the person who bought the tablet
arrived on Tuesday.
You will write the function logic_puzzle(), which should return a list of the
names of the people in the order in which they arrive. For example, if they
happen to arrive in alphabetical order, Hamming on Monday, Knuth on Tuesday, etc.,
then you would return:
['Hamming', 'Knuth', 'Minsky', 'Simon', 'Wilkes']
(You can assume that the days mentioned are all in the same week.)
"""
import itertools
def day_after(first, second):
return first - 1 == second
def logic_puzzle():
days = [ monday, tuesday, wednesday, thursday, friday ] = [ 0, 1, 2, 3, 4 ]
orderings = list(itertools.permutations(days))
people = ('Wilkes', 'Minsky', 'Hamming', 'Knuth', 'Simon')
order = next((Wilkes, Minsky, Hamming, Knuth, Simon)
for (laptop, droid, tablet, iphone, _) in orderings
for (Wilkes, Minsky, Hamming, Knuth, Simon) in orderings
for (programmer, writer, manager, designer, _) in orderings
if wednesday == laptop
if programmer != Wilkes
if (programmer == Wilkes and droid == Hamming) or (programmer == Hamming and Wilkes == droid)
if writer != Minsky
if tablet != manager and Knuth != manager
if thursday != designer
if designer != droid
if friday != tablet
if day_after(Knuth, manager)
                 if day_after(Knuth, Simon)
if (Wilkes == monday and laptop == writer) or (laptop == monday and Wilkes == writer)
                 if iphone == tuesday or tablet == tuesday
)
result = []
print order
for pers in range(5):
result.append(people[order[pers]])
return result
print logic_puzzle()
|
jason-weirather/IDP-fusion-release-1
|
bin/Bfile.py
|
Python
|
apache-2.0
| 4,958
| 0.015934
|
#!/usr/bin/python
import sys
import os
from numpy import *
from scipy import stats
if len(sys.argv) >= 4 :
ref_filename = sys.argv[1]
tag_filename =sys.argv[2]
Npt = int(sys.argv[3])
Nbin = int(sys.argv[4])
else:
print("usage: ~/3seq/bin/exp_len_density.py multiexon_refFlat.txt_positive_known_intact_SM.fa.bestpsl.gpd_refFlat.txt_exp_len multiexon_refFlat.txt_positive_known_intact_SM.fa.bestpsl.gpd_refFlat.txt_exp_len 100")
print("or ")
sys.exit(1)
################################################################################
ref = open(ref_filename,'r')
len_dt = {}
for line in ref:
ls = line.strip().split("\t")
L = int(ls[2])
if not len_dt.has_key(L):
len_dt[L]=[]
len_dt[L].append(ls)
ref.close()
################################################################################
def getdensity(len_ls,len_dt, L,Npt):
result= []
index = searchsorted(len_ls,L,side='right')
left_index = index - 1
right_index = index
left_L = len_ls[left_index]
right_L = len_ls[right_index]
r_left_L = L - left_L
r_right_L = right_L - L
left_iso_ls = []
right_iso_ls = []
if left_L > smallnum:
left_iso_ls =len_dt[left_L]
if right_L < largenum:
right_iso_ls = len_dt[right_L]
len_left_iso_ls = len(left_iso_ls)
len_right_iso_ls = len(right_iso_ls)
if len_left_iso_ls + len_right_iso_ls > Npt:
if r_left_L < r_right_L:
if len_left_iso_ls > Npt:
return left_iso_ls[:Npt]
else:
result.extend(left_iso_ls)
result.extend(right_iso_ls[:Npt-len_left_iso_ls])
return result
else:
if len_right_iso_ls > Npt:
                return right_iso_ls[:Npt]
else:
result.extend(right_iso_ls)
result.extend(left_iso_ls[:Npt-len_right_iso_ls])
return result
n = len(result)
while len(result)<Npt:
if r_left_L < r_right_L:
while r_left_L < r_right_L and len(result)<Npt:
result.extend(left_iso_ls)
left_index -= 1
left_L = len_ls[left_index]
if left_L > smallnum:
left_iso_ls =len_dt[left_L]
r_left_L = L - left_L
else:
while r_left_L >= r_right_L and len(result)<Npt:
result.extend(right_iso_ls)
right_index += 1
right_L = len_ls[right_index]
if right_L < largenum:
right_iso_ls = len_dt[right_L]
r_right_L = right_L - L
return result[:Npt]
################################################################################
def calculate_b(pt,npt):
RPKM_ls = []
I_ls =[]
L_ls = []
for item in pt:
RPKM_ls.append( float(item[3]) )
I_ls.append(int(item[4]))
L_ls.append(int(item[2]))
temp_a = array([RPKM_ls,I_ls])
temp_a =transpose(temp_a)
temp_a_sorted = transpose( sorted(temp_a, key=lambda a_entry: a_entry[0]) )
RPKM_med_ls = []
D_rate_ls = []
i = 0
L_pt = len(pt)
while i < L_pt:
RPKM_med_ls.append( median( temp_a_sorted[0][i:i+npt] ) )
D_rate_ls.append( 1-mean( temp_a_sorted[1][i:i+npt] ) )
i += npt
gradient, intercept, r_value, p_value, std_err = stats.linregress(RPKM_med_ls, D_rate_ls)
return gradient, intercept, r_value, p_value, std_err,std(L_ls)
def printout(pt):
result = []
s = 0
for item in pt:
result.append(str(item[2]))
s += float(item[2])
print '\t'.join(result)
ave2 = s/len(result)
result = []
s = 0
for item in pt:
result.append(str(item[3]))
s += float(item[3])
print '\t'.join(result)
ave3 = s/len(result)
return ave2, ave3
len_ls = len_dt.keys()
largenum = 1e10
smallnum = -1e10
len_ls.append(largenum)
len_ls.append(smallnum)
len_ls.sort()
L=0
while L<0:
pt = getdensity(len_ls,len_dt, L,Npt)
if len(pt)!=Npt:
sys.exit(1)
gradient, intercept, r_value, p_value, std_err,std_L = calculate_b(pt,Npt/Nbin)
print '\t'.join([str(L),str(gradient), str(intercept), str(r_value), str(p_value), str(std_err),str(std_L)])
L+=1
#sys.exit(1)
tag = open(tag_filename,'r')
for line in tag:
ls = line.strip().split("\t")
exon_start_list=ls[9].strip(',').split(',')
exon_end_list=ls[10].strip(',').split(',')
L = 0
i=0
for start in exon_start_list:
start =int(start)
end = int(exon_end_list[i])
L += (end - start)
i += 1
pt = getdensity(len_ls,len_dt, L,Npt)
if len(pt)!=Npt:
sys.exit(1)
gradient, intercept, r_value, p_value, std_err,std_L = calculate_b(pt,Npt/Nbin)
print '\t'.join([str(L),str(gradient), str(intercept), str(r_value), str(p_value), str(std_err),str(std_L)])
tag.close()
|
N3MIS15/maraschino-webcam
|
maraschino/tools.py
|
Python
|
mit
| 8,552
| 0.00573
|
# -*- coding: utf-8 -*-
"""Util functions for different things. For example: format time or bytesize correct."""
from flask import request, Response
from functools import wraps
from jinja2.filters import FILTERS
import os
import maraschino
from maraschino import app, logger
from maraschino.models import Setting, XbmcServer
from flask import send_file
import StringIO
import urllib
import re
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return username == maraschino.AUTH['username'] and password == maraschino.AUTH['password']
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if maraschino.AUTH['username'] != None and maraschino.AUTH['password'] != None:
creds = maraschino.AUTH
else:
return f(*args, **kwargs)
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
def using_auth():
"""Check if authentication is necessary"""
if maraschino.AUTH['username'] != None and maraschino.AUTH['password'] != None:
return True
else:
return False
def format_time(time):
"""Format the time for the player info"""
formatted_time = ''
if time['hours'] > 0:
formatted_time += str(time['hours']) + ':'
if time['minutes'] == 0:
formatted_time += '00:'
formatted_time += '%0*d' % (2, time['minutes']) + ':'
formatted_time += '%0*d' % (2, time['seconds'])
return formatted_time
def format_seconds(time):
hours = time / 3600
minutes = time / 60
seconds = time % 60
if time < 3600:
time = '%02d:%02d' % (minutes, seconds)
else:
time = '%02d:%02d:%02d' % (hours, minutes, seconds)
return time
FILTERS['format_seconds'] = format_seconds
def round_number(num):
if (num > 0):
return int(num+.5)
else:
return int(num-.5)
FILTERS['round_number'] = round_number
def format_number(num):
extension_list = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
for i in range(len(extension_list)):
base = 1024**i
if num/base < 1024:
return '%.2f' % (float(num)/base) + ' ' + extension_list[i]
return str(num) + ' bytes'
def get_setting(key):
"""Get setting 'key' from db"""
try:
return Setting.query.filter(Setting.key == key).first()
except:
return None
def get_setting_value(key, default=None):
"""Get value for setting 'key' from db"""
try:
value = Setting.query.filter(Setting.key == key).first().value
if value == '':
return None
#Strip http/https from hostnames
if key.endswith('_host') or key.endswith('_ip'):
if value.startswith('http://'):
return value[7:]
elif value.startswith('https://'):
return value[8:]
return value
except:
return default
def get_file_list(folder, extensions, prepend_path=True, prepend_path_minus_root=False):
filelist = []
for root, subFolders, files in os.walk(folder):
for file in files:
if os.path.splitext(file)[1] in extensions:
if prepend_path:
filelist.append(os.path.join(root,file))
elif prepend_path_minus_root:
full = os.path.join(root, file)
partial = full.replace(folder, '')
if partial.startswith('/'):
partial = partial.replace('/', '', 1)
elif partial.startswith('\\'):
partial = partial.replace('\\', '', 1)
filelist.append(partial)
else:
filelist.append(file)
    return filelist
def convert_bytes(bytes, with_extension=True):
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = '%.2f' % terabytes
extension = 'TB'
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '%.2f' % gigabytes
extension = 'GB'
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '%.2f' % megabytes
extension = 'MB'
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '%.2f' % kilobytes
extension = 'KB'
else:
size = '%.2f' % bytes
extension = 'B'
if with_extension:
size = '%s%s' % (size, extension)
return size
return size, extension
FILTERS['convert_bytes'] = convert_bytes
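# Hedged examples (added for illustration) for the size helpers above:
#   format_number(1536)                       -> '1.50 kB'
#   convert_bytes(1536)                       -> '1.50KB'
#   convert_bytes(1536, with_extension=False) -> ('1.50', 'KB')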
def xbmc_image(url, label='default'):
"""Build xbmc image url"""
if url.startswith('special://'): #eden
return '%s/xhr/xbmc_image/%s/eden/?path=%s' % (maraschino.WEBROOT, label, url[len('special://'):])
elif url.startswith('image://'): #frodo
url = url[len('image://'):]
url = urllib.quote(url.encode('utf-8'), '')
return '%s/xhr/xbmc_image/%s/frodo/?path=%s' % (maraschino.WEBROOT, label, url)
else:
return url
FILTERS['xbmc_image'] = xbmc_image
def epochTime(seconds):
"""Convert the time expressed by 'seconds' since the epoch to string"""
import time
return time.ctime(seconds)
FILTERS['time'] = epochTime
@app.route('/xhr/xbmc_image/<label>/<version>/')
def xbmc_proxy(version, label):
"""Proxy XBMC image to make it accessible from external networks."""
from maraschino.noneditable import server_address
url = request.args['path']
if label != 'default':
server = XbmcServer.query.filter(XbmcServer.label == label).first()
xbmc_url = 'http://'
if server.username and server.password:
xbmc_url += '%s:%s@' % (server.username, server.password)
xbmc_url += '%s:%s' % (server.hostname, server.port)
else:
xbmc_url = server_address()
if version == 'eden':
url = '%s/vfs/special://%s' % (xbmc_url, url)
elif version == 'frodo':
url = '%s/image/image://%s' % (xbmc_url, urllib.quote(url.encode('utf-8'), ''))
img = StringIO.StringIO(urllib.urlopen(url).read())
return send_file(img, mimetype='image/jpeg')
def youtube_to_xbmc(url):
x = url.find('?v=') + 3
id = url[x:]
return 'plugin://plugin.video.youtube/?action=play_video&videoid=' + id
def download_image(image, file_path):
"""Download image file"""
try:
logger.log('Creating file %s' % file_path, 'INFO')
downloaded_image = file(file_path, 'wb')
except:
logger.log('Failed to create file %s' % file_path, 'ERROR')
maraschino.THREADS.pop()
try:
logger.log('Downloading %s' % image, 'INFO')
image_on_web = urllib.urlopen(image)
while True:
buf = image_on_web.read(65536)
if len(buf) == 0:
break
downloaded_image.write(buf)
downloaded_image.close()
image_on_web.close()
except:
logger.log('Failed to download %s' % image, 'ERROR')
maraschino.THREADS.pop()
return
@app.route('/cache/image_file/<type>/<path:file_path>/')
@app.route('/cache/image_url/<path:file_path>/')
@requires_auth
def file_img_cache(file_path, type=None):
if not type:
file_path = 'http://' + file_path
file_path = StringIO.StringIO(urllib.urlopen(file_path).read())
elif type == 'unix':
file_path = '/' + file_path
return send_file(file_path, mimetype='image/jpeg')
def create_dir(dir):
if not os.path.exists(dir):
try:
logger.log('Creating dir %s' % dir, 'INFO')
os.makedirs(dir)
except Exception as e:
logger.log('Problem creating dir %s' % dir, 'ERROR')
            logger.log(e, 'DEBUG')
|
hfaran/progressive
|
progressive/cursor.py
|
Python
|
mit
| 1,718
| 0
|
import os
from blessings import Terminal
class Cursor(object):
"""Common methods for cursor manipulation
:type term: NoneType|blessings.Terminal
:param term: Terminal instance; if not given, will be created by the class
"""
def __init__(self, term=None):
self.term = Terminal() if term is None else term
self._stream = self.term.stream
        self._saved = False
def write(self, s):
"""Writes ``s`` to the terminal output stream
Writes can be disabled by setting the environment variable
`PROGRESSIVE_NOWRITE` to `'True'`
"""
should_write_s = os.getenv('PROGRESSIVE_NOWRITE') != "True"
if should_write_s:
self._stream.write(s)
def save(self):
"""Saves current cursor position, so that it can be restored later"""
self.write(self.term.save)
        self._saved = True
def restore(self):
"""Restores cursor to the previously saved location
Cursor position will only be restored IF it was previously saved
by this instance (and not by any external force)
"""
if self._saved:
self.write(self.term.restore)
def flush(self):
"""Flush buffer of terminal output stream"""
self._stream.flush()
def newline(self):
"""Effects a newline by moving the cursor down and clearing"""
self.write(self.term.move_down)
self.write(self.term.clear_bol)
def clear_lines(self, num_lines=0):
for i in range(num_lines):
self.write(self.term.clear_eol)
self.write(self.term.move_down)
for i in range(num_lines):
self.write(self.term.move_up)
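# Hedged usage sketch (added for illustration; assumes an interactive terminal and the
# blessings package imported above):
if __name__ == '__main__':
    cursor = Cursor()
    cursor.save()                           # remember the current position
    cursor.write('progressive cursor demo')
    cursor.newline()                        # move down and clear to the start of the line
    cursor.restore()                        # jump back to the saved position
    cursor.flush()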
|
venetay/Photo-Competition
|
attachments/forms.py
|
Python
|
mit
| 212
| 0.009434
|
from mpc.settings import CATEGORY
from django import forms
class UploadPhotoForm(forms.Form):
    photo_file = forms.FileField(label='Select a file')
    photo_category = forms.ChoiceField(choices=CATEGORY)
|
cschnei3/forseti-security
|
tests/scanner/audit/data/__init__.py
|
Python
|
apache-2.0
| 610
| 0
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data for scanner unit tests."""
|
asif-mahmud/Pyramid-Apps
|
pethouse/alembic/versions/0c431867c679_pets_now_have_a_description.py
|
Python
|
gpl-2.0
| 659
| 0.007587
|
"""Pets now have a description
Revision ID: 0c431867c679
Revises: 5b1bdc1f3125
Create Date: 2016-11-07 18:36:25.912155
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0c431867c679'
down_revision = '5b1bdc1f3125'
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('pet', sa.Column('description', sa.Text(), nullable=False))
### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
op.drop_column('pet', 'description')
### end Alembic commands ###
|
michaelrosejr/pyaos6
|
netmiko/f5/__init__.py
|
Python
|
mit
| 107
| 0
|
from __future__ import unicode_literals
from netmiko.f5.f5_ltm_ssh import F5LtmSSH
__all__ = ['F5LtmSSH']
|
rohitranjan1991/home-assistant
|
homeassistant/components/hassio/__init__.py
|
Python
|
mit
| 23,541
| 0.000935
|
"""Support for Hass.io."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
import os
from typing import Any, NamedTuple
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components import panel_custom, persistent_notification
from homeassistant.components.homeassistant import (
SERVICE_CHECK_CONFIG,
SHUTDOWN_SERVICES,
)
import homeassistant.config as conf_util
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_MANUFACTURER,
ATTR_NAME,
EVENT_CORE_CONFIG_UPDATE,
HASSIO_USER_NAME,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import (
DOMAIN as HASS_DOMAIN,
HomeAssistant,
ServiceCall,
callback,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, recorder
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import (
DeviceEntryType,
DeviceRegistry,
async_get_registry,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .addon_panel import async_setup_addon_panel
from .auth import async_setup_auth_view
from .const import (
ATTR_ADDON,
ATTR_ADDONS,
ATTR_DISCOVERY,
ATTR_FOLDERS,
ATTR_HOMEASSISTANT,
ATTR_INPUT,
ATTR_PASSWORD,
ATTR_REPOSITORY,
ATTR_SLUG,
ATTR_STARTED,
ATTR_STATE,
ATTR_URL,
ATTR_VERSION,
DATA_KEY_ADDONS,
DOMAIN,
SupervisorEntityModel,
)
from .discovery import HassioServiceInfo, async_setup_discovery_view # noqa: F401
from .handler import HassIO, HassioAPIError, api_data
from .http import HassIOView
from .ingress import async_setup_ingress_view
from .websocket_api import async_load_websocket_api
_LOGGER = logging.getLogger(__name__)
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR]
CONF_FRONTEND_REPO = "development_repo"
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})},
extra=vol.ALLOW_EXTRA,
)
DATA_CORE_INFO = "hassio_core_info"
DATA_HOST_INFO = "hassio_host_info"
DATA_STORE = "hassio_store"
DATA_INFO = "hassio_info"
DATA_OS_INFO = "hassio_os_info"
DATA_SUPERVISOR_INFO = "hassio_supervisor_info"
DATA_ADDONS_STATS = "hassio_addons_stats"
HASSIO_UPDATE_INTERVAL = timedelta(minutes=5)
ADDONS_COORDINATOR = "hassio_addons_coordinator"
SERVICE_ADDON_START = "addon_start"
SERVICE_ADDON_STOP = "addon_stop"
SERVICE_ADDON_RESTART = "addon_restart"
SERVICE_ADDON_UPDATE = "addon_update"
SERVICE_ADDON_STDIN = "addon_stdin"
SERVICE_HOST_SHUTDOWN = "host_shutdown"
SERVICE_HOST_REBOOT = "host_reboot"
SERVICE_BACKUP_FULL = "backup_full"
SERVICE_BACKUP_PARTIAL = "backup_partial"
SERVICE_RESTORE_FULL = "restore_full"
SERVICE_RESTORE_PARTIAL = "restore_partial"
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.string})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend(
{vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)}
)
SCHEMA_BACKUP_FULL = vol.Schema(
{vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_BACKUP_PARTIAL = SCHEMA_BACKUP_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
SCHEMA_RESTORE_FULL = vol.Schema(
{
vol.Required(ATTR_SLUG): cv.slug,
vol.Optional(ATTR_PASSWORD): cv.string,
}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
class APIEndpointSettings(NamedTuple):
"""Settings for API endpoint."""
command: str
schema: vol.Schema
timeout: int | None = 60
pass_data: bool = False
MAP_SERVICE_API = {
SERVICE_ADDON_START: APIEndpointSettings("/addons/{addon}/start", SCHEMA_ADDON),
SERVICE_ADDON_STOP: APIEndpointSettings("/addons/{addon}/stop", SCHEMA_ADDON),
SERVICE_ADDON_RESTART: APIEndpointSettings("/addons/{addon}/restart", SCHEMA_ADDON),
SERVICE_ADDON_UPDATE: APIEndpointSettings("/addons/{addon}/update", SCHEMA_ADDON),
SERVICE_ADDON_STDIN: APIEndpointSettings(
"/addons/{addon}/stdin", SCHEMA_ADDON_STDIN
),
SERVICE_HOST_SHUTDOWN: APIEndpointSettings("/host/shutdown", SCHEMA_NO_DATA),
SERVICE_HOST_REBOOT: APIEndpointSettings("/host/reboot", SCHEMA_NO_DATA),
SERVICE_BACKUP_FULL: APIEndpointSettings(
"/backups/new/full",
SCHEMA_BACKUP_FULL,
None,
True,
),
SERVICE_BACKUP_PARTIAL: APIEndpointSettings(
"/backups/new/partial",
SCHEMA_BACKUP_PARTIAL,
None,
True,
),
SERVICE_RESTORE_FULL: APIEndpointSettings(
"/backups/{slug}/restore/full",
SCHEMA_RESTORE_FULL,
None,
True,
),
SERVICE_RESTORE_PARTIAL: APIEndpointSettings(
"/backups/{slug}/restore/partial",
SCHEMA_RESTORE_PARTIAL,
None,
True,
),
}
@bind_hass
async def async_get_addon_info(hass: HomeAssistant, slug: str) -> dict:
"""Return add-on info.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
return await hassio.get_addon_info(slug)
@bind_hass
async def async_update_diagnostics(hass: HomeAssistant, diagnostics: bool) -> dict:
"""Update Supervisor diagnostics toggle.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
return await hassio.update_diagnostics(diagnostics)
@bind_hass
@api_data
async def async_install_addon(hass: HomeAssistant, slug: str) -> dict:
"""Install add-on.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
command = f"/addons/{slug}/install"
return await hassio.send_command(command, timeout=None)
@bind_hass
@api_data
async def async_uninstall_addon(hass: HomeAssistant, slug: str) -> dict:
"""Uninstall add-on.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
command = f"/addons/{slug}/uninstall"
return await hassio.send_command(command, timeout=60)
@bind_hass
@api_data
async def async_update_addon(hass: HomeAssistant, slug: str) -> dict:
"""Update add-on.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
command = f"/addons/{slug}/update"
return await hassio.send_command(command, timeout=None)
@bind_hass
@api_data
async def async_start_addon(hass: HomeAssistant, slug: str) -> dict:
"""Start add-on.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
command = f"/addons/{slug}/start"
return await hassio.send_command(command, timeout=60)
@bind_hass
@api_data
async def async_restart_addon(hass: HomeAssistant, slug: str) -> dict:
"""Restart add-on.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
command = f"/addons/{slug}/restart"
return await hassio.send_command(command, timeout=None)
@bind_hass
@api_data
async def async_stop_addon(hass: HomeAssistant, slug: str) -> dict:
"""Stop add-on.
The caller of the function should handle HassioAPIError.
"""
hassio = hass.data[DOMAIN]
command = f"/addons/{slug}/stop"
return await hassio.send_command(command, timeout=60)
@bind_hass
@api_data
async def async_set_addon_options(
hass: HomeAssistant, slug: str, options: dict
) -> dict:
"""Set add-on options.
    The caller of the function should handle HassioAPIError.
|
JohanComparat/pySU
|
spm/bin_SMF/smf_plot.py
|
Python
|
cc0-1.0
| 15,194
| 0.025536
|
import astropy.cosmology as co
aa=co.Planck15
import astropy.io.fits as fits
import matplotlib
import matplotlib
matplotlib.rcParams['agg.path.chunksize'] = 2000000
matplotlib.rcParams.update({'font.size': 12})
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
# global cosmo quantities
z_min = float(sys.argv[1])
z_max = float(sys.argv[2])
#imf = 'kroupa'
lO2_min = float(sys.argv[3]) # 'salpeter'
SNlimit = 5
out_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'results')
#previous catalogs
ll_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'literature')
cosmos_dir = os.path.join(os.environ['OBS_REPO'], 'COSMOS', 'catalogs' )
path_2_cosmos_cat = os.path.join( cosmos_dir, "photoz-2.0", "photoz_vers2.0_010312.fits")
#path_2_cosmos_cat = os.path.join( cosmos_dir, "COSMOS2015_Laigle+_v1.1.fits.gz")
# FIREFLY CATALOGS
# SDSS data and catalogs
sdss_dir = os.path.join(os.environ['OBS_REPO'], 'SDSS', 'dr14')
path_2_spall_sdss_dr14_cat = os.path.join( sdss_dir, "specObj-SDSS-dr14.fits" )
path_2_spall_boss_dr14_cat = os.path.join( sdss_dir, "specObj-BOSS-dr14.fits" )
path_2_sdss_cat = os.path.join( sdss_dir, "FireflyGalaxySdss26.fits" )
path_2_eboss_cat = os.path.join( sdss_dir, "FireflyGalaxyEbossDR14.fits" )
# DEEP SURVEYS
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.v2.fits" )
vipers_dir = os.path.join(os.environ['OBS_REPO'], 'VIPERS')
path_2_vipers_cat = os.path.join( vipers_dir, "VIPERS_W14_summary_v2.1.linesFitted.spm.fits" )
vvds_dir = os.path.join(os.environ['OBS_REPO'], 'VVDS')
path_2_vvdsW_cat = os.path.join( vvds_dir, "catalogs", "VVDS_WIDE_summary.v1.spm.fits" )
path_2_vvdsD_cat = os.path.join( vvds_dir, "catalogs", "VVDS_DEEP_summary.v1.spm.fits" )
# path_2_F16_cat = os.path.join( sdss_dir, "RA_DEC_z_w_fluxOII_Mstar_grcol_Mr_lumOII.dat" )
# OPENS THE CATALOGS
deep2 = fits.open(path_2_deep2_cat)[1].data
#vvdsD = fits.open(path_2_vvdsD_cat)[1].data
#vvdsW = fits.open(path_2_vvdsW_cat)[1].data
#vipers = fits.open(path_2_vipers_cat)[1].data
#sdss = fits.open(path_2_sdss_cat)[1].data
#boss = fits.open(path_2_eboss_cat)[1].data
cosmos = fits.open(path_2_cosmos_cat)[1].data
lineSelection = lambda catalog, lineName : (catalog[lineName+'_flux']>0.)& (catalog[lineName+'_fluxErr'] >0.) & (catalog[lineName+'_flux'] > SNlimit * catalog[lineName+'_fluxErr']) # & (catalog[lineName+'_luminosity']>0)& (catalog[lineName+'_luminosity']<1e50)
out_dir = os.path.join('/data42s/comparat/firefly/v1_1_0/figures')
smf_ilbert13 = lambda M, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s : ( phi_1s * (M/M_star) ** alpha_1s + phi_2s * (M/M_star) ** alpha_2s ) * n.e ** (-M/M_star) * (M/ M_star)
path_ilbert13_SMF = os.path.join(ll_dir, "ilbert_2013_mass_function_params.txt")
zmin, zmax, N, M_comp, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s, log_rho_s = n.loadtxt(os.path.join( ll_dir, "ilbert_2013_mass_function_params.txt"), unpack=True)
#smfs_ilbert13 = n.array([lambda mass : smf_ilbert13( mass , 10**M_star[ii], phi_1s[ii]*10**(-3), alpha_1s[ii], phi_2s[ii]*10**(-3), alpha_2s[ii] ) for ii in range(len(M_star)) ])
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0] )
#print 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0]
smf08 = lambda mass : smf_ilbert13( mass , 10**M_star[2], phi_1s[2]*10**(-3), alpha_1s[2], phi_2s[2]*10**(-3), alpha_2s[2] )
#print 10**M_star[2], phi_1s[2]*10**(-3), alpha_1s[2], phi_2s[2]*10**(-3), alpha_2s[2]
volume_per_deg2 = ( aa.comoving_volume(z_max) - aa.comoving_volume(z_min) ) * n.pi / 129600.
volume_per_deg2_val = volume_per_deg2.value
# global spm quantities
# stat functions
ld = lambda selection : len(selection.nonzero()[0])
# stats about DEEP2 run
area1=0.60
area2=0.62
area3=0.90
area4=0.66
if z_min>=0.7:
area_deep2 = area1+area2+area3+area4
else :
area_deep2 = 0.6
#area_vvdsD = 0.6
#area_vvdsW = 5.785
#area_vipers = 24.
#area_cosmos = 1.52
def get_basic_stat(catalog, z_name, z_flg, name, zflg_min, prefix):
catalog_zOk = (catalog[z_name] > z_min) & (catalog[z_flg]>=zflg_min)
catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max) & (catalog['SSR']>0) & (catalog['TSR']>0) & (catalog['SSR']<=1.0001) & (catalog['TSR']<=1.0001)
catalog_sel = (catalog_stat) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] >= 10**5. ) & (catalog[prefix+'stellar_mass'] <= catalog[prefix+'stellar_mass_up'] ) & (catalog[prefix+'stellar_mass'] >= catalog[prefix+'stellar_mass_low'] ) & (-n.log10(catalog[prefix+'stellar_mass_low']) + n.log10(catalog[prefix+'stellar_mass_up']) < 0.6 )
l_o2 = lineSelection(catalog, "O2_3728") & catalog_stat
l_o3 = lineSelection(catalog, "O3_5007") & catalog_stat
l_hb = lineSelection(catalog, "H1_4862") & catalog_stat
m_catalog = n.log10(catalog[prefix+'stellar_mass'])
w_catalog = 1. / (catalog['TSR'] * catalog['SSR'])
#print name, '& $',len(catalog), "$ & $", ld(catalog_zOk),"$ & $", ld(catalog_stat), "\\;(", ld(catalog_sel),")$ & $", ld(l_o2), "\\;(", ld(catalog_sel & l_o2),")$ & $", ld(l_o3), "\\;(", ld(catalog_sel & l_o3),")$ & $", ld(l_hb), "\\;(", ld(catalog_sel & l_hb),")$ \\\\"
return catalog_sel, m_catalog, w_catalog, l_o2, l_o3, l_hb
def get_hist(masses, weights, mbins):
NN = n.histogram(masses, mbins)[0]
    NW = n.histogram(masses, mbins, weights = weights)[0]
xx = (mbins[1:] + mbins[:-1])/2.
return xx, NW, NN**(-0.5)*NW
def plotMF_raw(prefix="Chabrier_ELODIE_"):
deep2_sel, deep2_m, deep2_w, deep2_o2, deep2_o3, deep2_hb = get_basic_stat(deep2, 'ZBEST', 'ZQUALITY', 'DEEP2', 3., prefix)
    #vvdsD_sel, vvdsD_m, vvdsD_w, vvdsD_o2, vvdsD_o3, vvdsD_hb = get_basic_stat(vvdsD, 'Z', 'ZFLAGS', 'VVDS Deep', 2., prefix)
#vvdsW_sel, vvdsW_m, vvdsW_w, vvdsW_o2, vvdsW_o3, vvdsW_hb = get_basic_stat(vvdsW, 'Z', 'ZFLAGS', 'VVDS Wide', 2., prefix)
#vipers_sel, vipers_m, vipers_w, vipers_o2, vipers_o3, vipers_hb = get_basic_stat(vipers, 'zspec', 'zflg', 'VIPERS', 1., prefix)
lbins = n.arange(40.5,44,0.25)
x_lum = (lbins[1:] + lbins[:-1])/2.
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
N_O2_all = n.histogram(deep2['O2_3728_luminosity'][deep2_o2], bins = 10**lbins)[0]
N_O2_mass = n.histogram(deep2['O2_3728_luminosity'][deep2_sel & deep2_o2], bins = 10**lbins)[0]
N_O2_all_normed = n.histogram(n.log10(deep2['O2_3728_luminosity'][deep2_o2]), bins = lbins, normed = True)[0]
#print N_O2_all_normed
ok_o2 = (N_O2_all>0)
p.plot(x_lum, N_O2_all_normed/2., label = 'normed hist')
p.plot(x_lum[ok_o2], 1. * N_O2_mass[ok_o2] / N_O2_all[ok_o2], label = 'DEEP2')
p.axvline(lO2_min)
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel('[OII] luminosity')
p.ylabel('[OII] with mass measurement / all [OII] detections')
#p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((-0.01, 1.01))
p.xlim((40.5, 43.5))
p.grid()
p.savefig(os.path.join(out_dir, "SMF_"+prefix+"line_detection_raw_"+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
dlog10m = 0.25
mbins = n.arange(8,12.5,dlog10m)
p.figure(1, (4.5,4.5))
p.axes([0.19,0.17,0.74,0.72])
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
p.plot(mbins, smf08(10**mbins), label='Ilbert 13, 0.8<z<1.1', ls='dashed')
x, y, ye = get_hist(deep2_m[deep2_sel], weights = deep2_w[deep2_sel]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='DEEP2', lw=1)
x, y, ye = get_hist(deep2_m[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)], weights = deep2_w[deep2_sel & deep2_o2 & (deep2['O2_3728_luminosity']>10**lO2_min)]/(dlog10m*n.log(10)*area_deep2*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='DEEP2 L([OII])>'+str(lO2_min), lw=1)
#x, y, ye = get_hist(vvdsD_m, weights = vvdsD_w/(dlog10m*n.log(10)*area_vvdsD*volume_per_deg2_v
|
rohandavidg/CONCORD-VCF
|
bin/do_logging.py
|
Python
|
mit
| 668
| 0.001497
|
#!/dlmp/sandbox/cgslIS/rohan/Python-2.7.11/python
"""
setting up logging
"""
import logging
import time
import datetime
def main(filename):
logger = configure_logger(filename)
def configure_logger(filename):
    """
    setting up logging
"""
logger = logging.getLogger(filename)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(time.strftime(filename+"-%Y%m%d.log"))
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s'\t'%(name)s'\t'%(levelname)s'\t'%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
if __name__ == "__main__":
    import sys
    main(sys.argv[1])  # log-file basename taken from the first command-line argument
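# Hedged usage sketch (added for illustration; 'concord_vcf' is an arbitrary example name):
#   logger = configure_logger('concord_vcf')
#   logger.info('pipeline started')
# This appends to a date-stamped file such as concord_vcf-YYYYMMDD.log in the working directory.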
|
ffmmjj/desafio-dados-2016
|
data_preparation_pipeline/run_all_data_tasks.py
|
Python
|
apache-2.0
| 356
| 0.005618
|
import luigi
from preprocess_data import ScaleDirectorFeatureValues, ScaleTeacherFeatureValues
from split_data import SplitAvgSchoolData, SplitOutstandingSchoolData
class AllDataTasks(luigi.WrapperTask):
def requires(self):
        return SplitAvgSchoolData(), SplitOutstandingSchoolData(), ScaleTeacherFeatureValues(), ScaleDirectorFeatureValues()
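# Hedged usage sketch (added for illustration): the wrapper task can be driven with
# luigi's local scheduler, assuming the upstream split/scale tasks and their inputs
# are configured for this project.
if __name__ == '__main__':
    luigi.build([AllDataTasks()], local_scheduler=True)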
|
nicostephan/pypuf
|
pypuf/experiments/experimenter.py
|
Python
|
gpl-3.0
| 1,942
| 0.00206
|
import multiprocessing
import logging
class Experimenter(object):
"""
Coordinated, parallel execution of Experiments with logging.
"""
def __init__(self, log_name, experiments, cpu_limit=2**16):
"""
:param experiments: A list of pypuf.experiments.experiment.base.Experiment
:param log_name: A unique file path where to output should be logged.
:param cpu_limit: Maximum number of parallel processes that run experiments.
"""
# Store experiments list
self.experiments = experiments
# Setup logging to both file and console
file_handler = logging.FileHandler(filename='%s.log' % log_name, mode='w')
file_handler.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
self.logger = logging.getLogger(log_name)
self.logger.setLevel(logging.INFO)
self.logger.addHandler(file_handler)
        self.logger.addHandler(stream_handler)
        # Setup parallel execution limit
self.cpu_limit = min(cpu_limit, multiprocessing.cpu_count())
self.semaphore = multiprocessing.BoundedSemaphore(self.cpu_limit)
def run(self):
"""
Runs all experiments.
"""
jobs = []
for exp in self.experiments:
# define experiment process
def run_experiment(semaphore):
exp.execute() # run the actual experiment
semaphore.release() # release CPU
job = multiprocessing.Process(
target=run_experiment,
args=(self.semaphore,)
)
# run experiment
self.semaphore.acquire() # wait for a free CPU
job.start()
# keep a list of all jobs
jobs.append(job)
# wait for all processes to be finished
for job in jobs:
job.join()
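# Hedged usage sketch (added for illustration): `_NoopExperiment` is a hypothetical
# stand-in for a pypuf.experiments.experiment.base.Experiment and only needs execute().
# The closure-based Process target above assumes a fork-based start method (Linux default).
if __name__ == '__main__':
    class _NoopExperiment(object):
        def execute(self):
            pass  # a real experiment would compute and log its results here
    Experimenter('example_run', [_NoopExperiment() for _ in range(4)], cpu_limit=2).run()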
|
while519/SME
|
WN/WN_TransE.py
|
Python
|
bsd-3-clause
| 604
| 0.006623
|
#! /usr/bin/python
from WN_exp import *
from WN_evaluation import *
if theano.config.floatX == 'float32':
sys.stderr.write("""WARNING: Detected floatX=float32 in the configuration.
This might result in NaN in embeddings after several epochs.
""")
launch(op='TransE', dataset='WN', simfn='L1', ndim=20, nhid=20, marge=2., lremb=0.01, lrparam=1.,
nbatches=100, totepochs=1000, test_all=10, neval=1000, savepath='WN_TransE',
datapath='../data/', Nent=40961, Nsyn=40943, Nrel=18)
print "\n##### EVALUATION #####\n"
RankingEval(datapath='../data/', loadmodel='WN_TransE/best_valid_model.pkl')
|
car3oon/saleor
|
saleor/userprofile/models.py
|
Python
|
bsd-3-clause
| 5,495
| 0.000182
|
from __future__ import unicode_literals
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin)
from django.db import models
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from django_countries.fields import Country, CountryField
from ..search import index
class AddressManager(models.Manager):
def as_data(self, address):
data = model_to_dict(address, exclude=['id', 'user'])
if isinstance(data['country'], Country):
data['country'] = data['country'].code
return data
def are_identical(self, addr1, addr2):
data1 = self.as_data(addr1)
data2 = self.as_data(addr2)
return data1 == data2
def store_address(self, user, address):
data = self.as_data(address)
address, dummy_created = user.addresses.get_or_create(**data)
return address
@python_2_unicode_compatible
class Address(models.Model):
first_name = models.CharField(
pgettext_lazy('Address field', 'given name'),
max_length=256, blank=True)
last_name = models.CharField(
pgettext_lazy('Address field', 'family name'),
max_length=256, blank=True)
company_name = models.CharField(
pgettext_lazy('Address field', 'company or organization'),
max_length=256, blank=True)
street_address_1 = models.CharField(
pgettext_lazy('Address field', 'address'),
max_length=256, blank=True)
street_address_2 = models.CharField(
pgettext_lazy('Address field', 'address'),
max_length=256, blank=True)
city = models.CharField(
pgettext_lazy('Address field', 'city'),
max_length=256, blank=True)
city_area = models.CharField(
pgettext_lazy('Address field', 'district'),
max_length=128, blank=True)
postal_code = models.CharField(
pgettext_lazy('Address field', 'postal code'),
max_length=20, blank=True)
country = CountryField(
pgettext_lazy('Address field', 'country'))
country_area = models.CharField(
pgettext_lazy('Address field', 'state or province'),
max_length=128, blank=True)
phone = models.CharField(
pgettext_lazy('Address field', 'phone number'),
max_length=30, blank=True)
objects = AddressManager()
@property
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
class Meta:
verbose_name = pgettext_lazy('Address model', 'address')
verbose_name_plural = pgettext_lazy('Address model', 'addresses')
def __str__(self):
        if self.company_name:
            return '%s - %s' % (self.company_name, self.full_name)
return self.full_name
def __repr__(self):
return (
'Address(first_name=%r, last_name=%r, company_name=%r, '
'street_address_1=%r, street_address_2=%r, city=%r, '
'postal_code=%r, country=%r, country_area=%r, phone=%r)' % (
self.first_name, self.last_name, self.company_name,
self.street_address_1, self.street_address_2, self.city,
self.postal_code, self.country, self.country_area,
self.phone))
class UserManager(BaseUserManager):
def create_user(self, email, password=None, is_staff=False,
is_active=True, **extra_fields):
'Creates a User with the given username, email and password'
email = UserManager.normalize_email(email)
user = self.model(email=email, is_active=is_active,
is_staff=is_staff, **extra_fields)
if password:
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password=None, **extra_fields):
return self.create_user(email, password, is_staff=True,
is_superuser=True, **extra_fields)
class User(PermissionsMixin, AbstractBaseUser, index.Indexed):
email = models.EmailField(pgettext_lazy('User field', 'email'), unique=True)
addresses = models.ManyToManyField(
Address, blank=True,
verbose_name=pgettext_lazy('User field', 'addresses'))
is_staff = models.BooleanField(
pgettext_lazy('User field', 'staff status'),
default=False)
is_active = models.BooleanField(
pgettext_lazy('User field', 'active'),
default=True)
date_joined = models.DateTimeField(
pgettext_lazy('User field', 'date joined'),
default=timezone.now, editable=False)
default_shipping_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default shipping address'))
default_billing_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default billing address'))
USERNAME_FIELD = 'email'
objects = UserManager()
search_fields = [
index.SearchField('email')]
class Meta:
verbose_name = pgettext_lazy('User model', 'user')
verbose_name_plural = pgettext_lazy('User model', 'users')
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
|
alexmilowski/python-hadoop-rest-api
|
pyox/apps/tracker/api.py
|
Python
|
apache-2.0
| 22,821
| 0.034836
|
from flask import Blueprint, g, current_app, request, Response, jsonify, copy_current_request_context
import json
import functools
import sys
import traceback
import logging
from redis import Redis
from time import sleep
from uuid import uuid4
from io import StringIO
from datetime import datetime
from pyox.apps.tracker.tasks import task_authenticate, task_create
from pyox import ServiceError, ClusterInformation, Oozie, Workflow
job_update_expiry = 60
# expire in 24 hours
REDIS_EXPIRES = 24*60*60
def get_redis():
r = getattr(g, '_redis', None)
if r is None:
port = 6379
password = None
host = current_app.config.get('REDIS_HOST')
if host is None:
host = 'localhost'
else:
parts = host.split(':')
host = parts[0]
if len(parts)>1:
port = int(parts[1])
if len(parts)>2:
password = parts[2]
r = g._redis = Redis(host=host,port=port,password=password,decode_responses=True)
return r
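# Hedged illustration (added) for get_redis above: the REDIS_HOST setting accepts
# 'host', 'host:port' or 'host:port:password'; e.g. a hypothetical
# 'redis.example.internal:6380:s3cret' connects to that host and port with that password.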
def json_seq(f):
@functools.wraps(f)
def wrapped(*args,**kwargs):
data = f(*args,**kwargs)
if type(data)=='function':
def iter():
for item in data():
yield '\x1e'+json.dumps(item)
return Response(status=200,response=iter,mimetype='application/json-seq; charset=utf-8')
else:
return Response(status=200,response=json.dumps(data),mimetype='application/json; charset=utf-8')
return wrapped
def get_cluster_client():
conf = current_app.config.get('KNOX')
if conf is None:
raise ValueError('Missing gateway configuration')
client = ClusterInformation(
base=conf.get('base'),
secure=conf.get('secure',False),
host=conf.get('host','localhost'),
port=conf.get('port',50070),
gateway=conf.get('gateway'),
username=request.authorization.username if request.authorization is not None else None,
password=request.authorization.password if request.authorization is not None else None)
client.proxies = conf.get('proxies')
client.verify = conf.get('verify',True)
return client
def get_oozie_client(app,username=None,password=None,cookies=None,bearer_token=None,bearer_token_encode=True):
conf = app.config.get('KNOX')
if conf is None:
raise ValueError('Missing gateway configuration')
client = Oozie(
base=conf.get('base'),
secure=conf.get('secure',False),
host=conf.get('host','localhost'),
port=conf.get('port',50070),
gateway=conf.get('gateway'),
namenode=conf.get('namenode'),
tracker=conf.get('tracker'),
username=username,
password=password,
cookies=cookies,
bearer_token=bearer_token,
bearer_token_encode=bearer_token_encode)
client.proxies = conf.get('proxies')
client.verify = conf.get('verify',True)
return client
def application_ids(info):
actions = info.get('actions')
if actions is not None:
return list(map(lambda x : x[4:],filter(lambda x : x[0:4]=='job_' if x is not None else False,map(lambda action : action.get('externalId'),actions)))) + \
list(map(lambda x : x[4:],filter(lambda x : x[0:4]=='job_' if x is not None else False,map(lambda action : action.get('externalChildIDs'),actions))))
else:
return []
def set_property(redis,objid,propname,value):
redis.hset(objid,propname,value)
redis.expire(objid,REDIS_EXPIRES)
def get_property(redis,objid,propname):
return redis.hget(objid,propname)
def get_object(redis,objid):
obj = {}
for propname in redis.hkeys(objid):
obj[propname] = redis.hget(objid,propname)
return obj if len(obj.keys())>0 else None
def action_copy_job_id(app_id):
return 'action-copy-job-'+app_id
def invoke_application_log_copy(oozie,redis,parent_id,action_id,username,verbose=False):
logger = logging.getLogger(__name__)
logger.info('Invoking copy from job {} for application {}'.format(parent_id,action_id))
confid = str(uuid4())
logdir = '/user/'+username+'/WORK/logs'
path = logdir + '/' + confid
workflow = Workflow.start(
'shell-'+action_id,'shell',
job_tracker='sandbox-RMS:8032',
name_node='hdfs://sandbox'
).action(
'shell',
Workflow.shell(
'copy.sh',
configuration={
'mapred.job.queue.name' : 'HQ_IST'
},
argument=[logdir,parent_id,'application_'+action_id],
file=path+'/copy.sh'
)
).kill('error','Cannot run copy workflow')
if verbose:
print(str(workflow))
script = StringIO('''#!/bin/bash
hdfs dfs -mkdir -p $1/$2
hdfs dfs -rm $1/$2/$3.log
yarn logs -applicationId $3 | hdfs dfs -put - $1/$2/$3.log
''')
jobid = oozie.submit(
path,
properties={
'oozie.use.system.libpath' : True,
'user.name' : username
},
workflow=workflow,
copy=[(script,'copy.sh')],
verbose=verbose
)
set_property(redis,parent_id,action_copy_job_id(action_id),jobid)
set_property(redis,jobid,'status','RUNNING')
set_property(redis,jobid,'path',path)
set_property(redis,jobid,'cleanup',True)
return jobid
service_api = Blueprint('service_api',__name__)
def nocache_headers():
return {
'Last-Modified' : datetime.now(),
'Cache-Control' : 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0',
'Pragma' : 'no-cache',
        'Expires' : '-1'
    }
def error_response(status_code,message,**kwargs):
obj = {'message':message,'status_code':status_code}
for name in kwargs:
obj[name] = kwargs[name]
headers = nocache_headers()
if status_code==401:
headers['WWW-Authenticate'] = 'Basic realm="KNOX Credentials"'
return Response(status=status_code,response=json.dumps(obj)+'\n',mimetype='application/json; charset=utf-8',headers=headers)
def api_response(status_code,obj,**kwargs):
for name in kwargs:
obj[name] = kwargs[name]
headers = nocache_headers()
return Response(status=status_code,response=json.dumps(obj)+'\n',mimetype='application/json; charset=utf-8',headers=headers)
def request_job_ids():
content_type = request.headers['content-type']
if content_type.startswith('text/plain'):
ids = list(map(lambda x : x.strip(),request.data.decode('UTF-8').split('\n')))
elif content_type.startswith('application/json'):
data = json.loads(request.data)
ids = data if type(data)==list else data.get('id')
if type(ids)==str:
ids = [ids]
else:
ids = None
return ids
def get_job_summary(redis,job_id):
logger = logging.getLogger(__name__)
job_summary = get_object(redis,job_id)
if job_summary is None:
return None
last_checked = job_summary.get('last-checked')
if last_checked is None or (datetime.now()-datetime.strptime(last_checked,'%Y-%m-%dT%H:%M:%S.%f')).seconds>job_update_expiry:
logger.info('{} is out of date, updating from {}'.format(job_id,last_checked))
update_job_summary(redis,job_id)
raw_app_ids = job_summary.get('application-ids')
if raw_app_ids is not None:
job_summary['application-ids'] = json.loads(raw_app_ids)
removal = []
for name in job_summary:
if name[0:10]=='action-job':
removal.append(name)
for name in removal:
job_summary.pop(name)
return job_summary
TRACKING_KEY = 'dataplatform.service.tracking'
def tracking(redis,oozie_id):
redis.hset(TRACKING_KEY,oozie_id,datetime.now().isoformat())
redis.expire(TRACKING_KEY,REDIS_EXPIRES)
def stop_tracking(redis,oozie_id):
redis.hdel(TRACKING_KEY,oozie_id)
redis.expire(TRACKING_KEY,REDIS_EXPIRES)
def update_job_summary(redis,job_id):
client = get_oozie_client(current_app,username=request.authorization.username if request.authorization is not None else None,password=request.authorization.password if request.authorization is not None else None)
info = client.status(job_id)
status = info.get('status')
app_ids = application_ids(info)
set_property(redis,job_id,'status',status)
set_property(redis,job_id,'last-checked',datetime.now().isoformat())
set_property(redis,job_id,'application-ids',json.dumps(app_ids))
return
|
damsonn/django-docker-compose
|
proj/settings/__init__.py
|
Python
|
mit
| 211
| 0
|
""" Settings for proj """
from .base import *
try:
from .local import *
except ImportError as exc:
exc.args = tuple(
['%s (did you rename settings/local-dist.py?)' % exc.args[0]])
raise exc
| |
gaberger/pybvc
|
samples/sampleopenflow/demos/demo8.py
|
Python
|
bsd-3-clause
| 7,431
| 0.004979
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_TLSP,
IP_DSCP_CS3)
def of_demo_8():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 8 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# IP Protocol Number
# IP DSCP
# Input Port
# NOTE: Ethernet type MUST be 2048 (0x800) -> IPv4 protocol
eth_type = ETH_TYPE_IPv4
eth_src = "00:1c:01:00:23:aa"
eth_dst = "00:02:02:60:ff:fe"
ipv4_src = "10.0.245.1/24"
ipv4_dst = "192.168.1.123/16"
ip_proto = IP_PROTO_TLSP
ip_dscp = IP_DSCP_CS3 # 'Class Selector' = 'Flash'
input_port = 13
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'"
% (ctrlIpAddr, nodeName))
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" Ethernet Source Address (%s)\n"
" Ethernet Destination Address (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" IP Protocol Number (%s)\n"
" IP DSCP (%s)\n"
" Input Port (%s)"
% (hex(eth_type), eth_src,
eth_dst, ipv4_src, ipv4_dst,
ip_proto, ip_dscp,
input_port))
print (" Action: Output (CONTROLLER)")
time.sleep(rundelay)
flow_entry = FlowEntry()
table_id = 0
flow_entry.set_flow_table_id(table_id)
flow_id = 15
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_priority(flow_priority=1006)
flow_entry.set_flow_cookie(cookie=100)
flow_entry.set_flow_cookie_mask(cookie_mask=255)
# --- Instruction: 'Apply-actions'
# Action: 'Output' to CONTROLLER
instruction = Instruction(instruction_order=0)
action = OutputAction(order=0, port="CONTROLLER", max_len=60)
instruction.add_apply_action(action)
flow_entry.add_instruction(instruction)
# --- Match Fields: Ethernet Type
# Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# IP Protocol Number
# IP DSCP
# Input Port
match = Match()
match.set_eth_type(eth_type)
match.set_eth_src(eth_src)
match.set_eth_dst(eth_dst)
match.set_ipv4_src(ipv4_src)
match.set_ipv4_dst(ipv4_dst)
match.set_ip_proto(ip_proto)
match.set_ip_dscp(ip_dscp)
match.set_in_port(input_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
print json.dumps(flow, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node"
% (flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_8()
|
stonelake/pyoptimization
|
pyopt/discrete/randomsearch.py
|
Python
|
apache-2.0
| 6,894
| 0.002756
|
__author__ = "Alex Baranov"
from inequalities import chernikov as c
from permutations import *
import numpy as np
def find_minimum(goal_func,
constraints_system,
combinatorial_set,
add_constraints=True,
series_count=3,
experiments_per_series=5,
quiet=True):
"""
Gets the minimum of the linear function with linear constraints
on the combinatorial set
Returns:
- (point and function value)
"""
# define function to calculate goal function value
f = lambda x: sum(i * j for i, j in zip(goal_func, x))
# copying the constraints system to modify it then
copied_system = list(constraints_system)
if add_constraints:
if not quiet:
print "Addding additional constraints to the constraints system"
copied_system = add_additional_constraints(copied_system, combinatorial_set.generation_elements)
if not quiet:
print "Constraints system is: \n", np.array(copied_system)
solver = c.InequalitiesSolver()
best_func_value = None
best_point = None
last_system_index = len(copied_system)
const_was_inserted = False
# starting series of experiments
for series_number in xrange(series_count):
if not quiet:
print "---> Starting series #", series_number
# store the valid points in the dict
experiment_valid_points = dict()
for experiment_number in xrange(experiments_per_series):
if not quiet:
print "Starting experiment #", experiment_number
# getting some solution of the system
s = solver.get_solution(copied_system)
if not quiet:
print "Generated new point within the search area: ", s
# get the nearest point of the set
nearest_set_point = combinatorial_set.find_nearest_set_point(s)
if not quiet:
print "The nearest combinatorial set point is: ", nearest_set_point
# check whether the set point is valid
if is_solution(copied_system, nearest_set_point):
func_value = f(nearest_set_point)
experiment_valid_points[func_value] = nearest_set_point
if not quiet:
print "Found point is valid. Goal function value in this point is: ", func_value
else:
if not quiet:
print "The nearest set point is not valid"
# save this point
if len(experiment_valid_points):
current_min = min(experiment_valid_points)
if best_func_value is None or current_min < best_func_value:
best_func_value = min(experiment_valid_points)
best_point = experiment_valid_points[best_func_value]
if not quiet:
print "Current best point {0} with function value = {1}".format(best_point, best_func_value)
# add the additional constraint to shrink the search area.
if not quiet:
print "Added additional constraints: {0} <= {1}".format(goal_func, best_func_value)
if not const_was_inserted:
copied_system.append(goal_func + (-1 * best_func_value,))
else:
copied_system.insert(last_system_index, goal_func + (-1 * best_func_value,))
return best_point, best_func_value
def add_additional_constraints(system, coefs, add_less_then_zero=False, add_simplex=True):
"""
Adds additional constraints to the constraints system.
First adds the constraints of type: -x_i <= 0
If add_simplex parameter is True then also add constraints to bound all the elements of the
combinatorial set with the simplex.
Arguments:
system -- the matrix that represents the constraint system
coefs -- the array of coefficients that will be used to add new constraints
add_less_then_zero -- specifies whether the constraints of type: -x_i <= 0 should be added (default - False)
add_simplex -- specifies whether the simplex constraints should be added (default - True)
"""
constraints_system = np.array(system)
constraint_coefs = np.array(coefs)
var_count = constraints_system.shape[1]
if add_less_then_zero:
# add conditional constraints that all variables are less or equal than zero
left_part = -1 * np.eye(var_count - 1)
right_part = np.zeros([var_count - 1, 1])
positive_variables_consts = np.hstack((left_part, right_part))
constraints_system = np.vstack((constraints_system, positive_variables_consts))
if add_simplex:
left_part = np.eye(var_count - 1)
min = constraint_coefs.min()
sum = constraint_coefs.sum()
right_part1 = min * np.ones([var_count - 1, 1])
# first add constraints of type: x_i >= min
type1 = np.hstack((-1 * left_part, right_part1))
# first add constraints of type: sum(x_i) <= sum
type2 = np.hstack((np.ones(var_count - 1), -1*sum))
constraints_system = np.vstack((constraints_system, type1))
constraints_system = np.vstack((constraints_system, type2))
return constraints_system.tolist()
def find_minimum_with_exhaustive_search(goal_func,
system,
combinatorial_set):
"""
Gets the solution by iterating all the elements in the set
Returns pair of combinatorial element and minimal function value
"""
# calculate goal functions for all the elements
valid_values = map(lambda e: (e, sum(i * j for i, j in zip(goal_func, e))) if is_solution(system, e) else None, combinatorial_set)
# remove all the None
valid_values = filter(lambda x: x != None, valid_values)
# get minimal value
return min(valid_values, key=lambda x: x[1])
def is_solution(system, point):
"""
Checks whether the point is the solution for a given constraints system.
"""
a = np.array(system)
# get the left part
left = a[:, :-1] * point
left = sum(left.T)
# get the right part
right = (-1) * a[:, -1]
return np.all(left <= right)
if __name__ == '__main__':
s = [[1, -2, 3, 0], [-4, 1, 1, 2]]
func = (-1, 1, 2)
pset = PermutationSet((1, 2, 3))
point, func_value = find_minimum_with_exhaustive_search(func, s, pset)
print "Point and min fuc value found using exhaustive search: ", (point, func_value)
point2, func_value2 = find_minimum(func, s, pset, quiet=False)
print "Point and min fuc value found using random search: ", (point2, func_value2)
|
ffledgling/Senbonzakura
|
senbonzakura/frontend/api.py
|
Python
|
mpl-2.0
| 10,807
| 0.006107
|
#from flask import Flask, request, Response
import argparse
import ConfigParser
import flask
import logging
import os
import shutil
import sys
import tempfile
import time
import senbonzakura.backend.core as core
import senbonzakura.cache.cache as cache
import senbonzakura.database.db as db
import senbonzakura.backend.tasks as tasks
import senbonzakura.utils.oddity as oddity
DB_URI = None
CACHE_URI = None
__here__ = os.path.dirname(os.path.abspath(__file__))
app = flask.Flask(__name__)
# Werkzeug logging
#log = logging.getLogger('werkzeug')
#log.setLevel(logging.INFO)
@app.route('/')
def index():
return "Welcome to Senbonzakura, the Partial MAR on demand Web-Service."\
"Please see https://wiki.mozilla.org/User:Ffledgling/Senbonzakura"
@app.route('/partial', methods=['POST'])
def trigger_partial(version='latest'):
"""
Needs params: mar_from, mar_to, mar_from_hash, mar_to_hash
"""
if version in app.config['unsupported_versions']:
return flask.Response("{'result': 'Version %s of API is no longer supported'}" % version, status=410)
# Flask's URL routing should prevent this from happening.
if version not in app.config['supported_versions']:
return flask.Response("{'result': 'Version %s of API does not exist'}" % version, status=400)
else:
# Some version specific code here?
# We have nothing at the moment so leaving it as a stub
pass
cacheo = cache.Cache(app.config['CACHE_URI'])
dbo = db.Database(app.config['DB_URI'])
logging.debug('Parameters passed in : %s' % flask.request.form)
required_params = ('mar_from', 'mar_to', 'mar_from_hash', 'mar_to_hash', 'channel_id', 'product_version')
# Check we have all params
if not all(param in flask.request.form.keys() for param in required_params):
logging.info('Parameters could not be validated')
flask.abort(400)
# TODO: Validate params and values in form Ideally
# These params are being passed to shell directly, we should probably sanitize them at some point.
mar_from = flask.request.form['mar_from']
mar_to = flask.request.form['mar_to']
mar_from_hash = flask.request.form['mar_from_hash']
mar_to_hash = flask.request.form['mar_to_hash']
channel_id = flask.request.form['channel_id']
product_version = flask.request.form['product_version']
# TODO: Verify hashes and URLs are valid before returning the URL with a 201
# or return the concat anyway and just return a 202?
# Try inserting into DB, if it fails, check error
identifier = mar_from_hash+'-'+mar_to_hash
url = flask.url_for('get_partial', identifier=identifier)
if dbo.lookup(identifier=identifier):
logging.info('Partial has already been triggered')
resp = flask.Response(
"{'result': '%s'}" % url,
status=201,
mimetype='application/json'
)
return resp
try:
# error testing and parameter validation, maybe do this up close to checking
# existence
# If record already exists it makes no difference and the insert
# 'proceeds' as expected. (It is logged at the DB level)
dbo.insert(identifier=identifier, status=db.status_code['IN_PROGRESS'], start_timestamp=time.time())
#except oddity.DBError, e:
except: # Any sort of error should result in a 500 on the client side and
# nothing more, do we retry in such a situation or do we raise
# warning bells? Ideally no error should reach this place.
# Going with warning bells.
logging.error('Error raised while processing trigger request for URL:',
'%s\n' % url)
resp = flask.Response(
"{'result': 'Error processing request'}" % url,
status=500,
mimetype='application/json'
)
return resp
else:
logging.info('calling generation functions')
# Call generation functions here
resp = flask.Response("{'result' : '%s'}" % url, status=202, mimetype='application/json')
logging.critical('Calling build, should see immediate return after this')
tasks.build_partial_mar.delay(mar_to, mar_to_hash, mar_from,
mar_from_hash, identifier, channel_id, product_version)
logging.critical('Called and moved on')
return resp
# If record exists, just say done
# If other major error, do something else
# TODO: Hook responses up with relengapi -- https://api.pub.build.mozilla.org/docs/development/api_methods/
dbo.close()
return resp
@app.route('/cache/<identifier>', methods=['GET'])
def get_from_cache(identifier):
""" URL to allow direct access to cache """
raise oddity.NotImplementedError()
@app.route('/partial/<identifier>', methods=['GET'])
def get_partial(identifier, version='latest'):
logging.debug('Request received with headers : %s' % flask.request.headers)
logging.debug('Got request with version %s' % version)
if version in app.config['unsupported_versions']:
return flask.Response("{'result': 'Version %s of API is no longer supported'}" % version, status=410)
# Flask's URL routing should prevent this from happening.
if version not in app.config['supported_versions']:
return flask.Response("{'result': 'Version %s of API does not exist'}" % version, status=400)
else:
# Some version specific code here?
# We have nothing at the moment so leaving it as a stub
pass
# Should these be in a try catch?
cacheo = cache.Cache(app.config['CACHE_URI'])
dbo = db.Database(app.config['DB_URI'])
logging.debug('Cache and DB setup done')
# Check DB state corresponding to URL
# if "Completed", return blob and hash
# if "Started", stall by inprogress error code
# if "Invalid", return error code
# if "does not exist", return different error code
# FIXME: This try-except doesn't work anymore since we changed the
# behaviour on lookup failure from raising a DBError to simply returning None
try:
logging.debug('looking up record with identifier %s' % identifier)
partial = dbo.lookup(identifier=identifier)
except oddity.DBError:
logging.warning('Record lookup for identifier %s failed' % identifier)
resp = flask.Response("{'result':'partial does not exist'}", status=404)
else:
logging.debug('Record ID: %s' % identifier)
status = partial.status
if status == db.status_code['COMPLETED']:
logging.info('Record found, status: COMPLETED')
# Lookup DB and return blob
# We'll want to stream the data to the client eventually, right now,
# we can just throw it at the client just like that.
# See -- http://stackoverflow.com/questions/7877282/how-to-send-image-generated-by-pil-to-browser
return cacheo.retrieve(identifier, 'partial')
elif status == db.status_code['ABORTED']:
logging.info('Record found, status: ABORTED')
# Something went wrong, what do we do?
resp = flask.Response("{'result': '%s'}" %
"Something went wrong while generating this partial",
status=204)
elif status == db.status_code['IN_PROGRESS']:
logging.info('Record found, status: IN PROGRESS')
# Stall still status changes
resp = flask.Response("{'result': '%s'}" % "wait", status=202)
elif status == db.status_code['INVALID']:
logging.info('Record found, status: INVALID')
# Not sure what this status code is even for atm.
resp = flask.Response("{'result': '%s'}" % "invalid partial", status=204)
else:
# This should not happen
logging.error('Record found, status: UNKNOWN')
resp = flask.Response("{'result':'%s'}" %
"Status of this partial is unknown",
status=400)
dbo.close()
return resp
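# Illustrative only, not part of the original file: a minimal client-side sketch
# of how the /partial trigger endpoint above could be exercised. Host, port and
# all field values are made-up placeholders; only the field names come from
# required_params in trigger_partial().
#
#     import requests
#     form = {
#         'mar_from': 'https://example.org/old.mar',
#         'mar_to': 'https://example.org/new.mar',
#         'mar_from_hash': 'deadbeef',
#         'mar_to_hash': 'cafebabe',
#         'channel_id': 'release-example',
#         'product_version': '1.0',
#     }
#     resp = requests.post('http://localhost:5000/partial', data=form)
#     # 202 while the partial is being generated, 201 if it was already triggered;
#     # the response body carries the /partial/<identifier> URL to poll.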
|
edisonlz/fruit
|
web_project/base/site-packages/south/db/oracle.py
|
Python
|
apache-2.0
| 12,431
| 0.005712
|
import os.path
import sys
import re
import warnings
import cx_Oracle
from django.db import connection, models
from django.db.backends.util import truncate_name
from django.core.management.color import no_style
from django.db.models.fields import NOT_PROVIDED
from django.db.utils import DatabaseError
# In revision r16016 function get_sequence_name has been transformed into
# method of DatabaseOperations class. To make code backward-compatible we
# need to handle both situations.
try:
from django.db.backends.oracle.base import get_sequence_name\
as original_get_sequence_name
except ImportError:
original_get_sequence_name = None
from south.db import generic
warnings.warn("! WARNING: South's Oracle support is still alpha. "
"Be wary of possible bugs.")
class DatabaseOperations(generic.DatabaseOperations):
"""
Oracle implementation of database operations.
"""
backend_name = 'oracle'
alter_string_set_type = 'ALTER TABLE %(table_name)s MODIFY %(column)s %(type)s %(nullity)s;'
alter_string_set_default = 'ALTER TABLE %(table_name)s MODIFY %(column)s DEFAULT %(default)s;'
add_column_string = 'ALTER TABLE %s ADD %s;'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s;'
add_constraint_string = 'ALTER TABLE %(table_name)s ADD CONSTRAINT %(constraint)s %(clause)s'
allows_combined_alters = False
has_booleans = False
constraints_dict = {
'P': 'PRIMARY KEY',
'U': 'UNIQUE',
'C': 'CHECK',
'R': 'FOREIGN KEY'
}
def get_sequence_name(self, table_name):
if original_get_sequence_name is None:
return self._get_connection().ops._get_sequence_name(table_name)
else:
return original_get_sequence_name(table_name)
#TODO: This will cause very obscure bugs if anyone uses a column name or string value
# that looks like a column definition (with 'CHECK', 'DEFAULT' and/or 'NULL' in it)
# e.g. "CHECK MATE" varchar(10) DEFAULT 'NULL'
def adj_column_sql(self, col):
# Syntax fixes -- Oracle is picky about clause order
col = re.sub('(?P<constr>CHECK \(.*\))(?P<any>.*)(?P<default>DEFAULT \d+)',
lambda mo: '%s %s%s'%(mo.group('default'), mo.group('constr'), mo.group('any')), col) #syntax fix for boolean/integer field only
col = re.sub('(?P<not_null>(NOT )?NULL) (?P<misc>(.* )?)(?P<default>DEFAULT.+)',
lambda mo: '%s %s %s'%(mo.group('default'),mo.group('not_null'),mo.group('misc') or ''), col) #fix order of NULL/NOT NULL and DEFAULT
return col
def check_meta(self, table_name):
return table_name in [ m._meta.db_table for m in models.get_models() ] #caching provided by Django
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
@generic.invalidate_table_constraints
def create_table(self, table_name, fields):
qn = self.quote_name(table_name)
columns = []
autoinc_sql = ''
for field_name, field in fields:
col = self.column_sql(table_name, field_name, field)
if not col:
continue
col = self.adj_column_sql(col)
columns.append(col)
if isinstance(field, models.AutoField):
autoinc_sql = connection.ops.autoinc_sql(table_name, field_name)
sql = 'CREATE TABLE %s (%s);' % (qn, ', '.join([col for col in columns]))
self.execute(sql)
if autoinc_sql:
self.execute(autoinc_sql[0])
self.execute(autoinc_sql[1])
@generic.invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
qn = self.quote_name(table_name)
# Note: PURGE is not valid syntax for Oracle 9i (it was added in 10)
if cascade:
self.execute('DROP TABLE %s CASCADE CONSTRAINTS;' % qn)
else:
self.execute('DROP TABLE %s;' % qn)
# If the table has an AutoField a sequence was created.
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.get_sequence_name(table_name)}
self.execute(sequence_sql)
@generic.invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True):
if self.dry_run:
if self.debug:
print ' - no dry run output for alter_column() due to dynamic DDL, sorry'
return
qn = self.quote_name(table_name)
# hook for the field to do any resolution prior to it's attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
field = self._field_sanity(field)
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
qn_col = self.quote_name(name)
# First, change the type
# This will actually also add any CHECK constraints needed,
# since e.g. 'type' for a BooleanField is 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))'
params = {
'table_name':qn,
'column': qn_col,
'type': self._db_type_for_alter_column(field),
'nullity': 'NOT NULL',
'default': 'NULL'
}
if field.null:
params['nullity'] = 'NULL'
if not field.null and field.has_default():
params['default'] = self._default_value_workaround(field.get_default())
sql_templates = [
(self.alter_string_set_type, params),
(self.alter_string_set_default, params.copy()),
]
# drop CHECK constraints. Make sure this is executed before the ALTER TABLE statements
# generated above, since those statements recreate the constraints we delete here.
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
for sql_template, params in sql_templates:
try:
self.execute(sql_template % params)
except DatabaseError, exc:
description = str(exc)
# Oracle complains if a column is already NULL/NOT NULL
if 'ORA-01442' in description or 'ORA-01451' in description:
# so we just drop NULL/NOT NULL part from target sql and retry
params['nullity'] = ''
sql = sql_template % params
self.execute(sql)
# Oracle also has issues if we try to change a regular column
# to a LOB or vice versa (also REF, object, VARRAY or nested
# table, but these don't come up much in Django apps)
elif 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_column_lob_workaround(table_name, name, field)
else:
raise
def _alter_column_lob_workaround(self, table_name, name, field):
"""
Oracle refuses to change a column type from/to LOB to/from a regular
column. In Django, this shows up when the field is changed from/to
a TextField.
What we need to do instead is:
- Rename the original column
- Add the des
|
DinoV/PTVS
|
Python/Templates/Samples/ProjectTemplates/Python/Samples/PollsDjango/app/views.py
|
Python
|
apache-2.0
| 3,472
| 0.004032
|
"""
Definition of views.
"""
from app.models import Choice, Poll
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template import RequestContext
from django.utils import timezone
from django.views.generic import ListView, DetailView
from os import path
import json
class PollListView(ListView):
"""Renders the home page, with a list of all polls."""
model = Poll
def get_context_data(self, **kwargs):
context = super(PollListView, self).get_context_data(**kwargs)
context['title'] = 'Polls'
context['year'] = datetime.now().year
return context
class PollDetailView(DetailView):
"""Renders the poll details page."""
model = Poll
def get_context_data(self, **kwargs):
context = super(PollDetailView, self).get_context_data(**kwargs)
context['title'] = 'Poll'
context['year'] = datetime.now().year
return context
class PollResultsView(DetailView):
"""Renders the results page."""
model = Poll
def get_context_data(self, **kwargs):
context = super(PollResultsView, self).get_context_data(**kwargs)
context['title'] = 'Results'
context['year'] = datetime.now().year
return context
def contact(request):
"""Renders the contact page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/contact.html',
context_instance = RequestContext(request,
{
'title': 'Contact',
'message': 'Your contact page.',
'year': datetime.now().year,
})
)
def about(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/about.html',
context_instance = RequestContext(request,
{
'title': 'About',
'message': 'Your application description page.',
'year': datetime.now().year,
})
)
def vote(request, poll_id):
"""Handles voting. Validates input and updates the repository."""
poll = get_object_or_404(Poll, pk=poll_id)
try:
selected_choice = poll.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'app/details.html', {
'title': 'Poll',
'year': datetime.now().year,
'poll': poll,
'error_message': "Please make a selection.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('app:results', args=(poll.id,)))
@login_required
def seed(request):
"""Seeds the database with sample
|
polls."""
samples_path = path.join(path.dirname(__file__), 'samples.json')
with open(samples_path, 'r') as samples_file:
samples_polls = json.load(samples_file)
for sample_poll in samples_polls:
poll = Poll()
poll.text = sample_poll['text']
poll.pub_date = timezone.now()
poll.save()
for sample_choice in sample_poll['choices']:
choice = Choice()
choice.poll = poll
choice.text = sample_choice
choice.votes = 0
choice.save()
return HttpResponseRedirect(reverse('app:home'))
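# Illustrative note, not part of the original file: the seed() view above expects
# app/samples.json to be a list of poll objects shaped roughly like
#     [
#         {"text": "What is your favourite colour?", "choices": ["Red", "Blue"]},
#         {"text": "Tabs or spaces?", "choices": ["Tabs", "Spaces"]}
#     ]
# (the questions here are made-up placeholders); each entry becomes one Poll with
# one Choice row per item in "choices".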
|
practo/r5d4
|
r5d4/publisher.py
|
Python
|
mit
| 1,023
| 0
|
from __future__ import absolute_import
from werkzeug.exceptions import ServiceUnavailable, NotFound
from r5d4.flask_redis import get_conf_db
def publish_transaction(channel, tr_type, payload):
conf_db = get_conf_db()
if tr_type not in ["insert", "delete"]:
raise ValueError("Unknown transaction type", tr_type)
subscribed = conf_db.scard("Subscriptions:%s:ActiveAnalytics" % channel)
if subscribed == 0:
raise NotFound(("Channel not found",
"Channel '%(channel)s' is not found or has 0 "
"subscriptions" % locals()))
listened = conf_db.publish(
channel,
'{'
' "tr_type" : "' + tr_ty
|
pe + '", '
' "payload" : ' + payload +
'}'
)
if listened != subscribed:
raise ServiceUnavailable((
"Subscription-Listened mismatch",
"Listened count = %d doesn't match Subscribed count = %d" % (
listened,
subscribed
)
))
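# Illustrative note, not part of the original file: the message published on the
# Redis channel is a JSON object assembled by string concatenation, so a call
# such as publish_transaction('orders', 'insert', '{"id": 1}') delivers roughly
#     { "tr_type" : "insert",  "payload" : {"id": 1} }
# to each subscriber. The channel name and payload here are made-up placeholders,
# and the call appears to assume an active Flask application context, since the
# connection comes from get_conf_db().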
|
ashang/calibre
|
setup/installer/linux/__init__.py
|
Python
|
gpl-3.0
| 792
| 0.006313
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from setup.installer import VMInstaller
from setup import Command
class Linux32(VMInstaller):
description = 'Build 32bit linux binary installer'
INSTALLER_EXT = 'txz'
VM_NAME = 'linux32-build'
FREEZE_COMMAND = 'linux_freeze'
FREEZE_TEMPLATE = 'python -OO setup.py {freeze_command}'
class Linux64(Linux32):
description = 'Build 64bit linux binary installer'
VM_NAME = 'linux64-build'
IS_64_BIT = True
class Linux(Command):
description = 'Build linux binary installers'
sub_commands = ['linux64', 'linux32']
|
mikewrock/phd_backup_full
|
src/wrock/vs060/scripts/moveit_canceler.py
|
Python
|
apache-2.0
| 1,094
| 0.008227
|
#!/usr/bin/env python
import rospy
import os
import roslib
roslib.load_manifest("denso_pendant_publisher")
roslib.load_manifest("actionlib_msgs")
import denso_pendant_publisher.msg
import std_msgs.msg
import actionlib_msgs.msg
rospy.init_node("moveit_canceler")
g_runnable = True
g_prev_status = None
def pendantCB(msg):
global g_runnable, g_prev_status
if g_prev_status:
if (not g_prev_status.button_cancel and msg.button_cancel) or (not g_prev_status.button_stop and msg.button_stop): # canceled or stopped
g_runnable = False
# here we should send cancel
cancel = actionlib_msgs.msg.GoalID()
cancel.id = ""
cancel_pub.publish(cancel)
rospy.loginfo("cancel")
g_prev_status = msg
sub = rospy.Subscriber("/denso_pendant_publisher/status", denso_pendant_publisher.msg.PendantStatus, pendantCB)
cancel_pub = rospy.Publisher("/arm_controller/follow_joint_trajectory/cancel", actionlib_msgs.msg.GoalID);
# cancel_pub = rospy.Publisher("/move_group/cancel", actionlib_msgs.msg.GoalID);
rospy.spin()
|
belokop/indico_bare
|
indico/modules/events/timetable/controllers/legacy.py
|
Python
|
gpl-3.0
| 26,827
| 0.0041
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import Counter
from datetime import timedelta
from operator import attrgetter
import dateutil.parser
from flask import flash, request, jsonify, session
from pytz import utc
from werkzeug.exceptions import BadRequest, NotFound
from indico.core.errors import UserValueError
from indico.modules.events.contributions import Contribution
from indico.modules.events.contributions.controllers.management import _get_field_values
from indico.modules.events.contributions.operations import create_contribution, delete_contribution, update_contribution
from indico.modules.events.sessions.controllers.management.sessions import RHCreateSession, RHSessionREST
from indico.modules.events.sessions.forms import SessionForm
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.operations import delete_session_block, update_session_block, update_session
from indico.modules.events.timetable.controllers import (RHManageTimetableBase, RHManageTimetableEntryBase,
SessionManagementLevel)
from indico.modules.events.timetable.controllers.manage import RHBreakREST
from indico.modules.events.timetable.forms import (BreakEntryForm, ContributionEntryForm, SessionBlockEntryForm,
BaseEntryForm)
from indico.modules.events.timetable.legacy import (serialize_contribution, serialize_entry_update, serialize_session,
TimetableSerializer)
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntryType
from indico.modules.events.timetable.operations import (create_break_entry, create_session_block_entry,
schedule_contribution, fit_session_block_entry,
update_break_entry, update_timetable_entry,
move_timetable_entry, update_timetable_entry_object,
delete_timetable_entry)
from indico.modules.events.timetable.reschedule import Rescheduler, RescheduleMode
from indico.modules.events.timetable.util import (find_next_start_dt, get_session_block_entries,
get_time_changes_notifications,
shift_following_entries)
from indico.modules.events.util import get_random_color, track_time_changes
from indico.util.date_time import iterdays, as_utc
from indico.util.i18n import _
from indico.web.forms.base import FormDefaults
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
class RHLegacyTimetableAddEntryBase(RHManageTimetableBase):
session_management_level = SessionManagementLevel.manage
def _checkParams(self, params):
RHManageTimetableBase._checkParams(self, params)
self.day = dateutil.parser.parse(request.args['day']).date()
self.session_block = None
if 'session_block_id' in request.args:
self.session_block = self.event_new.get_session_block(request.args['session_block_id'])
if not self.session_block:
raise BadRequest
def _get_form_defaults(self, **kwargs):
location_parent = kwargs.pop('location_parent', None)
inherited_location = location_parent.location_data if location_parent else self.event_new.location_data
inherited_location['inheriting'] = True
return FormDefaults(location_data=inherited_location, **kwargs)
def _get_form_params(self):
return {'event': self.event_new,
'session_block': self.session_block,
'day': self.day}
class RHLegacyTimetableAddBreak(RHLegacyTimetableAddEntryBase):
session_management_level = SessionManagementLevel.coordinate
def _get_default_colors(self):
breaks = Break.query.filter(Break.timetable_entry.has(event_new=self.event_new)).all()
common_colors = Counter(b.colors for b in breaks)
most_common = common_colors.most_common(1)
colors = most_common[0][0] if most_common else get_random_color(self.event_new)
return colors
def _process(self):
colors = self._get_default_colors()
defaults = self._get_form_defaults(colors=colors, location_parent=self.session_block)
form = BreakEntryForm(obj=defaults, **self._get_form_params())
if form.validate_on_submit():
with track_time_changes(auto_extend=True, user=session.user) as changes:
entry = create_break_entry(self.event_new, form.data, session_block=self.session_block)
notifications = get_time_changes_notifications(changes, tzinfo=self.event_new.tzinfo, entry=entry)
return jsonify_data(entry=serialize_entry_update(entry), notifications=notifications, flash=False)
return jsonify_form(form, fields=form._display_fields)
class RHLegacyTimetableAddContribution(RHLegacyTimetableAddEntryBase):
session_management_level = SessionManagementLevel.manage
def _process(self):
defaults = self._get_form_defaults(location_parent=self.session_block)
form = ContributionEntryForm(obj=defaults, to_schedule=True, **self._get_form_params())
if form.validate_on_submit():
with track_time_changes(auto_extend=True, user=session.user) as changes:
contrib = create_contribution(self.event_new, form.data, session_block=self.session_block,
extend_parent=True)
entry = contrib.timetable_entry
notifications = get_time_changes_notifications(changes, tzinfo=self.event_new.tzinfo, entry=entry)
return jsonify_data(entries=[serialize_entry_update(entry)], notifications=notifications, flash=False)
self.commit = False
return jsonify_template('events/contributions/forms/contribution.html', form=form, fields=form._display_fields)
class RHLegacyTimetableAddSessionBlock(RHLegacyTimetableAddEntryBase):
session_management_level = SessionManagementLevel.coordinate_with_blocks
def _checkParams(self, params):
RHLegacyTimetableAddEntryBase._checkParams(self, params)
if not self.session:
self.session = self.event_new.get_session(request.args['session_id'])
if not self.session:
raise NotFound
def _process(self):
defaults = self._get_form_defaults(location_parent=self.session)
form = SessionBlockEntryForm(obj=defaults, **self._get_form_params())
if form.validate_on_submit():
with track_time_changes(auto_extend=True, user=session.user) as changes:
entry = create_session_block_entry(self.session, form.data)
notifications = get_time_changes_notifications(changes, tzinfo=self.event_new.tzinfo, entry=entry)
return jsonify_data(entry=serialize_entry_update(entry), notifications=notifications, flash=False)
self.commit = False
return jsonify_form(form, fields=form._display_fields, disabled_until_change=False)
class RHLegacyTimetableDeleteEntry(RHManageTimetableEntryBase):
@property
def session_management_level(self):
if self.entry.type == Tim
|
DarkLotus/OakCore
|
tools/esptool.py
|
Python
|
lgpl-2.1
| 28,767
| 0.011506
|
#!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import serial
import time
import argparse
import os
import subprocess
import tempfile
class ESPROM:
# These are the currently known commands supported by the ROM
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
# Maximum block sizes for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
ESP_FLASH_BLOCK = 0x400
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
# Sflash stub: an assembly routine to read from spi flash and send to host
SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
"\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
"\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
"\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"
def __init__(self, port = 0, baud = ESP_ROM_BAUD):
self._port = serial.Serial(port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
self._port.baudrate = baud
""" Read bytes from the serial port while performing SLIP unescaping """
def read(self, length = 1):
b = ''
while len(b) < length:
c = self._port.read(1)
if c == '\xdb':
c = self._port.read(1)
if c == '\xdc':
b = b + '\xc0'
elif c == '\xdd':
b = b + '\xdb'
else:
raise FatalError('Invalid SLIP escape')
else:
b = b + c
return b
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = '\xc0'+(packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc'))+'\xc0'
self._port.write(buf)
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state = ESP_CHECKSUM_MAGIC):
for b in data:
state ^= ord(b)
return state
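# Illustrative note, not part of the original source: the checksum is just an
# XOR fold seeded with ESP_CHECKSUM_MAGIC (0xEF), so for example
#     ESPROM.checksum('\x12\x34')   # == 0xef ^ 0x12 ^ 0x34 == 0xc9
# for an arbitrary two-byte blob.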
""" Send a request and read the response """
def command(self, op = None, data = None, chk = 0):
if op:
pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
# tries to get a response until that response has the
# same operation as the request or a retries limit has
# exceeded. This is needed for some esp8266s that
# reply with more sync responses than expected.
retries = 100
while retries > 0:
(op_ret, val, body) = self.receive_response()
if op is None or op_ret == op:
return val, body # valid response received
retries = retries - 1
raise FatalError("Response doesn't match request")
""" Receive a response to a command """
def receive_response(self):
# Read header of response and parse
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet')
hdr = self.read(8)
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
if resp != 0x01:
raise FatalError('Invalid response 0x%02x" to command' % resp)
# The variable-length body
body = self.read(len_ret)
# Terminating byte
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet')
return op_ret, val, body
""" Perform a connection test """
def sync(self):
self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20'+32*'\x55')
for i in xrange(7):
self.command()
""" Try connecting repeatedly until successful, or giving up """
def connect(self):
print 'Connecting...'
for _ in xrange(4):
# issue reset-to-bootloader:
# RTS = either CH_PD or nRESET (both active low = chip in reset)
# DTR = GPIO0 (active low = boot to flasher)
self._port.setDTR(False)
self._port.setRTS(True)
time.sleep(0.05)
self._port.setDTR(True)
self._port.setRTS(False)
time.sleep(0.05)
self._port.setDTR(False)
self._port.timeout = 0.3 # worst-case latency timer should be 255ms (probably <20ms)
for _ in xrange(4):
try:
self._port.flushInput()
self._port.flushOutput()
self.sync()
self._port.timeout = 5
return
except:
time.sleep(0.05)
raise FatalError('Failed to connect to ESP8266')
""" Read memory address in target """
def read_reg(self, addr):
res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
if res[1] != "\0\0":
raise FatalError('Failed to read target memory')
return res[0]
""" Write to memory address in target """
def write_reg(self, addr, value, mask, delay_us = 0):
if self.command(ESPROM.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
raise FatalError('Failed to write target memory')
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.command(ESPROM.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
raise FatalError('Failed to enter RAM download mode')
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
if self.command(ESPROM.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0)+data, ESPROM.checksum(data))[1] != "\0\0":
raise FatalError('Failed to write to target RAM')
""" Leave download mode and run the application """
def mem_finish(self, entrypoint = 0):
if self.command(ESPROM.ESP_MEM_END,
struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0":
raise FatalError('Failed to leave RAM download mode')
""" Start downloading to Flash (performs an erase) """
def flash_begin(self, size, offset):
old_tmo = self._port.timeout
num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK
sectors_per_block = 16
sector_size = 4096
num_sectors = (size + sector_size - 1) / sector_size
start_sector = offset / sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_secto
|
FrankSalad/django-memcache-status
|
memcache_status/tests/__init__.py
|
Python
|
bsd-3-clause
| 24
| 0.041667
|
from test_admin import *
|
joelchelliah/diy-lisp
|
tests/test_provided_code.py
|
Python
|
bsd-3-clause
| 2,580
| 0.001163
|
# -*- coding: utf-8 -*-
from nose.tools import assert_equals, assert_raises_regexp, assert_raises
from diylang.parser import unparse, find_matching_paren
from diylang.types import DiyLangError
"""
This module contains a few tests for the code provided for part 1.
All tests here should already pass, and should be of no concern to
you as a workshop attendee.
"""
## Tests for find_matching_paren function in parser.py
def test_find_matching_paren():
source = "(foo (bar) '(this ((is)) quoted))"
assert_equals(32, find_matching_paren(source, 0))
assert_equals(9, find_matching_paren(source, 5))
def test_find_matching_empty_parens():
assert_equals(1, find_matching_paren("()", 0))
def test_find_matching_paren_throws_exception_on_bad_initial_position():
"""If asked to find closing paren from an index where there is no opening
paren, the function should raise an error"""
with assert_raises(AssertionError):
find_matching_paren("string without parens", 4)
def test_find_matching_paren_throws_exception_on_no_closing_paren():
"""The function should raise error when there is no matching paren to be found"""
with assert_raises_regexp(DiyLangError, "Incomplete expression"):
find_matching_paren("string (without closing paren", 7)
## Tests for unparse in parser.py
def test_unparse_atoms():
assert_equals("123", unparse(123))
assert_equals("#t", unparse(True))
assert_equals("#f", unparse(False))
assert_equals("foo", unparse("foo"))
def test_unparse_list():
assert_equals("((foo bar) baz)", unparse([["foo", "bar"], "baz"]))
def test_unparse_quotes():
assert_equals("''(foo 'bar '(1 2))", unparse(
["quote", ["quote", ["foo", ["quote", "bar"], ["quote", [1, 2]]]]]))
def test_unparse_bool():
assert_equals("#t", unparse(True))
assert_equals("#f", unparse(False))
def test_unparse_int():
assert_equals("1", unparse(1))
assert_equals("1337", unparse(1337))
assert_equals("-42", unparse(-42))
def test_unparse_symbol():
assert_equals("+", unparse("+"))
assert_equals("foo", unparse("foo"))
assert_equals("lambda", unparse("lambda"))
def test_unparse_another_list():
assert_equals("(1 2 3)", unparse([1, 2, 3]))
assert_equals("(if #t 42 #f)",
unparse(["if", True, 42, False]))
def test_unparse_other_quotes():
assert_equals("'foo", unparse(["quote", "foo"]))
assert_equals("'(1 2 3)",
unparse(["quote", [1, 2, 3]]))
def test_unparse_empty_list():
assert_equals("()", unparse([]))
|
arhik/nupic
|
examples/opf/experiments/spatial_classification/scalar_1/description.py
|
Python
|
agpl-3.0
| 2,144
| 0.004664
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/scalar_1.csv'),
'errorMetric': 'aae',
'modelParams': {
'sensorParams': {
'verbosity': 0,
'encoders': {
'field1': {
'clipInput': True,
'fieldname': u'field1',
'maxval': 5.0,
'minval': 0.0,
'n': 600,
'name': u'field1',
'type': 'ScalarEncoder',
'w': 21
},
'classification': {
'classifierOnly': True,
'clipInput': True,
'fieldname': u'classification',
'maxval': 50.0,
'minval': 0.0,
'n': 600,
'name': u'classification',
'type': 'ScalarEncoder',
'w': 21
},
},
},
'clParams': {
'verbosity': 0,
},
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
|
elielprado/ESOF
|
Programa em Python/Esof Python/Login.py
|
Python
|
gpl-3.0
| 3,192
| 0.001255
|
# -*- coding: utf-8 -*-
from DataBase import *
from Professor import *
from Aluno import *
from Cadastro import *
class Login:
def logInAluno(self, username, senha):
connect.execute('SELECT * FROM alunos WHERE nome="%s" AND senha="%s"' % (username, senha))
row = connect.fetchone()
if row is not None:
print('\n *** Seja bem vindo ' + username + ' ***')
global logged
logged = 1
aNome = row[0]
aEscola = row[2]
pNome = row[3]
aluno = Aluno(aNome, senha, aEscola, pNome)
while True:
print('\nO que deseja fazer?\n')
print('1- Ver aulas')
print('2- Fazer exercicios')
print('3- Acompanhar desempenho')
print('4- Sair (logout)')
opt = int(raw_input('Digite o numero da opção '))
if opt == 1:
aluno.fazerAula()
elif opt == 2:
aluno.fazerExercicios()
elif opt == 3:
aluno.acompanharDesempenho()
elif opt == 4:
break
else:
print('Falha ao logar. User ou senha incorretos!')
global logged
logged = 0
def logInProfessor(self, email, senha):
connect.execute('SELECT * FROM professores WHERE email="%s" AND senha="%s"' % (email, senha))
row = connect.fetchone()
if row is not None:
global logged
logged = 1
pName = row[0]
pEscola = row[2]
prof = Professor(pName, senha, pEscola, email)
while True:
print('\n')
print(pEscola + ' - Seja bem vindo, ' + pName + '!\n')
print('O que deseja fazer? ')
print('\n')
print('1- Criar aula')
print('2- Criar exercicios')
print('3- Acompanhar desempenho')
print('4- Cadastrar aluno')
print('5- Cadastrar outro professor')
print('6- Sair (logout).')
opt = int(raw_input('Informe o numero da opção '))
if opt == 1:
prof.criarAula()
elif opt == 2:
prof.criarExercicios()
elif opt == 3:
prof.acompanharDesempenho()
elif opt == 4:
cad = Cadastro()
username = raw_input('User: ')
senha = raw_input('Senha: ')
cad.cadastrarAluno(username, senha, pEscola, pName)
elif opt == 5:
print('\nCadastrar professor:\n')
username = raw_input('nome: ')
senha = raw_input('senha: ')
email = raw_input('email: ')
escola = raw_input('escola: ')
cad = Cadastro()
cad.cadastrarProf(username, senha, escola, email)
else:
break
else:
print('Falha ao logar. User ou senha incorretos!')
|
Snuggert/moda
|
project/app/__init__.py
|
Python
|
mit
| 1,446
| 0.000692
|
from flask import Flask, jsonify, request
from btree import Tree
from asteval_wrapper import Script
# Startup stuff
app = Flask(__name__)
app.config.from_object('config')
# Jinja initialization to use PyJade
app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')
# Global jinja functions
app.jinja_env.globals.update(str=str)
app.jinja_env.globals.update(enumerate=enumerate)
app.jinja_env.globals.update(len=len)
app.jinja_env.globals.update(int=int)
app.jinja_env.globals.update(getattr=getattr)
app.jinja_env.globals.update(hasattr=hasattr)
app.jinja_env.globals.update(isinstance=isinstance)
app.jinja_env.globals.update(type=type)
app.jinja_env.globals.update(dict=dict)
app.jinja_env.globals.update(list=list)
app.jinja_env.globals.update(tuple=tuple)
app.jinja_env.globals.update(zip=zip)
# Import routes
from app import single, multiple
app.register_blueprint(single.bp)
app.register_blueprint(multiple.bp)
@app.route('/compact/', methods=['GET'])
def compact():
Tree.from_file().compact()
return jsonify(success='compacted')
@app.route('/map/', methods=['POST'])
def map():
script = Script()
tree = Tree.from_file()
temp_tree = Tree(filename='map.db')
temp_tree.compact()
script.add_string(request.get_data())
data = []
for k, v in tree:
temp_tree[k] = script.invoke('mapper', k, v)
data = script.invoke('reducer', temp_tree.__iter__())
return jsonify(result=data)
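# Illustrative note, not part of the original file: the /map/ endpoint above
# expects the request body to be a script defining mapper(key, value) and
# reducer(items), evaluated through the asteval wrapper; iteration over the
# temporary tree is assumed to yield (key, value) pairs as in the loop above.
# A hypothetical client call (URL and script are placeholders) could be:
#
#     import requests
#     script = """
#     def mapper(key, value):
#         return len(value)
#     def reducer(items):
#         return sum(v for k, v in items)
#     """
#     print(requests.post('http://localhost:5000/map/', data=script).json())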
|
lispc/Paddle
|
python/paddle/v2/framework/tests/test_scale_and_identity_op.py
|
Python
|
apache-2.0
| 1,273
| 0
|
import unittest
from op_test_util import OpTestMeta
from gradient_checker import GradientChecker, create_op
import numpy as np
from paddle.v2.framework.op import Operator
class IdentityTest(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self):
self.type = "identity"
self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
self.outputs = {'Out': self.inputs['X']}
class IdentityGradOpTest(GradientChecker):
def test_normal(self):
op = create_op("identity")
inputs = {"X": np.random.random((10, 10)).astype("float32")}
self.check_grad(op, inputs, set("X"), "Out")
class ScaleTest(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self):
self.type = "scale"
self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
self.attrs = {'scale': -2.3}
self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}
class ScaleGradTest(GradientChecker):
def test_normal(self):
op = Operator("scale", X="X", Out="Out", scale=3.2)
self.check_grad(op,
{"X": np.random.random((10, 10)).astype("float32")},
set("X"), "Out")
if __name__ == '__main__':
unittest.main()
|
lhfei/spark-in-action
|
spark-3.x/src/main/python/mllib/pca_rowmatrix_example.py
|
Python
|
apache-2.0
| 1,712
| 0.000584
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
# $example on$
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonPCAOnRowMatrixExample")
# $example on$
rows = sc.parallelize([
Vectors.sparse(5, {1: 1.0, 3: 7.0}),
Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
])
mat = RowMatrix(rows)
# Compute the top 4 principal components.
# Principal components are stored in a local dense matrix.
pc = mat.computePrincipalComponents(4)
# Project the rows to the linear space spanned by the top 4 principal components.
projected = mat.multiply(pc)
# $example off$
collected = projected.rows.collect()
print("Projected Row Matrix of principal component:")
for vector in collected:
print(vector)
sc.stop()
|
Enyruas/Kitux
|
setup.py
|
Python
|
apache-2.0
| 2,354
| 0.012319
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='cpdir',
version='0.1',
packages=['sellrobots'],
include_package_data=True,
license='BSD License', # example license
description='A simple Django app to conduct Web-based polls.',
long_description=README,
url='http://www.example.com/',
author='Your Name',
author_email='yourname@example.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
Dylan-halls/Network-Exploitation-Toolkit
|
Unpacking/DNS.py
|
Python
|
mit
| 1,603
| 0.027449
|
from scapy.all import *
from termcolor import colored
def pkthandler(pkt):
try:
ip = pkt[IP]
except IndexError:
pass
try:
src = ip.src
dst = ip.dst
except UnboundLocalError:
pass
if pkt.haslayer(DNS):
dns = pkt[DNS]
query = dns[DNSQR]
qtype = dnsqtypes.get(query.qtype)
print("--------------------------------------------------------\n\n")
print(" .:{}:. ".format(colored('DNS','red')))
print(" ")
print(" \033[1;36mSource IP:\033[00m {} \033[1;36mDestination IP:\033[00m {}".format(src, dst))
print(" \033[1;36mDomain: \033[00m {}".format(query.qname))
print(" \033[1;36mQuery Type \033[00m {}".format(qtype))
print(" \033[1;36mId:\033[00m {}".format(dns.id))
print(" \033[1;36mOpcode: \033[00m {}".format(dns.opcode))
print(" \033[1;36mQuery Code: \033[00m {}".format(dns.qr))
print(" \033[1;36mRcode \033[00m {}".format(dns.rcode))
print(" \033[1;36mQuestion Count: \033[00m {}".format(dns.qdcount))
print(" \033[1;36mAnswer Record Count:\033[00m {}".format(dns.ancount))
print(" \033[1;36mAuthority Record Count:\033[00m {}".format(dns.nscount))
print(" \033[1;36mAdditional Record Count:\033[00m {}".format(dns.arcount))
rawLoad = pkt.getlayer(Raw)
if rawLoad == None: pass
else:
print(" \033[1;36mRaw:\n\n\033[00m {}".format(colored(rawLoad, 'green')))
pkt = sniff(iface='wlan0' ,prn=pkthandler)
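# Illustrative note, not part of the original file: sniffing every frame and
# filtering in Python is wasteful; scapy's sniff() also accepts a BPF filter,
# so the capture above could be narrowed to DNS traffic with something like
#     sniff(iface='wlan0', filter='udp port 53', prn=pkthandler)
# (the interface name is whatever your system uses).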
|
evensonbryan/yocto-autobuilder
|
lib/python2.7/site-packages/sqlalchemy_migrate-0.6-py2.6.egg/migrate/versioning/shell.py
|
Python
|
gpl-2.0
| 6,390
| 0.001721
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The migrate command-line tool."""
import sys
import inspect
import logging
from optparse import OptionParser, BadOptionError
from migrate.versioning import api, exceptions
from migrate.versioning.config import *
from migrate.versioning.util import asbool
alias = dict(
s=api.script,
vc=api.version_control,
dbv=api.db_version,
v=api.version,
)
def alias_setup():
global alias
for key, val in alias.iteritems():
setattr(api, key, val)
alias_setup()
class PassiveOptionParser(OptionParser):
def _process_args(self, largs, rargs, values):
"""little hack to support all --some_option=value parameters"""
while rargs:
arg = rargs[0]
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# if parser does not know about the option
# pass it along (make it anonymous)
try:
opt = arg.split('=', 1)[0]
self._match_long_opt(opt)
except BadOptionError:
largs.append(arg)
del rargs[0]
else:
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
def main(argv=None, **kwargs):
"""Shell interface to :mod:`migrate.versioning.api`.
    kwargs are default options that can be overridden by passing
--
|
some_option as command line option
:param disable_logging: Let migrate configure logging
:type disable_logging: bool
"""
argv = argv or list(sys.argv[1:])
commands = list(api.__all__)
commands.sort()
usage = """%%prog COMMAND ...
Available commands:
%s
Enter "%%prog help COMMAND" for information on a particular command.
"""
|
% '\n\t'.join(["%s - %s" % (command.ljust(28), api.command_desc.get(command)) for command in commands])
parser = PassiveOptionParser(usage=usage)
parser.add_option("-d", "--debug",
action="store_true",
dest="debug",
default=False,
help="Shortcut to turn on DEBUG mode for logging")
parser.add_option("-q", "--disable_logging",
action="store_true",
dest="disable_logging",
default=False,
help="Use this option to disable logging configuration")
help_commands = ['help', '-h', '--help']
HELP = False
try:
command = argv.pop(0)
if command in help_commands:
HELP = True
command = argv.pop(0)
except IndexError:
parser.print_help()
return
command_func = getattr(api, command, None)
if command_func is None or command.startswith('_'):
parser.error("Invalid command %s" % command)
parser.set_usage(inspect.getdoc(command_func))
f_args, f_varargs, f_kwargs, f_defaults = inspect.getargspec(command_func)
for arg in f_args:
parser.add_option(
"--%s" % arg,
dest=arg,
action='store',
type="string")
# display help of the current command
if HELP:
parser.print_help()
return
options, args = parser.parse_args(argv)
# override kwargs with anonymous parameters
override_kwargs = dict()
for arg in list(args):
if arg.startswith('--'):
args.remove(arg)
if '=' in arg:
opt, value = arg[2:].split('=', 1)
else:
opt = arg[2:]
value = True
override_kwargs[opt] = value
# override kwargs with options if user is overwriting
for key, value in options.__dict__.iteritems():
if value is not None:
override_kwargs[key] = value
# arguments that function accepts without passed kwargs
f_required = list(f_args)
candidates = dict(kwargs)
candidates.update(override_kwargs)
for key, value in candidates.iteritems():
if key in f_args:
f_required.remove(key)
# map function arguments to parsed arguments
for arg in args:
try:
kw = f_required.pop(0)
except IndexError:
parser.error("Too many arguments for command %s: %s" % (command,
arg))
kwargs[kw] = arg
# apply overrides
kwargs.update(override_kwargs)
# configure options
for key, value in options.__dict__.iteritems():
kwargs.setdefault(key, value)
# configure logging
if not asbool(kwargs.pop('disable_logging', False)):
# filter to log =< INFO into stdout and rest to stderr
class SingleLevelFilter(logging.Filter):
def __init__(self, min=None, max=None):
self.min = min or 0
self.max = max or 100
def filter(self, record):
return self.min <= record.levelno <= self.max
logger = logging.getLogger()
h1 = logging.StreamHandler(sys.stdout)
f1 = SingleLevelFilter(max=logging.INFO)
h1.addFilter(f1)
h2 = logging.StreamHandler(sys.stderr)
f2 = SingleLevelFilter(min=logging.WARN)
h2.addFilter(f2)
logger.addHandler(h1)
logger.addHandler(h2)
if options.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
log = logging.getLogger(__name__)
# check if all args are given
try:
num_defaults = len(f_defaults)
except TypeError:
num_defaults = 0
f_args_default = f_args[len(f_args) - num_defaults:]
required = list(set(f_required) - set(f_args_default))
if required:
parser.error("Not enough arguments for command %s: %s not specified" \
% (command, ', '.join(required)))
# handle command
try:
ret = command_func(**kwargs)
if ret is not None:
log.info(ret)
except (exceptions.UsageError, exceptions.KnownError), e:
parser.error(e.args[0])
if __name__ == "__main__":
main()
|
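A hedged sketch of driving this entry point programmatically rather than from the command line; the database URL and repository path are placeholders, and any unknown --option=value flag would be passed through to the API call by the PassiveOptionParser shown above.

```python
from migrate.versioning.shell import main

# Equivalent to: migrate db_version --url=sqlite:///app.db --repository=./repo
main(["db_version", "--url=sqlite:///app.db", "--repository=./repo"])
```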
jrg365/gpytorch
|
gpytorch/functions/rbf_covariance.py
|
Python
|
mit
| 1,186
| 0.004216
|
import torch
class RBFCovariance(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, lengthscale, sq_dist_func):
if any(ctx.needs_input_grad[:2]):
raise RuntimeError("RBFCovariance cannot compute gradients with " "respect to x1 and x2")
if lengthscale.size(-1) > 1:
raise ValueError("RBFCovariance cannot handle multiple lengthscales")
needs_grad = any(ctx.needs_input_grad)
x1_ = x1.div(lengthscale)
x2_ = x2.div(lengthscale)
unitless_sq_dist = sq_dist_func(x1_, x2_)
# clone because inplace operations will mess with what's saved for backward
unitless_sq_dist_ = unitless_sq_dist.clone() if needs_gr
|
ad else unitless_sq_dist
covar_mat = unitless_sq_dist_.div_(-2.0).exp_()
if needs_grad:
d_output_d_input = unitless_sq_dist.mul_(covar_mat).div_(lengthscale)
ctx.save_for_backward(d_output_d_input)
return covar_mat
@staticmethod
def backward(ctx, grad_output):
d_output_d_input = ctx.saved_tensors[0]
lengthscale_grad = grad_output * d_output_d_input
return None, None, lengths
|
cale_grad, None
|
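A hedged check of the forward pass above against the closed-form RBF kernel k(x1, x2) = exp(-||x1 - x2||^2 / (2 * lengthscale^2)); the sq_dist helper here assumes the sq_dist_func argument is meant to return pairwise squared Euclidean distances.

```python
import torch
from gpytorch.functions.rbf_covariance import RBFCovariance

def sq_dist(a, b):
    # Pairwise squared Euclidean distances (assumed contract of sq_dist_func).
    return torch.cdist(a, b).pow(2)

x1 = torch.randn(5, 3)
x2 = torch.randn(4, 3)
lengthscale = torch.tensor([0.7])

k_custom = RBFCovariance.apply(x1, x2, lengthscale, sq_dist)
k_direct = torch.exp(-sq_dist(x1, x2) / (2 * lengthscale.pow(2)))
print(torch.allclose(k_custom, k_direct, atol=1e-5))  # expected: True
```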
dflemin3/ICgen
|
backup03/calc_velocity.py
|
Python
|
mit
| 6,220
| 0.01254
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 9 15:39:28 2014
@author: ibackus
"""
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
import isaac
import subprocess
import os
import glob
import time
def v_xy(f, param, changbin=None, nr=50, min_per_bin=100):
"""
Attempts to calculate the circular velocities for particles in a thin
(not flat) keplerian disk. Requires ChaNGa
**ARGUMENTS**
f : tipsy snapshot
For a gaseous disk
param : dict
a dictionary containing params for changa. (see isaac.configparser)
changbin : str (OPTIONAL)
If set, should be the full path to the ChaNGa executable. If None,
an attempt to find ChaNGa is made
nr : int (optional)
number of radial bins to use when averaging over accelerations
min_per_bin : int (optional)
The minimum number of particles to be in each bin. If there are too
few particles in a bin, it is merged with an adjacent bin. Thus,
actual number of radial bins may be less than nr.
**RETURNS**
vel : SimArray
An N by 3 SimArray of gas particle velocities.
"""
if changbin is None:
# Try to find the ChaNGa binary full path
changbin = os.popen('which ChaNGa').read().strip()
# Load stuff from the snapshot
x = f.g['x']
y = f.g['y']
z = f.g['z']
r = f.g['rxy']
vel0 = f.g['vel'].copy()
# Remove units from all quantities
r = isaac.strip_units(r)
x = isaac.strip_units(x)
y = isaac.strip_units(y)
z = isaac.strip_units(z)
# Temporary filenames for running ChaNGa
f_prefix = str(np.random.randint(0, 2**32))
f_name = f_prefix + '.std'
p_name = f_prefix + '.param'
# Update parameters
p_temp = param.copy()
p_temp['achInFile'] = f_name
p_temp['achOutName'] = f_prefix
if 'dDumpFrameTime' in p_temp: p_temp.pop('dDumpFrameTime')
if 'dDumpFrameStep' in p_temp: p_temp.pop('dDumpFrameStep')
# --------------------------------------------
# Estimate velocity from gravity only
# --------------------------------------------
# Note, accelerations due to gravity are calculated twice to be extra careful
# This is so that any velocity dependent effects are properly accounted for
# (although, ideally, there should be none)
# The second calculation uses the updated velocities from the first
for iGrav in range(2):
# Save files
f.write(filename=f_name, fmt = pynbody.tipsy.TipsySnap)
isaac.configsave(p_temp, p_name, ftype='param')
# Run ChaNGa, only calculating gravity
command = 'charmrun ++local ' + changbin + ' -gas -n 0 ' + p_name
p = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while p.poll() is None:
time.sleep(0.1)
# Load accelerations
acc_name = f_prefix + '.000000.acc2'
a = isaac.load_acc(acc_name)
# Clean-up
for fname in glob.glob(f_prefix + '*'): os.remove(fname)
# If a is not a vector, calculate radial acceleration. Otherwise, assume
# a is the radial acceleration
a_r = a[:,0]*x/r + a[:,1]*y/r
# Make sure the units are correct then remove them
a_r = isaac.match_units(a_r, a)[0]
a_r = isaac.strip_units(a_r)
# Calculate cos(theta) where theta is angle above x-y plane
cos = r/np.sqrt(r**2 + z**2)
ar2 = a_r*r**2
# Bin the data
r_edges = np.linspace(r.min(), (1+np.spacing(2))*r.max(), nr + 1)
ind, r_edges = isaac.digitize_threshold(r, min_per_bin, r_edges)
ind -= 1
nr = len(r_edges) - 1
r_bins, ar2_mean, err = isaac.binned_mean(r, ar2, binedges=r_edges, \
weighted_bins=True)
# Fit lines to ar2 vs cos for each radial bin
m = np.zeros(nr)
b = np.zeros(nr)
for i in range(nr):
mask = (ind == i)
p = np.polyfit(cos[mask], ar2[mask], 1)
m[i] = p[0]
b[i] = p[1]
# Interpolate the line fits
m_spline = isaac.extrap1d(r_bins, m)
b_spline = isaac.extrap1d(r_bins, b)
# Calculate circular velocity
ar2_calc = m_spline(r)*cos + b_spline(r)
v_calc = np.sqrt(abs(ar2_calc)/r)
vel = f.g['vel'].copy()
v_calc = isaac.match_units(v_calc,vel)[0]
vel[:,0] = -v_calc*y/r
vel[:,1] = v_calc*x/r
# Assign to f
f.g['vel'] = vel
# --------------------------------------------
# Estimate pressure/gas dynamics accelerations
# --------------------------------------------
a_grav = a
ar2_calc_grav = ar2_calc
# Save files
f.write(filename=f_name, fmt = pynbody.tipsy.TipsySnap)
isaac.configsave(p_temp, p_name, ftype='param')
# Run ChaNGa, including SP
|
H
command = 'charmrun ++local ' + changb
|
in + ' +gas -n 0 ' + p_name
p = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while p.poll() is None:
time.sleep(0.1)
# Load accelerations
acc_name = f_prefix + '.000000.acc2'
a_total = isaac.load_acc(acc_name)
# Clean-up
for fname in glob.glob(f_prefix + '*'): os.remove(fname)
# Estimate the accelerations due to pressure gradients/gas dynamics
a_gas = a_total - a_grav
ar_gas = a_gas[:,0]*x/r + a_gas[:,1]*y/r
ar_gas = isaac.strip_units(ar_gas)
ar2_gas = ar_gas*r**2
logr_bins, ratio, err = isaac.binned_mean(np.log(r), ar2_gas/ar2_calc_grav, nbins=nr,\
weighted_bins=True)
r_bins = np.exp(logr_bins)
ratio_spline = isaac.extrap1d(r_bins, ratio)
ar2_calc = ar2_calc_grav*(1 + ratio_spline(r))
a_calc = ar2_calc/r**2
v = np.sqrt(r*abs(a_calc))
v = isaac.match_units(v, vel0.units)[0]
vel = vel0.copy()
vel[:,0] = -v*y/r
vel[:,1] = v*x/r
# more cleanup
f.g['vel'] = vel0
return vel
|
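The last step of v_xy above turns the binned radial acceleration into a circular speed via v = sqrt(r * |a_r|). A self-contained sketch of just that relation for a point-mass potential (arbitrary units, G*M = 1), independent of ChaNGa and the isaac helpers:

```python
import numpy as np

r = np.linspace(0.5, 10.0, 20)
a_r = -1.0 / r**2              # radial acceleration of a point mass, G*M = 1
v = np.sqrt(r * np.abs(a_r))   # same relation used for v_calc in v_xy
print(v[:5])                   # falls off as 1/sqrt(r), the Keplerian curve
```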
mlperf/training_results_v0.5
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/bin/t2t_trainer_test.py
|
Python
|
apache-2.0
| 1,357
| 0.002948
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t2t_trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib_test
import tensorflow as tf
FLAGS = tf.flags.FLAGS
|
class TrainerTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_lib_test.TrainerLibTest.setUpClass()
def testTrain(self):
FLAGS.problem = "tiny_algo"
FLAGS.model = "transformer"
FLAGS.hparams_set = "tran
|
sformer_tiny"
FLAGS.train_steps = 1
FLAGS.eval_steps = 1
FLAGS.output_dir = tf.test.get_temp_dir()
FLAGS.data_dir = tf.test.get_temp_dir()
t2t_trainer.main(None)
if __name__ == "__main__":
tf.test.main()
|
josenavas/qiime
|
scripts/count_seqs.py
|
Python
|
gpl-2.0
| 3,211
| 0.001246
|
#!/usr/bin/env python
# File created on 29 May 2011
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from qiime.util import (parse_command_line_parameters,
make_option,
count_seqs_in_filepaths)
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
script_info['script_usage'] = [
("",
"Count the sequences in a fasta file and write results to stdout.",
"%prog -i in.fasta"),
("",
"Count the sequences in a fasta file and a fastq file and write results to file. Note that fastq files can only be processed if they end with .fastq -- all other files are assumed to be fasta.",
"%prog -i in1.fasta,in2.fastq -o seq_counts.txt"),
("",
"Count the sequences all .fasta files in current directory and write results to stdout. Note that -i option must be quoted.",
"%prog -i \"*.fasta\"")]
script_info['output_description'] = ""
sc
|
ript_info['required_options'] = [
make_option('-i', '--input_fps', type='existing_filepaths'
|
,
help='the input filepaths (comma-separated)'),
]
script_info['optional_options'] = [
# Example optional option
make_option('-o', '--output_fp', type="new_filepath",
help='the output filepath [default: write to stdout]'),
make_option('--suppress_errors', action='store_true',
help='Suppress warnings about missing files [default: %default]',
default=False)
]
script_info['version'] = __version__
def format_output(count_data, total, inaccessible_filepaths,
suppress_errors=False):
""" Output formatter """
lines = ['']
count_data.sort()
for c in count_data:
if c[0][0] > 0:
lines.append(
'%d : %s (Sequence lengths (mean +/- std): %1.4f +/- %1.4f)' %
(c[0][0], c[1], c[0][1], c[0][2]))
else:
lines.append(
'%d : %s' % (c[0][0], c[1]))
lines.append('%d : Total' % total)
if inaccessible_filepaths and not suppress_errors:
lines.append('')
lines.append(
'Some files were not accessible. Do they exist? Do you have read permission?')
for inaccessible_filepath in inaccessible_filepaths:
lines.append(' %s' % inaccessible_filepath)
lines.append('')
return '\n'.join(lines)
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
suppress_errors = opts.suppress_errors
input_fps = opts.input_fps
output_fp = opts.output_fp
count_data, total, inaccessible_filepaths = count_seqs_in_filepaths(
input_fps)
r = format_output(
count_data,
total,
inaccessible_filepaths,
suppress_errors)
if opts.output_fp:
f = open(output_fp, 'w')
f.write(r)
f.close()
else:
print r
if __name__ == "__main__":
main()
|
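A hedged sketch of what format_output above produces, using made-up counts and lengths (these are not real sequence statistics); it assumes the function is in scope, e.g. in an interactive session after running the script.

```python
fake_counts = [
    ((120, 250.1234, 12.5678), 'in1.fasta'),
    ((80, 151.0000, 9.9000), 'in2.fastq'),
]
print format_output(fake_counts, total=200, inaccessible_filepaths=[])
```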
jonguan/cmpe275-proj1-windrose
|
run_reduce.py
|
Python
|
mit
| 242
| 0.020661
|
import sys
import windrosebin
for x in sys.argv:
print x
pr
|
int type(sys.argv)
windrosebin.allocate()
windrosebin.check(sys.argv[1],sys.argv[2],len(sys.arg
|
v))
windrosebin.calc(sys.argv[1],sys.argv[3],len(sys.argv))
windrosebin.printLines()
|
xmnlab/pywim
|
docs/conf.py
|
Python
|
mit
| 8,369
| 0.005377
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pywim documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pywim
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_en
|
coding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyWIM'
copyright = u"2016, Ivan Ogasawara"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pywim.__version__
# The full version, in
|
cluding alpha/beta/rc tags.
release = pywim.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pywimdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pywim.tex',
u'PyWIM Documentation',
u'Ivan Ogasawara', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pywim',
u'PyWIM Documentation',
[u'Ivan Ogasawara'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pywim',
u'PyWIM Documentation',
u'Ivan Ogasawara',
'pywim',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How
|
dperpeet/cockpit
|
test/verify/storagelib.py
|
Python
|
lgpl-2.1
| 12,449
| 0.002089
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
import os
import re
from testlib import *
class StorageCase(MachineCase):
def setUp(self):
if "atomic" in os.getenv("TEST_OS", ""):
self.skipTest("No storage on Atomic")
MachineCase.setUp(self)
self.storagectl_cmd = self.machine.execute("for cmd in storagedctl storagectl udisksctl; do if which $cmd 2>/dev/null; then break; fi; done").strip()
if "udisksctl" in self.storagectl_cmd:
ver = self.machine.execute("busctl --system get-property org.freedesktop.UDisks2 /org/freedesktop/UDisks2/Manager org.freedesktop.UDisks2.Manager Version || true")
else:
ver = self.machine.execute("busctl --system get-property org.storaged.Storaged /org/storaged/Storaged/Manager org.storaged.Storaged.Manager Version || true")
m = re.match('s "(.*)"', ver)
if m:
self.storaged_version = map(int, m.group(1).split("."))
else:
self.storaged_version = [ 0 ]
self.storaged_is_old_udisks = ("udisksctl" in self.storagectl_cmd and self.storaged_version < [2, 6, 0])
if "debian" in self.machine.image or "ubuntu" in self.machine.image:
# Debian's udisks has a patch to use FHS /media directory
self.mount_root = "/media"
|
else:
self.mount_root = "/run/media"
def inode(s
|
elf, f):
return self.machine.execute("stat -L '%s' -c %%i" % f)
def retry(self, setup, check, teardown):
b = self.browser
b.arm_timeout()
while True:
if setup:
setup()
if check():
break
if teardown:
teardown()
b.wait_checkpoint()
b.disarm_timeout()
# Content
def content_row_expand(self, index):
b = self.browser
tbody = "#detail-content tbody:nth-of-type(%d)" % index
b.wait_present(tbody)
if not "open" in b.attr(tbody, "class"):
b.click(tbody + " tr.listing-ct-item")
b.wait_present(tbody + ".open")
def content_row_action(self, index, title):
btn = "#detail-content tbody:nth-of-type(%d) .listing-ct-item .listing-ct-actions button:contains(%s)" % (index, title)
self.browser.wait_present(btn)
self.browser.click(btn)
# The row might come and go a couple of times until it has the
# expected content. However, wait_in_text can not deal with a
# temporarily disappearing element, so we use self.retry.
def content_row_wait_in_col(self, row_index, col_index, val):
col = "#detail-content tbody:nth-of-type(%d) .listing-ct-item :nth-child(%d)" % (row_index, col_index+1)
self.retry(None, lambda: self.browser.is_present(col) and val in self.browser.text(col), None)
def content_head_action(self, index, title):
self.content_row_expand(index)
btn = "#detail-content tbody:nth-of-type(%d) .listing-ct-head .listing-ct-actions button:contains(%s)" % (index, title)
self.browser.wait_present(btn)
self.browser.click(btn)
def content_tab_expand(self, row_index, tab_index):
tab_btn = "#detail-content tbody:nth-of-type(%d) .listing-ct-head li:nth-child(%d) a" % (row_index, tab_index)
tab = "#detail-content tbody:nth-of-type(%d) .listing-ct-body:nth-child(%d)" % (row_index, tab_index + 1)
self.content_row_expand(row_index)
self.browser.wait_present(tab_btn)
self.browser.click(tab_btn)
self.browser.wait_present(tab)
return tab
def content_tab_action(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
btn = tab + " button:contains(%s)" % title
self.browser.wait_present(btn)
self.browser.wait_attr(btn, "disabled", None)
self.browser.click(btn)
# To check what's in a tab, we need to open the row and select the
# tab.
#
# However, sometimes we open the wrong row or the wrong tab
# because the right row or right tab still has to be created and
# take its right place. If the right row or tab finally appears,
    # it won't be open at that point and we will miss it if we only
# open a row/tab once. So we just run the whole process in a big
# retry loop.
#
# XXX - Clicking a button in a tab has the same problem, but we
# ignore that for now.
def content_tab_wait_in_info(self, row_index, tab_index, title, val):
b = self.browser
def setup():
pass
def check():
row = "#detail-content tbody:nth-of-type(%d)" % row_index
row_item = row + " tr.listing-ct-item"
tab_btn = row + " .listing-ct-head li:nth-child(%d) a" % tab_index
tab = row + " .listing-ct-body:nth-child(%d)" % (tab_index + 1)
cell = tab + " table.info-table-ct tr:contains(%s) td:nth-child(2)" % title
if not b.is_present(row + ".open"):
if not b.is_present(row_item):
return False
b.click(row_item)
if not b.is_present(row + ".open"):
return False
if not b.is_present(tab):
if not b.is_present(tab_btn):
return False
b.click(tab_btn)
if not b.is_present(tab):
return False
if not b.is_present(cell):
return False
return val in b.text(cell)
def teardown():
pass
self.retry(setup, check, teardown)
def content_tab_info_row(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
return tab + " table.info-table-ct tr:contains(%s)" % title
def content_tab_info_action(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
link = tab + " table.info-table-ct tr:contains(%s) td:nth-child(2) a" % title
self.browser.wait_present(link)
self.browser.click(link)
# Dialogs
def dialog_wait_open(self):
self.browser.wait_present('#dialog')
self.browser.wait_visible('#dialog')
def dialog_wait_alert(self, text):
self.browser.wait_in_text('#dialog .alert-message', text)
def dialog_field(self, field):
return '#dialog [data-field="%s"]' % field
def dialog_val(self, field):
return self.browser.val(self.dialog_field(field))
def dialog_set_val(self, field, val):
if isinstance(val, bool):
self.browser.set_checked(self.dialog_field(field), val)
elif isinstance(val, dict):
for label in val:
self.dialog_select(field, label, val[label])
elif isinstance(val, int):
# size slider
self.browser.set_val(self.dialog_field(field) + " .size-unit", "1048576")
self.browser.set_val(self.dialog_field(field) + " .size-text", str(val))
else:
self.browser.set_val(self.dialog_field(field), val)
def dialog_set_expander(self, field, val):
self.browser.call_js_func(
"""(function (sel, val) {
if ($(sel).hasClass('collapsed') == val) {
$(sel).click();
}
})""", self.dialog_field(field), val)
def dialog_is_present(self, field, label):
return self.bro
|
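A hedged sketch of how a test method might lean on the retry() helper above to wait for flaky UI state; it assumes it lives in the same test module so StorageCase is in scope, and the selector and the size string are placeholders.

```python
class ExampleStorageTest(StorageCase):
    def testSizeShowsUp(self):
        b = self.browser
        self.retry(
            None,                                           # no setup step
            lambda: b.is_present("#detail-header") and
                    "512 MB" in b.text("#detail-header"),   # condition to wait for
            None)                                           # no teardown step
```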
att-comdev/drydock
|
drydock_provisioner/cli/task/actions.py
|
Python
|
apache-2.0
| 5,257
| 0.000761
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions related to task commands."""
import time
from drydock_provisioner.cli.action import CliAction
from drydock_provisioner.cli.const import TaskStatus
class TaskList(CliAction): # pylint: disable=too-few-public-methods
"""Action to list tasks."""
def __init__(self, api_client):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
"""
super().__init__(api_client)
self.logger.debug('TaskList action initialized')
def invoke(self):
"""Invoke execution of this action."""
return self.api_client.get_tasks()
class TaskCreate(CliAction): # pylint: disable=too-few-public-methods
"""Action to create tasks against a design."""
def __init__(self,
api_client,
design_ref,
action_name=None,
node_names=None,
rack_names=None,
node_tags=None,
block=False,
poll_interval=15):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
:param string design_ref: The URI reference to design documents
:param string action_name: The name of the action being performed for this task
:param List node_names: The list of node names to restrict action application
:param List rack_names: The list of rack names to restrict action application
:param List node_tags: The list of node tags to restrict action application
:param bool block: Whether to block CLI exit until task completes
:param integer poll_interval: Polling interval to query task status
"""
super().__init__(api_client)
self.design_ref = design_ref
self.action_name = action_name
self.logger.debug('TaskCreate action initialized for design=%s',
design_ref)
self.logger.debug('Action is %s', action_name)
self.logger.debug("Node names = %s", node_names)
self.logger.debug("Rack names = %s", rack_names)
self.logger.debug("Node tags = %s", node_tags)
self.block = block
self.poll_interval = poll_interval
if any([node_names, rack_names, node_tags]):
filter_items = {'filter_type': 'union'}
if node_names is not None:
filter_items['node_names'] = node_names
if rack_names is not None:
filter_items['rack_names'] = rack_names
            if node_tags is not None:
filter_items['node_tags'] = node_tags
self.node_filter = {
'filter_set_type': 'intersection',
'filter_set': [filter_items]
}
else:
self.node_filter = None
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.create_task(
design_ref=self.design_ref,
task_action=self.action_name,
node_filter=self.node_filter)
if not self.block:
return task
task_id = task.get('task_id')
while True:
time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id)
if task.get('status',
'') in [TaskStatus.Complete, TaskStatus.Terminated]:
return task
class TaskShow(CliAction): # pylint: disable=too-few-public-methods
"""Action to show a task's detial."""
def __init__(self, api_client, task_id, block=False, poll_interval=15):
"""Object initializer.
:param DrydockClient api_client: The api client used for invocation.
:param string task_id: the UUID of the task to retrieve
:param bool block: Whether to block CLI exit until task completes
:param integer poll_interval: Polling interval to query task status
"""
super().__init__(api_client)
self.task_id = task_id
self.logger.debug('TaskShow action initialized for task_id=%s,',
task_id)
self.block = block
self.poll_interval = poll_interval
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.get_task(task_id=self.task_id)
if not self.block:
return
|
task
task_id = task.get('task_id')
while True:
time.sleep(self.poll_interval)
task = self.api_client.get_task(task_id=task_id)
if task.status in [TaskStatus.Complete, TaskStatus.Terminated]:
|
return task
|
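A hedged sketch of how TaskCreate above might be driven once a DrydockClient exists; client construction is deliberately omitted, and the design_ref and node names are placeholders rather than values taken from this module.

```python
from drydock_provisioner.cli.task.actions import TaskCreate

def create_verify_task(api_client, design_ref):
    """api_client: an existing DrydockClient; design_ref: URI of the design documents."""
    action = TaskCreate(
        api_client,
        design_ref=design_ref,
        action_name='verify_site',
        node_names=['node01', 'node02'],   # placeholder node names
        block=True,                        # poll until Complete or Terminated
        poll_interval=5)
    return action.invoke()
```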
Jeff-Wang93/vent
|
vent/core/network_tap/ncontrol/prestart.py
|
Python
|
apache-2.0
| 230
| 0
|
#!/usr/bin/env python3
import docker
def pull_ncapture():
d_c
|
lient = docker.from_env()
d_client.images.pull('cyb
|
erreboot/vent-ncapture', tag='master')
if __name__ == '__main__': # pragma: no cover
pull_ncapture()
|
chrxr/wagtail
|
wagtail/contrib/wagtailapi/utils.py
|
Python
|
bsd-3-clause
| 807
| 0.001239
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.utils.six.moves.urllib.parse import urlparse
from wagtail.wagtailcore.models import Page
class BadRequestError(Exception):
pass
def get_base_url(request=None):
base_url = getattr(setting
|
s, 'WAGTAILAPI_BASE_URL', request.site.root_url if request else None)
if base_url:
# We only want the scheme and netloc
base_url_parsed = urlparse(base_url)
return base_url_parsed.scheme + '://' + base_url_parsed.netloc
def get_full_url(request, path):
base_url = get_base_url(request) or ''
return base_url + path
def pages_for_site(site):
pages = Page.objects.public().live()
pages = pages.descendant_of(site
|
.root_page, inclusive=True)
return pages
|
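A hedged sketch of the behaviour of get_base_url/get_full_url above inside a configured Django project; the URLs are placeholders, and the import path simply mirrors this module's location.

```python
from django.test import override_settings
from wagtail.contrib.wagtailapi.utils import get_full_url

with override_settings(WAGTAILAPI_BASE_URL='https://api.example.com/some/path'):
    # Only the scheme and netloc of the setting are kept.
    print(get_full_url(None, '/api/v1/pages/'))
    # -> https://api.example.com/api/v1/pages/
```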
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam
|
examples/compactOutput.py
|
Python
|
gpl-2.0
| 1,329
| 0.017306
|
#! /usr/bin/python
""" Runs an OpenFOAM solver and captures the output. Extracts information
about the linear solvers (initial residual) and outputs it in a "Fluentish"
way (one line per timestep). Called:
compactOutput.py interFoam . damBreak
"""
import re,sys
from PyFoam.LogAnalysis.LogLineAnalyzer import LogLineAnalyzer
from PyFoam.LogAnalysis.BoundingLogAnalyzer import BoundingLogAnalyzer
from PyFoam.Execution.AnalyzedRunner import AnalyzedRunner
class CompactLineAnalyzer(LogLineAnalyzer):
def __init__(self):
LogLineAnalyzer.__init__(self)
self.told=""
self.exp=re.compile("^(.+): Solving for (.+), Initial residual = (.+), Final residual = (.+),
|
No Iterations (.+)$")
def doAnalysis(self,line):
m=self.exp.match(line)
if m!=None:
name=m.groups()[1]
resid=m.groups()[2]
time=self.getTime()
if time!=self.told:
self.told=time
print "\n t = %6g : " % ( float(time) ),
print " %5s: %6e " % (name,float(resid)),
sys.stdout.flush()
class CompactAnalyzer(BoundingLogAnalyzer):
def __init__(self):
B
|
oundingLogAnalyzer.__init__(self)
self.addAnalyzer("Compact",CompactLineAnalyzer())
run=AnalyzedRunner(CompactAnalyzer(),silent=True)
run.start()
|
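A hedged look at what the CompactLineAnalyzer regex above extracts from a typical OpenFOAM linear-solver line; the sample line itself is made up, not taken from a real log.

```python
import re

exp = re.compile("^(.+): Solving for (.+), Initial residual = (.+),"
                 " Final residual = (.+), No Iterations (.+)$")
line = ("DICPCG: Solving for p, Initial residual = 0.0423,"
        " Final residual = 1.2e-07, No Iterations 54")
m = exp.match(line)
print m.groups()[1], m.groups()[2]  # -> 'p 0.0423' (field name and initial residual)
```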
aldian/tensorflow
|
tensorflow/python/keras/engine/training_v1.py
|
Python
|
apache-2.0
| 138,190
| 0.005065
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""V1 Training-related part of the Keras engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import training as training_lib
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensor
|
flow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.
|
python.util.compat import collections_abc
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
class Model(training_lib.Model):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
self._compile_time_distribution_strategy = None
if (ops.executing_eagerly_outside_functions() and
distribution_strategy_context.has_strategy()):
self._set_strategy(
distribution_strategy_context.get_strategy())
# This flag is used to track if the user is using the deprecated path of
# passing distribution strategy to compile rather than creating the model
# under distribution strategy scope.
self._compile_distribution = False
self._run_eagerly = None
self._experimental_run_tf_function = (
ops.executing_eagerly_outside_functions())
self._v1_compile_was_called = False
def _init_batch_counters(self):
pass # Batch counters should not be created in legacy graph mode.
@trackable.no_automatic_dependency_tracking
def _set_strategy(self, strategy):
self._compile_time_distribution_strategy = strategy
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
strategy = (self._distribution_strategy or
self._compile_time_distribution_strategy)
if strategy:
with strategy.scope():
return base_layer.Layer.get_weights(self)
return base_layer.Layer.get_weights(self)
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the s
|
taedori81/shoop
|
shoop/admin/modules/contacts/views/list.py
|
Python
|
agpl-3.0
| 2,134
| 0.001406
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is li
|
censed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.db.models import Count
from django.utils.translation import ugettext as _
from shoop.admin.utils.picotable import (
Column, RangeFilter, TextFilter, true_or_false_filter
)
from shoop.admin.util
|
s.views import PicotableListView
from shoop.core.models import CompanyContact, Contact, PersonContact
class ContactListView(PicotableListView):
model = Contact
columns = [
Column("name", _(u"Name"), linked=True, filter_config=TextFilter()),
Column("type", _(u"Type"), display="get_type_display", sortable=False), # TODO: Add a filter
Column("email", _(u"Email"), filter_config=TextFilter()),
Column("phone", _(u"Phone"), filter_config=TextFilter()),
Column("is_active", _(u"Active"), filter_config=true_or_false_filter),
Column("n_orders", _(u"# Orders"), class_name="text-right", filter_config=RangeFilter(step=1)),
]
def get_queryset(self):
return super(ContactListView, self).get_queryset().annotate(n_orders=Count("customer_orders"))
def get_type_display(self, instance):
if isinstance(instance, PersonContact):
return _(u"Person")
elif isinstance(instance, CompanyContact):
return _(u"Company")
else:
return _(u"Contact")
def get_object_abstract(self, instance, item):
"""
:type instance: shoop.core.models.contacts.Contact
"""
bits = filter(None, [
item["type"],
_("Active") if instance.is_active else _("Inactive"),
_("Email: %s") % (instance.email or "\u2014"),
_("Phone: %s") % (instance.phone or "\u2014"),
_("%d orders") % instance.n_orders,
])
return [
{"text": instance.name or _("Contact"), "class": "header"},
{"text": ", ".join(bits)}
]
|
NeuroanatomyAndConnectivity/pipelines
|
src/clustering/clustering/create_input_surface.py
|
Python
|
mit
| 833
| 0.010804
|
import os
import nibabel as nb
import numpy as np
from nipype.utils.filemanip import split_filename
sxfmout = '/scr/schweiz1/Data/results/sxfmout/_session_session1/_subject_id_9630905/_fwhm_0/_hemi_lh/lh.afni_corr_rest_roi_dtype_t
|
shift_detrended_regfilt_gms_filt.fsaverage4.nii'  # keep the path only; the image is loaded below
##from mask_surface import MaskSurface
data = nb.load(sxfmout).get_data()
origdata = data.shape
affine = nb.spatialimages.SpatialImage.get_affine(nb.load(sxfmout))
data.resize(data.shape[0]*data.shape[2],1,1,data.shape[3])
mask = np.zeros_like(data)
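# hemi, lhvertices and rhvertices are assumed to be injected by the enclosing
# nipype pipeline; they are not defined anywhere in this snippet.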
if hemi == 'lh': chosenvertices = lhvertices
if hemi == 'rh': chosenvertices = rhvertices
for i,vertex in enumerate(chosenvertices):
mask[vertex][:] = 1
mask.
|
resize(origdata)
maskImg = nb.Nifti1Image(mask, affine)
_, base, _ = split_filename(sxfmout)
nb.save(maskImg, os.path.abspath(base + '_mask.nii'))
|
p12tic/awn-extras
|
applets/maintained/calendar/calendarprefs.py
|
Python
|
gpl-2.0
| 9,376
| 0.00288
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# Copyright (c) 2007 Mike (mosburger) Desjardins <desjardinsmike@gmail.com>
# Please do not email the above person for support. The
# email address is only there for license/copyright purposes.
#
# This is the preferences dialog for a calendar applet for Avant Window
# Navigator.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import gtk
import random
from awn.extras import _
class CalendarPrefs(gtk.Window):
# There *must* be a more "Pythonic" way to do this:
int_opt = {
"None": 0,
"Evolution": 1,
"Google Calendar": 2,
"Outlook Web Access": 3}
int_opt_inv = {
0: "None",
1: "Evolution",
2: "Google Calendar",
3: "Outlook Web Access"}
clock_appearance = {
_("Classic LCD"): ("667F66FF", "000000FF", "000000FF", False),
_("Indy Glow LCD"): ("22B7B7FF", "000000FF", "000000FF", False),
_("Backlit Amber LCD"): ("AA4A3AFF", "000000FF", "000000FF", False),
_("Backlit Green LCD"): ("337A33FF", "000000FF", "000000FF", False),
_("Green LED"): ("000000FF", "00FF66FF", "000000FF", False),
_("Red LED"): ("000000FF", "FF2211FF", "000000FF", False),
_("Blue LED"): ("000000FF", "00AAFFFF", "000000FF", False),
_("Plain White"): ("0000008F", "FFFFFFFF", "000000FF", True),
_("Plain Black"): ("0000003F", "000000FF", "000000FF", True)}
calendar_appearance = {
_("Red"): "calendar-red.png",
_("Green"): "calendar-green.png",
_("Blue"): "calendar-blue.png",
_("Gray"): "calendar-gray.png",
_("Black"): "calendar-black.png"}
def crypt(self, sequence, key):
sign = (key > 0) * 2 - 1
random.seed(abs(key * sign))
s = ''
for i in xrange(len(sequence)):
r = random.randint(0, 255)
s += chr((ord(sequence[i]) + r * sign) % 128)
return s
def __init__(self, applet):
super(CalendarPrefs, self).__init__()
self.applet = applet
self.set_title(_("Preferences"))
vbox = gtk.VBox(True, 0)
self.add(vbox)
cal_appear
|
ance_index = \
self.applet.get_int_config('cal_appearance_index')
clock_appearance_index = \
self.applet.get_int_config('clock_appearance_index')
self.twelve_hour_checkbox = gtk.CheckButton(_("Twelve Hour Clock"))
self.twelve_hour_checkbox.set_active(applet.twelve_hour_clock)
hbox0 = gtk.HBox(False, 0)
hbox0
|
.pack_start(self.twelve_hour_checkbox, True, False, 0)
vbox.pack_start(hbox0, False, False, 0)
# self.blink_checkbox = gtk.CheckButton(_("Blinking Colon"))
# if applet.blinky_colon == True:
# self.blink_checkbox.set_active(True)
# else:
# self.blink_checkbox.set_active(False)
# hbox1 = gtk.HBox(False,0)
# hbox1.pack_start(self.blink_checkbox,True,False,0)
# vbox.pack_start(hbox1,False,False,0)
hbox1a = gtk.HBox(True, 0)
self.clock_appear_combo = gtk.combo_box_new_text()
for item in self.clock_appearance.keys():
self.clock_appear_combo.append_text(item)
self.clock_appear_combo.set_active(clock_appearance_index)
clock_appear_label = gtk.Label(_("Clock Appearance"))
hbox1a.pack_start(clock_appear_label, True, False, 0)
hbox1a.pack_start(self.clock_appear_combo, True, True, 0)
vbox.pack_start(hbox1a, True, False, 0)
hbox1b = gtk.HBox(True, 0)
self.cal_appear_combo = gtk.combo_box_new_text()
for item in self.calendar_appearance.keys():
self.cal_appear_combo.append_text(item)
self.cal_appear_combo.set_active(cal_appearance_index)
cal_appear_label = gtk.Label(_("Calendar Appearance"))
hbox1b.pack_start(cal_appear_label, True, False, 0)
hbox1b.pack_start(self.cal_appear_combo, True, True, 0)
vbox.pack_start(hbox1b, True, False, 0)
hbox2 = gtk.HBox(True, 0)
self.integ_combo = gtk.combo_box_new_text()
self.integ_combo.append_text(_("None"))
self.integ_combo.append_text(_("Evolution"))
self.integ_combo.append_text(_("Google Calendar"))
self.integ_combo.append_text(_("Outlook Web Access"))
self.integ_combo.set_active(self.int_opt[applet.integ_text])
self.integ_combo.connect("changed", self.combo_changed, "bla")
int_label = gtk.Label(_("Calendar Integration"))
hbox2.pack_start(int_label, True, False, 0)
hbox2.pack_start(self.integ_combo, True, True, 0)
vbox.pack_start(hbox2, True, False, 0)
#hbox3 = gtk.HBox(True, 0)
#self.user_label = gtk.Label(_("Calendar Username"))
#hbox3.pack_start(self.user_label)
#self.user = gtk.Entry(40)
#self.user.set_text(self.applet.username)
#hbox3.pack_start(self.user)
#vbox.pack_start(hbox3,False,False,2)
#hbox4 = gtk.HBox(True, 0)
#self.password_label = gtk.Label(_("Calendar Password"))
#hbox4.pack_start(self.password_label)
#self.password = gtk.Entry(20)
#self.password.set_visibility(False)
#self.password.set_text(self.applet.password)
#hbox4.pack_start(self.password)
#vbox.pack_start(hbox4,False,False,2)
hbox5 = gtk.HBox(True, 0)
self.url_label = gtk.Label(_("Calendar URL"))
hbox5.pack_start(self.url_label)
self.url = gtk.Entry(50)
self.url.set_text(self.applet.url)
hbox5.pack_start(self.url)
vbox.pack_start(hbox5, False, False, 2)
hbox6 = gtk.HBox(True, 0)
ok = gtk.Button(stock=gtk.STOCK_OK)
ok.connect("clicked", self.ok_button, "ok")
hbox6.add(ok)
cancel = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel.connect("clicked", self.cancel_button, "cancel")
hbox6.add(cancel)
vbox.pack_end(hbox6, True, False, 2)
self.set_credential_sensitivity()
def set_credential_sensitivity(self):
option = self.int_opt_inv[self.integ_combo.get_active()]
if option == "Google Calendar" or option == "Outlook Web Access":
#self.user_label.set_sensitive(True)
#self.user.set_sensitive(True)
#self.password_label.set_sensitive(True)
#self.password.set_sensitive(True)
if option == "Google Calendar":
self.url_label.set_sensitive(False)
self.url.set_sensitive(False)
else:
self.url_label.set_sensitive(True)
self.url.set_sensitive(True)
else:
#self.user_label.set_sensitive(False)
#self.user.set_sensitive(False)
#self.password_label.set_sensitive(False)
#self.password.set_sensitive(False)
self.url_label.set_sensitive(False)
self.url.set_sensitive(False)
def combo_changed(self, widget, bla):
self.set_credential_sensitivity()
def ok_button(self, widget, event):
integration = self.int_opt_inv[self.integ_combo.get_active()]
self.applet.set_string_config('integration', integration)
#self.applet.set_string_config('username', self.user.get_text())
#self.applet.set_string_config('password',
# self.crypt(self.password.get_text(),
# 17760704))
sel
|
Endika/website-addons
|
website_sale_stock_status/__openerp__.py
|
Python
|
lgpl-3.0
| 408
| 0.017157
|
{
'name' : 'Product status at website shop',
'version' : '1.0.1',
'author' : 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category' : 'Sale',
'website
|
' : 'https://yelizariev.github.io',
'depends' : ['website_sale
|
', 'stock'],
'data':[
'website_sale_stock_status_views.xml',
'website_sale_stock_status_data.xml',
],
'installable': True
}
|
igor-rangel7l/igorrangel.repository
|
plugin.video.SportsDevil/service/oscrypto/_win/_kernel32_cffi.py
|
Python
|
gpl-2.0
| 1,029
| 0.000972
|
# coding: utf-8
f
|
rom __future__ import unicode_literals, division, absolute_import, print_function
from .._ffi import FFIEngineError, register_ffi
from .._types import str_cls
from ..errors import LibraryNotFoundError
try:
import cffi
except (ImportError):
raise FFIEngineError('Error importing cffi')
__all__ = [
'get_error',
'kernel32',
]
ffi = cffi.FFI()
if cffi.__version_info__ >
|
= (0, 9):
ffi.set_unicode(True)
ffi.cdef("""
typedef long long LARGE_INTEGER;
BOOL QueryPerformanceCounter(LARGE_INTEGER *lpPerformanceCount);
typedef struct _FILETIME {
DWORD dwLowDateTime;
DWORD dwHighDateTime;
} FILETIME;
void GetSystemTimeAsFileTime(FILETIME *lpSystemTimeAsFileTime);
""")
try:
kernel32 = ffi.dlopen('kernel32.dll')
register_ffi(kernel32, ffi)
except (OSError) as e:
if str_cls(e).find('cannot load library') != -1:
raise LibraryNotFoundError('kernel32.dll could not be found')
raise
def get_error():
return ffi.getwinerror()
|
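A hedged, Windows-only sketch of calling the two functions declared in the cdef above through the cffi handle, assuming the module imported cleanly (i.e. kernel32.dll was found).

```python
counter = ffi.new('LARGE_INTEGER *')
if kernel32.QueryPerformanceCounter(counter):
    print(counter[0])                      # raw high-resolution tick count

filetime = ffi.new('FILETIME *')
kernel32.GetSystemTimeAsFileTime(filetime)
print(filetime.dwHighDateTime, filetime.dwLowDateTime)
```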
rienafairefr/pynYNAB
|
docs/conf.py
|
Python
|
mit
| 4,876
| 0.000205
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
# -- Project information -----------------------------------------------------
project = 'pynYNAB'
copyright = '2018, rienafairefr'
author = 'rienafairefr'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinxarg.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pynYNABdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pynYNAB.tex', 'pynYNAB Documentation',
'rienafairefr', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pynynab', 'pynYNAB Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pynYNAB', 'pynYNAB Documentation',
author, 'pynYNAB', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
|
rogerthat-platform/rogerthat-backend
|
src/facebook/version.py
|
Python
|
apache-2.0
| 625
| 0
|
#!/usr/bin/env python
#
# Copyright 2015 Mobolic
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#  https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = "3.0.0-alpha"
|
rad08d/rssreader_flask
|
flask_rss/rssapp/rss.py
|
Python
|
apache-2.0
| 4,065
| 0.005412
|
import sys
import urllib2
import HTMLParser
import xml.etree.ElementTree as ET
from logging import getLogger
class Rss(object):
"""A class for handling RSS feeds"""
def __init__(self,url=None):
if not url:
self.url = ''
            self.articles = []
else:
self.url = url
self.articles = []
self.logger = getLogger(__name__)
def get_rss_into_articles(self):
self.xml = urllib2.urlopen(self.url.encode('utf-8')).read()
root = ET.fromstring(self.xml)
for item in root.findall(".//item"):
try:
title = item.find("title")
link = item.find("link")
descr = item.find("description")
pubDate = item.find("pubDate")
strgDate = str(pubDate.text)
article = Article(title.text,link.text,descr.text, strgDate)
self.articles.append(article)
except Exception as e:
self.logger.error("Error in get_rss routine! Error report: " + e)
return self.articles
class Article(object):
"""A class for handling the details of an article"""
    def __init__(self, title='', link='', descr='', pubDate=''):
        # All fields default to empty values when not provided.
        self.title = title
        self.link = link
        self.descr = descr
        self.pubDate = pubDate
        self.full_txt = ""
        self.pic_links = []
        self.pics = []
        self.logger = getLogger(__name__)
def get_full_txt(self):
try:
response = urllib2.urlopen(self.link).read().decode('utf-8', 'ignore')
parser = RssHTMLParser()
parser.feed(response)
self.pic_links = parser.img_links
self.full_txt = parser.data
except Exception as e:
self.logger.error("Error in get_full_txt() of RssClass.Article Error: " + e)
def get_photos(self, pic_links=None):
pics = []
        if pic_links is None:
try:
for link in self.pic_links:
img = urllib2.urlopen(link).read()
#f = open('/home/parallels/Desktop/pic.jpg', 'wb')
#f.write(img)
#f.close()
self.pics.append(img)
except Exception as e:
self.logger.error("Error in RssClass.get_photos() using self.pic_links. Error: " + e)
else:
try:
for link in pic_links:
                    image = urllib2.urlopen(link).read()
pics.append(image)
except Exception as e:
self.logger.error("Error in RssClass.get_photos() using pic_links. Error: " + e)
return pics
class RssHTMLParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.is_start_p = False
self.is_end_p = False
self.is_start_sp = False
self.data = ""
self.img_links = []
def handle_starttag(self, tag, attrs):
if tag == 'p':
self.is_start_p = True
elif tag == 'span':
self.is_start_sp = True
elif self.is_start_p and tag == 'a':
self.is_start_p = True
elif self.is_start_p and tag == 'img' or self.is_start_sp and tag == 'img':
for attr in attrs:
if attr[0] == 'src':
self.img_links.append(attr[1])
else:
self.is_start_p = False
self.is_start_sp = False
def handle_endtag(self, tag):
if tag == 'p':
self.is_start_p = False
self.is_end_p = True
elif tag == 'a' and self.is_start_p:
self.is_start_p = True
else:
self.is_end_p = False
def handle_data(self, data):
if self.is_start_p:
self.data += data
elif self.is_end_p:
self.data += ' '
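# --- Hedged usage sketch (not from the original file) ---
# Fetch a feed, parse it into Article objects and print each title; the feed
# URL below is a placeholder assumption.
if __name__ == '__main__':
    feed = Rss('http://example.com/rss.xml')
    for article in feed.get_rss_into_articles():
        print(article.title)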
|
gangadhar-kadam/sapphire_app
|
patches/january_2013/update_closed_on.py
|
Python
|
agpl-3.0
| 1,386
| 0.034632
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
webnotes.reload_doc("core", "doctype", "docfield")
webnotes.reload_doc("support", "doctype", "support_ticket")
# customer issue resolved_by should be Profile
if webnotes.conn.sql("""select count(*) from `tabCustomer Issue`
where ifnull(resolved_by,"")!="" """)[0][0]:
webnotes.make_property_setter({
"doctype":"Customer Issue",
"fieldname": "resolved_by",
"property": "options",
"value": "Sales Person"
})
def get_communication_time(support_ticket, sort_order = 'asc'):
tmp = webnotes.conn.sql("""select creation from tabCommunication where
		support_ticket=%s order by creation %s limit 1""" % ("%s", sort_order),
support_ticket)
return tmp and tmp[0][0] or None
# update in support ticket
webnotes.conn.auto_commit_on_many_writes = True
	for st in webnotes.conn.sql("""select name, modified, status from
`tabSupport Ticket`""", as_dict=1):
webnotes.conn.sql("""update `tabSupport Ticket` set first_responded_on=%s where
name=%s""", (get_communication_time(st.name) or st.modified, st.name))
if st.status=="Closed":
webnotes.conn.sql("""update `tabSupport Ticket` set resolution_date=%s where
name=%s""", (get_communication_time(st.name, 'desc') or st.modified, st.name))
|
stoman/CompetitiveProgramming
|
problems/pythonsetdifference/submissions/accepted/stefan.py
|
Python
|
mit
| 214
| 0.004673
|
#!/usr/bin/env python3
#Author: Stefan Toman
if __name__ == '__main__':
n = int(input())
a = set(map(int, input().split()))
m = int(input())
b = set(map(int, input().split()))
print(len(a-b))
|
Astrophilic/Algorithms_Example
|
AStarSearch/python/astar.py
|
Python
|
apache-2.0
| 5,569
| 0
|
# Copyright (c) 2008 Mikael Lind
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from heapq import heappush, heappop
from sys import maxint
# Represent each node as a list, ordering the elements so that a heap of nodes
# is ordered by f = g + h, with h as a first, greedy tie-breaker and num as a
# second, definite tie-breaker. Store the redundant g for fast and accurate
# calculations.
F, H, NUM, G, POS, OPEN, VALID, PARENT = xrange(8)
def astar(start_pos, neighbors, goal, start_g, cost, heuristic, limit=maxint,
debug=None):
"""Find the shortest path from start to goal.
Arguments:
start_pos - The starting position.
neighbors(pos) - A function returning all neighbor positions of the given
position.
goal(pos) - A function returning true given a goal position, false
otherwise.
start_g - The starting cost.
cost(a, b) - A function returning the cost for moving from one
position to another.
heuristic(pos) - A function returning an estimate of the total cost
remaining for reaching goal from the given position.
Overestimates can yield suboptimal paths.
limit - The maximum number of positions to search.
debug(nodes) - This function will be called with a dictionary of all
nodes.
The function returns the best path found. The returned path excludes the
starting position.
"""
# Create the start node.
nums = iter(xrange(maxint))
start_h = heuristic(start_pos)
start = [start_g + start_h, start_h, nums.next(), start_g, start_pos, True,
True, None]
# Track all nodes seen so far.
nodes = {start_pos: start}
# Maintain a heap of nodes.
heap = [start]
# Track the best path found so far.
best = start
while heap:
# Pop the next node from the heap.
current = heappop(heap)
current[OPEN] = False
# Have we reached the goal?
if goal(current[POS]):
best = current
break
# Visit the neighbors of the current node.
for neighbor_pos in neighbors(current[POS]):
neighbor_g = current[G] + cost(current[POS], neighbor_pos)
neighbor = nodes.get(neighbor_pos)
if neighbor is None:
# Limit the search.
if len(nodes) >= limit:
continue
# We have found a new node.
neighbor_h = heuristic(neighbor_pos)
neighbor = [neighbor_g + neighbor_h, neighbor_h, nums.next(),
neighbor_g, neighbor_pos, True, True, current[POS]]
nodes[neighbor_pos] = neighbor
heappush(heap, neighbor)
if neighbor_h < best[H]:
# We are approaching the goal.
best = neighbor
elif neighbor_g < neighbor[G]:
# We have found a better path to the neighbor.
if neighbor[OPEN]:
# The neighbor is already open. Finding and updating it
                    # in the heap would be a linear complexity operation.
                    # Instead we mark the neighbor as invalid and make an
# updated copy of it.
neighbor[VALID] = False
nodes[neighbor_pos] = neighbor = neighbor[:]
neighbor[F] = neighbor_g + neighbor[H]
neighbor[NUM] = nums.next()
neighbor[G] = neighbor_g
neighbor[VALID] = True
neighbor[PARENT] = current[POS]
heappush(heap, neighbor)
else:
# Reopen the neighbor.
neighbor[F] = neighbor_g + neighbor[H]
neighbor[G] = neighbor_g
neighbor[PARENT] = current[POS]
neighbor[OPEN] = True
heappush(heap, neighbor)
# Discard leading invalid nodes from the heap.
while heap and not heap[0][VALID]:
heappop(heap)
if debug is not None:
# Pass the dictionary of nodes to the caller.
debug(nodes)
# Return the best path as a list.
path = []
current = best
while current[PARENT] is not None:
path.append(current[POS])
current = nodes[current[PARENT]]
path.reverse()
return path
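# --- Hedged usage sketch (not from the original file) ---
# Minimal demonstration of astar() on a small 4-connected grid, written for
# the Python 2 environment this module targets. The grid size, wall set, and
# Manhattan heuristic below are illustrative assumptions only.
if __name__ == "__main__":
    WIDTH, HEIGHT = 5, 5
    walls = set([(2, 1), (2, 2), (2, 3)])

    def grid_neighbors(pos):
        # Yield the in-bounds, non-wall neighbors of a cell.
        x, y = pos
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < WIDTH and 0 <= ny < HEIGHT and (nx, ny) not in walls:
                yield (nx, ny)

    def manhattan(pos):
        # Admissible heuristic: Manhattan distance to the goal cell (4, 4).
        return abs(pos[0] - 4) + abs(pos[1] - 4)

    path = astar((0, 0), grid_neighbors, lambda pos: pos == (4, 4), 0,
                 lambda a, b: 1, manhattan)
    print(path)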
|
FrodeSolheim/fs-uae-launcher
|
fsgamesys/platforms/zxspectrum/zxspectrummamedriver.py
|
Python
|
gpl-2.0
| 37
| 0
|
class ZXSpectrumMameDriver:
pass
|
pytroll/satpy
|
satpy/tests/reader_tests/test_safe_sar_l2_ocn.py
|
Python
|
gpl-3.0
| 3,887
| 0.002058
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Module for testing the satpy.readers.safe_sar_l2_ocn module."""
import unittest
import unittest.mock as mock
import numpy as np
import xarray as xr
from satpy.tests.utils import make_dataid
class TestSAFENC(unittest.TestCase):
"""Test various SAFE SAR L2 OCN file handlers."""
@mock.patch('satpy.readers.safe_sar_l2_ocn.xr')
@mock.patch.multiple('satpy.readers.safe_sar_l2_ocn.SAFENC',
__abstractmethods__=set())
def setUp(self, xr_):
"""Set up the tests."""
from satpy.readers.safe_sar_l2_ocn import SAFENC
self.channels = ['owiWindSpeed', 'owiLon', 'owiLat', 'owiHs', 'owiNrcs', 'foo',
'owiPolarisationName', 'owiCalConstObsi']
# Mock file access to return a fake dataset.
self.dummy3d = np.zeros((2, 2, 1))
self.dummy2d = np.zeros((2, 2))
self.dummy1d = np.zeros((2))
self.band = 1
self.nc = xr.Dataset(
{'owiWindSpeed': xr.DataArray(self.dummy2d, dims=('owiAzSize', 'owiRaSize'), attrs={'_FillValue': np.nan}),
'owiLon': xr.DataArray(data=self.dummy2d, dims=('owiAzSize', 'owiRaSize')),
'owiLat': xr.DataArray(data=self.dummy2d, dims=('owiAzSize', 'owiRaSize')),
'owiHs': xr.DataArray(data=self.dummy3d, dims=('owiAzSize', 'owiRaSize', 'oswPartition')),
'owiNrcs': xr.DataArray(data=self.dummy3d, dims=('owiAzSize', 'owiRaSize', 'oswPolarization')),
'foo': xr.DataArray(self.dummy2d, dims=('owiAzSize', 'owiRaSize')),
'owiPolarisationName': xr.DataArray(self.dummy1d, dims=('owiPolarisation')),
'owiCalConstObsi': xr.DataArray(self.dummy1d, dims=('owiIncSize'))
},
attrs={'_FillValue': np.nan,
'missionName': 'S1A'})
xr_.open_dataset.return_value = self.nc
# Instantiate reader using the mocked open_dataset() method. Also, make
        # the reader believe all abstract methods have been implemented.
self.reader = SAFENC(filename='dummy',
filename_info={'start_time': 0,
'end_time': 0,
'fstart_time': 0,
'fend_time': 0,
'polarization': 'vv'},
filetype_info={})
def test_init(self):
"""Test reader initialization."""
self.assertEqual(self.reader.start_time, 0)
self.assertEqual(self.reader.end_time, 0)
self.assertEqual(self.reader.fstart_time, 0)
self.assertEqual(self.reader.fend_time, 0)
def test_get_dataset(self):
"""Test getting a dataset."""
for ch in self.channels:
dt = self.reader.get_dataset(
key=make_dataid(name=ch), info={})
# ... this only compares the valid (unmasked) elements
self.assertTrue(np.all(self.nc[ch] == dt.to_masked_array()),
msg='get_dataset() returns invalid data for '
'dataset {}'.format(ch))
|