repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
beardypig/streamlink | tests/test_streamlink_api.py | Python | bsd-2-clause | 1,749 | 0 | import os.path
import unittest
from unittest.mock import patch
from streamlink import Streamlink
from streaml | ink.api import streams
PluginPath = os.path.join(os.path.dirname(__file__), "plugins")
def get_session():
s = Streamlink()
s.l | oad_plugins(PluginPath)
return s
class TestStreamlinkAPI(unittest.TestCase):
@patch('streamlink.api.Streamlink', side_effect=get_session)
def test_find_test_plugin(self, session):
self.assertTrue(
"rtmp" in streams("test.se")
)
@patch('streamlink.api.Streamlink', side_effect=get_session)
def test_no_streams_exception(self, session):
self.assertEqual({}, streams("test.se/NoStreamsError"))
@patch('streamlink.api.Streamlink', side_effect=get_session)
def test_no_streams(self, session):
self.assertEqual({}, streams("test.se/empty"))
@patch('streamlink.api.Streamlink', side_effect=get_session)
def test_stream_type_filter(self, session):
stream_types = ["rtmp", "hls"]
available_streams = streams("test.se", stream_types=stream_types)
self.assertTrue("rtmp" in available_streams)
self.assertTrue("hls" in available_streams)
self.assertTrue("test" not in available_streams)
self.assertTrue("http" not in available_streams)
@patch('streamlink.api.Streamlink', side_effect=get_session)
def test_stream_type_wildcard(self, session):
stream_types = ["rtmp", "hls", "*"]
available_streams = streams("test.se", stream_types=stream_types)
self.assertTrue("rtmp" in available_streams)
self.assertTrue("hls" in available_streams)
self.assertTrue("test" in available_streams)
self.assertTrue("http" in available_streams)
|
rjpower/fastnet | fastnet/distributed/asgd.py | Python | gpl-3.0 | 4,309 | 0.01903 | #!/usr/bin/env python
'''A relatively simple distributed network implementation, using async SGD.'''
from fastnet import net, layer, data, parser, weights
from fastnet.util import | EZTimer
from mpi4py import MPI
import ctypes
import cudaconv2
import numpy as np
import os
WORLD = MPI.COMM_WORLD
cudaconv2.init(WORLD.Get_rank())
print 'CUDA', os.environ.get('MV2_USE_CUDA')
MASTER = 0
WORKERS = range(1, WORLD.Get_size())
batch_size = 128
data_dir = '/ssd/nn-data/im | agenet/'
data_provider = 'imagenet'
checkpoint_dir = './checkpoint'
param_file = 'config/imagenet.cfg'
train_range = range(101, 1301)
test_range = range(1, 101)
data_provider = 'imagenet'
#train_range = range(1, 41)
#test_range = range(41, 49)
train_dp = data.get_by_name(data_provider)(data_dir,train_range)
test_dp = data.get_by_name(data_provider)(data_dir, test_range)
model = parser.parse_config_file(param_file)
network = net.FastNet((3, 224, 224, 1))
network = parser.load_model(network, model)
class Tags(object):
GRAD_SEND = 100
WEIGHT_UPDATE = 200
def tobuffer(gpuarray):
#print 'BUFFER: 0x%x' % gpuarray.ptr
#print 'SIZE: %s, %s, %s' % (gpuarray.size, gpuarray.shape, gpuarray.dtype)
dtype = np.dtype(gpuarray.dtype)
buf = ctypes.pythonapi.PyBuffer_FromReadWriteMemory(ctypes.c_long(gpuarray.ptr),
gpuarray.size * dtype.itemsize)
return ctypes.cast(buf, ctypes.py_object).value
def wait_for_all(reqs):
for r in reqs: r.Wait()
class Worker(object):
def __init__(self):
self.id = WORLD.Get_rank()
def train(self):
batch = train_dp.get_next_batch(batch_size)
data, labels = network.prepare_for_train(batch.data, batch.labels)
prediction = network.fprop(data)
cost, correct = network.get_cost(labels, prediction)
network.bprop(labels)
self.send_grads()
self.recv_weights()
print cost, correct
def send_grads(self):
_ = EZTimer('send grads')
sends = []
for idx, w in enumerate(layer.WEIGHTS):
sends.append(WORLD.Isend(tobuffer(w.grad), dest=MASTER, tag=Tags.GRAD_SEND + idx))
wait_for_all(sends)
def recv_weights(self):
_ = EZTimer('recv weights')
for idx, w in enumerate(layer.WEIGHTS):
WORLD.Recv(tobuffer(w.wt), source=MASTER, tag=Tags.WEIGHT_UPDATE + idx)
def run(self):
while 1:
self.train()
self.send_grads()
self.recv_weights()
class WorkerProxy(object):
def __init__(self, idx, wts):
self.idx = idx
self.wts = wts
self.recvs = []
def start_read(self):
assert len(self.recvs) == 0
for idx, w in enumerate(self.wts):
self.recvs.append(WORLD.Irecv(tobuffer(w.grad), source=self.idx, tag=Tags.GRAD_SEND + idx))
def send_weights(self, wts):
_ = EZTimer('send weights')
for idx, w in enumerate(wts):
WORLD.Send(tobuffer(w.wt), dest=self.idx, tag=Tags.WEIGHT_UPDATE + idx)
def test(self):
return np.all([r.Test() for r in self.recvs])
def wait(self):
[r.Wait() for r in self.recvs]
self.recvs = []
def try_fetch(self):
if len(self.recvs) == 0:
self.start_read()
if not self.test():
return False
self.wait()
self.start_read()
return True
class Master(object):
def __init__(self):
self._workers = {}
self._master_wts = layer.WEIGHTS
self._requests = []
for w in WORKERS:
self._workers[w] = WorkerProxy(w, layer.WEIGHTS.clone())
def update(self, worker_wts):
_ = EZTimer('update')
for idx, worker_wt in enumerate(worker_wts):
master_wt = self._master_wts[idx]
weights.update(master_wt.wt,
worker_wt.grad,
master_wt.incr,
master_wt.epsilon,
master_wt.momentum,
master_wt.decay,
128)
def run(self):
while 1:
#print 'Fetching gradients...'
for w in self._workers.values():
if w.try_fetch():
self.update(w.wts)
w.send_weights(self._master_wts)
#print 'Sending weight updates...'
if __name__ == '__main__':
if WORLD.Get_rank() == 0:
master = Master()
master.run()
else:
worker = Worker()
worker.run()
|
fardog/river | working/ntp-sync.py | Python | gpl-2.0 | 877 | 0 | import math
import numpy
import pyaudio
import time
import ntplib
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
chunks = []
chunks.append(sine(440, 1, 44100))
chunk = numpy.concatena | te(chunks) * 0.25
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=44100, output=1)
last = 0
print("[ntp-sync] getting clock")
c = ntplib.NTPClient()
response = c.request('pool.ntp.org', version=3)
print("[ntp-sync] clock offset %s" % response.offset)
while True:
curtime = int(math.floor(time.time() + response.offset))
if (curtime % 5) == 0 and curtime > last:
print curtime
print("beep")
last = curtime
| stream.write(chunk.astype(numpy.float32).tostring())
stream.close()
p.terminate()
|
mrphlip/lrrbot | alembic/versions/e966a3afd100_separate_patreon_user_table.py | Python | apache-2.0 | 4,230 | 0.027187 | revision = 'e966a3afd100'
down_revision = '954c3c4caf32'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
import requests
import pytz
import dateutil.parser
import datetime
def upgrade():
patreon_users = alembic.op.create_table("patreon_users",
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("patreon_id", sqlalchemy.Text, unique=True),
sqlalchemy.Column("full_name", sqlalchemy.Text, nullable=False),
sqlalchemy.Column("access_token", sqlalchemy.Text),
sqlalchemy.Column("refresh_token", sqlalchemy.Text),
sqlalchemy.Column("token_expires", sqlalchemy.DateTime(timezone=True)),
sqlalchemy.Column("pledge_start", sqlalchemy.DateTime(timezone=True)),
sqlalchemy.Column("last_announce_month", sqlalchemy.Integer),
)
alembic.op.add_column("users",
sqlalchemy.Column("patreon_user",
sqlalchemy.Integer, sqlalchemy.ForeignKey("patreon_users.id", onupdate="CASCADE", ondelete="SET NULL"),
unique=True,
)
)
# TODO: migrate
conn = alembic.op.get_bind()
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
users = meta.tables["users"]
existing_accounts = conn.execute(sqlalchemy.select([users.c.id, users.c.patreon_access_token, users.c.patreon_refresh_token, users.c.patreon_token_expires])
.where(users.c.patreon_access_token.isnot(None)))
all_patreon_users = []
all_users = []
clientid = alembic.context.config.get_section_option('lrrbot', 'patreon_clientid')
clientsecret = alembic.context.config.get_section_option('lrrbot', 'patreon_clientsecret')
with requests.Session() as session:
for user_id, access_token, refresh_token, expires in existing_accounts:
now = datetime.datetime.now(tz=pytz.utc)
if expires < now:
req = session.post("https://api.patreon.com/oauth2/token", data={
'grant_type': 'refresh_token',
'client_id': clientid,
'client_secret': clientsecret,
'refresh_token': refresh_token
})
req.raise_for_status()
data = req.json()
access_token = data["access_token"]
refresh_token = data["refresh_token"]
expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=data["expires_in"])
req = session.get("https://api.patreon.com/oauth2/api/current_user", headers={"Authorization": "Bearer %s" % access_token})
req.raise_for_status()
data = req.json()
user = {
"patreon_id": data["data"]["id"],
"full_name": data["data"]["attributes"]["full_name"],
"access_token": access_token,
"refresh_token": refresh_token,
"token_expires": expires,
}
if 'pledges' in data["data"].get("relationships", {}):
for pledge in data["data"]["relationships"]["pledges"]["da | ta"]:
for ob | j in data["included"]:
if obj["id"] == pledge["id"] and obj["type"] == pledge["type"]:
user["pledge_start"] = dateutil.parser.parse(obj["attributes"]["created_at"])
all_patreon_users.append(user)
all_users.append((user_id, data["data"]["id"]))
alembic.op.bulk_insert(patreon_users, all_patreon_users)
for user_id, patreon_id in all_users:
conn.execute(users.update()
.values(patreon_user=patreon_users.c.id)
.where(users.c.id == user_id)
.where(patreon_users.c.patreon_id == patreon_id)
)
alembic.op.drop_column("users", "patreon_access_token")
alembic.op.drop_column("users", "patreon_refresh_token")
alembic.op.drop_column("users", "patreon_token_expires")
def downgrade():
alembic.op.add_column("users", sqlalchemy.Column("patreon_access_token", sqlalchemy.Text))
alembic.op.add_column("users", sqlalchemy.Column("patreon_refresh_token", sqlalchemy.Text))
alembic.op.add_column("users", sqlalchemy.Column("patreon_token_expires", sqlalchemy.DateTime(timezone=True)))
conn = alembic.op.get_bind()
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
users = meta.tables["users"]
patreon_users = meta.tables["patreon_users"]
alembic.op.execute(users.update().where(users.c.patreon_id == patreon_users.c.id)).values({
"patreon_access_token": patreon_users.c.access_token,
"patreon_refresh_token": patreon_users.c.refresh_token,
"patreon_token_expires": patreon_users.c.token_expires,
})
alembic.op.drop_column("users", "patreon_id")
alembic.op.drop_table("patreon_users")
|
mmckerns/tutmom | check_env.py | Python | bsd-3-clause | 3,306 | 0.007864 | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2015-2016 California Institute of Technology.
# Copyright (c) 2016-2019 Mike McKerns.
# License: 3-clause BSD.
"""
check environment scipt
"""
import sys
# requirements
has = dict(
# optimization
scipy='0.6.0',
mystic='0.3.1',
# parallel computing
pathos='0.2.1',
# dependencies
pox='0.2.3',
dill='0.2.7',
klepto='0.1.4',
numpy='1.0',
sympy='0.6.7',
ppft='1.6.4.7',
multiprocess='0.70.5',
# examples
matplotlib='0.91',
jupyter='1.0',
cvxopt='1.1.0',
# optional
#pyina='0.2.0.dev0',
#pulp='1.6.0',
#Numberjack='1.1.0',
#python-constraints='1.2', # installs as 'constraints'
sqlalchemy='0.8.4',
)
# executables
# list: At least one item is expected
# tuple: All items are expected
run = dict(
# optimization
mystic=('mystic_log_reader.py','mystic_model_plotter.py',
'support_convergence.py','support_hypercube.py',
'support_hypercube_measures.py','support_hypercube_scenario.py',),
# parallel computing
pathos=('pathos_tunnel.py','pathos_server.py','tunneled_pathos_server.py',),
# dependencies
ppft=('ppserver.py',),
# examples
### jupyter-notebook
# optional
#pyina=('sync','cp','rm','ezpool.py','ezscatter.py',),
)
returns = 0
# check installed packages
for module in has.keys():
try:
_module = module.split('-')[-1]
__module__ = __import__(_module, globals(), locals(), [], 0)
exec('%s = __module__' % _module)
except ImportError:
print("%s:: %s" % (module, sys.exc_info()[1]))
run.pop(module, None)
returns += 1
# check required versions
from distutils.version import LooseVersion as V
for module,version in has.items():
try:
_module = module.split('-')[-1]
assert V(eval(_module).__version__) >= V(version)
except NameError:
pass # failed import
except AttributeError:
pass # can't version-check non-standard packages...
except AssertionError:
print("%s:: Version >= %s is required" % (module, version))
returns += 1
def executable_exist(module, prog):
try:
assert which(prog)
# process = Popen([prog, '--help'], stderr=STDOUT, stdout=PIPE)
# process.wait()
return True
except (OSError, AssertionError):
from sys import exc_info
print("%s:: Executable '%s' not found" % (module, prog))
| #print("%s:: %s" % (prog, exc_info()[1]))
return False
# check required executables
try:
from pox import which
#from subprocess import Popen, STDOUT, PIPE#, call
exc | ept ImportError:
sys.exit(returns)
for module,executables in run.items():
if isinstance(executables, list):
found = False
for executable in executables:
if executable_exist(module, executable):
found = True
break
if not found:
returns += 1
else:
for executable in executables:
if not executable_exist(module, executable):
returns += 1
# final report
if not returns:
print('-'*50)
print('OK. All required items installed.')
sys.exit(returns)
|
thegmarlow/TagTrack- | examples/oscilloscope.py | Python | mit | 678 | 0.00885 | import beaglebone_pru_adc as adc
import time
numsamples = 10000 # how many samples to capture
capture = adc.Capture()
capture.oscilloscope_init(adc.OFF_VALUES, numsamples) # captures AIN0 - | the first elt in AIN array
#capture.oscilloscope_init(adc.OFF_VALUES+8, numsamples) # captures AIN2 - the third elt in AIN array
capture.start()
for _ in range(10):
if capture.oscilloscope_is_complet | e():
break
print '.'
time.sleep(0.1)
capture.stop()
capture.wait()
print 'Saving oscilloscope values to "data.csv"'
with open('data.csv', 'w') as f:
for x in capture.oscilloscope_data(numsamples):
f.write(str(x) + '\n')
print 'done'
capture.close() |
indictranstech/reciphergroup-erpnext | erpnext/setup/page/setup_wizard/setup_wizard.py | Python | agpl-3.0 | 17,443 | 0.030098 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json, copy
from frappe.utils import cstr, flt, getdate
from frappe import _
from frappe.utils.file_manager import save_file
from frappe.translate import (set_default_language, get_dict,
get_lang_dict, send_translations, get_language_from_code)
from frappe.geo.country_info import get_country_info
from frappe.utils.nestedset import get_root_of
from .default_website import website_maker
import install_fixtures
from .sample_data import make_sample_data
from erpnext.accounts.utils import FiscalYearError
@frappe.whitelist()
def setup_account(args=None):
try:
if frappe.db.sql("select name from tabCompany"):
frappe.throw(_("Setup Already Complete!!"))
if not args:
args = frappe.local.form_dict
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
if args.language and args.language != "english":
set_default_language(args.language)
frappe.clear_cache()
install_fixtures.install(args.get("country"))
update_user_name(args)
frappe.local.message_log = []
create_fiscal_year_and_company(args)
frappe.local.message_log = []
create_users(args)
frappe.local.message_log = []
set_defaults(args)
frappe.local.message_log = []
create_territories()
frappe.local.message_log = []
create_price_lists(args)
frappe.local.message_log = []
create_feed_and_todo()
frappe.local.message_log = []
create_email_digest()
frappe.local.message_log = []
create_letter_head(args)
frappe.local.message_log = []
create_taxes(args)
frappe.local.message_log = []
create_items(args)
frappe.local.message_log = []
create_customers(args)
frappe.local.message_log = []
create_suppliers(args)
frappe.local.message_log = []
frappe.db.set_default('desktop:home_page', 'desktop')
website_maker(args.company_name.strip(), args.company_tagline, args.name)
create_logo(args)
frappe.db.commit()
login_as_first_user(args)
frappe.db.commit()
frappe.clear_cache()
if args.get("add_sample_data"):
try:
make_sample_data()
except FiscalYearError:
pass
except:
if args:
traceback = frappe.get_traceback()
for hook in frappe.get_hooks("setup_wizard_exception"):
frappe.get_attr(hook)(traceback, args)
raise
else:
for hook in frappe.get_hooks("setup_wizard_success"):
frappe.get_attr(hook)(args)
def update_user_name(args):
if args.get("email"):
args['name'] = args.get("email")
frappe.flags.mute_emails = True
doc = frappe.get_doc({
"doctype":"User",
"email": args.get("email"),
"first_name": args.get("first_name"),
"last_name": args.get("last_name")
})
doc.flags.no_welcome_mail = True
doc.insert()
frappe.flags.mute_emails = False
from frappe.auth import _update_password
_update_password(args.get("email"), args.get("password"))
else:
args['name'] = frappe.session.user
# Update User
if not args.get('last_name') or args.get('last_name')=='None':
args['last_name'] = None
frappe.db.sql("""update `tabUser` SET first_name=%(first_name)s,
last_name=%(last_name)s WHERE name=%(name)s""", args)
if args.get("attach_user"):
attach_user = args.get("attach_user").split(",")
if len(attach_user)==3:
filename, filetype, content = attach_user
fileurl = save_file(filename, content, "User", args.get("name"), decode=True).file_url
frappe.db.set_value("User", args.get("name"), "user_image", fileurl)
add_all_roles_to(args.get("name"))
def create_fiscal_year_and_company(args):
curr_fiscal_year = get_fy_details(args.get('fy_start_date'), args.get('fy_end_date'))
frappe.get_doc({
"doctype":"Fiscal Year",
'year': curr_fiscal_year,
'year_start_date': args.get('fy_start_date'),
'year_end_date': args.get('fy_end_date'),
}).insert()
# Company
frappe.get_doc({
"doctype":"Company",
'domain': args.get("industry"),
'company_name':args.get('company_name').strip(),
'abbr':args.get('company_abbr'),
'default_currency':args.get('currency'),
'country': args.get('country'),
'chart_of_accounts': args.get(('chart_of_accounts')),
}).insert()
# Bank Account
args["curr_fiscal_year"] = curr_fiscal_year
def create_price_lists(args):
for pl_type, pl_name in (("Selling", _("Standard Selling")), ("Buying", _("Standard Buying"))):
frappe.get_doc({
"doctype": | "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": 1 if pl_type == "Buying" else 0,
"selling": 1 if pl_type == "Selling" else 0,
"currency": args["currency"],
"territories": [{
"territory": get_roo | t_of("Territory")
}]
}).insert()
def set_defaults(args):
# enable default currency
frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)
global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
global_defaults.update({
'current_fiscal_year': args.curr_fiscal_year,
'default_currency': args.get('currency'),
'default_company':args.get('company_name').strip(),
"country": args.get("country"),
})
global_defaults.save()
number_format = get_country_info(args.get("country")).get("number_format", "#,###.##")
# replace these as float number formats, as they have 0 precision
# and are currency number formats and not for floats
if number_format=="#.###":
number_format = "#.###,##"
elif number_format=="#,###":
number_format = "#,###.##"
system_settings = frappe.get_doc("System Settings", "System Settings")
system_settings.update({
"language": args.get("language"),
"time_zone": args.get("timezone"),
"float_precision": 3,
"email_footer_address": args.get("company"),
'date_format': frappe.db.get_value("Country", args.get("country"), "date_format"),
'number_format': number_format,
'enable_scheduler': 1 if not frappe.flags.in_test else 0
})
system_settings.save()
accounts_settings = frappe.get_doc("Accounts Settings")
accounts_settings.auto_accounting_for_stock = 1
accounts_settings.save()
stock_settings = frappe.get_doc("Stock Settings")
stock_settings.item_naming_by = "Item Code"
stock_settings.valuation_method = "FIFO"
stock_settings.stock_uom = _("Nos")
stock_settings.auto_indent = 1
stock_settings.auto_insert_price_list_rate_if_missing = 1
stock_settings.save()
selling_settings = frappe.get_doc("Selling Settings")
selling_settings.cust_master_name = "Customer Name"
selling_settings.so_required = "No"
selling_settings.dn_required = "No"
selling_settings.save()
buying_settings = frappe.get_doc("Buying Settings")
buying_settings.supp_master_name = "Supplier Name"
buying_settings.po_required = "No"
buying_settings.pr_required = "No"
buying_settings.maintain_same_rate = 1
buying_settings.save()
notification_control = frappe.get_doc("Notification Control")
notification_control.quotation = 1
notification_control.sales_invoice = 1
notification_control.purchase_order = 1
notification_control.save()
hr_settings = frappe.get_doc("HR Settings")
hr_settings.emp_created_by = "Naming Series"
hr_settings.save()
def create_feed_and_todo():
"""update Activity feed and create todo for creation of item, customer, vendor"""
frappe.get_doc({
"doctype": "Feed",
"feed_type": "Comment",
"subject": "ERPNext Setup Complete!"
}).insert(ignore_permissions=True)
def create_email_digest():
from frappe.utils.user import get_system_managers
system_managers = get_system_managers(only_name=True)
if not system_managers:
return
companies = frappe.db.sql_list("select name FROM `tabCompany`")
for company in companies:
if not frappe.db.exists("Email Digest", "Default Weekly Digest - " + company):
edigest = frappe.get_doc({
"doctype": "Email Digest",
"name": "Default Weekly Digest - " + company,
"company": company,
"frequency": "Weekly",
"recipient_list": "\n".join(system_managers)
})
for df in edigest.meta.get("fields", {"fieldtype": "Check"}):
if df.fieldname != "scheduler_errors":
edigest.set(df.fieldname, 1)
edigest.insert()
# scheduler errors digest
if companies:
edigest = frappe. |
smartdata-x/robots | api/HttpApi.py | Python | apache-2.0 | 1,065 | 0.021719 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#encoding=utf-8
'''
Created on 2015年4月21日
@author: kerry
'' | '
from base.http import MIGHttpMethodGet,MIGHttpMethodPost
from base.miglog import miglog
import urlparse
import json
class HttpApi(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
@classmethod
def RequestMethodGet(cls,url,port=None,header=None,cookies= | None):
parse =urlparse.urlparse(url)
if(len(parse.query)==0):
neturl = parse.path
else:
neturl = parse.path+"?"+parse.query
http = MIGHttpMethodGet(neturl,parse.netloc)
http.HttpMethodGet(header, cookies, port)
return http.HttpGetContent()
@classmethod
def RequestMethodPost(cls,url,data,header=None,cookies=None):
parse = urlparse.urlparse(url)
neturl = parse.path
http = MIGHttpMethodPost(neturl,parse.netloc)
http.HttpMethodPost(data, header, cookies)
return http.HttpGetContent()
|
ziima/polint | setup.py | Python | gpl-3.0 | 269 | 0.003731 | # -*- coding: utf-8 -*-
from setuptools import setup
# There is a problem wi | th unicode characters in setup.cfg under Python 3.5 and 3.6
# See https://github.com/pypa/ | setuptools/issues/1062
# Try$ LC_ALL=C python3 setup.py --description
setup(author='Vlastimil Zíma')
|
landscape-test/all-messages | messages/pep8/E261.py | Python | unlicense | 64 | 0 | """
E261
Incl | ude at least two spaces befor | e inline comment
"""
|
vinodkc/spark | python/pyspark/sql/tests/test_column.py | Python | apache-2.0 | 8,775 | 0.002284 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import Column, Row
from pyspark.sql.types import StructType, StructField, LongType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase
class ColumnTests(ReusedSQLTestCase):
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([("Alice", 1)], ["name", "age"]).columns
self.assertEqual(columns, ["name", "age"])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_validate_column_types(self):
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegex(
TypeError, "Invalid argu | ment, not a string or column", lambda: _to_java_column(1)
)
class A:
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegex(
TypeError, "Inv | alid argument, not a string or column", lambda: udf(lambda x: x)(None)
)
self.assertRaises(TypeError, lambda: to_json(1))
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
ci == cs
self.assertTrue(isinstance((-ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = (
cs.contains("a"),
cs.like("a"),
cs.rlike("a"),
cs.ilike("A"),
cs.asc(),
cs.desc(),
cs.startswith("a"),
cs.endswith("a"),
ci.eqNullSafe(cs),
)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegex(
ValueError, "Cannot apply 'in' operator against a column", lambda: 1 in cs
)
def test_column_accessor(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value="1")], df.where(df.key == 1).select(df.value).collect())
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df["key"], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", "bigint")], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
self.assertTrue(columnName in repr(df[columnName]))
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result["(a & b)"])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result["(a | b)"])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result["(a ^ b)"])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result["~b"])
result = df.select(functions.bitwise_not(df.b)).collect()[0].asDict()
self.assertEqual(~75, result["~b"])
def test_with_field(self):
from pyspark.sql.functions import lit, col
df = self.spark.createDataFrame([Row(a=Row(b=1, c=2))])
self.assertIsInstance(df["a"].withField("b", lit(3)), Column)
self.assertIsInstance(df["a"].withField("d", lit(3)), Column)
result = df.withColumn("a", df["a"].withField("d", lit(3))).collect()[0].asDict()
self.assertEqual(3, result["a"]["d"])
result = df.withColumn("a", df["a"].withField("b", lit(3))).collect()[0].asDict()
self.assertEqual(3, result["a"]["b"])
self.assertRaisesRegex(
TypeError, "col should be a Column", lambda: df["a"].withField("b", 3)
)
self.assertRaisesRegex(
TypeError, "fieldName should be a string", lambda: df["a"].withField(col("b"), lit(3))
)
def test_drop_fields(self):
df = self.spark.createDataFrame([Row(a=Row(b=1, c=2, d=Row(e=3, f=4)))])
self.assertIsInstance(df["a"].dropFields("b"), Column)
self.assertIsInstance(df["a"].dropFields("b", "c"), Column)
self.assertIsInstance(df["a"].dropFields("d.e"), Column)
result = (
df.select(
df["a"].dropFields("b").alias("a1"),
df["a"].dropFields("d.e").alias("a2"),
)
.first()
|
shawncaojob/LC | PY/22_generate_parentheses.py | Python | gpl-3.0 | 3,269 | 0.007648 | # 22. Generate Parentheses My Submissions QuestionEditorial Solution
# Total Accepted: 90088 Total Submissions: 240551 Difficulty: Medium
# Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
#
# For example, given n = 3, a solution set is:
#
# "((()))", "(()())", "(())()", "()(())", "()()()"
#
# Subscribe to see which companies asked this question
# 2018.03.22 DFS
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int |
:rtype: List[s | tr]
"""
def dfs(line, n, nl, nr):
if nl == nr and nl + nr == n * 2:
res.append(line)
return
if nr < n and nr < nl:
dfs(line + ")", n, nl, nr + 1)
if nl < n:
dfs(line + "(", n, nl + 1, nr)
res = []
dfs("", n, 0, 0)
return res
# 2017.04.23 Rewrite
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = []
self.dfs(res, "", 0, 0, n)
return res
def dfs(self, res, line, lc, rc, n):
if lc + rc == 2 * n:
res.append(line)
return
if lc < n:
self.dfs(res, line + "(", lc + 1, rc, n)
if rc < lc:
self.dfs(res, line + ")", lc, rc + 1, n)
# 11.19.2016. Rewrite DFS. Clear
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = []
self.dfs(res, "", n, 0)
return res
def dfs(self, res, line, lefts, rights):
if lefts == 0 and rights == 0:
res.append(line)
return
if lefts > 0:
self.dfs(res, line + "(", lefts - 1, rights + 1)
if rights > 0:
self.dfs(res, line + ")", lefts, rights - 1)
# DFS, l r to keep track of
class Solution2(object):
def dfs(self, l, r, n, line, res):
# l, r for the number of (, ) in line
if l == n and r == n:
res.append(line)
return
elif l > n or r > n:
return
else:
for char in "()":
# Case "("
if char == "(":
self.dfs(l+1, r, n, line+char, res)
# Case ")"
elif l > r and char == ")":
self.dfs(l, r+1, n, line+char, res)
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = []
self.dfs(0, 0, n, "", res)
return res
import unittest
class TestSolution(unittest.TestCase):
def test_0(self):
self.assertEqual(Solution().generateParenthesis(0), [""])
def test_1(self):
self.assertEqual(Solution().generateParenthesis(1), ["()"])
def test_2(self):
self.assertEqual(Solution().generateParenthesis(2), ["(())", "()()"])
def test_3(self):
self.assertEqual(Solution().generateParenthesis(3), ["((()))", "(()())", "(())()", "()(())", "()()()"])
if __name__ == "__main__":
unittest.main()
|
azurestandard/django | django/contrib/sessions/tests.py | Python | bsd-3-clause | 16,572 | 0.001026 | from datetime import datetime, timedelta
import shutil
import string
import tempfile
import warnings
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.se | ssion['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
| 'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertTrue('some key' in self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(self.session.values(), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(self.session.values(), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iterkeys()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.itervalues()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iteritems()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(self.session.items(), [('x', 1)])
self.session.clear()
self.assertEqual(self.session.items(), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = self.session.items()
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(self.session.items(), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.")
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
# Using seconds
self.session.set_expiry(10)
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_timedelta(self):
# Using timedelta
self.session.set_expiry(timedelta(seconds=10))
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_datetime(self):
# Using fixed datet |
geary/claslite | web/app/lib/elementtree/HTMLTreeBuilder.py | Python | unlicense | 7,826 | 0.001278 | #
# ElementTree
# $Id: HTMLTreeBuilder.py 3265 2007-09-06 20:42:00Z fredrik $
#
# a simple tree builder, for HTML input
#
# history:
# 2002-04-06 fl created
# 2002-04-07 fl ignore IMG and HR end tags
# 2002-04-07 fl added support for 1.5.2 and later
# 2003-04-13 fl added HTMLTreeBuilder alias
# 2004-12-02 fl don't feed non-ASCII charrefs/entities as 8-bit strings
# 2004-12-05 fl don't feed non-ASCII CDATA as 8-bit strings
#
# Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to build element trees from HTML files.
##
import htmlentitydefs
import re, string, sys
import mimetools, StringIO
import ElementTree
AUTOCLOSE = "p", "li", "tr", "th", "td", "head", "body"
IGNOREEND = "img", "hr", "meta", "link", "br"
if sys.version[:3] == "1.5":
is_not_ascii = re.compile(r"[\x80-\xff]").search # 1.5.2
else:
is_not_ascii = re.compile(eval(r'u"[\u0080-\uffff]"')).search
try:
from HTMLParser import HTMLParser
except ImportError:
from sgmllib import SGMLParser
# hack to use sgmllib's SGMLParser to emulate 2.2's HTMLParser
class HTMLParser(SGMLParser):
# the following only works as long as this class doesn't
# provide any do, start, or end handlers
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, attrs)
def unknown_endtag(self, tag):
self.handle_endtag(tag)
##
# ElementTree builder for HTML source code. This builder converts an
# HTML document or fragment to an ElementTree.
# <p>
# The parser is relatively picky, and requires balanced tags for most
# elements. However, elements belonging to the following group are
# automatically closed: P, LI, TR, TH, and TD. In addition, the
# parser automatically inserts end tags immediately after the start
# tag, and ignores any end tags for the following group: IMG, HR,
# META, and LINK.
#
# @keyparam builder Optional builder object. If omitted, the parser
# uses the standard <b>elementtree</b> builder.
# @keyparam encoding Optional character encoding, if known. | If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-885 | 9-1. Note that if your
# document uses a non-ASCII compatible encoding, you must decode
# the document before parsing.
#
# @see elementtree.ElementTree
class HTMLTreeBuilder(HTMLParser):
# FIXME: shouldn't this class be named Parser, not Builder?
def __init__(self, builder=None, encoding=None):
self.__stack = []
if builder is None:
builder = ElementTree.TreeBuilder()
self.__builder = builder
self.encoding = encoding or "iso-8859-1"
HTMLParser.__init__(self)
##
# Flushes parser buffers, and return the root element.
#
# @return An Element instance.
def close(self):
HTMLParser.close(self)
return self.__builder.close()
##
# (Internal) Handles start tags.
def handle_starttag(self, tag, attrs):
if tag == "meta":
# look for encoding directives
http_equiv = content = None
for k, v in attrs:
if k == "http-equiv":
http_equiv = string.lower(v)
elif k == "content":
content = v
if http_equiv == "content-type" and content:
# use mimetools to parse the http header
header = mimetools.Message(
StringIO.StringIO("%s: %s\n\n" % (http_equiv, content))
)
encoding = header.getparam("charset")
if encoding:
self.encoding = encoding
if tag in AUTOCLOSE:
if self.__stack and self.__stack[-1] == tag:
self.handle_endtag(tag)
self.__stack.append(tag)
attrib = {}
if attrs:
for k, v in attrs:
attrib[string.lower(k)] = v
self.__builder.start(tag, attrib)
if tag in IGNOREEND:
self.__stack.pop()
self.__builder.end(tag)
##
# (Internal) Handles end tags.
def handle_endtag(self, tag):
if tag in IGNOREEND:
return
lasttag = self.__stack.pop()
if tag != lasttag and lasttag in AUTOCLOSE:
self.handle_endtag(lasttag)
self.__builder.end(tag)
##
# (Internal) Handles character references.
def handle_charref(self, char):
if char[:1] == "x":
char = int(char[1:], 16)
else:
char = int(char)
if 0 <= char < 128:
self.__builder.data(chr(char))
else:
self.__builder.data(unichr(char))
##
# (Internal) Handles entity references.
def handle_entityref(self, name):
entity = htmlentitydefs.entitydefs.get(name)
if entity:
if len(entity) == 1:
entity = ord(entity)
else:
entity = int(entity[2:-1])
if 0 <= entity < 128:
self.__builder.data(chr(entity))
else:
self.__builder.data(unichr(entity))
else:
self.unknown_entityref(name)
##
# (Internal) Handles character data.
def handle_data(self, data):
if isinstance(data, type('')) and is_not_ascii(data):
# convert to unicode, but only if necessary
data = unicode(data, self.encoding, "ignore")
self.__builder.data(data)
##
# (Hook) Handles unknown entity references. The default action
# is to ignore unknown entities.
def unknown_entityref(self, name):
pass # ignore by default; override if necessary
##
# An alias for the <b>HTMLTreeBuilder</b> class.
TreeBuilder = HTMLTreeBuilder
##
# Parse an HTML document or document fragment.
#
# @param source A filename or file object containing HTML data.
# @param encoding Optional character encoding, if known. If omitted,
# the parser looks for META tags inside the document. If no tags
# are found, the parser defaults to ISO-8859-1.
# @return An ElementTree instance
def parse(source, encoding=None):
return ElementTree.parse(source, HTMLTreeBuilder(encoding=encoding))
if __name__ == "__main__":
import sys
ElementTree.dump(parse(open(sys.argv[1])))
|
gencer/python-phonenumbers | python/phonenumbers/shortdata/region_IS.py | Python | apache-2.0 | 1,126 | 0.007993 | """Auto-generated file, do not edit by hand. IS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IS = PhoneMetadata(id='IS', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_length=(3, 4, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='1717', example_number='1717', possible_length=(4,)),
premium_rate=PhoneNumberDesc(national_number_pattern='1848', example_number='1848', p | ossible_length=(4,)),
emergency=PhoneNumberDesc(national_number_pattern='112', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:[28]|6(?:1(?:23|16)))|4(?:00|1[145]|4[0146])|55|7(?:00|17|7[07-9])|8(?:0[08 | ]|1[016-9]|20|48|8[018])|900)', example_number='112', possible_length=(3, 4, 6)),
carrier_specific=PhoneNumberDesc(national_number_pattern='1441', example_number='1441', possible_length=(4,)),
sms_services=PhoneNumberDesc(national_number_pattern='1(?:415|848|900)', example_number='1415', possible_length=(4,)),
short_data=True)
|
protomouse/Flexget | flexget/validator.py | Python | mit | 18,604 | 0.002634 | from __future__ import unicode_literals, division, absolute_import, print_function
import re
from flexget.config_schema import process_config
# TODO: rename all validator.valid -> validator.accepts / accepted / accept ?
class Errors(object):
"""Create and hold validator error messages."""
def __init__(self):
self.messages = []
self.path = []
self.path_level = None
def count(self):
"""Return number of errors."""
return len(self.messages)
def add(self, msg):
"""Add new error message to current path."""
path = [unicode(p) for p in self.path]
msg = '[/%s] %s' % ('/'.join(path), msg)
self.messages.append(msg)
def back_out_errors(self, num=1):
"""Remove last num errors from list"""
if num > 0:
del self.messages[0 - num:]
def path_add_level(self, value='?'):
"""Adds level into error message path"""
self.path_level = len(self.path)
self.path.append(value)
def path_remove_level(self):
"""Removes level from path by depth number"""
if self.path_level is None:
raise Exception('no path level')
del(self.path[self.path_level])
self.path_level -= 1
def path_update_value(self, value):
"""Updates path level value"""
if self.path_level is None:
raise Exception('no path level')
self.path[self.path_level] = value
# A registry mapping validator names to their class
registry = {}
def factory(name='root', **kwargs):
"""Factory method, returns validator instance."""
if name not in registry:
raise Exception('Asked unknown validator \'%s\'' % name)
return registry[name](**kwargs)
def any_schema(schemas):
"""
Creates a schema that will match any of the given schemas.
Will not use anyOf if there is just one validator in the list, for simpler error messages.
"""
schemas = list(schemas)
if len(schemas) == 1:
return schemas[0]
else:
return {'anyOf': schemas}
class Validator(object):
name = 'validator'
class __metaclass__(type):
"""Automatically adds subclasses to the registry."""
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
if not 'name' in dict:
raise Exception('Validator %s is missing class-attribute name' % name)
registry[dict['name']] = cls
def __init__(self, parent=None, message=None, **kwargs):
self.valid = []
self.message = message
self.parent = parent
self._errors = None
@property
def errors(self):
"""Recursively return the Errors class from the root of the validator tree."""
if self.parent:
return self.parent.errors
else:
if not self._errors:
self._errors = Errors()
return self._errors
def add_root_parent(self):
| if self.name == 'root':
return self
root = factory('root')
root.accept(self)
return root
def add_parent(self, parent):
self.parent = parent
return pa | rent
def get_validator(self, value, **kwargs):
"""Returns a child validator of this one.
:param value:
Can be a validator type string, an already created Validator instance,
or a function that returns a validator instance.
:param kwargs:
Keyword arguments are passed on to validator init if a new validator is created.
"""
if isinstance(value, Validator):
# If we are passed a Validator instance, make it a child of this validator and return it.
value.add_parent(self)
return value
elif callable(value):
raise ValueError('lazy validators are no longer supported. Upgrade plugin to use new schema validation.')
# Otherwise create a new child Validator
kwargs['parent'] = self
return factory(value, **kwargs)
def accept(self, value, **kwargs):
raise NotImplementedError('Validator %s should override accept method' % self.__class__.__name__)
def schema(self):
schema = self._schema()
if self.message:
schema['error'] = self.message
return schema
def _schema(self):
"""Return schema for validator"""
raise NotImplementedError(self.__name__)
def validate(self, value):
"""This is just to unit test backwards compatibility of json schema with old validators"""
errors = list(e.message for e in process_config(value, self.schema()))
self.errors.messages = errors
return not errors
def __str__(self):
return '<validator:name=%s>' % self.name
__repr__ = __str__
class RootValidator(Validator):
name = 'root'
def accept(self, value, **kwargs):
v = self.get_validator(value, **kwargs)
self.valid.append(v)
return v
def _schema(self):
return any_schema([v.schema() for v in self.valid])
class ChoiceValidator(Validator):
name = 'choice'
def __init__(self, parent=None, **kwargs):
self.valid_ic = []
Validator.__init__(self, parent, **kwargs)
def accept(self, value, ignore_case=False):
"""
:param value: accepted text, int or boolean
:param bool ignore_case: Whether case matters for text values
"""
if not isinstance(value, (basestring, int, float)):
raise Exception('Choice validator only accepts strings and numbers')
if isinstance(value, basestring) and ignore_case:
self.valid_ic.append(value.lower())
else:
self.valid.append(value)
def accept_choices(self, values, **kwargs):
"""Same as accept but with multiple values (list)"""
for value in values:
self.accept(value, **kwargs)
def _schema(self):
schemas = []
if self.valid:
schemas.append({'enum': self.valid + self.valid_ic})
if self.valid_ic:
schemas.append(any_schema({"type": "string", "pattern": "(?i)^%s$" % p} for p in self.valid_ic))
s = any_schema(schemas)
s['error'] = 'Must be one of the following: %s' % ', '.join(map(unicode, self.valid + self.valid_ic))
return s
class AnyValidator(Validator):
name = 'any'
def accept(self, value, **kwargs):
self.valid = value
def _schema(self):
return {}
class EqualsValidator(Validator):
name = 'equals'
def accept(self, value, **kwargs):
self.valid = value
def _schema(self):
return {'enum': [self.valid]}
class NumberValidator(Validator):
name = 'number'
def accept(self, name, **kwargs):
pass
def _schema(self):
return {'type': 'number'}
class IntegerValidator(Validator):
name = 'integer'
def accept(self, name, **kwargs):
pass
def _schema(self):
return {'type': 'integer'}
# TODO: Why would we need this instead of NumberValidator?
class DecimalValidator(Validator):
name = 'decimal'
def accept(self, name, **kwargs):
pass
def _schema(self):
return {'type': 'number'}
class BooleanValidator(Validator):
name = 'boolean'
def accept(self, name, **kwargs):
pass
def _schema(self):
return {'type': 'boolean'}
class TextValidator(Validator):
name = 'text'
def accept(self, name, **kwargs):
pass
def _schema(self):
return {'type': 'string'}
class RegexpValidator(Validator):
name = 'regexp'
def accept(self, name, **kwargs):
pass
def _schema(self):
return {'type': 'string', 'format': 'regex'}
class RegexpMatchValidator(Validator):
name = 'regexp_match'
def __init__(self, parent=None, **kwargs):
Validator.__init__(self, parent, **kwargs)
self.regexps = []
self.reject_regexps = []
def add_regexp(self, regexp_list, regexp):
try:
regexp_list.append(re.compile(regexp))
except:
|
0--key/lib | portfolio/Python/scrapy/naturebest/naturesbest.py | Python | apache-2.0 | 2,587 | 0.003479 | import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class NaturesBestSpider(BaseSpider):
name = 'naturesbest.co.uk'
allowed_domains = ['www.naturesbest.co.uk', 'naturesbest.co.uk']
start_urls = ('http://www.naturesbest.co.uk/page/productdirectory/?alpha=abcde',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=fghij',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=klmno',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=pqrst',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=uvwxyz')
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# getting product links from A-Z product list
letter_links = hxs.select(u'//div[@class="content"]')
for letter_link in letter_links:
prod_urls = letter_link.select(u'./di | v/a/@href').extract()
for prod_url in prod_urls:
url = urljoin_rfc(get_base_url(response), prod_url)
yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
name = hxs.select(u'//div[@class="productTITLE"]/h1/text()').extract()
| if name:
url = response.url
url = urljoin_rfc(get_base_url(response), url)
skus = hxs.select('//td[@class="skuname"]/text()').extract()
prices = hxs.select('//td[@class="price"]/text()').extract()
skus_prices = zip(skus, prices)
for sku, price in skus_prices:
loader = ProductLoader(item=Product(), selector=hxs)
loader.add_value('url', url)
loader.add_value('name', (name[0].strip() + ' ' + sku.strip()).replace(u'\xa0', ' '))
#loader.add_value('sku', sku)
loader.add_value('price', price)
yield loader.load_item() |
ChopChopKodi/pelisalacarta | python/version-plex/core/config.py | Python | gpl-3.0 | 2,224 | 0.026529 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Configuracion
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import os,io
from types import *
PLATFORM_NAME = "plex"
def get_platform():
return PLATFORM_NAME
def is_xbmc():
return False
def get_library_support():
return False
def get_system_platform():
return ""
def open_settings():
return
def get_setting(name, channel=""):
if channel:
from core import channeltools
value = channeltools.get_channel_setting(name, channel)
if not value is None:
return value
# Devolvemos el valor del parametro global 'name'
if name=="cache.dir":
return ""
if name=="debug" or name=="download.enabled":
return "false"
if name=="cookies.dir":
return os.getcwd()
if name=="cache.mode" or name=="thumbnail_type":
return "2"
else:
import bridge
try:
devuelve = bridge.get_setting(name)
except:
devuelve = ""
if type(devuelve) == BooleanType:
if devuelve:
devuelve = "true"
else:
devuelve = "false"
return devuelve
def set_setting(name,value, channel=""):
if channel:
from core import channeltools
return channeltools.set_channel_setting(name,value, channel)
else:
return ""
def get_localized_string(code):
import bridge
return bridge.get_localized_string(code)
def get_library_path():
| return ""
def get_temp_file(filename):
return ""
def get_runtime_path():
return os.path.abspath( os.path.join( os.path.dirname(__file__) , ".." ) )
def get_data_path(): |
return os.getcwd()
def get_cookie_data():
import os
ficherocookies = os.path.join( get_data_path(), 'cookies.lwp' )
cookiedatafile = open(ficherocookies,'r')
cookiedata = cookiedatafile.read()
cookiedatafile.close();
return cookiedata
def verify_directories_created():
return
|
seismology/mc_kernel | UTILS/repack_database.py | Python | gpl-3.0 | 9,908 | 0.001312 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Repacking Instaseis databases.
Requires click, h5py, and numpy.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2016
Simon Stähler (staehler@geophysik.uni-muenchen.de), 2016
:license:
GNU Lesser General Public License, Version 3 [non-commercial/academic use]
(http://www.gnu.org/copyleft/lgpl.html)
"""
import os
import click
import numpy as np
def maybe_encode(string, encoding='ascii'):
try:
return string.encode(encoding)
except AttributeError:
return string
except UnicodeEncodeError:
return string
def unroll_and_merge_netcdf4(filenames, output_folder):
"""
Completely unroll and merge both files.
"""
import netCDF4
from scipy.spatial import cKDTree
# Find MZZ, MXX_P_MYY, MXZ_MYZ, MXY_MXX_M_MYY directories
if len(filenames) == 4:
filenames = [os.path.normpath(_i) for _i in filenames]
mzz = [_i for _i in filenames if "MZZ" in _i]
mxx = [_i for _i in filenames if "MXX_P_MYY" in _i]
mxz = [_i for _i in filenames if "MXZ_MYZ" in _i]
mxy = [_i for _i in filenames if "MXY_MXX_M_MYY" in _i]
assert len(mzz) == 1
assert len(mxx) == 1
assert len(mxz) == 1
assert len(mxy) == 1
mzz = mzz[0]
mxx = mxx[0]
mxz = mxz[0]
mxy = mxy[0]
assert os.path.exists(mzz)
assert os.path.exists(mxx)
assert os.path.exists(mxz)
assert os.path.exists(mxy)
f_in_1 = netCDF4.Dataset(mzz, 'r')
f_in_2 = netCDF4.Dataset(mxx, 'r')
f_in_3 = netCDF4.Dataset(mxz, 'r')
f_in_4 = netCDF4.Dataset(mxy, 'r')
elif len(filenames) == 2:
pz = [_i for _i in filenames if "PZ" in _i]
px = [_i for _i in filenames if "PX" in _i]
assert len(pz) == 1
assert len(px) == 1
pz = pz[0]
px = px[0]
assert os.path.exists(pz)
assert os.path.exists(px)
f_in_1 = netCDF4.Dataset(pz, 'r')
f_in_2 = netCDF4.Dataset(px, 'r')
else:
print('Wrong number of simulations: ', len(filenames))
assert False
output_filename = os.path.join(output_folder, "merged_instaseis_db.nc4")
assert not os.path.exists(output_filename)
# Get sorting order
r = np.array([f_in_1.groups['Mesh'].variables['mp_mesh_Z'][:],
f_in_1.groups['Mesh'].variables['mp_mesh_S'][:]]).transpose()
ctree = cKDTree(r)
inds = ctree.indices
try:
f_out = netCDF4.Dataset(output_filename, 'w', format='NETCDF4')
# Copy attributes from the vertical file.
for name in f_in_1.ncattrs():
value = getattr(f_in_1, name)
print(name, value)
setattr(f_out, name, maybe_encode(value))
f_out.setncattr('nsim', len(filenames))
for name, dimension in f_in_1.dimensions.items():
if not dimension.isunlimited():
f_out.createDimension(name, len(dimension))
else:
f_out.createDimension(name, None)
# Create Mesh group and copy mesh variables
f_out.createGroup('Mesh')
for name, dimension in f_in_1['Mesh'].dimensions.items():
if not dimension.isunlimited():
f_out['Mesh'].createDimension(name, len(dimension))
else:
f_out['Mesh'].createDimension(name, None)
for name, variable in f_in_1['Mesh'].variables.items():
f_out['Mesh'].createVariable(name, variable.datatype,
variable.dimensions)
if ('elements',) == variable.dimensions:
print('Resorting %s' % name)
f_out['Mesh'].variables[name][:] = \
f_in_1['Mesh'].variables[name][inds]
elif name == 'sem_mesh':
print('Resorting first dim of %s' % name)
f_out['Mesh'].variables[name][:, :, :] = \
f_in_1['Mesh'].variables[name][inds, :, :]
elif name == 'fem_mesh':
print('Resorting first dim of %s' % name)
f_out['Mesh'].variables[name][:, :] = \
f_in_1['Mesh'].variables[name][inds, :]
else:
f_out['Mesh'].variables[name][:] = \
f_in_1['Mesh'].variables[name][:]
# Copy source time function variables
for name, variable in f_in_1['Snapshots'].variables.items():
if name in ['stf_dump', 'stf_d_dump'] | :
f_out.createVariable(name, variable.datatype,
variab | le.dimensions)
f_out.variables[name][:] = f_in_1['Snapshots'].variables[name][:]
# Create a new array but this time in 5D. The first dimension
# is the element number, the second and third are the GLL
# points in both directions, the fourth is the time axis, and the
# last the displacement axis.
ndumps = f_in_1.getncattr("number of strain dumps")
number_of_elements = f_in_1.getncattr("nelem_kwf_global")
npol = f_in_1.getncattr("npol")
# Get datasets and the dtype.
if len(filenames) == 2:
meshes = [
f_in_1["Snapshots"]["disp_s"], # PZ
f_in_1["Snapshots"]["disp_z"],
f_in_2["Snapshots"]["disp_s"], # PX
f_in_2["Snapshots"]["disp_p"],
f_in_2["Snapshots"]["disp_z"]]
elif len(filenames) == 4:
meshes = [
f_in_1["Snapshots"]["disp_s"], # MZZ
f_in_1["Snapshots"]["disp_z"],
f_in_2["Snapshots"]["disp_s"], # MXX + MYY
f_in_2["Snapshots"]["disp_z"],
f_in_3["Snapshots"]["disp_s"], # MXZ / MYZ
f_in_3["Snapshots"]["disp_p"],
f_in_3["Snapshots"]["disp_z"],
f_in_4["Snapshots"]["disp_s"], # MXY / MXX - MYY
f_in_4["Snapshots"]["disp_p"],
f_in_4["Snapshots"]["disp_z"]]
dtype = meshes[0].dtype
nvars = len(meshes)
dim_elements = f_out.createDimension('elements', number_of_elements)
dim_ipol = f_out.createDimension('ipol', npol + 1)
dim_jpol = f_out.createDimension('jpol', npol + 1)
dim_nvars = f_out.createDimension('variables', nvars)
dim_snaps = f_out.dimensions['snapshots']
ds_o = f_out.createVariable(varname="merged_snapshots",
dimensions=(dim_elements.name,
dim_nvars.name,
dim_jpol.name,
dim_ipol.name,
dim_snaps.name),
datatype=dtype, contiguous=True)
# Old order (Instaseis):
# dimensions=(dim_elements.name,
# dim_snaps.name,
# dim_ipol.name,
# dim_jpol.name,
# dim_nvars.name),
utemp = np.zeros((nvars, npol + 1, npol + 1, ndumps),
dtype=dtype)
# Now it becomes more interesting and very slow.
sem_mesh = f_in_1["Mesh"]["sem_mesh"]
with click.progressbar(range(number_of_elements),
length=number_of_elements,
label="\t ") as gll_idxs:
for gll_idx in gll_idxs:
gll_point_ids = sem_mesh[inds[gll_idx]]
# Load displacement from all GLL points.
for ivar, var in enumerate(meshes):
# The list of ids we have is unique but not sorted.
ids = gll_point_ids.flatten()
s_ids = np.sort(ids)
temp = var[:, s_ids]
for ipol in range(npol + 1):
|
PillowLounge/lolibot | hardcoded/statistics.py | Python | gpl-3.0 | 3,782 | 0.007147 | # log moments (mean, variance, skewness, kurtosis) and quantiles
# why am I spending time creating a complex quantile and histogram
# estimator when I only need average, so far
from math import sqrt
from bisect import bisect_left
import scipy.stats as st
maxlong = 9223372036854775807
class RunningStat(object):
    '''Gather single-pass statistical data from an iterable.

    ``moments[0]`` holds the running mean; ``moments[1]``, when allocated
    (``moments >= 2``), holds the Welford sum of squared deviations (M2).
    '''
    # 'actions'-style extras used by the string helpers (_mean, accumulated)
    # were previously missing from __slots__, causing AttributeError.
    __slots__ = ('count', 'moments', 'min', 'max', '_mean', 'accumulated')

    def __init__(self, moments=1, buckets=1, sorted=False):
        # Fixed: the first parameter was named ``object`` while the body used
        # ``self``, which raised NameError on instantiation.
        self.count = 0
        self.moments = [0] * moments  # statistical moments
        #self.buckets = [0] * buckets # count of items in each bucket
        #self.percentiles = [0] * (buckets + 1) # border values between buckets
        self.min = None
        self.max = None
        self._mean = 0        # accumulator used by the string helpers
        self.accumulated = 0  # total length accumulated via append()

    def __call__(self, iterable, quantifier=float):
        '''Wrap an iterable, recording stats for every item passed through.'''
        # Fixed: the original consumed a single item (no loop), used the
        # invalid ``else if`` spelling, and compared against the initial
        # ``None`` min/max, which breaks on the first item.
        for item in iterable:
            self.count += 1
            num = quantifier(item)
            if self.min is None or num < self.min:
                self.min = num
            if self.max is None or num > self.max:
                self.max = num
            self.add_to_moments(num)
            yield item

    def add_to_moments(self, num):
        '''Welford update of the running mean (and M2 when allocated).'''
        oldmean = self.moments[0]
        try: newmean = oldmean + (num - oldmean) / self.count
        except ZeroDivisionError: newmean = num
        # Fixed: ``vk`` was read before assignment and the two factors were
        # juxtaposed (a function call) instead of multiplied.
        if len(self.moments) > 1:
            self.moments[1] += (num - oldmean) * (num - newmean)
        self.moments[0] = newmean

    def __len__(self):
        return self.count

    def __iadd__(self, other):
        # Accept either a single string or an iterable of strings.
        if type(other) is str:
            self._addstr(other)
        else:
            for string in other: self._addstr(string)
        return self  # augmented assignment must return the instance

    #def __enter__(self): pass
    def __exit__(self, *exc):
        self._mean = float(self._mean / self.count)

    def _addstr(self, string):
        words = string.split()
        self.count += len(words)  # was ``=``, which discarded earlier strings
        for w in words: self._mean += len(w)

    def _mean_(self):
        # Fixed: ``__exit__`` was called as a bare (undefined) global.
        if type(self._mean) is int: self.__exit__()
        return self._mean

    def append(self, other):
        self.count += 1
        self.accumulated += len(other)

    @property
    def mean(self): return self.moments[0]
    @property
    def variance(self):
        # IndexError unless the instance was created with moments >= 2.
        return self.moments[1]
    @property
    def kurtosis(self): return self.moments[2]
class Gen(object):
    """Re-iterable wrapper: each __iter__ hands out a counting Iter."""
    __slots__ = ('inner',)

    def __init__(self, inner):
        self.inner = inner

    def __iter__(self):
        return Iter(self, self.inner)

    def __len__(self):
        return len(self.inner)
class Iter(object):
    '''Counting iterator wrapper; optionally maps ``action`` over items.'''
    # Fixed: 'actions' was missing from __slots__, so __init__ raised
    # AttributeError on every construction.
    __slots__ = ('generator', 'count', 'actions', 'inner')

    def __new__(cls, gen, iterable, action=None):
        # Reuse an existing wrapper instead of double-wrapping it.
        if isinstance(iterable, cls):
            return iterable
        # Fixed: object.__new__ accepts no extra arguments in Python 3.
        return super().__new__(cls)

    def __init__(self, gen, iterable, action=None):
        if self is iterable:
            # __new__ returned an already-initialised wrapper; don't reset it.
            return
        self.generator = gen
        self.count = 0
        self.actions = [] if action is None else [action]
        self.inner = iterable \
            if hasattr(iterable, '__next__') \
            else iterable.__iter__()

    def __iter__(self):
        return self

    def __next__(self):
        r = self.inner.__next__()
        for a in self.actions:
            r = a(r)
        self.count += 1
        return r

    def __len__(self):
        # Remaining items: parent length minus items already consumed.
        return self.generator.__len__() - self.count
# Two-sided 95% normal quantile used by the Wilson interval.
z_score = st.norm.ppf((1+.95)/2)
z_sqr = z_score*z_score

def wilson_score(positive, n):
    '''Return the lower bound of the Wilson score confidence interval (95%)
    for a Bernoulli parameter, given ``positive`` successes out of ``n``.

    resource: http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
    '''
    assert positive <= n
    # Fixed: ``n is 0`` relied on CPython small-int interning; use equality.
    if n == 0: return float('NaN')
    # float() guards against integer floor-division under Python 2.
    p = positive / float(n)
    # Fixed: the original used the invalid identifier ``zz÷n`` and an
    # undefined name ``z`` (the module constant is ``z_score``).
    zz_n = z_sqr / n
    return (p + zz_n/2 - z_score * sqrt((p * (1 - p) + zz_n/4) / n)) \
        / (1 + zz_n)
# trying using closure instead
def stats(gen, moments=2, readers=[]):
def generator():
|
sebalander/trilateration | trilatera.py | Python | gpl-2.0 | 11,639 | 0.007991 | '''
practicar trilateracion
'''
# %%
import numpy as np
import numpy.linalg as ln
import matplotlib.pyplot as plt
import numdifftools as ndf
from scipy.special import chdtri
# %%
kml_file = "/home/sebalander/Code/VisionUNQextra/trilateration/trilat.kml"
# %%
texto = open(kml_file, 'r').read()
names = list()
data = list()
for line in texto.splitlines():
line = line.replace("\t","")
if line.find("name") is not -1:
name = line[6:8]
#print(name)
if line.find("coordinates") is not -1:
coords = line[13:-14]
lon, lat, _ = coords.split(sep=',')
names.append(name)
data.append([float(lon), float(lat)])
#print(data[-1])
data = np.array(data).T
data, names
xGPS = data.T
# plot data gathered from xml file
fig, ax = plt.subplots()
ax.scatter(data[0], data[1])
ax.set_xlim([min(data[0]) - 2e-5, max(data[0]) - 2e-5])
ax.set_ylim([min(data[1]) - 2e-5, max(data[1]) - 2e-5])
for i, tx in enumerate(names):
ax.annotate(tx, (data[0,i], data[1,i]))
# %% distancias medidas con bosch glm 250 vf
# cargar las distancias puestas a mano en Db (bosch)
# tambien las distancias sacada de google earth Dg
Db = np.zeros((8, 8, 3), dtype=float)
Db[0, 1] = [8.6237, 8.6243, 8.6206]
Db[0, 2] = [7.0895, 7.0952, 7.0842]
Db[0, 3] = [13.097, 13.104, 13.107]
Db[0, 4] = [18.644, 18.642, 18.641]
Db[0, 5] = [24.630, 24.649, 24.670]
Db[0, 6] = [25.223, 25.218, 25.219]
Db[0, 7] = [41.425, 41.391, 41.401]
Db[1, 2] = [3.8999, 3.8961, 3.89755] # la tercera la invente como el promedio
Db[1, 3] = [14.584, 14.619, 14.6015] # tercera inventada
Db[1, 4] = [17.723, 17.745, 17.775]
Db[1, 5] = [25.771, 25.752, 25.752]
Db[1, 6] = [22.799, 22.791, 22.793]
Db[1, 7] = [41.820, 41.827, 41.826]
Db[2, 3] = [10.678, 10.687, 10.682]
Db[2, 4] = [14.287, 14.262, 14.278]
Db[2, 5] = [22.016, 22.003, 22.002]
Db[2, 6] = [19.961, 19.962, 19.964]
Db[2, 7] = [38.281, 38.289, 38.282] # * medicion corrida 37cm por un obstaculo
Db[3, 4] = [6.3853, 6.3895, 6.3888]
Db[3, 5] = [11.640, 11.645, 11.644]
Db[3, 6] = [13.599, 13.596, 13.606]
Db[3, 7] = [28.374, 28.371, 28.366]
Db[4, 5] = [8.8504, 8.8416, 8.8448]
Db[4, 6] = [7.2463, 7.2536, 7.2526]
Db[4, 7] = [24.088, 24.086, 24.087]
Db[5, 6] = [10.523, 10.521, 10.522]
Db[5, 7] = [16.798, 16.801 | , 16.797]
Db[6, 7] = [20.794, 20.786, 20.788]
Db -= 0.055 # le resto 5.5cm porque medimos desde la bas en lugar de l centro
indAux = np.arange(len(Db))
Db[indAux, indAux] = 0.0
dg = np.zeros(Db.shape[:2], dtype=float)
dg[0, 1] = 8.27
dg[0, 2] = 6.85
dg[0, 3] = 13.01
dg[0, 4] = 18.5
dg[0, 5] = 24.79
dg[0, 6] = 25.07
dg[0, 7] = 41.22
dg[1, 2] = 3.65
dg[1, 3] = 14.53
dg[1, 4] = 17.63
dg[1, 5] = 26.00
dg[1, 6] = 22.82
dg[1, 7] = 41.65
dg[2, 3] = 1 | 0.89
dg[2, 4] = 14.53
dg[2, 5] = 22.55
dg[2, 6] = 20.22
dg[2, 7] = 38.43
dg[3, 4] = 6.37
dg[3, 5] = 11.97
dg[3, 6] = 13.30
dg[3, 7] = 28.26
dg[4, 5] = 9.17
dg[4, 6] = 6.93
dg[4, 7] = 24.05
dg[5, 6] = 10.40
dg[5, 7] = 16.41
dg[6, 7] = 20.49
# las hago simetricas para olvidarme el tema de los indices
triuInd = np.triu_indices(8)
dg.T[triuInd] = dg[triuInd]
Db.transpose([1,0,2])[triuInd] = Db[triuInd]
db = np.mean(Db, axis=2)
# grafico comparacion de matris de distancias
plt.figure()
plt.imshow(np.hstack([db, dg]))
#plt.figure()
#plt.imshow(db - dg)
#
#plt.figure()
#plt.imshow((db - dg) / db)
# %% hacemos trilateracion sencilla para sacar condiciones iniciales
# el array con las coordenadas de todos los puntos
def trilateracion(d, signos=None):
    '''
    Recover the coordinates of every point from the distance matrix ``d``:
    point 0 sits at the origin and point 1 defines the x axis.  If a list
    of ``signos`` (signs) is given, the y direction of points 2.. is
    corrected accordingly.
    '''
    sq = d**2  # work with squared distances throughout
    n = d.shape[0]
    X = np.zeros((n, 2), dtype=float)  # point 0 already at (0, 0)
    X[1, 0] = d[0, 1]  # point 1 lies on the x versor
    # remaining points, trilaterated against points 0 and 1
    X[2:, 0] = (sq[0, 1] + sq[0, 2:] - sq[1, 2:]) / 2 / d[0, 1]
    X[2:, 1] = np.sqrt(sq[0, 2:] - X[2:, 0]**2)
    if signos is not None:
        X[2:, 1] *= signos
    return X
Xb = trilateracion(db)
Xg = trilateracion(dg)
fig, ax = plt.subplots()
ax.scatter(Xb.T[0], Xb.T[1], label='bosch')
ax.scatter(Xg.T[0], Xg.T[1], label='google-earth')
for i, tx in enumerate(names):
ax.annotate(tx, (Xb[i, 0], Xb[i, 1]))
ax.legend()
# %% función error
def x2flat(x):
    '''
    Flatten the free (optimizable) coordinates of ``x`` into one vector:
    the x coordinate of point 1 followed by points 2.. row by row.
    '''
    return np.concatenate(([x[1, 0]], x[2:].ravel()))
def flat2x(xF):
    '''
    Rebuild the full coordinate array from the flat vector: point 0 at the
    origin, point 1 on the x axis, the rest reshaped two-per-row.
    '''
    base = np.array([[0.0, 0.0], [xF[0], 0.0]])
    rest = np.asarray(xF[1:]).reshape(-1, 2)
    return np.concatenate((base, rest))
def distEr(d1, d2):
    '''
    RMS error metric between two distance vectors.
    '''
    residual = d1 - d2
    return np.sqrt(np.mean(residual**2))
def dists(xF):
    '''
    Distance vector for the flattened coordinates: the upper triangle
    (k=1) of the pairwise distance matrix, in row-major order.
    '''
    x = flat2x(xF)
    n = x.shape[0]
    rows, cols = np.triu_indices(n, k=1)
    return ln.norm(x[cols] - x[rows], axis=1)
# los indices para leer las distancias de la matriz
upInd = np.triu_indices_from(db, k=1)
## defino el jacobiano de las distancias vs x
#Jd = ndf.Jacobian(dists)
#
#def newtonOpt(Xb, db, ep=1e-15):
# errores = list()
# d = db[upInd]
#
# #print("cond inicial")
# xF = x2flat(Xb)
# D = dists(xF)
# errores.append(distEr(d, D))
# #print(errores[-1])
#
# # hago un paso
# j = Jd(xF)
# xFp = xF + ln.pinv(j).dot(d - D)
# D = dists(xFp)
# errores.append(distEr(d, D))
# xF = xFp
#
# # mientras las correcciones sean mayores aun umbral
# while np.mean(np.abs(xFp - xF)) > ep:
# # for i in range(10):
# #print("paso")
# j = Jd(xF)
# xFp = xFp + ln.pinv(j).dot(d - D)
# D = dists(xF)
# errores.append(distEr(d, D))
# xF = xFp
# #print(errores[-1])
#
# return flat2x(xFp), errores
#
#xBOpt, e1 = newtonOpt(Xb, db)
#xGOpt, e2 = newtonOpt(Xg, dg)
#
## %%
#fig, ax = plt.subplots()
#ax.scatter(xBOpt.T[0], xBOpt.T[1], label='bosch optimo')
#ax.scatter(Xb.T[0], Xb.T[1], label='bosch inicial')
#for i, tx in enumerate(names):
# ax.annotate(tx, (xBOpt[i, 0], xBOpt[i, 1]))
#ax.legend()
#
#
#fig, ax = plt.subplots()
#ax.scatter(xGOpt.T[0], xGOpt.T[1], label='google earth optimo')
#ax.scatter(Xg.T[0], Xg.T[1], label='google earth inicial')
#for i, tx in enumerate(names):
# ax.annotate(tx, (xGOpt[i, 0], xGOpt[i, 1]))
#ax.legend()
#
#
#
#fig, ax = plt.subplots()
#ax.scatter(xBOpt.T[0], xBOpt.T[1], label='bosch optimo')
#ax.scatter(xGOpt.T[0], xGOpt.T[1], label='google-earth optimo')
#for i, tx in enumerate(names):
# ax.annotate(tx, (xBOpt[i, 0], xBOpt[i, 1]))
#ax.legend()
# %%esto no me acuerdo que es
dbFlat = db[upInd]
dgFlat = dg[upInd]
dif = dgFlat - dbFlat
plt.figure()
plt.scatter(dbFlat, dgFlat - dbFlat)
np.cov(dif)
# %% ahora sacar la incerteza en todo esto y optimizar
# establecer funcion error escalar
def distEr2(d1, d2):
    '''
    Sum-of-squared-differences metric between two distance vectors.
    '''
    residual = d1 - d2
    return (residual**2).sum()
def xEr(xF, d):
    '''Scalar misfit between measured distances ``d`` and the model
    distances implied by the flat coordinate vector ``xF``.'''
    return distEr2(d, dists(xF))
Jex = ndf.Jacobian(xEr)
Hex = ndf.Hessian(xEr)
def newtonOptE2(x, db, ep=1e-10):
errores = list()
d = db[upInd]
#print("cond inicial")
xF = x2flat(x)
D = dists(xF)
errores.append(distEr(d, D))
#print(errores[-1])
# hago un paso
A = Hex(xF, dbFlat)
B = Jex(xF, dbFlat)
l = np.real(ln.eig(A)[0])
print('autovals ', np.max(l), np.min(l))
dX = - ln.inv(A).dot(B.T)
xFp = xF + dX[:,0]
D = dists(xFp)
errores.append(distEr(d, D))
# mientras las correcciones sean mayores a un umbral
e = np.max(np.abs(xFp - xF))
print('correcciones ', e)
while e > ep:
xF = xFp
A = Hex(xF, dbFlat)
B = Jex(xF, dbFlat)
l = np.real(ln.eig(A)[0])
print('autovals ', np.max(l), np.min(l))
dX = - ln.inv(A).dot(B.T)
xFp |
GHubgenius/PeachOrchard | node/src/core/monitor.py | Python | mit | 1,446 | 0.003458 | from src.core.log import *
from src.core import node_resource as nr
from src.core import config
from src.core import utility
from time import sleep
import os
def monitor(fuzzy):
    """Main monitoring loop for a fuzzing node.

    Syncs the node's existing crashes upstream, then periodically sends a
    status update and forwards any newly observed crashes until interrupted.

    NOTE(review): this fragment contains stray ``|`` separator artifacts
    (dataset column markers) that are not valid Python -- confirm the exact
    lines against the upstream repository before running.
    """
    # known crashes; key is idx (top level folder before crash info); value is
    # NOTE(review): known_crashes is never used below -- presumably leftover.
    known_crashes = {}
    try:
| utility.msg("Initializing monitor for node %s..." % config.NODE_ID)
        # check monitor dir
        if not os.path.isdir(config.MONITOR_DIR): |
            utility.msg("Directory %s not found" % config.MONITOR_DIR, ERROR)
            return
        #
        # prior to monitor loop, lets ensure we're synced with upstream by providing
        # current set of crashes; dupes will be thrown out
        #
        # register crashes
        current_crashes = fuzzy.fetch_crashes()
        nr.register_crash(current_crashes)
        # baseline
        fuzzy.crashes = current_crashes
        while True:
            # status update
            nr.send_status_update(fuzzy.get_status())
            # check for any new crashes
            temporal_crash = fuzzy.check_new_crashes()
            if temporal_crash:
                # we have new crashes; ship them over
                nr.register_crash(temporal_crash)
            # sleep, now
            sleep(config.CHECK_INTERVAL)
    except KeyboardInterrupt:
        pass
    except Exception, e:
        utility.msg("Error during monitor: %s" % e, ERROR)
|
musashiXXX/django-clamav-upload | clamav_upload/exceptions.py | Python | gpl-3.0 | 363 | 0 | from django.contrib import messages
from | django.core.exceptions import PermissionDenied
class UploadPermissionDenied(PermissionDenied):
    """PermissionDenied variant that also logs and surfaces the error.

    On construction it passes ``error_message`` to ``log_func`` and attaches
    it to the request via the Django messages framework, then delegates the
    remaining arguments to ``PermissionDenied``.
    """

    def __init__(self, request, log_func, error_message, *args, **kwargs):
        log_func(error_message)
        messages.error(request, error_message)
        # Fixed: the original read ``*ar | gs`` (a stray dataset separator
        # splitting the token); the intended argument is ``*args``.
        super(UploadPermissionDenied, self).__init__(*args, **kwargs)
|
GreatLakesEnergy/sesh-dash-beta | seshdash/tests/test_import.py | Python | mit | 4,199 | 0.010002 | # Testing
from django.test import TestCase
from django.test.utils import override_settings
# APP Models
from seshdash.models import Sesh_User, Sesh_Alert, Alert_Rule, Sesh_Site,VRM_Account, BoM_Data_Point as Data_Point, Daily_Data_Point as ddp
# django Time related
from seshdash.utils import time_utils
from django.utils import timezone
from time import sleep
import pytz
from datetime import datetime,timedelta
#Helper Functions
from django.forms.models import model_to_dict
from django.db import transaction
#Security
from guardian.shortcuts import assign_perm
from geoposition import Geoposition
# Debug
from django.forms.models import model_to_dict
# To Test
from seshdash.data.db.influx import Influx
from seshdash.api.victron import VictronAPI
from django.conf import settings
from seshdash.tasks import get_historical_BoM, get_aggregate_daily_data,run_aggregate_on_historical, get_BOM_data
# This test case written to test alerting module.
# It aims to test if the system sends an email and creates an Sesh_Alert object when an alert is triggered.
class VRM_Import_TestCase(TestCase):
@override_settings(DEBUG=True)
def setUp(self):
self.VRM = VRM_Account.objects.create(vrm_user_id='demo@victronenergy.com',vrm_password="vrmdemo")
# Setup Influx
self._influx_db_name = 'test_db'
self.i = Influx(database=self._influx_db_name)
self.no_points = 288
try:
self.i.create_database(self._influx_db_name)
#Generate random data points for 24h
except Exception, e:
print e
self.i.delete_database(self._influx_db_name)
sleep(1)
self.i.create_database(self._influx_db_name)
pass
self.VRM_API = VictronAPI(self.VRM.vrm_user_id, self.VRM.vrm_password)
if self.VRM_API.IS_INITIALIZED:
sites = self.VRM_API.SYSTEMS_IDS
print sites
if len(sites) > 1:
self.vrm_site_id = sites[0][0]
self.location = Geoposition(52.5,24.3)
self.now = timezone.now()
self.start_date = self.now - timedelta(days=2)
self.site = Sesh_Site.objects.create(site_name=u"Test_aggregate",
| comission_date=self.start_date,
location_city=u"kigali",
location_country=u"rwanda",
vrm_account = self.VRM,
installed_kw=123.0,
position=self.location,
system_voltage=12,
| number_of_panels=12,
vrm_site_id=self.vrm_site_id,
battery_bank_capacity=12321,
has_genset=True,
has_grid=True,
has_pv=True)
#create test user
self.test_user = Sesh_User.objects.create_user(username="john doe",email="alp@gle.solar",password="asdasd12345")
#assign a user to the sites
assign_perm("view_Sesh_Site",self.test_user,self.site)
def tearDown(self):
self.i.delete_database(self._influx_db_name)
pass
def test_api_stats(self):
"""
Test Victron API points
"""
stats = self.VRM_API.get_system_stats(self.vrm_site_id)
self.assertTrue(stats.has_key('Input power 1'))
stats = self.VRM_API.get_battery_stats(self.vrm_site_id)
self.assertTrue(stats.has_key('Battery voltage'))
#stats = self.VRM_API.get_pv_stats(self.vrm_site_id)
#self.assertTrue(stats.has_key('PV - DC-coupled'))
#print self.VRM_API.ATTRIBUTE_DICT
def test_bom_data_point(self):
get_BOM_data()
sleep(1)
bom_data = Data_Point.objects.all()
#Commenting out the test
#TODO: UNCOMMENT THIS AND FIX THE PROBLEMS WITH GETTING DATA FROM VICTON
self.assertEqual(len(bom_data),1)
|
horizon-institute/chariot | src/app/deployments/migrations/0002_auto_20170419_1040.py | Python | mit | 2,022 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-19 10:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('deployments', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='deployment',
name='boiler_efficiency',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='deployment',
| name='boiler_manufacturer',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
| model_name='deployment',
name='boiler_model',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='deployment',
name='boiler_output',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='deployment',
name='building_height',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='deployment',
name='building_length',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='deployment',
name='building_width',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='deploymentsensor',
name='room_height',
field=models.FloatField(default=2.4),
),
migrations.AddField(
model_name='deploymentsensor',
name='room_length',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='deploymentsensor',
name='room_width',
field=models.FloatField(default=0),
),
]
|
bgaultier/laboitepro | boites/migrations/0008_auto_20170801_1406.py | Python | agpl-3.0 | 568 | 0.001761 | # -*- coding: | utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-01 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adjust metadata of ``Tile.duration`` only.

    The column type is unchanged (PositiveSmallIntegerField, default 5);
    only help_text/verbose_name are rewritten.
    NOTE(review): the verbose_name string below contains a stray ``|``
    separator artifact -- confirm against the upstream repository.
    """

    dependencies = [
        ('boites', '0007_auto_20170801_0645'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tile',
            name='duration',
            field=models.PositiveSmallIntegerField(default=5, help_text='Veuillez saisir une dur\xe9e durant laquelle la tuile sera affich\xe9e (en secondes)', verbose_name="Dur\xe9e | d'affichage de la tuile"),
        ),
    ]
|
mkelley/brew | brew/ingredients.py | Python | mit | 25,766 | 0.000388 | # Licensed under an MIT style license - see LICENSE
"""
ingredients --- Beer ingredients
================================
"""
from enum import Enum
from collections.abc import MutableSequence
from . import timing as T
__all__ = [
'PPG',
'CultureBank',
'Culture',
'Ingredient',
'Fermentable',
| 'Unfermentable',
'Hop',
'Spice',
'Fruit',
'Grain',
'Sugar',
'Wort',
'Other',
'Priming',
'Water',
'WaterTreatment',
'Ingredients',
]
# Source: Home Brewer's Companion
# Beersmith: http://www.beersmith.com/Grains/Grains/GrainList.htm
# name, PPG
class PPG(Enum):
    """Fermentable materials: value is the tuple (display name, PPG).

    PPG = gravity points per pound per gallon of the material.
    Sources: Home Brewer's Companion and Beersmith (see module comment).
    """
    AcidMalt = "Acid malt", 27 # (Germany) Beersmith
    AmericanTwoRow = "American 2-row", 37
    AmericanSixRow = "American 6-row", 35
    AmericanPaleAle = "American pale ale", 36
    BelgianPaleAle = "Belgian pale ale", 37
    BelgianPilsener = "Belgian pilsener", 37
    DriedMaltExtract = "Dried malt extract", 44
    EnglishTwoRow = "English 2-row", 38
    EnglishMild = "English mild", 37
    MarisOtter = "Maris Otter", 38
    GoldenPromise = "Golden Promise", 38
    WheatMalt = "Wheat malt", 38 # midwest / german / belgian
    AmericanRyeMalt = "American rye malt", 36
    GermanRyeMalt = "German rye malt", 38
    GermanPilsner = "German pilsner", 37
    GermanSmokedMalt = "German smoked malt", 37
    EnglishRyeMalt = "English rye malt", 40
    EnglishOatMalt = "English oat malt", 35
    AmericanVienna = "American Vienna", 36
    GermanVienna = "German Vienna", 37
    AmericanCarapils = "American Carapils", 34 # dextrine
    BelgianCarapils = "Belgian Carapils", 36
    AmericanMunich = "American Munich", 34
    GermanMunich = "German Munich", 37
    GermanMunichII = "Germain Munich II", 36
    BelgianMunich = "Belgian Munich", 37
    Caramunich = "Caramunich", 33 # Beersmith
    AmericanCaramel10 = "American caramel 10", 35
    AmericanCaramel20 = "American caramel 20", 35
    AmericanCaramel40 = "American caramel 40", 35
    AmericanCaramel60 = "American caramel 60", 34
    AmericanCaramel120 = "American caramel 120", 33
    EnglishCrystal20_30 = "English crystal 20-30", 36
    EnglishCrystal60_70 = "English crystal 60-70", 34
    EnglishCaramalt = "English Caramalt", 36
    BelgianCrystal = "Belgian crystal", 36
    AmericanVictory = "American Victory", 33
    BelgianBiscuit = "Belgian biscuit", 36
    BelgianAromatic = "Belgian aromatic", 36
    EnglishBrown = "English brown", 33
    EnglishAmber = "English amber", 33
    BelgianSpecialB = "Belgian Special B", 35
    AmericanChocolate = "American chocolate", 28 # Beersmith
    EnglishPaleChocolate = "English pale chocolate", 34 # Beersmith
    EnglishChocolate = "English chocolate", 34 # Beersmith
    Carahell = "Carahell", 35 # guess
    Black = "Black", 25 # Beersmith
    RoastedBarley = "Roasted barley", 18
    BarleyRaw = "Barley, raw", 32 # 30 to 34
    Barleyflaked = "Barley, flaked", 32 # 30 to 34
    CornFlaked = "Corn, flaked", 39
    CornGrits = "Corn grits", 37
    MilletRaw = "Millet, raw", 37
    SorghumRaw = "Sorghum, raw", 37
    OatsRaw = "Oats, raw", 33
    OatsFlaked = "Oats, flaked", 33
    RiceRaw = "Rice, raw", 38
    RiceFlaked = "Rice, flaked", 38
    RyeRaw = "Rye, raw", 36
    RyeFlaked = "Rye, flaked", 36
    WheatFlaked = "Wheat, flaked", 33
    WheatRaw = "Wheat, raw", 37
    WheatTorrified = "Wheat, torrified", 35
    AgaveSyrup = "Agave syrup", 34
    BelgianCandiSugar = "Belgian candi sugar", 46
    BelgianCandiSyrup = "Belgian candi syrup", 36
    CaneSugar = "Cane sugar", 46
    TableSugar = "Table sugar", 46
    TurbinadoSugar = "Turbinado sugar", 46
    LightBrownSugar = "Light brown sugar", 46
    DarkBrownSugar = "Dark brown sugar", 46
    CornSugarDextrose = "Corn sugar (dextrose)", 46
    Lactose = "Lactose", 35
    Honey = "Honey", 32 # 30 to 35
    MapleSap = "Maple sap", 9
    MapleSyrup = "Maple syrup", 30 # variable
    Molasses = "Molasses", 36
    Rapadura = "Rapadura", 40
    RiceExtract = "Rice extract", 34
    WhiteSorghumSyrup = "White sorghum syrup", 38
    PumpkinPuree = "Pumpkin puree", 2
class CultureBank(Enum):
    """Known fermentation cultures.

    Each value is (name, min apparent attenuation %, max apparent
    attenuation %) with an optional fourth URL element (see Gnome).
    """
    # name, min, max apparent attenutation, url
    AmericanAleUS05 = ('US-05, American Ale', 81, 81)
    AmericanAle1056 = ('WY1056, American Ale', 73, 77)
    CaliforniaAle = ('WLP001, California Ale', 73, 80)
    GermanAle = ('WLP029, GermanAle/Kölsch', 72, 78)
    CaliforniaAleV = ('WLP051, California Ale V', 70, 75)
    EnglishAle = ('WLP002, English Ale', 63, 70)
    IrishAle = ('WLP004, Irish Ale', 69, 74)
    DryEnglishAle = ('WLP007, Dry English Ale', 70, 80)
    Nottingham = ('Danstar Nottingham', 70, 80) # guess
    FrenchAle = ('WLP072, French Ale', 68, 75)
    CreamAleBlend = ('WLP080, Cream Ale Blend', 75, 80)
    Hefeweizen = ('WLP300, Hefeweizen', 72, 76)
    BelgianWit = ('WLP400, Belgian Wit', 74, 78)
    MonasteryAle = ('WLP500, Monastery Ale', 75, 80)
    AbbeyAle = ('WLP530, Abbey Ale', 75, 80)
    BelgianAle = ('WLP550, Belgian Ale', 78, 85)
    BelgianSaisonI = ('WLP565, Belgian Saison I', 65, 75)
    BelgianSaisonII = ('WLP566, Belgian Saison II', 78, 85)
    BelgianStyleSaison = ('WLP568, Belgian Style Saison', 70, 80)
    BelgianGoldenAle = ('WLP570, Belgian Golden Ale', 73, 78)
    BelgianStyleAleBlend = ('WLP575, Belgian Style Ale Blend', 74, 80)
    BelgianSaisonIII = ('WLP585, Belgian Saison III', 70, 74)
    TrappistHighGravity = ('WY3787, Trappist Style High Gravity', 74, 78)
    FrenchSaison590 = ('WLP590, French Saison', 73, 80)
    FrenchSaison3711 = ('WY3711, French Saison', 77, 83)
    SanFranciscoLager = ('WLP810, San Francisco Lager', 65, 70)
    OktoberfestLager = ('WLP820, Oktoberfest/Märzen Lager', 65, 73)
    SacchromycesBruxellensisTrois = (
        'WLP644, Sacchromyces bruxellensis Trois', 85, 100)
    BrettanomycesClaussenii = ('WLP645, Brettanomyces claussenii', 85, 100)
    BrettanomycesBruxellensisTroisVrai = (
        'WLP648, Brettanomyces bruxellensis Trois Vrai', 85, 100)
    BrettanomycesBruxellensis = ('WLP650, Brettanomyces bruxellensis', 85, 100)
    BrettanomycesLambicus = ('WLP653, Brettanomyces lambicus', 85, 100)
    SourMix1 = ('WLP655, Sour Mix 1', 85, 100)
    FlemishAleBlend = ('WLP665, Flemish Ale Blend', 80, 100)
    AmericanFarmhouseBlend = ('WLP670, American Farmhouse Blend', 75, 82)
    LactobacillusBrevis = ('WLP672, Lactobacillus Brevis', 80, 80)
    LactobacillusDelbrueckii = ('WLP677, Lactobacillus Delbrueckii', 75, 82)
    HouseSourMixI = ('House sour mix I', 86, 86)
    BottleDregs = ('Bottle dregs', 0, 100)
    Gnome = ('B45, Gnome', 72, 76,
             'http://www.imperialyeast.com/organic-yeast-strains/')
    T58 = ('SafAle T-58', 70, 80)
class Ingredient:
    """A single beer ingredient: a named item, an amount, and when to add it.

    Parameters
    ----------
    name : string
        The name of the ingredient.
    quantity : string
        The amount of the ingredient as a string.
    timing : Timing, optional
        When to add it.
    desc : string, optional
        A long-form description of the ingredient.
    """

    def __init__(self, name, quantity, timing=T.Unspecified(), desc=None):
        # Both name and quantity must be strings.
        for label, value in (('name', name), ('quantity', quantity)):
            if not isinstance(value, str):
                raise TypeError(label)
        self.name = name
        self.quantity = quantity
        self.timing = timing
        self.desc = desc if desc is not None else name

    def __repr__(self):
        return "<{}: {}>".format(type(self).__name__, str(self))

    def __str__(self):
        return "{}, {} at {}".format(self.name, self.quantity, self.timing)
class Culture(Ingredient):
"""Yeast or other cultures, ready for fermentation.
Parameters
----------
culture : CultureBank or tuple
Culture to propagate. May be from the `CultureBank` or a tuple:
(name, min apparent attenuation, max apparent attenuation).
quantity : string, optional
Quantity of the culture.
timing : Timing, optional
Timing of the addition.
desc : string, optional
Long-form description.
"""
def __init__(self, culture, quantity='1', timing=T.Primary(), desc=None):
if not isinstance(cultu |
popazerty/beyonwiz-4.1 | lib/python/Plugins/SystemPlugins/IceTV/API.py | Python | gpl-2.0 | 8,110 | 0.002219 | # kate: replace-tabs on; indent-width 4; remove-trailing-spaces all; show-tabs on; newline-at-eof on;
# -*- coding:utf-8 -*-
'''
Copyright (C) 2014 Peter Urbanec
All Right Reserved
License: Proprietary / Commercial - contact enigma.licensing (at) urbanec.net
'''
import requests
import json
from fcntl import ioctl
from struct import pack
from socket import socket, create_connection, AF_INET, SOCK_DGRAM, SHUT_RDWR, error as sockerror
from . import config, saveConfigFile
from boxbranding import getMachineBrand, getMachineName
_version_string = "20141027"
_protocol = "http://"
_server = "api.icetv.com.au"
_device_type_id = 22
_debug_level = 0 # 1 = request/reply, 2 = 1+headers, 3 = 2+partial body, 4 = 2+full body
def isServerReachable():
try:
sock = create_connection((_server, 80), 3)
sock.shutdown(SHUT_RDWR)
sock.close()
return True
except sockerror as ex:
print "[IceTV] Can not connect to IceTV server:", str(ex)
return False
def getMacAddress(ifname):
    # Query the kernel for the interface's hardware (MAC) address via the
    # SIOCGIFHWADDR ioctl (0x8927); falls back to all-zeros on any failure.
    result = "00:00:00:00:00:00"
    sock = socket(AF_INET, SOCK_DGRAM)
    # noinspection PyBroadException
    try:
        iface = pack('256s', ifname[:15])  # ifreq: name padded to 256 bytes
        info = ioctl(sock.fileno(), 0x8927, iface)
        # bytes 18..23 of the returned ifreq hold the 6-byte MAC address
        result = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1].upper()
    except:
        pass
    sock.close()
    return result
def haveCredentials():
    """True when a non-empty login token is stored in the plugin config."""
    token = config.plugins.icetv.member.token.value
    return bool(token)
def getCredentials():
    """Return the stored IceTV login credentials as a dict."""
    member = config.plugins.icetv.member
    return {
        "email_address": member.email_address.value,
        "token": member.token.value,
    }
def clearCredentials():
    """Blank the stored login token and persist the change to disk."""
    token = config.plugins.icetv.member.token
    token.value = ""
    token.save()
    saveConfigFile()
def showIdToEventId(show_id):
    """Map an IceTV show id onto a 16-bit DVB event id.

    The result always lies in 1..0xFFF7, avoiding 0 and the reserved
    range 0xFFF8-0xFFFF.
    """
    return int(show_id) % 0xFFF7 + 1
class Request(object):
    """Base HTTP request against the IceTV API.

    Builds the common query parameters, JSON headers and target URL;
    subclasses fill in ``self.data`` and call :meth:`send` with an HTTP
    verb.
    """
    def __init__(self, resource):
        super(Request, self).__init__()
        self.params = {
            "api_key": "9019fa88-bd0c-4b1b-94ac-6761aa6a664f",
            "application_version": _version_string,
        }
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "User-Agent": "SystemPlugins.IceTV/%s (%s; %s)" % (_version_string, getMachineBrand(), getMachineName()),
        }
        self.url = _protocol + _server + resource
        self.data = {}
        self.response = None
    def _shorten(self, text):
        # Truncate long bodies for debug logging: keep head and tail only.
        if len(text) < 4000:
            return text
        return text[:2000] + "\n...\n" + text[-2000:]
    def send(self, method):
        # Serialise self.data as JSON and issue the request; log detail
        # according to _debug_level, or unconditionally on a failed reply.
        data = json.dumps(self.data)
        r = requests.request(method, self.url, params=self.params, headers=self.headers, data=data, verify=False)
        err = not r.ok
        if err or _debug_level > 0:
            print "[IceTV]", r.request.method, r.request.url
        if err or _debug_level > 1:
            print "[IceTV] headers", r.request.headers
        # NOTE(review): when err is set, the '== 3' branch always wins, so
        # the full (untruncated) body is never printed on error.
        if err or _debug_level == 3:
            print "[IceTV]", self._shorten(r.request.body)
        elif err or _debug_level > 3:
            print "[IceTV]", r.request.body
        if err or _debug_level > 0:
            print "[IceTV]", r.status_code, r.reason
        if err or _debug_level > 1:
            print "[IceTV] headers", r.headers
        if err or _debug_level == 3:
            print "[IceTV]", self._shorten(r.text)
        elif err or _debug_level > 3:
            print "[IceTV]", r.text
        self.response = r
        if r.status_code == 401:
            # Token rejected upstream: drop it so the user re-authenticates.
            clearCredentials()
        r.raise_for_status()
        return r
class AuthRequest(Request):
    """Request that carries the stored login credentials as parameters."""
    def __init__(self, resource):
        super(AuthRequest, self).__init__(resource)
        self.params.update(getCredentials())
class Regions(Request):
    """GET /regions -- list all available IceTV regions."""
    def __init__(self):
        super(Regions, self).__init__("/regions")
    def get(self):
        return self.send("get")
class Region(Request):
    """GET /regions/<id> -- fetch a single region by numeric id."""
    def __init__(self, region):
        super(Region, self).__init__("/regions/" + str(int(region)))
    def get(self):
        return self.send("get")
class Channels(Request):
    """GET the channel list, either for all regions or one region."""
    def __init__(self, region=None):
        # Without a region the API returns channels across all regions.
        if region is None:
            super(Channels, self).__init__("/regions/channels")
        else:
            super(Channels, self).__init__("/regions/" + str(int(region)) + "/channels")
    def get(self):
        return self.send("get")
class Login(Request):
def __init__(self, email, password, region=None):
super(Login, self).__init__("/login")
self.data["device"] = {
"uid": getMacAddress('eth0'),
"label": config.plugins.icetv.device.label.value,
"type_id": config.plugins.icetv.device.type_id.value,
}
self.data["member"] = {
"email_address": email,
"password": password,
| }
if region:
self.data["member"]["region_id"] = region
def post(self):
return self.send("post")
def put(self):
return self.send("put")
def send(self, method):
r = super(Login, self).send(method)
result = r.json()
config.plugins.icetv.member.email_address.value = result["member"]["email_addres | s"]
config.plugins.icetv.member.token.value = result["member"]["token"]
config.plugins.icetv.member.id.value = result["member"]["id"]
config.plugins.icetv.member.region_id.value = result["member"]["region_id"]
config.plugins.icetv.device.id.value = result["device"]["id"]
config.plugins.icetv.device.label.value = result["device"]["label"]
config.plugins.icetv.device.type_id.value = result["device"]["type_id"]
config.plugins.icetv.save()
saveConfigFile()
return r
class Logout(AuthRequest):
    """DELETE /logout -- end the session and forget local credentials."""
    def __init__(self):
        super(Logout, self).__init__("/logout")
    def delete(self):
        return self.send("delete")
    def send(self, method):
        r = super(Logout, self).send(method)
        # Whatever the server replies, drop the local token as well.
        clearCredentials()
        return r
class Devices(AuthRequest):
    """GET (list) or POST (register) against /devices."""
    def __init__(self):
        super(Devices, self).__init__("/devices")
    def get(self):
        return self.send("get")
    def post(self):
        return self.send("post")
class Device(AuthRequest):
    """GET/PUT/DELETE a single device at /devices/<id>."""
    def __init__(self, deviceid):
        super(Device, self).__init__("/devices/" + str(int(deviceid)))
    def get(self):
        return self.send("get")
    def put(self):
        return self.send("put")
    def delete(self):
        return self.send("delete")
class DeviceTypes(AuthRequest):
    """GET /devices/types -- list the known device types."""
    def __init__(self):
        super(DeviceTypes, self).__init__("/devices/types")
    def get(self):
        return self.send("get")
class DeviceType(AuthRequest):
    """GET /devices/types/<id> -- fetch one device type by id."""
    def __init__(self, deviceid):
        super(DeviceType, self).__init__("/devices/types/" + str(int(deviceid)))
    def get(self):
        return self.send("get")
class DeviceManufacturers(AuthRequest):
    """GET /devices/manufacturers -- list the known manufacturers."""
    def __init__(self):
        super(DeviceManufacturers, self).__init__("/devices/manufacturers")
    def get(self):
        return self.send("get")
class DeviceManufacturer(AuthRequest):
    """GET /devices/manufacturers/<id> -- fetch one manufacturer by id."""
    def __init__(self, deviceid):
        super(DeviceManufacturer, self).__init__("/devices/manufacturers/" + str(int(deviceid)))
    def get(self):
        return self.send("get")
class Shows(AuthRequest):
    """GET /shows -- fetch the EPG show data for this member."""
    def __init__(self):
        super(Shows, self).__init__("/shows")
    def get(self):
        return self.send("get")
class Timers(AuthRequest):
    """GET (list), POST (create) or PUT (update) against /shows/timers."""
    def __init__(self):
        super(Timers, self).__init__("/shows/timers")
    def get(self):
        return self.send("get")
    def post(self):
        return self.send("post")
    def put(self):
        return self.send("put")
class Timer(AuthRequest):
    """GET/PUT/DELETE a single timer at /shows/timers/<id>.

    NOTE(review): unlike Device/Region, the id is not coerced via
    ``int()`` here -- presumably timer ids are opaque strings; confirm.
    """
    def __init__(self, timerid):
        super(Timer, self).__init__("/shows/timers/" + str(timerid))
    def get(self):
        return self.send("get")
    def put(self):
        return self.send("put")
    def delete(self):
        return self.send("delete")
|
skg-net/ansible | lib/ansible/modules/cloud/azure/azure_rm_autoscale.py | Python | gpl-3.0 | 26,843 | 0.003502 | #!/usr/bin/python
#
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_autoscale
version_added: "2.7"
short_description: Manage Azure autoscale setting.
description:
- Create, delete an autoscale setting.
options:
target:
description:
- The identifier of the resource to apply autoscale setting.
- It could be the resource id string.
- It also could be a dict contains the C(name), C(subscription_id), C(namespace), C(types), C(resource_group) of the resource.
resource_group:
required: true
description: resource group of the resource.
enabled:
type: bool
description: Specifies whether automatic scaling is enabled for the resource.
default: true
profiles:
description:
- The collection of automatic scaling profiles that specify different scaling parameters for different time periods.
- A maximum of 20 profiles can be specified.
        suboptions:
name:
required: true
description: the name of the profile.
count:
required: true
description:
                - The number of instances that will be set if metrics are not available for evaluation.
- The default is only used if the current instance count is lower than the default.
min_count:
description: the minimum number of instances for the resource.
max_count:
description: the maximum number of instances for the resource.
recurrence_frequency:
default: None
description:
- How often the schedule profile should take effect.
- If this value is Week, meaning each week will have the same set of profiles.
- This element is not used if the FixedDate element is used.
choices:
- None
- Second
- Minute
- Hour
- Day
- Week
- Month
- Year
recurrence_timezone:
description:
- The timezone of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
recurrence_days:
description:
- The days of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
recurrence_hours:
description:
- The hours of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
recurrence_mins:
description:
- The mins of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
fixed_date_timezone:
description:
- The specific date-time timezone for the profile.
- This element is not used if the Recurrence element is used.
fixed_date_start:
description:
- The specific date-time start for the profile.
- This element is not used if the Recurrence element is used.
fixed_date_end:
description:
- The specific date-time end for the profile.
- This element is not used if the Recurrence element is used.
rules:
description:
- The collection of rules that provide the triggers and parameters for the scaling action.
- A maximum of 10 rules can be specified.
suboptions:
time_aggregation:
default: Average
description: How the data that is collected should be combined over time.
choices:
- Average
- Minimum
- Maximum
- Total
- Count
time_window:
required: true
description:
- The range of time(minutes) in which instance data is collected.
- This value must be greater than the delay in metric collection, which can vary from resource-to-resource.
- Must be between 5 ~ 720.
direction:
description: Whether the scaling action increases or decreases the number of instances.
choices:
- Increase
- Decrease
metric_name:
required: true
description: The name of the metric that defines what the rule monitors.
metric_resource_uri:
description: The resource identifier of the resource the rule monitors.
value:
description:
- The number of instances that are involved in the scaling action.
- This value must be 1 or greater.
operator:
default: GreaterThan
description: The operator that is used to compare the metric data and the threshold.
choices:
- Equals
- NotEquals
- GreaterThan
- GreaterThanOrEqual
- LessThan
- LessThanOrEqual
cooldown:
description:
- The amount of time (minutes) to wait since the last scaling action before this action occurs.
- It must be between 1 ~ 10080.
time_grain:
required: true
description:
- The granularity(minutes) of metrics the rule monitors.
- Must be one of the predefined values returned from metric definitions for the metric.
- Must be between 1 ~ 720.
statistic:
default: Average
description: How the metrics from multiple instances are combined.
choices:
- Average
- Min
- Max
- Sum
threshold:
default: 70
description: The threshold of the metric that triggers the scale action.
type:
description: The type of action that should occur when the scale rule fires.
choices:
- PercentChangeCount
- ExactCount
- ChangeCount
notifications:
description: the collection of notifications.
suboptions:
custom_emails:
description: the custom e-mails list. This value can be null or empty, in which case this attribute will be ignored.
send_to_subscription_administrator:
type: bool
description: A value indicating whether to send email to subscription administrator.
webhooks:
description: The list of webhook notifications service uri.
send_to_subscription_co_administrators:
type: bool
description: A value indicating whether to send em |
bhatiaharsh/naturalHHD | pynhhd-v1.1/pynhhd/structured.py | Python | bsd-2-clause | 6,676 | 0.007939 | '''
Copyright (c) 2015, Harsh Bhatia (bhatia4@llnl.gov)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
import logging
LOGGER = logging.getLogger(__name__)
from .utils.timer import Timer
# ------------------------------------------------------------------------------
class StructuredGrid(object):
    """Differential operators (divergence, curl, gradient) on a regular
    2D or 3D grid.

    All derivatives are second-order central differences computed with
    numpy.gradient using the spacings supplied at construction time.
    Grid axes are ordered (Y, X) in 2D and (Z, Y, X) in 3D, matching
    numpy's row-major layout.
    """

    def __init__(self, **kwargs):
        '''
        kwargs:
            grid:     tuple of grid dimensions, (Y, X) or (Z, Y, X)
            spacings: tuple of grid spacings,   (dy, dx) or (dz, dy, dx)

        Raises SyntaxError if either keyword is missing and ValueError for
        unsupported dimensionality or mismatched spacings.
        '''
        args = list(kwargs.keys())
        if ('grid' not in args) or ('spacings' not in args):
            raise SyntaxError("Dimensions and spacings of the grid are required")

        self.dims = kwargs['grid']
        self.dx = kwargs['spacings']
        self.dim = len(self.dims)

        if self.dim != 2 and self.dim != 3:
            raise ValueError("StructuredGrid works for 2D and 3D only")
        if self.dim != len(self.dx):
            raise ValueError("Dimensions of spacings should match that of the grid")

        LOGGER.info('Initialized {}D structured grid'.format(self.dim))

    def divcurl(self, vfield):
        """Compute the divergence and curl of a vector field.

        vfield has shape dims + (dim,). Returns (div, curl) in 2D and
        (div, curl_x, curl_y, curl_z) in 3D. Derivative arrays are reused
        in place (via the np.add/np.subtract out parameter) to avoid
        extra allocations.
        """
        if (vfield.shape[-1] != self.dim) or (vfield.shape[0:self.dim] != self.dims):
            raise ValueError("Dimensions of vector field should match that of the grid")

        LOGGER.debug('Computing divcurl')
        mtimer = Timer()
        if self.dim == 2:
            # self.dx = (dy, dx); np.gradient returns (d/dy, d/dx)
            dudy, dudx = np.gradient(vfield[:, :, 0], self.dx[0], self.dx[1])
            dvdy, dvdx = np.gradient(vfield[:, :, 1], self.dx[0], self.dx[1])

            np.add(dudx, dvdy, dudx)         # div  = du/dx + dv/dy
            np.subtract(dvdx, dudy, dvdx)    # curl = dv/dx - du/dy

            mtimer.end()
            LOGGER.debug('Computing divcurl done! took {}'.format(mtimer))
            return (dudx, dvdx)

        elif self.dim == 3:
            # self.dx = (dz, dy, dx); np.gradient returns (d/dz, d/dy, d/dx)
            dudz, dudy, dudx = np.gradient(vfield[:, :, :, 0], self.dx[0], self.dx[1], self.dx[2])
            dvdz, dvdy, dvdx = np.gradient(vfield[:, :, :, 1], self.dx[0], self.dx[1], self.dx[2])
            dwdz, dwdy, dwdx = np.gradient(vfield[:, :, :, 2], self.dx[0], self.dx[1], self.dx[2])

            # div = du/dx + dv/dy + dw/dz
            # BUGFIX: the second accumulation previously added dvdz instead
            # of dwdz (dwdz was computed but never used anywhere else).
            np.add(dudx, dvdy, dudx)
            np.add(dudx, dwdz, dudx)

            # curl = (dw/dy - dv/dz, du/dz - dw/dx, dv/dx - du/dy)
            np.subtract(dwdy, dvdz, dwdy)
            np.subtract(dudz, dwdx, dudz)
            np.subtract(dvdx, dudy, dvdx)

            mtimer.end()
            LOGGER.debug('Computing divcurl done! took {}'.format(mtimer))
            return (dudx, dwdy, dudz, dvdx)

    def curl3D(self, vfield):
        """Compute the curl of a 3D vector field.

        Returns (curl_x, curl_y, curl_z) = (dw/dy - dv/dz,
        du/dz - dw/dx, dv/dx - du/dy).
        """
        if (vfield.shape[-1] != self.dim) or (vfield.shape[0:self.dim] != self.dims):
            raise ValueError("Dimensions of vector field should match that of the grid")
        if self.dim != 3:
            # BUGFIX: message previously said "works only for 2D".
            raise ValueError("curl3D works only for 3D")

        LOGGER.debug('Computing curl3D')
        mtimer = Timer()

        # self.dx = (dz, dy, dx)
        dudz, dudy, dudx = np.gradient(vfield[:, :, :, 0], self.dx[0], self.dx[1], self.dx[2])
        dvdz, dvdy, dvdx = np.gradient(vfield[:, :, :, 1], self.dx[0], self.dx[1], self.dx[2])
        dwdz, dwdy, dwdx = np.gradient(vfield[:, :, :, 2], self.dx[0], self.dx[1], self.dx[2])

        np.subtract(dwdy, dvdz, dwdy)
        np.subtract(dudz, dwdx, dudz)
        np.subtract(dvdx, dudy, dvdx)

        mtimer.end()
        LOGGER.debug('Computing curl3D done! took {}'.format(mtimer))
        return (dwdy, dudz, dvdx)

    def rotated_gradient(self, sfield, verbose=False):
        """Compute the rotated gradient (-d/dy, d/dx) of a 2D scalar field.

        Returns an array of shape dims + (2,). The *verbose* parameter is
        kept for interface compatibility and is unused.
        """
        if (sfield.shape != self.dims):
            raise ValueError("Dimensions of scalar field should match that of the grid")
        if self.dim != 2:
            raise ValueError("rotated_gradient works only for 2D")

        LOGGER.debug('Computing rotated gradient')
        mtimer = Timer()

        ddy, ddx = np.gradient(sfield, self.dx[0], self.dx[1])
        ddy *= -1.0
        grad = np.stack((ddy, ddx), axis=-1)

        mtimer.end()
        LOGGER.debug('Computing rotated gradient done! took {}'.format(mtimer))
        return grad

    def gradient(self, sfield, verbose=False):
        """Compute the gradient of a scalar field.

        Returns an array of shape dims + (dim,), with components stacked
        as (d/dx, d/dy[, d/dz]). The *verbose* parameter is kept for
        interface compatibility and is unused.
        """
        if (sfield.shape != self.dims):
            raise ValueError("Dimensions of scalar field should match that of the grid")

        LOGGER.debug('Computing gradient')
        mtimer = Timer()
        if self.dim == 2:
            # self.dx = (dy, dx)
            ddy, ddx = np.gradient(sfield, self.dx[0], self.dx[1])
            grad = np.stack((ddx, ddy), axis = -1)
        elif self.dim == 3:
            # self.dx = (dz, dy, dx)
            ddz, ddy, ddx = np.gradient(sfield, self.dx[0], self.dx[1], self.dx[2])
            grad = np.stack((ddx, ddy, ddz), axis = -1)

        mtimer.end()
        LOGGER.debug('Computing gradient done! took {}'.format(mtimer))
        return grad
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
|
sdispater/orator | tests/integrations/test_mysql.py | Python | mit | 764 | 0 | # -*- coding: utf-8 -*-
import os
from .. import OratorTestCase
from . import IntegrationTestCase
class MySQLIntegrationTestCase(IntegrationTestCase, OratorTestCase):
    """Runs the shared integration suite against a MySQL backend."""

    @classmethod
    def get_manager_config(cls):
        """Return the DatabaseManager config, using root credentials on CI."""
        if os.environ.get("CI", False):
            user, password = "root", ""
        else:
            user, password = "orator", "orator"
        return {
            "default": "mysql",
            "mysql": {
                "driver": "mysql",
                "database": "orator_test",
                "user": user,
                "password": password,
            },
        }

    def get_marker(self):
        # MySQL drivers use %s-style query placeholders.
        return "%s"
|
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_gui/util/editorbase.py | Python | gpl-2.0 | 3,269 | 0.008259 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
# PyQt4 includes for python bindings to QT
from PyQt4.QtCore import Qt, QString
from PyQt4.QtGui import QFont, QFontMetrics, QColor, QIcon, QLabel, QWidget, QVBoxLayout
from PyQt4.Qsci import QsciScintilla, QsciLexerPython
# Main
class EditorBase(QsciScintilla):
    """A QScintilla editor widget pre-configured for Python source files."""

    def __init__(self, mainwindow):
        QsciScintilla.__init__(self, mainwindow)
        self.mainwindow = mainwindow

        # One fixed-pitch font is shared by the text area and the margins.
        editor_font = QFont()
        editor_font.setFamily("Consolas")
        editor_font.setFixedPitch(True)
        editor_font.setPointSize(10)
        metrics = QFontMetrics(editor_font)
        self.setFont(editor_font)
        self.setMarginsFont(editor_font)

        # Margin 0 conventionally holds line numbers; size it for five
        # digits plus a small gap.
        self.setMarginWidth(0, metrics.width("00000") + 5)
        self.setMarginLineNumbers(0, True)

        # Light-grey vertical guide at column 80.
        self.setEdgeMode(QsciScintilla.EdgeLine)
        self.setEdgeColumn(80)
        self.setEdgeColor(QColor("#CCCCCC"))

        # Boxed-tree code folding and forgiving brace matching.
        self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
        self.setBraceMatching(QsciScintilla.SloppyBraceMatch)

        # Dark line-number margin with light digits; light fold margin.
        self.setMarginsBackgroundColor(QColor("#333333"))
        self.setMarginsForegroundColor(QColor("#CCCCCC"))
        self.setFoldMarginColors(QColor("#CCCCCC"), QColor("#CCCCCC"))

        # Python syntax highlighting using the shared editor font.
        highlighter = QsciLexerPython()
        highlighter.setDefaultFont(editor_font)
        self.setLexer(highlighter)
class EditorTab(object):
    """A dynamically-created tab hosting an EditorBase loaded with a file.

    If *filePath* cannot be read, the tab is left un-inserted and the
    placeholder status label is kept, matching the original behaviour of
    silently aborting on a failed open.
    """

    def __init__(self, mainwindow, filePath):
        self.mainwindow = mainwindow
        self.tabIcon = QIcon(":/Images/Images/cog.png")
        self.tabLabel = "Editor Dyn Tab"
        self.tab = QWidget(self.mainwindow)
        self.widgetLayout = QVBoxLayout(self.tab)
        self.widgetLayout.setAlignment(Qt.AlignTop)
        self.editorStatusLabel = QLabel(self.tab)
        self.editorStatusLabel.setAlignment(Qt.AlignCenter)
        self.editorStatusLabel.setObjectName("editorStatusLabel")
        self.editorStatusLabel.setText(QString("No files currently loaded..."))
        self.widgetLayout.addWidget(self.editorStatusLabel)
        self.editorStuff = EditorBase(self.mainwindow)
        self.widgetLayout.addWidget(self.editorStuff)
        # BUGFIX: was a bare `except:` around open() only, which swallowed
        # every exception type and leaked the file handle if reading (not
        # opening) failed. Use a context manager and catch I/O errors only.
        try:
            with open(filePath, 'r') as f:
                for line in f:
                    self.editorStuff.append(line)
        except (IOError, OSError):
            return
        self.editorStatusLabel.setText(QString(filePath))
        self.mainwindow.tabWidget.insertTab(0, self.tab, self.tabIcon, self.tabLabel)
        self.mainwindow.tabWidget.setCurrentIndex(0)
|
simonmonk/electronics_cookbook | pi/ch_13_bi_stepper.py | Python | mit | 1,655 | 0.014502 | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
in_1_pin = 18
in_2_pin = 23
in_3_pin = 24
in_4_pin = 25
en_pin = 22
GPIO.setup(in_1_pin, GPIO.OUT)
GPIO.setup(in_2_pin, GPIO.OUT)
GPIO.setup(in_3_pin, GPIO.OUT)
GPIO.setup(in_4_pin, GPIO.OUT)
GPIO.setup(en_pin, GPIO.OUT)
GPIO.output(en_pin, True)
period = 0.02
def step_forward(steps, period):
    """Rotate the motor *steps* full steps forward, *period* seconds per phase."""
    # Coil energisation sequence (IN1, IN2, IN3, IN4) for forward rotation.
    phases = ((1, 0, 0, 1), (1, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 1))
    for _ in range(steps):
        for in1, in2, in3, in4 in phases:
            set_coils(in1, in2, in3, in4)
            time.sleep(period)
def step_reverse(steps, period):
    """Rotate the motor *steps* full steps in reverse, *period* seconds per phase."""
    # Forward sequence played backwards (IN1, IN2, IN3, IN4).
    phases = ((0, 1, 0, 1), (0, 1, 1, 0), (1, 0, 1, 0), (1, 0, 0, 1))
    for _ in range(steps):
        for in1, in2, in3, in4 in phases:
            set_coils(in1, in2, in3, in4)
            time.sleep(period)
def set_coils(in1, in2, in3, in4):
    """Drive the four H-bridge inputs to the given logic levels."""
    pin_levels = ((in_1_pin, in1), (in_2_pin, in2), (in_3_pin, in3), (in_4_pin, in4))
    for pin, level in pin_levels:
        GPIO.output(pin, level)
try:
    # Interactive command loop: single command letter followed by a number.
    print('Command letter followed by number')
    print('p20 - set the inter-step period to 20ms (control speed)')
    print('f100 - forward 100 steps')
    print('r100 - reverse 100 steps')
    while True:
        command = input('Enter command: ')
        # Guard against empty input and non-numeric parameters, which
        # previously crashed the loop with IndexError/ValueError.
        if len(command) < 2:
            print('Invalid command')
            continue
        try:
            parameter = int(command[1:])  # everything after the command letter
        except ValueError:
            print('Invalid number: ' + command[1:])
            continue
        if command[0] == 'p':
            period = parameter / 1000.0   # milliseconds -> seconds
        elif command[0] == 'f':
            step_forward(parameter, period)
        elif command[0] == 'r':
            step_reverse(parameter, period)
finally:
    # Always release the GPIO pins, even on Ctrl-C.
    print('Cleaning up')
    GPIO.cleanup()
bfvanrooyen/vcontrol | cli_commands/command_base.py | Python | mit | 228 | 0.008772 | from a | bc import ABCMeta, abstractmethod
class BaseCommand(metaclass=ABCMeta):
    """Abstract base class for CLI subcommands.

    Concrete commands must register their arguments and implement the
    command body; instantiating a subclass that omits either method
    raises TypeError.
    """

    @abstractmethod
    def parse_arguments(self, subparsers):
        """Register this command's arguments on *subparsers*."""

    @abstractmethod
    def handle_command(self, args):
        """Execute the command with the parsed *args*."""
|
galtys/galtys-addons | account_move_line_where_query/__init__.py | Python | agpl-3.0 | 19 | 0 | imp | ort where_quer | y
|
lixun910/pysal | pysal/model/mgwr/utils.py | Python | bsd-3-clause | 6,190 | 0.006139 | import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower ofset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15)) or 0.75
stop : Offset from highets point in the colormap's range.
Defaults to 1.0 (no upper ofset). Should be between
`midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
cmap : Mmatplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
@requires('matplotlib')
@requires('geopandas')
def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
'''
Function that creates comparative visualization of GWR and | MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw | : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
path to save the figure. Default is None. Not to save figure.
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
#Otherwise, there are positive and negative values so the colormap so zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
if (gwr_t == 0).any():
data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
if (mgwr_t == 0).any():
data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show()
|
tpow/pytds | tests/sspi_test.py | Python | mit | 3,151 | 0.008569 | try:
import unittest2 as unittest
except:
import unittest
import ctypes
from ctypes import create_string_buffer
import settings
import socket
import sys
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class SspiTest(unittest.TestCase):
    """Smoke tests for pytds.sspi, the ctypes bindings to the Windows SSPI API.

    pytds is imported inside each test so that collecting this module on
    non-Windows platforms does not fail before the skip applies.
    """

    def test_enum_security_packages(self):
        import pytds.sspi
        pytds.sspi.enum_security_packages()

    def test_credentials(self):
        import pytds.sspi
        cred = pytds.sspi.SspiCredentials('Negotiate', pytds.sspi.SECPKG_CRED_OUTBOUND)
        cred.query_user_name()
        cred.close()

    def test_make_buffers(self):
        import pytds.sspi
        buf = create_string_buffer(1000)
        bufs = [(pytds.sspi.SECBUFFER_TOKEN, buf)]
        desc = pytds.sspi._make_buffers_desc(bufs)
        # The native descriptor must mirror the python-level buffer list
        # exactly: version tag, count, and per-buffer size/type/pointer.
        self.assertEqual(desc.ulVersion, pytds.sspi.SECBUFFER_VERSION)
        self.assertEqual(desc.cBuffers, len(bufs))
        self.assertEqual(desc.pBuffers[0].cbBuffer, len(bufs[0][1]))
        self.assertEqual(desc.pBuffers[0].BufferType, bufs[0][0])
        self.assertEqual(desc.pBuffers[0].pvBuffer,
                         ctypes.cast(bufs[0][1], pytds.sspi.PVOID).value)

    def test_sec_context(self):
        import pytds.sspi
        cred = pytds.sspi.SspiCredentials(
            'Negotiate',
            pytds.sspi.SECPKG_CRED_OUTBOUND)
        token_buf = create_string_buffer(10000)
        bufs = [(pytds.sspi.SECBUFFER_TOKEN, token_buf)]
        # Strip an instance name ("host\instance") down to the host part.
        server = settings.HOST
        if '\\' in server:
            server, _ = server.split('\\')
        host, _, _ = socket.gethostbyname_ex(server)
        target_name = 'MSSQLSvc/{0}:1433'.format(host)
        ctx, status, bufs = cred.create_context(
            flags=(pytds.sspi.ISC_REQ_CONFIDENTIALITY |
                   pytds.sspi.ISC_REQ_REPLAY_DETECT |
                   pytds.sspi.ISC_REQ_CONNECTION),
            byte_ordering='network',
            target_name=target_name,
            output_buffers=bufs)
        # Negotiate is usually multi-leg; complete the token when asked to.
        if status in (pytds.sspi.Status.SEC_I_COMPLETE_AND_CONTINUE,
                      pytds.sspi.Status.SEC_I_CONTINUE_NEEDED):
            ctx.complete_auth_token(bufs)
|
sthirugn/robottelo | tests/foreman/ui/test_discoveredhost.py | Python | gpl-3.0 | 51,849 | 0.000019 | # -*- encoding: utf-8 -*-
"""Test class for Foreman Discovery
@Requirement: Discoveredhost
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: UI
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
import subprocess
import time
from fauxfactory import gen_string
from nailgun import entities
from robottelo.decorators import (
run_in_one_thread,
run_only_on,
skip_if_not_set,
stubbed,
tier3
)
from robottelo.api.utils import configure_provisioning
from robottelo.libvirt_discovery import LibvirtGuest
from robottelo.test import UITestCase
from robottelo.ui.base import UIError
from robottelo.ui.factory import (
edit_param,
make_discoveryrule,
)
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.session import Session
from time import sleep
@run_in_one_thread
class DiscoveryTestCase(UITestCase):
"""Implements Foreman discovery tests in UI."""
def _edit_discovery_fact_column_param(self, session, param_value):
"""
Edit the 'discovery_fact_column' parameter from settings menu.
User can populate a new column on 'Discovered Hosts' page by setting
the value of 'discovery_fact_column'
"""
tab_locator = | tab_locators['settings.tab_discovered']
param_name = 'discovery_fact_column | '
edit_param(
session=session,
tab_locator=tab_locator,
param_name=param_name,
value_type='input',
param_value=param_value,
)
saved_element = self.settings.get_saved_value(
tab_locator, param_name)
self.assertEqual(param_value, saved_element)
def _ping_host(self, host, timeout=60):
"""Helper to ensure given IP/hostname is reachable after reboot.
:param host: A string. The IP or hostname of host.
:param int timeout: The polling timeout in seconds.
"""
timeup = time.time() + int(timeout)
while True:
command = subprocess.Popen(
'ping -c1 {0}; echo $?'.format(host),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output = command.communicate()[0]
# Checking the return code of ping is 0
if time.time() > timeup:
return False
if int(output.split()[-1]) == 0:
return True
else:
time.sleep(5)
    @classmethod
    @skip_if_not_set('vlan_networking')
    def setUpClass(cls):
        """Steps to Configure foreman discovery

        1. Build PXE default template
        2. Create Organization/Location
        3. Update Global parameters to set default org and location for
           discovered hosts.
        4. Enable auto_provision flag to perform discovery via discovery
           rules.
        """
        super(DiscoveryTestCase, cls).setUpClass()
        # Build PXE default template to get default PXE file
        entities.ConfigTemplate().build_pxe_default()
        # Create Org and location
        cls.org = entities.Organization(name=gen_string('alpha')).create()
        cls.org_name = cls.org.name
        cls.loc = entities.Location(
            name=gen_string('alpha'),
            organization=[cls.org],
        ).create()
        # Update default org and location params to place discovered host
        cls.discovery_loc = entities.Setting().search(
            query={'search': 'name="discovery_location"'})[0]
        cls.discovery_loc.value = cls.loc.name
        cls.discovery_loc.update({'value'})
        cls.discovery_org = entities.Setting().search(
            query={'search': 'name="discovery_organization"'})[0]
        cls.discovery_org.value = cls.org.name
        cls.discovery_org.update({'value'})
        # Enable flag to auto provision discovered hosts via discovery rules
        cls.discovery_auto = entities.Setting().search(
            query={'search': 'name="discovery_auto"'})[0]
        # Remember the original value so tearDownClass can restore it
        cls.default_discovery_auto = str(cls.discovery_auto.value)
        cls.discovery_auto.value = 'True'
        cls.discovery_auto.update({'value'})
        cls.config_env = configure_provisioning(org=cls.org, loc=cls.loc)
    @classmethod
    def tearDownClass(cls):
        """Restore default 'discovery_auto' global setting's value"""
        # setUpClass stashed the original value before forcing it to
        # 'True'; put it back so other test classes see the default.
        cls.discovery_auto.value = cls.default_discovery_auto
        cls.discovery_auto.update({'value'})
        super(DiscoveryTestCase, cls).tearDownClass()
    @run_only_on('sat')
    @tier3
    def test_positive_pxe_based_discovery(self):
        """Discover a host via PXE boot by setting "proxy.type=proxy" in
        PXE default

        @id: 43a8857d-2f08-436e-97fb-ffec6a0c84dd
        @Setup: Provisioning should be configured
        @Steps: PXE boot a host/VM
        @Assert: Host should be successfully discovered
        @CaseLevel: System
        """
        with Session(self.browser) as session:
            session.nav.go_to_select_org(self.org_name)
            # Boot a libvirt guest via PXE; discovery is asynchronous, so
            # wait for the host to appear before asserting it is searchable.
            with LibvirtGuest() as pxe_host:
                hostname = pxe_host.guest_name
                self.assertTrue(
                    self.discoveredhosts.waitfordiscoveredhost(hostname)
                )
                self.assertIsNotNone(self.discoveredhosts.search(hostname))
    @run_only_on('sat')
    @tier3
    def test_positive_pxe_less_with_dhcp_unattended(self):
        """Discover a host with dhcp via bootable discovery ISO by setting
        "proxy.type=proxy" in PXE default in unattended mode.

        @id: fc13167f-6fa0-4fe5-8584-7716292866ce
        @Setup: Provisioning should be configured
        @Steps: Boot a host/VM using modified discovery ISO.
        @Assert: Host should be successfully discovered
        @CaseLevel: System
        """
        with Session(self.browser) as session:
            session.nav.go_to_select_org(self.org_name)
            # boot_iso=True boots the guest from the discovery ISO instead
            # of PXE.
            with LibvirtGuest(boot_iso=True) as pxe_less_host:
                hostname = pxe_less_host.guest_name
                self.assertTrue(
                    self.discoveredhosts.waitfordiscoveredhost(hostname)
                )
                self.assertIsNotNone(self.discoveredhosts.search(hostname))
    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_pxe_less_with_dhcp_semiauto(self):
        """Discover a host with dhcp via bootable discovery ISO in
        semi-automated mode.

        @id: 05c88618-6f15-4eb8-8501-3505160c5450
        @Setup: Provisioning should be configured
        @Steps: Boot a host/VM using discovery ISO
        @Assert: Host should be successfully discovered
        @caseautomation: notautomated
        @CaseLevel: System
        """
        # Manual test case: @stubbed() marks it as not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_pxe_less_with_dhcp_interactively(self):
        """Discover a host with dhcp via bootable discovery ISO using
        interactive TUI mode.

        @id: 08780627-9ac1-4837-88eb-df673d974d05
        @Setup: Provisioning should be configured
        @Steps: Boot a host/VM using discovery ISO
        @Assert: Host should be successfully discovered
        @caseautomation: notautomated
        @CaseLevel: System
        """
        # Manual test case: @stubbed() marks it as not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_pxe_less_without_dhcp_interactively(self):
        """Discover a host with single NIC on a network without DHCP and PXE
        using ISO image in interactive TUI interface.

        @id: 9703eb00-9857-4076-8b83-031a58d7c1cd
        @Assert: Host should be discovered successfully
        @caseautomation: notautomated
        @CaseLevel: System
        """
        # Manual test case: @stubbed() marks it as not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_pxe_less_without_dhcp_semiauto(self):
        """Discover a host with single NIC on a network without DHCP and PXE
        using ISO image in semi-automated mode.

        @id: 8254a85f-21c8-4483-b453-15126762f6e5
        @Assert: Host should be discovered successfully
        @caseautomation: notautomated
        @CaseLevel: System
        """
        # Manual test case: @stubbed() marks it as not yet automated.
@run_only_on('sat')
@stubbed()
|
mrquim/mrquimrepo | repo/plugin.video.salts/scrapers/rlsmovies_scraper.py | Python | gpl-2.0 | 4,302 | 0.003952 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'http://www.rls-movies.com'
CATEGORIES = {VIDEO_TYPES.MOVIE: '/category/movies/"', VIDEO_TYPES.EPISODE: '/category/tvshows/"'}
class Scraper(scraper.Scraper):
    """SALTS scraper for the rls-movies.com release blog."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # The base URL can be overridden from the addon settings.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        # Video types this scraper can resolve sources for.
        return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'rls-movies'

    def get_sources(self, video):
        """Return a list of hoster dicts for |video| scraped from its post page."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, require_debrid=False, cache_limit=.5)
        for source, values in self.__get_post_links(html).iteritems():
            if scraper_utils.excluded_link(source): continue
            host = urlparse.urlparse(source).hostname
            release = values['release']
            quality = scraper_utils.blog_get_quality(video, release, host)
            hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': quality, 'direct': False}
            # Tag HEVC encodes so the UI can surface the codec.
            if 'X265' in release or 'HEVC' in release:
                hoster['format'] = 'x265'
            hosters.append(hoster)
        return hosters

    def __get_post_links(self, html):
        """Map stream URL -> {'release': name} for every usable link in the post."""
        sources = {}
        release = dom_parser2.parse_dom(html, 'span', {'itemprop': 'name'})
        release = release[0].content if release else ''
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'entry'})
        if fragment:
            for attrs, label in dom_parser2.parse_dom(fragment[0].content, 'a', req='href'):
                stream_url = attrs['href']
                # Unlabelled or "single link" anchors inherit the post's release name.
                if not label or re.search('single\s+link', label, re.I):
                    sources[stream_url] = {'release': release}
                elif any([ext for ext in ['.mp4', '.mkv', '.avi'] if ext in label]):
                    sources[stream_url] = {'release': label}
        return sources

    def get_url(self, video):
        return self._blog_get_url(video, delim=' ')

    @classmethod
    def get_settings(cls):
        settings = super(cls, cls).get_settings()
        settings = scraper_utils.disable_sub_check(settings)
        name = cls.get_name()
        settings.append('         <setting id="%s-filter" type="slider" range="0,180" option="int" label="     %s" default="30" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
        # Repaired: the default attribute was corrupted in the source ('default=" | 0"').
        settings.append('         <setting id="%s-select" type="enum" label="     %s" lvalues="30636|30637" default="0" visible="eq(-4,true)"/>' % (name, i18n('auto_select')))
        return settings

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        html = self._http_get(self.base_url, params={'s': title}, require_debrid=False, cache_limit=1)
        post_pattern = 'class="post-box-title">.*?href="(?P<url>[^"]+)[^>]*>(?P<post_title>[^<]+).*?<span>(?P<date>[^ ]+ 0*\d+, \d{4})'
        date_format = '%B %d, %Y'
        # Repaired: the helper name was corrupted in the source ('_blog_proc | _results').
        return self._blog_proc_results(html, post_pattern, date_format, video_type, title, year)
|
ingadhoc/website | website_sale_order_type_ux/models/__init__.py | Python | agpl-3.0 | 303 | 0 | ######################################################################## | ######
# For copyright and license notices, see __manifest__.py file in module root
# directory
####################################################### | #######################
from . import website
from . import res_config_settings
|
LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/tests/test_simple_paths.py | Python | bsd-2-clause | 9,703 | 0.012883 | #!/usr/bin/env python
import random
from nose.tools import *
import networkx as nx
from networkx import convert_node_labels_to_integers as cnlti
from networkx.algorithms.simple_paths import _bidirectional_shortest_path
from networkx.algorithms.simple_paths import _bidirectional_dijkstra
# Tests for all_simple_paths
def test_all_simple_paths():
G = nx.path_graph(4)
paths = nx.all_simple_paths(G,0,3)
assert_equal(list(list(p) for p in paths),[[0,1,2,3]])
def test_all_simple_paths_cutoff():
G = nx.complete_graph(4)
paths = nx.all_simple_paths(G,0,1,cutoff=1)
assert_equal(list(list(p) for p in paths),[[0,1]])
paths = nx.all_simple_paths(G,0,1,cutoff=2)
assert_equal(list(list(p) for p in paths),[[0,1],[0,2,1],[0,3,1]])
def test_all_simple_paths_multigraph():
G = nx.MultiGraph([(1,2),(1,2)])
paths = nx.all_simple_paths(G,1,2)
assert_equal(list(list(p) for p in paths),[[1,2],[1,2]])
def test_all_simple_paths_multigraph_with_cutoff():
G = nx.MultiGraph([(1,2),(1,2),(1,10),(10,2)])
paths = nx.all_simple_paths(G,1,2, cutoff=1)
assert_equal(list(list(p) for p in paths),[[1,2],[1,2]])
def test_all_simple_paths_directed():
G = nx.DiGraph()
G.add_path([1,2,3])
G.add_path([3,2,1])
paths = nx.all_simple_paths(G,1,3)
assert_equal(list(list(p) for p in paths),[[1,2,3]])
def test_all_simple_paths_empty():
G = nx.path_graph(4)
paths = nx.all_simple_paths(G,0,3,cutoff=2)
assert_equal(list(list(p) for p in paths),[])
def hamiltonian_path(G,source):
source = next(G.nodes_iter())
neighbors = set(G[source])-set([source])
n = len(G)
for target in neighbors:
for path in nx.all_simple_paths(G,source,target):
if len(path) == n:
yield path
def test_hamiltonian_path():
from itertools import permutations
G=nx.complete_graph(4)
paths = [list(p) for p in hamiltonian_path(G,0)]
exact = [[0]+list(p) for p in permutations([1,2,3],3) ]
assert_equal(sorted(paths),sorted(exact))
def test_cutoff_zero():
G = nx.complete_graph(4)
paths = nx.all_simple_paths(G,0,3,cutoff=0)
assert_equal(list(list(p) for p in paths),[])
paths = nx.all_simple_paths(nx.MultiGraph(G),0,3,cutoff=0)
assert_equal(list(list(p) for p in paths),[])
@raises(nx.NetworkXError)
def test_source_missing():
G = nx.Graph()
G.add_path([1,2,3])
paths = list(nx.all_simple_paths(nx.MultiGraph(G),0,3))
@raises(nx.NetworkXError)
def test_target_missing():
G = nx.Graph()
G.add_path([1,2,3])
paths = list(nx.all_simple_paths(nx.MultiGraph(G),1,4))
# Tests for shortest_simple_paths
def test_shortest_simple_paths():
G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted")
paths = nx.shortest_simple_paths(G, 1, 12)
assert_equal(next(paths), [1, 2, 3, 4, 8, 12])
assert_equal(next(paths), [1, 5, 6, 7, 8, 12])
assert_equal([len(path) for path in nx.shortest_simple_paths(G, 1, 12)],
sorted([len(path) for path in nx.all_simple_paths(G, 1, 12)]))
def test_shortest_simple_paths_directed():
G = nx.cycle_graph(7, create_using=nx.DiGraph())
paths = nx.shortest_simple_paths(G, 0, 3)
assert_equal([path for path in paths], [[0, 1, 2, 3]])
def test_Greg_Bernstein():
g1 = nx.Graph()
g1.add_nodes_from(["N0", "N1", "N2", "N3", "N4"])
g1.add_edge("N4", "N1", weight=10.0, capacity=50, name="L5")
g1.add_edge("N4", "N0", weight=7.0, capacity=40, name="L4")
g1.add_edge("N0", "N1", weight=10.0, capacity=45, name="L1")
g1.add_edge("N3", "N0", weight=10.0, capacity=50, name="L0")
g1.add_edge("N2", "N3", weight=12.0, capacity=30, name="L2")
g1.add_edge("N1", "N2", weight=15.0, capacity=42, name="L3")
solution = [['N1', 'N0', 'N3'], ['N1', 'N2', 'N3'], ['N1', 'N4', 'N0', 'N3']]
result = list(nx.shortest_simple_paths(g1, 'N1', 'N3', weight='weight'))
assert_equal(result, solution)
def test_weighted_shortest_simple_path():
def cost_func(path):
return sum(G.edge[u][v]['weight'] for (u, v) in zip(path, path[1:]))
G = nx.complete_graph(5)
weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()}
nx.set_edge_attributes(G, 'weight', weight)
cost = 0
for path in nx.shortest_simple_paths(G, 0, 3, weight='weight'):
this_cost = cost_func(path)
assert_true(cost <= this_cost)
cost = this_cost
def test_directed_weighted_shortest_simple_path():
def cost_func(path):
return sum(G.edge[u][v]['weight'] for (u, v) in zip(path, path[1:]))
G = nx.complete_graph(5)
G = G.to_directed()
weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()}
nx.set_edge_attributes(G, 'weight', weight)
cost = 0
for path in nx.shortest_simple_paths(G, 0, 3, weight='weight'):
this_cost = cost_func(path)
assert_true(cost <= this_cost)
cost = this_cost
def test_weight_name():
G = nx.cycle_graph(7)
nx.set_edge_attributes(G, 'weight', 1)
nx.set_edge_attributes(G, 'foo', 1)
G.edge[1][2]['foo'] = 7
paths = list(nx.shortest_simple_paths(G, 0, 3, weight='foo'))
solution = [[0, 6, 5, 4, 3], [0, 1, 2, 3]]
assert_equal(paths, solution)
@raises(nx.NetworkXError)
def test_ssp_source_missing():
G = nx.Graph()
G.add_path([1,2,3])
paths = list(nx.shortest_simple_paths(G, 0, 3))
@raises(nx.NetworkXError)
def test_ssp_target_missing():
G = nx.Graph()
G.add_path([1,2,3])
paths = list(nx.shortest_simple_paths(G, 1, 4))
@raises(nx.NetworkXNotImplemented)
def test_ssp_multigraph():
G = nx.MultiGraph()
G.add_path([1,2,3])
paths = list(nx.shortest_simple_paths(G, 1, 4))
@raises(nx.NetworkXNoPath)
def test_ssp_source_missing():
G = nx.Graph()
G.add_path([0, 1, 2])
G.add_path([3, 4, 5])
paths = list(nx.shortest_simple_paths(G, 0, 3))
def test_bidirectional_shortest_path_restricted():
grid = cnlti(nx.grid_2d_graph(4,4), first_label=1, ordering="sorted")
cycle = nx.cycle_graph(7)
directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
length, path = _bidirectional_shortest_path(cycle, 0, 3)
assert_equal(path, [0, 1, 2, 3])
length, path = _bidirectional_shortest_path(cycle, 0, 3, ignore_nodes=[1])
assert_equal(path, [0, 6, 5, 4, 3])
length, path = _bidirectional_shortest_path(grid, 1, 12)
assert_equal(path, [1, 2, 3, 4, 8, 12])
length, path = _bidirectional_shortest_path(grid, 1, 12, ignore_nodes=[2])
assert_equal(path, [1, 5, 6, 10, 11, 12])
length, path = _bidirectional_shortest_path(grid, 1, 12, ignore_nodes=[2, 6])
assert_equal(path, [1, 5, 9, 10, 11, 12])
length, path = _bidirectional_shortest_path(grid, 1, 12,
ignore_nodes=[2, 6],
ignore_edges=[(10, 11)])
assert_equal(path, [1, 5, 9, 10, 14, 15, 16, 12])
length, path = _bidirectional_shortest_path(directed_cycle, 0, 3)
assert_equal(path, [0, 1, 2, 3])
assert_raises(
nx.NetworkXNoPath,
_bidirectional_shortest_path,
directed_cycle,
0, 3,
ignore_nodes=[1],
)
length, path = _bidirectional_shortest_path(dir | ected_cycle, 0, 3,
| ignore_edges=[(2, 1)])
assert_equal(path, [0, 1, 2, 3])
assert_raises(
nx.NetworkXNoPath,
_bidirectional_shortest_path,
directed_cycle,
0, 3,
ignore_edges=[(1, 2)],
)
def validate_path(G, s, t, soln_len, path):
assert_equal(path[0], s)
assert_equal(path[-1], t)
assert_equal(soln_len, sum(G[u][v].get('weight', 1)
for u, v in zip(path[:-1], path[1:])))
def validate_length_path(G, s, t, soln_len, length, path):
assert_equal(soln_len, length)
validate_path(G, s, t, length, path)
def test_bidirectional_dijksta_restricted():
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
|
drewkett/SU2 | SU2_PY/SU2/io/config_options.py | Python | lgpl-2.1 | 6,581 | 0.009877 | ## \file config_options.py
# \brief python package for config
# \author T. Lukaczyk, F. Palacios
# \version 6.1.0 "Falcon"
#
# The current SU2 release has been coordinated by the
# SU2 International Developers Society <www.su2devsociety.org>
# with selected contributions from the open-source community.
#
# The main research teams contributing to the current release are:
# - Prof. Juan J. Alonso's group at Stanford University.
# - Prof. Piero Colonna's group at Delft University of Technology.
# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# - Prof. Alberto Guardone's group at Polytechnic University of Milan.
# - Prof. Rafael Palacios' group at Imperial College London.
# - Prof. Vincent Terrapon's group at the University of Liege.
# - Prof. Edwin van der Weide's group at the University of Twente.
# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics.
#
# Copyright 2012-2018, Francisco D. Palacios, Thomas D. Economon,
# Tim Albring, and the SU2 contributors.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from ..util import ordered_bunch
class OptionError(Exception):
    """Raised when an invalid value is assigned to a config Option."""
    pass
class Option(object):
    """Simple holder for a single configuration value.

    NOTE(review): __get__/__set__ take no owner/instance arguments, so this
    is not the descriptor protocol — they are called explicitly.
    """

    def __init__(self):
        # Values default to the empty string until assigned.
        self.val = ""

    def __get__(self):
        return self.val

    def __set__(self, newval):
        self.val = newval
class MathProblem(Option):
    """Option restricted to the valid SU2 math problem types."""

    def __init__(self, *args, **kwarg):
        super(MathProblem, self).__init__(*args, **kwarg)
        self.validoptions = ['DIRECT', 'CONTINUOUS_ADJOINT', 'LINEARIZED']

    def __set__(self, newval):
        """Assign *newval*, raising OptionError if it is not a valid option.

        Bug fix: the original tested ``self.newval`` — a nonexistent
        attribute — so every assignment raised AttributeError instead of
        validating the incoming value.
        """
        if newval not in self.validoptions:
            raise OptionError("Invalid option. Valid options are: %s" % self.validoptions)
        super(MathProblem, self).__set__(newval)

#: class MathProblem
class DEFINITION_DV(ordered_bunch):
    """ SU2.io.config.DEFINITION_DV()
    List of design variables (Design variables are separated by semicolons)
    - HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc )
    - SURFACE_BUMP ( 2, Scale | Mark. List | x_Start, x_End, x_Loc )
    - NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit )
    - TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp )
    - ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn )
    - FFD_CONTROL_POINT ( 7, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, k_Ind, x_Mov, y_Mov, z_Mov )
    - FFD_TWIST_ANGLE ( 9, Scale | Mark. List | FFD_Box_ID, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End )
    - FFD_ROTATION ( 10, Scale | Mark. List | FFD_Box_ID, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End )
    - FFD_CAMBER ( 11, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind )
    - FFD_THICKNESS ( 12, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind )
    - FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov )
    - FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind )
    - FFD_THICKNESS_2D ( 17, Scale | Mark. List | FFD_Box_ID, i_Ind )
    """

    def __init__(self, *args, **kwarg):
        ordered_bunch.__init__(self)
        # Parallel lists: index i across all five lists describes design variable i.
        self.KIND   = []
        self.SCALE  = []
        self.MARKER = []
        self.FFDTAG = []
        self.PARAM  = []
        self.update(ordered_bunch(*args, **kwarg))

    def append(self, new_dv):
        """Append one design-variable record (a dict with the five keys)."""
        self.KIND.append(new_dv['KIND'])
        self.SCALE.append(new_dv['SCALE'])
        self.MARKER.append(new_dv['MARKER'])
        # Repaired: this line was corrupted in the source ("FFDTA | G").
        self.FFDTAG.append(new_dv['FFDTAG'])
        self.PARAM.append(new_dv['PARAM'])

    def extend(self, new_dvs):
        """Concatenate another DEFINITION_DV's records onto this one."""
        assert isinstance(new_dvs, DEFINITION_DV), 'input must be of type DEFINITION_DV'
        self.KIND.extend(new_dvs['KIND'])
        # Repaired: this line carried a stray "| " prefix in the source.
        self.SCALE.extend(new_dvs['SCALE'])
        self.MARKER.extend(new_dvs['MARKER'])
        self.FFDTAG.extend(new_dvs['FFDTAG'])
        self.PARAM.extend(new_dvs['PARAM'])

#: class DEFINITION_DV
class DV_KIND(ordered_bunch):
    """ SU2.io.config.DV_KIND()
    List of design variables (Design variables are separated by semicolons)
    - HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc )
    - SURFACE_BUMP ( 2, Scale | Mark. List | x_Start, x_End, x_Loc )
    - NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit )
    - TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp )
    - ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn )
    - FFD_CONTROL_POINT ( 7, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, k_Ind, x_Mov, y_Mov, z_Mov )
    - FFD_TWIST_ANGLE ( 9, Scale | Mark. List | FFD_Box_ID, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End )
    - FFD_ROTATION ( 10, Scale | Mark. List | FFD_Box_ID, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End )
    - FFD_CAMBER ( 11, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind )
    - FFD_THICKNESS ( 12, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind )
    - FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov )
    - FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind )
    - FFD_THICKNESS_2D ( 17, Scale | Mark. List | FFD_Box_ID, i_Ind )
    """
    def __init__(self,*args,**kwarg):
        ordered_bunch.__init__(self)
        # Parallel lists: index i of FFDTAG and PARAM describes the same DV.
        self.FFDTAG = []
        self.PARAM = []
        self.update(ordered_bunch(*args,**kwarg))
    def append(self,new_dv):
        # Append one record (dict with 'FFDTAG' and 'PARAM' keys).
        self.FFDTAG.append(new_dv['FFDTAG'])
        self.PARAM. append(new_dv['PARAM'])
    def extend(self,new_dvs):
        # Concatenate another DV_KIND's records onto this one.
        assert isinstance(new_dvs,DV_KIND) , 'input must be of type DV_KIND'
        self.FFDTAG.extend(new_dvs['FFDTAG'])
        self.PARAM. extend(new_dvs['PARAM'])
#: class DV_KIND
|
opendatadurban/citizen_sensors | Weather_Station/test_wind_dir.py | Python | apache-2.0 | 2,418 | 0.031844 | # Simple example of reading the MCP3008 analog input channels and printing
import time
import sys
import numpy as np
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import RPi.GPIO as GPIO
import spidev
# Software SPI configuration:
#CLK = 18
#MISO = 23
#MOSI = 24
#CS = 25
#mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# Choose channel
an_chan = 3 # channel 8 (numbered 0-7)
# choose GPIO pin
ledPin = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(ledPin,GPIO.OUT)
samplingTime = 280.0
deltaTime = 40.0
sleepTime = 9680.0
directions = {'N':3.84,'NNE':1.98,'NE':2.25,'ENE':0.41,'E':0.45,'ESE':0.32,'SE':0.90,'SSE':0.62,'S':1.40,'SSW':1.19,'SW':3.08,'WSW':2.93,'W':4.62,'WNW':4.04,'NW':4.78,'NNW':3.43}
directions = dict((v,k) for k,v in directions.iteritems())
d = [3.84,1.98,2.25,0.41,0.45,0.32,0.90,0.62,1.40,1.19,3.08,2.93,4.62,4.04,4.78,3. | 43]
sortd = np.sort(d)
#print sortd
midp = (sortd[1:]+sortd[:-1])/2
midp = np.insert(midp,0,0)
midp = np.insert(midp,len(midp),5.0)
print midp
#for i in range(0,len(sortd)):
# print directions.get(sortd[i])
| # Main program loop.
try:
while True:
GPIO.output(ledPin,0)
time.sleep(samplingTime*10.0**-6)
# The read_adc function will get the value of the specified channel
voMeasured = mcp.read_adc(an_chan)
time.sleep(deltaTime*10.0**-6)
GPIO.output(ledPin,1)
time.sleep(sleepTime*10.0**-66)
calcVoltage = voMeasured*(5.0/1024)
c = round(calcVoltage,2)
print c
for i in range(1,len(midp)-1):
b = midp[i-1]
en = midp[i+1]
if c > 3.90 and c < 3.95:
direction = 4.78
break
elif c > b and c < en:
direction = sortd[i]
break
#dustDensity = 0.17*calcVoltage-0.1
#if dustDensity < 0:
# dustDensity = 0.00
# Print the ADC values.
print "Raw signal value (0 - 1023): ", voMeasured
print "Voltage: ", c, direction, directions.get(direction)
#print "Dust Density: ", dustDensity
time.sleep(1)
except KeyboardInterrupt:
GPIO.cleanup()
|
umars/npyscreen | npyscreen/fmPopup.py | Python | bsd-2-clause | 1,031 | 0.025218 | #!/usr/bin/python
# encoding: utf-8
from . import fmForm
from . import fmActionFormV2
import curses
class Popup(fmForm.Form):
    # Small fixed-size form (12 rows x 60 cols) displayed at (x=10, y=2).
    DEFAULT_LINES = 12
    DEFAULT_COLUMNS = 60
    SHOW_ATX = 10
    SHOW_ATY = 2
class ActionPopup(fmActionFormV2.ActionFormV2):
    # Popup variant with OK/Cancel actions; same geometry as Popup.
    DEFAULT_LINES = 12
    DEFAULT_COLUMNS = 60
    SHOW_ATX = 10
    SHOW_ATY = 2
class MessagePopup(Popup):
    """Popup that displays scrollable text in a Pager widget."""
    def __init__(self, *args, **keywords):
        # Imported lazily to avoid a circular import at module load time.
        from . import wgmultiline as multiline
        super(MessagePopup, self).__init__(*args, **keywords)
        # Repaired: this assignment was corrupted in the source ("= | self.add").
        # Leave a 2-line margin inside the usable form area.
        self.TextWidget = self.add(multiline.Pager, scroll_exit=True, max_height=self.widget_useable_space()[0]-2)
class PopupWide(Popup):
    # Full-width popup (DEFAULT_COLUMNS=None) anchored at the top-left corner.
    DEFAULT_LINES = 14
    DEFAULT_COLUMNS = None
    SHOW_ATX = 0
    SHOW_ATY = 0
class ActionPopupWide(fmActionFormV2.ActionFormV2):
    """Full-width action popup anchored at the top-left corner."""
    DEFAULT_LINES = 14
    DEFAULT_COLUMNS = None
    # Repaired: this assignment was corrupted in the source ("SHOW_ATX | = 0").
    SHOW_ATX = 0
    SHOW_ATY = 0
|
ecreall/nova-ideo | novaideo/views/novaideo_view_manager/widget.py | Python | agpl-3.0 | 776 | 0.002577 | # Copyright (c) 2014 by Ecreall under licence | AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
from deform.widget import default_resource_registry
class SearchFormWidget(deform.widget.FormWidget):
    # Deform form widget rendered with the novaideo search-form template.
    template = 'novaideo:views/novaideo_view_manager/templates/search_form.pt'
class SearchTextInputWidget(deform.widget.TextInputWidget):
    """Text input widget that pulls in the live-search JS/CSS resources."""
    template = 'novaideo:views/novaideo_view_manager/templates/search_textinput.pt'
    # Repaired: the resource name was corrupted in the source ("live_sear | ch").
    # It must match the name registered with default_resource_registry below.
    requirements = (('live_search', None),)
default_resource_registry.set_js_resources(
'live_search', None,
'novaideo:static/js/live_search.js')
default_resource_registry.set_css_resources(
'live_search', None,
'pontus:static/select2/dist/css/select2.min.css')
|
team-xue/xue | xue/cms/plugins/picture/migrations/0007_publisher2.py | Python | bsd-3-clause | 10,053 | 0.006963 |
from south.db import db
from django.db import models
from cms.plugins.picture.models import *
class Migration:
    def forwards(self, orm):
        """Apply: drop the Picture.public FK column and the picturepublic table."""
        # Deleting field 'Picture.public'
        db.delete_column('cmsplugin_picture', 'public_id')
        # Deleting model 'picturepublic'
        db.delete_table('cmsplugin_picturepublic')
    def backwards(self, orm):
        """Revert: recreate the picturepublic table and the Picture.public column."""
        # Adding field 'Picture.public'
        db.add_column('cmsplugin_picture', 'public', orm['picture.picture:public'])
        # Adding model 'picturepublic'
        db.create_table('cmsplugin_picturepublic', (
            ('url', orm['picture.picturepublic:url']),
            ('image', orm['picture.picturepublic:image']),
            ('float', orm['picture.picturepublic:float']),
            ('mark_delete', orm['picture.picturepublic:mark_delete']),
            ('page_link', orm['picture.picturepublic:page_link']),
            ('cmspluginpublic_ptr', orm['picture.picturepublic:cmspluginpublic_ptr']),
            ('alt', orm['picture.picturepublic:alt']),
        ))
        db.send_create_signal('picture', ['picturepublic'])
models = {
'cms.cmsplugin': {
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('models.BooleanField', [], {'default': '1', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('models.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('models.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.cmspluginpublic': {
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 6, 6, 4, 30, 141247)'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'page': ('models.ForeignKey', [], {'to': "orm['cms.PagePublic']"}),
'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPluginPublic']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('models.CharField', [], {'max_length': '70'}),
'created_by': ('models.CharField', [], {'max_length': '70'}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('models.PositiveInteger | Field', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('models.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('models.ForeignKey | ', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('models.BooleanField', [], {'default': '1', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('models.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('models.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('models.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('models.CharField', [], {'max_length': '100'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagepublic': {
'changed_by': ('models.CharField', [], {'max_length': '70'}),
'created_by': ('models.CharField', [], {'max_length': '70'}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 6, 6, 4, 28, 442937)'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', [], {'default': 'True', 'blank': 'True', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('models.CharField', [], {'blank': 'True', 'max_length': '80', 'null': 'True', 'db_index': 'True'}),
'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['cms.PagePublic']", 'blank': 'True'}),
'publication_date': ('models.DateTimeField', [], {'blank': 'True', 'null': 'True', 'db_index': 'True'}),
'publication_end_date': ('models.DateTimeField', [], {'blank': 'True', 'null': 'True', 'db_index': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reverse_id': ('models.CharField', [], {'blank': 'True', 'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('models.BooleanField', [], {'default': 'False', 'blank': 'Tr |
codeforboston/cornerwise | server/cornerwise/__init__.py | Python | mit | 65 | 0 | from .celery i | mport app as celery_app
__ | all__ = ["celery_app"]
|
CoherentLabs/depot_tools | tests/owners_finder_test.py | Python | bsd-3-clause | 9,645 | 0.004769 | #!/usr/bin/env vpython3
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for owners_finder.py."""
import os
import sys
import unittest
if sys.version_info.major == 2:
import mock
else:
from unittest import mock
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support import filesystem_mock
import owners_finder
import owners_client
ben = 'ben@example.com'
brett = 'brett@example.com'
darin = 'darin@example.com'
jochen = 'jochen@example.com'
john = 'john@example.com'
ken = 'ken@example.com'
peter = 'peter@example.com'
tom = 'tom@example.com'
nonowner = 'nonowner@example.com'
def owners_file(*email_addresses, **kwargs):
  """Build the text of an OWNERS file listing *email_addresses*.

  Keyword args:
    comment: optional text emitted as a leading '# ...' line.
    noparent: if truthy, emit a 'set noparent' directive before the addresses.
  """
  header_lines = []
  if kwargs.get('comment'):
    header_lines.append('# %s' % kwargs.get('comment'))
  if kwargs.get('noparent'):
    header_lines.append('set noparent')
  header = ''.join(line + '\n' for line in header_lines)
  return header + '\n'.join(email_addresses) + '\n'
class TestClient(owners_client.OwnersClient):
  """In-memory OwnersClient stub backed by a fixed path -> owners mapping."""
  def __init__(self):
    super(TestClient, self).__init__()
    # NOTE(review): 'ken' appears twice for chrome/gpu/gpu_channel.h —
    # presumably intentional fixture data; confirm before deduplicating.
    self.owners_by_path = {
      'DEPS': [ken, peter, tom],
      'base/vlog.h': [ken, peter, tom],
      'chrome/browser/defaults.h': [brett, ben, ken, peter, tom],
      'chrome/gpu/gpu_channel.h': [ken, ben, brett, ken, peter, tom],
      'chrome/renderer/gpu/gpu_channel_host.h': [peter, ben, brett, ken, tom],
      'chrome/renderer/safe_browsing/scorer.h': [peter, ben, brett, ken, tom],
      'content/content.gyp': [john, darin],
      'content/bar/foo.cc': [john, darin],
      'content/baz/froboz.h': [brett, john, darin],
      'content/baz/ugly.cc': [brett, john, darin],
      'content/baz/ugly.h': [brett, john, darin],
      'content/common/common.cc': [jochen, john, darin],
      'content/foo/foo.cc': [jochen, john, darin],
      'content/views/pie.h': [ben, john, self.EVERYONE],
    }

  def ListOwners(self, path):
    # Normalize native separators so Windows paths hit the mapping keys.
    path = path.replace(os.sep, '/')
    return self.owners_by_path[path]
class OutputInterceptedOwnersFinder(owners_finder.OwnersFinder):
  """OwnersFinder that captures writeln() output into nested lists so tests
  can inspect exactly what would have been printed."""

  def __init__(self, files, author, reviewers, client, disable_color=False):
    super(OutputInterceptedOwnersFinder, self).__init__(
        files, author, reviewers, client, disable_color=disable_color)
    # Current output buffer plus a stack of buffers for indented sections.
    self.output = []
    self.indentation_stack = []

  def resetText(self):
    """Discard all captured output."""
    self.output = []
    self.indentation_stack = []

  def indent(self):
    # Push the current buffer and start collecting into a fresh nested one.
    self.indentation_stack.append(self.output)
    self.output = []

  def unindent(self):
    # Pop back to the parent buffer and attach the nested block to it.
    nested_block = self.output
    self.output = self.indentation_stack.pop()
    self.output.append(nested_block)

  def writeln(self, text=''):
    self.output.append(text)
class _BaseTestCase(unittest.TestCase):
  """Shared fixtures: a canned changed-file list and finder factories."""

  default_files = [
      'base/vlog.h',
      'chrome/browser/defaults.h',
      'chrome/gpu/gpu_channel.h',
      'chrome/renderer/gpu/gpu_channel_host.h',
      'chrome/renderer/safe_browsing/scorer.h',
      'content/content.gyp',
      'content/bar/foo.cc',
      'content/baz/ugly.cc',
      'content/baz/ugly.h',
      'content/views/pie.h'
  ]

  def ownersFinder(self, files, author=nonowner, reviewers=None):
    """Build an output-capturing finder over the fake owners database."""
    reviewers = reviewers or []
    return OutputInterceptedOwnersFinder(
        files, author, reviewers, TestClient(), disable_color=True)

  def defaultFinder(self):
    """Finder over the full default_files fixture."""
    return self.ownersFinder(self.default_files)
class OwnersFinderTests(_BaseTestCase):
    def test_constructor(self):
        self.assertNotEqual(self.defaultFinder(), None)

    def test_skip_files_owned_by_reviewers(self):
        # brett owns chrome/browser/defaults.h but not content/bar/foo.cc,
        # so only the latter should remain unreviewed.
        files = [
            'chrome/browser/defaults.h',  # owned by brett
            'content/bar/foo.cc',         # not owned by brett
        ]
        finder = self.ownersFinder(files, reviewers=[brett])
        self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})

    def test_skip_files_owned_by_author(self):
        # Same as above, except brett is the CL author rather than a reviewer.
        files = [
            'chrome/browser/defaults.h',  # owned by brett
            'content/bar/foo.cc',         # not owned by brett
        ]
        finder = self.ownersFinder(files, author=brett)
        self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})

    def test_native_path_sep(self):
        # Create a path with backslashes on Windows to make sure these are handled.
        # This test is a harmless duplicate on other platforms.
        native_slashes_path = 'chrome/browser/defaults.h'.replace('/', os.sep)
        files = [
            native_slashes_path,   # owned by brett
            'content/bar/foo.cc',  # not owned by brett
        ]
        finder = self.ownersFinder(files, reviewers=[brett])
        self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_reset(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
for _ in range(2):
expected = [brett, darin, john, peter, ken, ben, tom]
self.assertEqual(finder.owners_queue, expected)
self.assertEqual(finder.unreviewed_files, {
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h'
})
self.assertEqual(finder.selected_owners, set())
self.assertEqual(finder.deselected_owners, set())
self.assertEqual(finder.reviewed_by, {})
self.assertEqual(finder.output, [])
finder.select_owner(john)
finder.reset()
finder.resetText()
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_select(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
finder.select_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {john})
self.assertEqual(finder.deselected_owners, {darin})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': john,
'content/baz/ugly.cc': john,
'content/baz/ugly.h': john,
'content/content.gyp': john})
self.assertEqual(finder.output,
['Selected: ' + john, 'Deselected: ' + darin])
finder = self.defaultFinder()
finder.select_owner(darin)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Selected: ' + darin, 'Deselected: ' + john])
finder = self.defaultFinder()
finder.select_owner(brett)
expected = [darin, john, peter, ken, tom]
self.assertEqual(finder.owners_queue, expected)
self.assertEqual(finder.selected_owners, {brett})
self.assertEqual(finder.deselected_owners, {ben})
self.assertEqual(finder.reviewed_by,
{'chrome/browser/defaults.h': brett,
'chrome/gpu/gpu_channel.h': brett,
'chrome/renderer/gpu/gpu_channel_host.h': brett,
'chrome/renderer/safe_browsing/scorer.h': brett,
'content/baz/ugly.cc': brett,
'content/baz/ugly.h': brett})
self.assertEqual(finder.output,
['Selected: ' + brett, 'Deselected: ' + ben])
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_deselect(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
finder.deselect_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqu |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_subprocess.py | Python | mit | 31,033 | 0.001901 | import unittest
from test import support
import subprocess
import sys
import signal
import os
import tempfile
import time
import re
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
def remove_stderr_debug_decorations(stderr):
    """Strip a trailing debug-build refcount banner from captured stderr.

    Debug builds of Python print "[N refs]" to stderr at shutdown (see the
    module comment above), which would otherwise break assertions on a
    spawned process's stderr.

    Args:
        stderr: raw bytes captured from a child process's stderr.

    Returns:
        The bytes with any trailing "[N refs]" line (plus optional CR/LF)
        removed.
    """
    # Raw string: the previous non-raw "\[\d+ ...]" relied on Python passing
    # unrecognized escapes through, which raises a SyntaxWarning (eventually
    # an error) on modern Python.
    return re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr.decode()).encode()
class ProcessTestCase(unittest.TestCase):
"""Exercises subprocess.Popen/call/check_call and stream redirection.

NOTE(review): leading indentation in this chunk was stripped by the
extraction that produced this file; code lines below are kept byte-for-byte.
"""
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
# Guarded with hasattr: older test.support versions lack reap_children.
if hasattr(support, "reap_children"):
support.reap_children()
def tearDown(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
if hasattr(support, "reap_children"):
support.reap_children()
def mkstemp(self):
"""wrapper for mkstemp, calling mktemp if mkstemp is not available"""
# Fallback uses the insecure tempfile.mktemp(); only reachable on ancient
# Pythons that predate tempfile.mkstemp. Returns (fd, filename).
if hasattr(tempfile, "mkstemp"):
return tempfile.mkstemp()
else:
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
#
# Generic tests
#
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
try:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, 47)
else:
self.fail("Expected CalledProcessError")
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected
p = subprocess.Popen([sys.executable, "-c",
'print(" this bit of output is from a '
'test of stdout in a different '
'process ...")'],
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.wait()
self.assertEqual(p.stderr, None)
def test_executable(self):
p = subprocess.Popen(["somethingyoudonthave",
"-c", "import sys; sys.exit(47)"],
executable=sys.executable)
p.wait()
self.assertEqual(p.returncode, 47)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.exec | utable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.assertEqual(p.stdout.read(), b"orange")
def test_st | dout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.assertEqual(remove_stderr_debug_decorations(p.stderr.read()),
b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(remove_stderr_debug_decorations(os.read(d, 1024)),
b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(remove_stderr_debug_decorations(tf.read()),
b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = p.stdout.read()
stripped = remove_stderr_debug_decorations(output)
self.assertEqual(stripped, b"appleor |
bit-trade-one/SoundModuleAP | lib-src/lv2/sratom/waflib/Scripting.py | Python | gpl-2.0 | 10,970 | 0.056427 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
Logs.init_log()
if Context.WAFVERSION!=version:
Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
sys.exit(1)
if'--version'in sys.argv:
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Context.waf_dir=wafdir
Context.launch_dir=current_directory
no_climb=os.environ.get('NOCLIMB',None)
if not no_climb:
for k in no_climb_commands:
if k in sys.argv:
no_climb=True
break
cur=current_directory
while cur:
lst=os.listdir(cur)
if Options.lockfile in lst:
env=ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur,Options.lockfile))
ino=os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
for x in[env.run_dir,env.top_dir,env.out_dir]:
if Utils.is_win32:
if cur==x:
load=True
break
else:
try:
ino2=os.stat(x)[stat.ST_INO]
except OSError:
pass
else:
if ino==ino2:
load=True
break
else:
Logs.warn('invalid lock file in %s'%cur)
load=False
if load:
Context.run_dir=env.run_dir
Context.top_dir=env.top_dir
Context.out_dir=env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir=cur
next=os.path.dirname(cur)
if next==cur:
break
cur=next
if no_climb:
break
if not Context.run_dir:
if'-h'in sys.argv or'--help'in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
sys.exit(1)
try:
set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
except Errors.WafError ,e:
Logs.pprint('RED',e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception ,e:
Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
try:
run_commands()
except Errors.WafError ,e:
if Logs.verbose>1:
Logs.pprint('RED',e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except SystemExit:
raise
except Exception ,e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED','Interrupted')
sys.exit(68)
def set_main_module(file_path):
Context.g_module=Context.load_module(file_path)
Context.g_module.root_path=file_path
def set_def(obj):
name=obj.__name__
if not name in Context.g_module.__dict__:
setattr(Context.g_module,name,obj)
for k in[update,dist,distclean,distcheck,update]:
set_def(k)
if not'init'in Context.g_module.__dict__:
Context.g_module.init=Utils.nada
if not'shutdown'in Context.g_module.__dict__:
Context.g_module.shutdown=Utils.nada
if not'options'in Context.g_module.__dict__:
Context.g_module.options=Utils.nada
def parse_options():
Context.create_context('options').execute()
if not Options.commands:
Options.commands=[default_cmd]
Options.commands=[x for x in Options.commands if x!='options']
Logs.verbose=Options.options.verbose
Logs.init_log()
if Options.options.zones:
Logs.zones=Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
def run_command(cmd_name):
ctx=Context.create_context(cmd_name)
ctx.log_timer=Utils.Timer()
ctx.options=Options.options
ctx.cmd=cmd_name
ctx.execute()
return ctx
def run_commands():
parse_options()
run_command('init')
while Options.commands:
cmd_name=Options.commands.pop(0)
ctx=run_command(cmd_name)
Logs.info('%r finished successfully (%s)'%(cmd_name,str(ctx.log_timer)))
run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
    """Best-effort removal of build artifacts under *dirname*, then the waf
    database, config.log and the c4che cache directory."""
    for (root, dirs, files) in os.walk(dirname):
        for f in files:
            if not _can_distclean(f):
                continue
            fname = root + os.sep + f
            try:
                os.unlink(fname)
            except OSError:
                Logs.warn('Could not remove %r' % fname)
    for x in [Context.DBFILE, 'config.log']:
        try:
            os.unlink(x)
        except OSError:
            # missing file is fine: distclean is best-effort
            pass
    try:
        shutil.rmtree('c4che')
    except OSError:
        pass
def distclean(ctx):
'''removes the build directory'''
lst=os.listdir('.')
for f in lst:
if f==Options.lockfile:
try:
proj=ConfigSet.ConfigSet(f)
except IOError:
Logs.warn('Could not read %r'%f)
continue
if proj['out_dir']!=proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('project %r cannot be removed'%proj[Context.OUT])
else:
distclean_dir(proj['out_dir'])
for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
try:
os.remove(os.path.join(k,Options.lockfile))
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('file %r cannot be removed'%f)
if f.startswith('.waf')and not Options.commands:
shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
'''creates an archive containing the project source code'''
cmd='dist'
fun='dist'
algo='tar.bz2'
ext_algo={}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name=self.get_arch_name()
try:
self.base_path
except AttributeError:
self.base_path=self.path
node=self.base_path.make_node(arch_name)
try:
node.delete()
except Exception:
pass
files=self.get_files()
if self.algo.startswith('tar.'):
tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
for x in files:
self.add_tar_file(x,tar)
tar.close()
elif self.algo=='zip':
import zipfile
zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest=" (sha=%r)"%sha(node.read()).hexdigest()
except Exception:
digest=''
Logs.info('New archive created: %s%s'%(self.arch_name,digest))
def get_tar_path(self,node):
return node.abspath()
def add_tar_file(self,x,tar):
p=self.get_tar_path(x)
tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
tinfo.uid=0
tinfo.gid=0
tinfo.uname='root'
tinfo.gname='root'
fu=None
try:
fu=open(p,'rb')
tar.addfile(tinfo,fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except AttributeError:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except AttributeError:
self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except AttributeError:
appname=getattr(Context.g_module,Context.APPNAME,'noname')
version=getattr(Context.g_module,Context.VERSION,'1.0')
self.base_name=appname+'-'+version
return se |
google/grr | grr/server/grr_response_server/databases/mysql_time_test.py | Python | apache-2.0 | 411 | 0.004866 | #!/usr/bin/env python
from absl import app
from absl.testing import absltest
from grr_response_server.databases import db_time_test
from grr_response_server.databases import mysql_test
from grr.test_lib import test_lib
class MysqlClientsTest(db_time_test.DatabaseTimeTestMixin,
                       mysql_test.MysqlTestBase, absltest.TestCase):
    """Runs the shared database-time test suite against the MySQL backend."""
    pass


if __name__ == "__main__":
    app.run(test_lib.main)
|
davek44/Basset | src/basset_sample.py | Python | mit | 2,818 | 0.011001 | #!/usr/bin/env python
from optparse import OptionParser
import gzip
import random
import sys
################################################################################
# basset_sample.py
#
# Sample sequences from an existing dataset of sequences as BED file and
# activity table.
################################################################################
################################################################################
# main
######################### | #######################################################
def main():
    """Reservoir-sample sequences from a BED file and filter the matching
    rows out of an activity table.

    Writes <output_prefix>.bed (the sampled BED lines) and
    <output_prefix>_act.txt (the activity rows whose header column matches
    a sampled sequence).
    """
    usage = 'usage: %prog [options] <db_bed> <db_act_file> <sample_seqs> <output_prefix>'
    parser = OptionParser(usage)
    parser.add_option('-s', dest='seed', default=1, type='float',
                      help='Random number generator seed [Default: %default]')
    (options, args) = parser.parse_args()

    if len(args) != 4:
        parser.error('Must provide database BED and activity table and output prefix')
    else:
        bed_file = args[0]
        act_file = args[1]
        sample_seqs = int(args[2])
        out_pre = args[3]

    random.seed(options.seed)

    ############################################################
    # process BED: reservoir sampling
    ############################################################
    reservoir = [''] * sample_seqs

    with open(bed_file) as bed_in:
        # initial fill
        i = 0
        while i < sample_seqs:
            reservoir[i] = bed_in.readline()
            i += 1

        # sample: the (i+1)-th line replaces a kept line with probability
        # sample_seqs/(i+1), i.e. j uniform over [0, i] inclusive. The
        # previous randint(0, i+1) drew from i+2 outcomes, slightly
        # under-sampling later lines.
        for line in bed_in:
            j = random.randint(0, i)
            if j < sample_seqs:
                reservoir[j] = line
            i += 1

    with open('%s.bed' % out_pre, 'w') as bed_out:
        for r in range(len(reservoir)):
            bed_out.write(reservoir[r])

    ############################################################
    # process activity table
    ############################################################
    # hash sampled headers as "chrom:start-end(strand)"
    reservoir_headers = set()
    for line in reservoir:
        a = line.rstrip().split('\t')
        chrom, start, end, strand = a[0], a[1], a[2], a[5]
        reservoir_headers.add('%s:%s-%s(%s)' % (chrom, start, end, strand))

    # 'rt' so gzipped tables are decoded to text like plain ones
    opener = gzip.open if act_file[-3:] == '.gz' else open
    with opener(act_file, 'rt') as act_in, \
            open('%s_act.txt' % out_pre, 'w') as act_out:
        # copy header line unconditionally
        act_out.write(act_in.readline())
        # keep only rows whose first column is a sampled header
        for line in act_in:
            a = line.split('\t')
            if a[0] in reservoir_headers:
                act_out.write(line)


################################################################################
# __main__
################################################################################
if __name__ == '__main__':
    main()
|
tinutomson/wikicoding | wiki/plugins/macros/wiki_plugin.py | Python | gpl-3.0 | 905 | 0.007735 | from __future__ import absolute_import
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.macros import settings
from wiki.plugins.macros.mdx.macro import MacroExtension
from wiki.plugins.macros.mdx.toc import WikiTocExtension
# from wiki.plugins.macros.mdx.wikilinks import WikiLinkExtension
class MacroPlugin(BasePlugin):
    """Wiki plugin registering the macro and TOC markdown extensions."""

    slug = settings.SLUG
    sidebar = {
        'headline': _('Macros'),
        'icon_class': 'fa-play',
        'template': 'wiki/plugins/macros/sidebar.html',
        'form_class': None,
        'get_form_kwargs': (lambda a: {}),
    }
    markdown_extensions = [MacroExtension(), WikiTocExtension()]

    def __init__(self):
        pass


registry.register(MacroPlugin)
|
sortsimilar/Citation-Tree | markstress.py | Python | apache-2.0 | 1,287 | 0.020202 | ### This program intends to combine same GSHF in citation tree, and sort them according to the first letter of title;
### Author: Ye Gao
### Date: 2017-11-7
import csv
file = open('NodeCheckList.csv', 'rb')
reader = csv.reader(file)
NodeCheckList = list(reader)
file.close()
#print NodeCheckList
FirstRow = NodeCheckList.pop(0)
FirstRow[8] = 'Frequency'
# convert cited time from string to integer;
for elem | ent in NodeCheckList:
if element[2] != "":
element[2] = int(element[2])
SortYear = sorted(NodeCheckList, key=lambda l:l[3], reverse=True)
SortCiteTimes = sorted(NodeCheckList, key=lambda l:l[2], reverse=True)
print SortYear
NodeStressList = []
for element in NodeCheckList:
if (int(element[7]) == 0) or (int(element[7]) == 1) or (int(element[7]) == 2):
NodeStressList.append(element)
SortTitle = sorted(NodeStressList, key=lambda l:l[4], rever | se=False)
SortTitle = [FirstRow] + SortTitle
title = ""
CombineTitle = []
for element in SortTitle:
if element[4] != title:
CombineTitle.append(element)
else:
CombineTitle[-1][1] += '|' + element[1]
title = element[4]
# save result list to NodeStressList.csv;
file = open('NodeStressList.csv','wb')
for i in CombineTitle:
for j in i:
file.write(str(j))
file.write(',')
file.write('\n')
file.close()
|
r-o-b-b-i-e/pootle | pootle/apps/pootle_fs/resources.py | Python | gpl-3.0 | 5,525 | 0 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from fnmatch import fnmatch
from django.db.models import F, Max
from django.utils.functional import cached_property
from pootle_store.models import Store
from .models import StoreFS
from .utils import StoreFSPathFilter, StorePathFilter
class FSProjectResources(object):
    """Queryset helpers for one project's stores and their FS tracking."""

    def __init__(self, project):
        self.project = project

    def __str__(self):
        return "<%s(%s)>" % (self.__class__.__name__, self.project)

    @property
    def stores(self):
        return Store.objects.filter(
            translation_project__project=self.project)

    @property
    def tracked(self):
        return StoreFS.objects.filter(
            project=self.project).select_related("store")

    @property
    def synced(self):
        # tracked resources with both a sync revision and a sync hash
        qs = self.tracked.exclude(last_sync_revision__isnull=True)
        return qs.exclude(last_sync_hash__isnull=True)

    @property
    def unsynced(self):
        # tracked resources with neither a sync revision nor a sync hash
        qs = self.tracked.filter(last_sync_revision__isnull=True)
        return qs.filter(last_sync_hash__isnull=True)

    @property
    def trackable_stores(self):
        return self.stores.exclude(obsolete=True).filter(fs__isnull=True)
class FSProjectStateResources(object):
    """Wrap FSPlugin and cache available resources

    Accepts `pootle_path` and `fs_path` glob arguments.
    If present all resources are filtered accordingly.
    """

    def __init__(self, context, pootle_path=None, fs_path=None):
        self.context = context
        self.pootle_path = pootle_path
        self.fs_path = fs_path

    def match_fs_path(self, path):
        """Match fs_paths using file glob if set"""
        if not self.fs_path or fnmatch(path, self.fs_path):
            return path

    def _exclude_staged(self, qs):
        # Resources staged for removal/merge are mid-operation; leave them out.
        return (
            qs.exclude(staged_for_removal=True)
            .exclude(staged_for_merge=True))

    @cached_property
    def found_file_matches(self):
        return sorted(self.context.find_translations(
            fs_path=self.fs_path, pootle_path=self.pootle_path))

    @cached_property
    def found_file_paths(self):
        # second element of each match tuple is the filesystem path
        return [x[1] for x in self.found_file_matches]

    @cached_property
    def resources(self):
        """Uncached Project resources provided by FSPlugin"""
        return self.context.resources

    @cached_property
    def store_filter(self):
        """Filter Store querysets using file globs"""
        return StorePathFilter(
            pootle_path=self.pootle_path)

    @cached_property
    def storefs_filter(self):
        """Filter StoreFS querysets using file globs"""
        return StoreFSPathFilter(
            pootle_path=self.pootle_path,
            fs_path=self.fs_path)

    @cached_property
    def synced(self):
        """Returns tracked StoreFSs that have sync information, and are not
        currently staged for any kind of operation
        """
        return self.storefs_filter.filtered(
            self._exclude_staged(self.resources.synced))

    @cached_property
    def trackable_stores(self):
        """Stores that are not currently tracked but could be"""
        _trackable = []
        stores = self.store_filter.filtered(self.resources.trackable_stores)
        for store in stores:
            fs_path = self.match_fs_path(
                self.context.get_fs_path(store.pootle_path))
            if fs_path:
                _trackable.append((store, fs_path))
        return _trackable

    @cached_property
    def trackable_store_paths(self):
        """Dictionary of pootle_path, fs_path for trackable Stores"""
        return {
            store.pootle_path: fs_path
            for store, fs_path
            in self.trackable_stores}

    @cached_property
    def missing_file_paths(self):
        # tracked on disk previously but no longer found by the plugin
        return [
            path for path in self.tracked_paths.keys()
            if path not in self.found_file_paths]

    @cached_property
    def tracked(self):
        """StoreFS queryset of tracked resources"""
        return self.storefs_filter.filtered(self.resources.tracked)

    @cached_property
    def tracked_paths(self):
        """Dictionary of fs_path, path for tracked StoreFS"""
        return dict(self.tracked.values_list("path", "pootle_path"))

    @cached_property
    def unsynced(self):
        """Returns tracked StoreFSs that have NO sync information, and are not
        currently staged for any kind of operation
        """
        return self.storefs_filter.filtered(
            self._exclude_staged(
                self.resources.unsynced))

    @cached_property
    def pootle_changed(self):
        """StoreFS queryset of tracked resources where the Store has changed
        since it was last synced.
        """
        return (
            self.synced.exclude(store_id__isnull=True)
            .exclude(store__obsolete=True)
            .annotate(max_revision=Max("store__unit__revision"))
            .exclude(last_sync_revision=F("max_revision")))

    def reload(self):
        """Uncache cached_properties"""
        # Iterate over a snapshot of the keys: deleting from __dict__ while
        # iterating .items() raises RuntimeError on Python 3.
        for k in list(self.__dict__):
            if k in ("context", "pootle_path", "fs_path"):
                continue
            del self.__dict__[k]
|
quattor/aquilon | lib/aquilon/worker/commands/add_rack_bunker.py | Python | apache-2.0 | 1,006 | 0 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add rack --bunker`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.add_rack import CommandAddRack
class CommandAddRackBunker(CommandAddRack):
"""Handle `aq add rack --bunker`.

All behavior is inherited from CommandAddRack; only the set of
required parameters differs (bunker, row, column).
"""
required_parameters = ["bunker", "row", "column"]
|
manuelm/pyload | module/ConfigParser.py | Python | gpl-3.0 | 11,895 | 0.004456 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from time import sleep
from os.path import exists, join
from shutil import copy
from traceback import print_exc
from utils import chmod
# ignore these plugin configs, mainly because plugins were wiped out
IGNORE = (
"FreakshareNet", "SpeedManager", "ArchiveTo", "ShareCx", ('hooks', 'UnRar'),
'EasyShareCom', 'FlyshareCz'
)
CONF_VERSION = 1
class ConfigParser:
"""
holds and manage the configuration
current dict layout:
{
section : {
option : {
value:
type:
desc:
}
desc:
}
"""
def __init__(self):
"""Constructor"""
self.config = {} # the config values
self.plugin = {} # the config for plugins
self.oldRemoteData = {}
self.pluginCB = None # callback when plugin config value is changed
self.checkVersion()
self.readConfig()
self.deleteOldPlugins()
def checkVersion(self, n=0):
"""determines if config need to be copied"""
try:
if not exists("pyload.conf"):
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
if not exists("plugin.conf"):
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
f = open("pyload.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
print "Old version of config was replaced"
f = open("plugin.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
print "Old version of plugin-config replaced"
except:
if n < 3:
sleep(0.3)
self.checkVersion(n + 1)
else:
raise
def readConfig(self):
"""reads the config file"""
self.config = self.parseConfig(join(pypath, "module", "config", "default.conf"))
self.plugin = self.parseConfig("plugin.conf")
try:
homeconf = self.parseConfig("pyload.conf")
if "username" in homeconf["remote"]:
if "password" in homeconf["remote"]:
self.oldRemoteData = {"username": homeconf["remote"]["username"]["value"],
"password": homeconf["remote"]["username"]["value"]}
del homeconf["remote"]["password"]
del homeconf["remote"]["username"]
self.updateValues(homeconf, self.config)
except Exception, e:
print "Config Warning"
print_exc()
def parseConfig(self, config):
"""parses a given configfile"""
f = open(config)
config = f.read()
config = config.splitlines()[1:]
conf = {}
section, option, value, typ, desc = "", "", "", "", ""
listmode = False
for line in config:
comment = line.rfind("#")
if line.find(":", comment) < 0 > line.find("=", comment) and comment > 0 and line[comment - 1].isspace():
line = line.rpartition("#") # removes comments
if line[1]:
line = line[0]
else:
line = line[2]
line = line.strip()
try:
if line == "":
continue
elif line.endswith(":"):
section, none, desc = line[:-1].partition('-')
section = section.strip()
desc = desc.replace('"', "").strip()
conf[section] = {"desc": desc}
else:
if listmode:
if line.endswith("]"):
listmode = False
line = line.replace("]", "")
value += [self.cast(typ, x.strip()) for x in line.split(",") if x]
if not listmode:
conf[se | ction][option] = {"desc": desc,
"type": typ,
"value": value}
else:
content, none, value = line.partition("=")
content, none, desc = content.partition(":")
desc = desc.re | place('"', "").strip()
typ, none, option = content.strip().rpartition(" ")
value = value.strip()
if value.startswith("["):
if value.endswith("]"):
listmode = False
value = value[:-1]
else:
listmode = True
value = [self.cast(typ, x.strip()) for x in value[1:].split(",") if x]
else:
value = self.cast(typ, value)
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
except Exception, e:
print "Config Warning"
print_exc()
f.close()
return conf
def updateValues(self, config, dest):
"""sets the config values from a parsed config file to values in destination"""
# Only the "value" field of options that already exist in dest is copied;
# sections/options present in the parsed file but unknown to dest are
# deliberately dropped (see the commented-out branches below).
# NOTE(review): iterkeys() is Python 2 only, consistent with this module.
for section in config.iterkeys():
if section in dest:
for option in config[section].iterkeys():
# "desc"/"outline" are section metadata, not options
if option in ("desc", "outline"): continue
if option in dest[section]:
dest[section][option]["value"] = config[section][option]["value"]
#else:
# dest[section][option] = config[section][option]
#else:
# dest[section] = config[section]
def saveConfig(self, config, filename):
"""saves config to filename"""
with open(filename, "wb") as f:
chmod(filename, 0600)
f.write("version: %i \n" % CONF_VERSION)
for section in config.iterkeys():
f.write('\n%s - "%s":\n' % (section, config[section]["desc"]))
for option, data in config[section].iteritems():
if option in ("desc", "outline"): continue
if isinstance(data["value"], list):
value = "[ \n"
for x in data["value"]:
value += "\t\t" + str(x) + ",\n"
value += "\t\t]\n"
else:
if type(data["value"]) in (str, unicode):
value = data["value"] + "\n"
else:
value = str(data["value"]) + "\n"
try:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value))
except UnicodeEncodeError:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value.encode("utf8")))
def cast(self, typ, value):
"""cast value to given format"""
if type(value) not in (str, unicode):
return value
elif typ == "int":
return int(value)
elif typ == "bool":
return True if value.lower() in ("1", "true", "on", "an", "yes") else False
elif typ == "time":
if not value: value = "0:00"
|
plotly/python-api | packages/python/plotly/plotly/validators/layout/_shapes.py | Python | mit | 8,858 | 0 | import _plotly_utils.basevalidators
class ShapesValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="shapes", parent_name="layout", **kwargs):
super(ShapesValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Shape"),
data_docs=kwargs.pop(
"data_docs",
"""
editable
Determines whether the shape could be activated
for edit or not. Has no effect when the older
editable shapes mode is enabled via
`config.editable` or
`config.edits.shapePosition`.
fillcolor
Sets the color filling the shape's interior.
Only applies to closed shapes.
fillrule
Determines which regions of complex paths
constitute the interior. For more info please
visit https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
layer
Specifies whether shapes are drawn below or
above traces.
line
:class:`plotly.graph_objects.layout.shape.Line`
instance or dict with compatible properties
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the
pixel values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and
taken unmodified as pixels relative to
`xanchor` and `yanchor` in case of "pixel" size
mode. There are a few restrictions / quirks
only absolute instructions, not relative. So
the allowed segments are: M, L, H, V, Q, C, T,
S, and Z arcs (A) are not allowed because
radius rx and ry are relative. In the future we
could consider supporting relative commands,
but we would have to decide on how to handle
date and log axes. Note that even as is, Q and
C Bezier paths that are smooth on linear axes
may not be smooth on log, and vice versa. no
chained "polybezier" commands - specify the
segment type for each one. On category axes,
values are numbers scaled to the serial numbers
of categories because using the categories
themselves there would be no way to describe
fractional positions On data axes: because
space and T are both normal components of path
strings, we can't use either to separate date
from time parts. Therefore we'll use underscore
for this purpose: 2015-02-21_13:45:56.789
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If
"line", a line is drawn from (`x0`,`y0`) to
(`x1`,`y1`) with respect to the axes' sizing
mode. If "circle", a circle is drawn from
((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius
(|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2
-`y0`)|) with respect to the axes' sizing mode.
If "rect", a rectangle is drawn linking
(`x0`,`y0`), (`x1`,`y0`), (`x1`,`y1`),
(`x0` | ,`y1`), (`x0`,`y0`) with respect to the
axes' sizing mode. If "path", draw a custom SVG
path using `path`. with respect to the axes'
sizing mode.
visible
Determines whether or not this shape is
visible.
x0
Sets the shape's starting x position. See
`type` and `xsizemode` for more info.
x1
Sets | the shape's end x position. See `type` and
`xsizemode` for more info.
xanchor
Only relevant in conjunction with `xsizemode`
set to "pixel". Specifies the anchor point on
the x axis to which `x0`, `x1` and x
coordinates within `path` are relative to. E.g.
useful to attach a pixel sized shape to a
certain data value. No effect when `xsizemode`
not set to "pixel".
xref
Sets the shape's x coordinate axis. If set to
an x axis id (e.g. "x" or "x2"), the `x`
position refers to an x coordinate. If set to
"paper", the `x` position refers to the
distance from the left side of the plotting
area in normalized coordinates where 0 (1)
corresponds to the left (right) side. If the
axis `type` is "log", then you must take the
log of your desired range. If the axis `type`
is "date", then you must convert the date to
unix time in milliseconds.
xsizemode
Sets the shapes's sizing mode along the x axis.
If set to "scaled", `x0`, `x1` and x
coordinates within `path` refer to data values
on the x axis or a fraction of the plot area's
width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in
terms of data or plot fraction but `x0`, `x1`
and x coordinates within `path` are pixels
relative to `xanchor`. This way, the shape can
have a fixed width while maintaining a position
relative to data or plot fraction.
y0
Sets the shape's starting y position. See
`type` and `ysizemode` for more info.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
yanchor
Only relevant in conjunction with `ysizemode`
set to "pixel". Specifies the anchor point on
the y axis to which `y0`, `y1` and y
coordinates within `path` are relative to. E.g.
useful to attach a pixel sized shape to a
certain data value. No effect when `ysizemode`
not set to "pixel".
yref
Sets the annotation's y coordinate axis. If set
to an y axis id (e.g. "y" or "y2"), the `y`
position refers to an y coordinate If set to
"paper", the `y` position refers to the
distance from the bottom of the plotting area
in normalized coordinates where 0 (1)
corresponds to the bottom (top).
ysizemode
Sets the shapes's siz |
intip/da-apps | plugins/da_centrallogin/modules/soappy/tests/speedTest.py | Python | gpl-2.0 | 2,976 | 0.005712 | #!/usr/bin/env python
ident = '$Id: speedTest. | py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import time
import sys
sys.path.insert(1, "..")
x='''<SOAP-ENV:Envelope
xmlns:SOAP-ENV="http://sche | mas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/1999/XMLSchema">
<SOAP-ENV:Body>
<ns1:getRate xmlns:ns1="urn:demo1:exchange" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<country1 xsi:type="xsd:string">USA</country1>
<country2 xsi:type="xsd:string">japan</country2>
</ns1:getRate>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
x2='''<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<varString xsi:type="xsd:string">West Virginia</varString>
<varInt xsi:type="xsd:int">-546</varInt>
<varFloat xsi:type="xsd:float">-5.398</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<varString xsi:type="xsd:string">New Mexico</varString>
<varInt xsi:type="xsd:int">-641</varInt>
<varFloat xsi:type="xsd:float">-9.351</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<varString xsi:type="xsd:string">Missouri</varString>
<varInt xsi:type="xsd:int">-819</varInt>
<varFloat xsi:type="xsd:float">1.495</varFloat>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
# Import in function, because for some reason they slow each other
# down in same namespace ???
def SOAPParse(inxml):
    """Parse *inxml* as a SOAP-RPC message (parsing + marshalling).

    Returns the elapsed wall-clock time in seconds; the parse result itself
    is discarded because only the timing is of interest.
    """
    from SOAPpy import parseSOAPRPC
    t= time.time()
    parseSOAPRPC(inxml)
    return time.time()-t
def SAXParse(inxml):
    """Parse *inxml* with the SAX API (no marshalling).

    Returns the elapsed wall-clock time in seconds; the document content is
    discarded — only parsing speed is measured.
    """
    import xml.sax
    # A bare ContentHandler ignores every event, so the timing reflects
    # parsing cost alone.
    handler = xml.sax.handler.ContentHandler()
    started = time.time()
    xml.sax.parseString(inxml, handler)
    return time.time() - started
def DOMParse(inxml):
    """Parse *inxml* into a DOM tree (no marshalling).

    Returns the elapsed wall-clock time in seconds; the resulting tree is
    discarded — only parsing speed is measured.
    """
    import xml.dom.minidom
    started = time.time()
    xml.dom.minidom.parseString(inxml)
    return time.time() - started
# Weird, but the SAX parser runs really slow the first time it is used —
# probably the one-off cost of loading a C extension module — so warm it up
# once here to keep the timings below comparable.
SAXParse(x)

# Time all three parsers, first on the small flat document ...
print
print "Simple XML"
print "SAX Parse, no marshalling ", SAXParse(x)
print "SOAP Parse, and marshalling ", SOAPParse(x)
print "DOM Parse, no marshalling ", DOMParse(x)
print
# ... then on the larger document that uses id/href references.
print "Complex XML (references)"
print "SAX Parse, no marshalling ", SAXParse(x2)
print "SOAP Parse, and marshalling ", SOAPParse(x2)
print "DOM Parse, no marshalling ", DOMParse(x2)
|
robmadole/briefs-caster | src/briefscaster/__init__.py | Python | bsd-3-clause | 1,980 | 0.001515 | import sys
import os
from os.path import dirname, join
from flask import Flask, request, abort
app = Flask(__name__)
config = {
'working_directory': os.getcwd(),
'always_regenerate': True}
@app.route('/')
def provide_briefcast():
    """Serve the BriefCast RSS feed listing all briefs in the working directory."""
    from briefscaster import briefcast

    url_root = request.url_root
    items = briefcast.find_briefs(config['working_directory'])

    rss_string = briefcast.create_feed(
        items,
        url_root)

    # The application/briefcast mimetype lets the Briefs app recognise the feed.
    return app.response_class(rss_string,
        mimetype='application/briefcast')
@app.route('/brieflist/<key>')
def brieflist(key):
    """Serve a single compiled brieflist identified by its cache *key*.

    Responds 404 for unknown keys.  When 'always_regenerate' is enabled the
    brieflist is rebuilt from its source on every request, which is handy
    during development.
    """
    from briefscaster import briefcast
    briefs_cache = briefcast.get_briefs_cache()

    if not key in briefs_cache:
        abort(404)

    if config['always_regenerate']:
        # Rebuild the artifact so edits to the source show up immediately.
        briefcast.create_brieflist(briefs_cache[key]['bs_filename'])

    filename = briefs_cache[key]['filename']

    with open(filename) as f:
        return app.response_class(f.read(), mimetype='application/brief')
def main():
    """Command-line entry point: serve briefs from sys.argv[1] (default: cwd)."""
    try:
        config['working_directory'] = sys.argv[1]
    except IndexError:
        # No directory argument supplied; keep the default working directory.
        pass

    print 'briefs-caster - Serving up some fine briefs for you\n'
    print 'Open http://<IP_ADDRESS>:5000 from the Briefs app\n'
    print 'CTRL-C to exit the server'

    # Bind to all interfaces so devices on the local network can reach the feed.
    app.run('0.0.0.0')
def get_briefs_utils():
    """Locate the bs and compact-briefs helper executables.

    Prefers the copies bundled under this package's bin/ directory when both
    are executable; otherwise falls back to the bare command names, assuming
    briefs-caster was installed with easy_install or pip and the tools are
    on the PATH.

    Returns a (bs_path, compact_briefs_path) tuple.
    """
    bundled_dir = join(dirname(__file__), 'bin')
    bundled_bs = join(bundled_dir, 'bc-bs')
    bundled_compact = join(bundled_dir, 'bc-compact-briefs')

    both_executable = all(os.access(tool, os.X_OK)
                          for tool in (bundled_bs, bundled_compact))
    if both_executable:
        # Use the bundled copies when they are present and runnable.
        return (bundled_bs, bundled_compact)
    # Otherwise rely on the commands being resolvable via the PATH.
    return ('bc-bs', 'bc-compact-briefs')
if __name__ == '__main__':
get_briefs_utils()
main()
|
NERC-CEH/jules-jasmin | majic/joj/controllers/loggedin.py | Python | gpl-2.0 | 304 | 0.023026 | import logging
from joj.lib.base import *
from paste.request import parse_querystring
import urllib2
log = logging.getLogger(__name__)
class LoggedinController(BaseController):
    """Controller used as the landing target right after login."""

    def index(self):
        # Serves a page whose only job is to close its own (popup) window
        # as soon as it finishes loading.
        #self closes window
        return '<html><head></head><body onload="window.close()"></body></html>'
|
hiliev/py-zfs-rescue | zfs/lzjb.py | Python | bsd-3-clause | 5,428 | 0.002579 | #
# An attempt at re-implementing LZJB compression in native Python.
#
# Created in May 2014 by Emil Brink <emil@obsession.se>. See LICENSE.
#
# --------------------------------------------------------- | ------------
#
# Copyright (c) 2014-2016, Emil Brink
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# LZJB format constants; the values mirror the reference C implementation
# used by ZFS (see illumos lzjb.c).
BYTE_BITS = 8
MATCH_BITS = 6
MATCH_MIN = 3
MATCH_MAX = (1 << MATCH_BITS) + (MATCH_MIN - 1)  # 66: longest encodable match
# Retained for backward compatibility with earlier versions of this module;
# the compressor below bounds its match scan explicitly instead of iterating
# this range (the old loop over-read the source buffer — see lzjb_compress).
MATCH_RANGE = range(MATCH_MIN, MATCH_MAX + 1)  # Length 64, fine on 2.x.
OFFSET_MASK = (1 << (16 - MATCH_BITS)) - 1  # 10-bit back-reference offset
LEMPEL_SIZE = 1024  # entries in the (3-byte-prefix hash -> last position) table


def size_encode(size, dst=None):
    """
    Encodes the given size in little-endian variable-length encoding.

    Seven data bits are stored per byte, least-significant group first; the
    high bit (0x80) marks the final byte.

    The dst argument can be an existing bytearray to append the size. If it's
    omitted (or None), a new bytearray is created and used.

    Returns the destination bytearray.
    """
    if dst is None:
        dst = bytearray()
    done = False
    while not done:
        dst.append(size & 0x7f)
        size >>= 7
        done = size == 0
    # Flag the last (most significant) byte as the terminator.
    dst[-1] |= 0x80
    return dst


def size_decode(src):
    """
    Decodes a size (encoded with size_encode()) from the start of src.

    Returns a tuple (size, len) where size is the size that was decoded,
    and len is the number of bytes from src that were consumed.
    """
    dst_size = 0
    pos = 0
    # val is the place value of the current 7-bit group (128 ** group index).
    val = 1
    while True:
        c = src[pos]
        pos += 1
        if c & 0x80:
            # High bit set: this is the final byte.
            dst_size += val * (c & 0x7f)
            break
        dst_size += val * c
        val <<= 7
    return dst_size, pos


def lzjb_compress(src, dst=None):
    """
    Compresses src, the source bytearray.

    If dst is not None, it's assumed to be the output bytearray and bytes are
    appended to it using dst.append(). If it is None, a new bytearray is
    created.

    The destination bytearray is returned.
    """
    if dst is None:
        dst = bytearray()
    # Hash table mapping a 3-byte-prefix hash to the last position it was seen.
    lempel = [0] * LEMPEL_SIZE
    copymap = 0  # index in dst of the current control byte
    copymask = 1 << (BYTE_BITS - 1)
    pos = 0  # Current input offset.
    while pos < len(src):
        copymask <<= 1
        if copymask == (1 << BYTE_BITS):
            # Start a new control byte covering the next 8 items; its bits
            # are filled in retroactively when a match is emitted.
            copymask = 1
            copymap = len(dst)
            dst.append(0)
        if pos > len(src) - MATCH_MAX:
            # Too close to the end for a full match scan; emit a literal.
            dst.append(src[pos])
            pos += 1
            continue
        # Hash the next 3 bytes to find a candidate back-reference.
        hsh = (src[pos] << 16) + (src[pos + 1] << 8) + src[pos + 2]
        hsh += hsh >> 9
        hsh += hsh >> 5
        hsh &= LEMPEL_SIZE - 1
        offset = (pos - lempel[hsh]) & OFFSET_MASK
        lempel[hsh] = pos
        cpy = pos - offset
        if cpy >= 0 and cpy != pos and src[pos:pos + 3] == src[cpy:cpy + 3]:
            # Encode a (length, offset) pair; the control bit marks a match.
            dst[copymap] |= copymask
            # Extend the match without ever reading src[pos + MATCH_MAX].
            # Bug fix: the previous `for mlen in MATCH_RANGE` loop probed
            # src[pos + MATCH_MAX], which raised IndexError whenever
            # pos == len(src) - MATCH_MAX and the match ran 63+ bytes
            # (e.g. compressing b'a' * 133).  Output is unchanged.
            mlen = MATCH_MIN
            while mlen < MATCH_MAX and src[pos + mlen] == src[cpy + mlen]:
                mlen += 1
            dst.append(((mlen - MATCH_MIN) << (BYTE_BITS - MATCH_BITS)) | (offset >> BYTE_BITS))
            dst.append(offset & 255)
            pos += mlen
        else:
            dst.append(src[pos])
            pos += 1
    return dst


def lzjb_decompress(src, dlen, dst=None):
    """
    Decompresses src, a bytearray of compressed data.

    dlen is the expected decompressed length; output stops once it is reached.

    The dst argument can be an optional bytearray which will have the output
    appended. If it's None, a new bytearray is created.

    Returns the output bytearray, or None if a back-reference points before
    the start of the output (corrupt input).
    """
    if dst is None:
        dst = bytearray()
    pos = 0   # read offset in src
    dpos = 0  # write offset in dst
    copymap = 0
    copymask = 1 << (BYTE_BITS - 1)
    while pos < len(src):
        copymask <<= 1
        if copymask == (1 << BYTE_BITS):
            # Fetch the next control byte: one bit per following item.
            copymask = 1
            copymap = src[pos]
            pos += 1
        if copymap & copymask:
            # Match item: decode (length, offset) and copy from earlier output.
            mlen = (src[pos] >> (BYTE_BITS - MATCH_BITS)) + MATCH_MIN
            offset = ((src[pos] << BYTE_BITS) | src[pos + 1]) & OFFSET_MASK
            pos += 2
            cpy = dpos - offset
            if cpy < 0:
                # Corrupt input: back-reference before the start of output.
                return None
            # Byte-by-byte copy; source and destination ranges may overlap,
            # which is how run-length-style matches reproduce themselves.
            while mlen > 0 and dpos < dlen:
                dst.append(dst[cpy])
                dpos += 1
                cpy += 1
                mlen -= 1
        elif dpos < dlen:
            dst.append(src[pos])
            dpos += 1
            pos += 1
    return dst
|
wangjun/pythoner.net | pythoner/books/spider.py | Python | gpl-3.0 | 6,613 | 0.012414 | #encoding=utf-8
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import time,math,os,re,urllib,urllib2,cookielib
from BeautifulSoup import BeautifulSoup
import time
import socket
import os
import db
from string import join
from PIL import Image
import os
class BrowserBase(object):
    """Minimal urllib2-based browser helper: cookie support, a random
    User-Agent per request, and a small tagged console logger."""

    # Numeric error code -> human readable description (used by subclasses).
    ERROR = {
        '0': 'Can not open the url, check your net',
        '1': 'Create download dir error',
        '2': 'The image links is empty',
        '3': 'Download failed',
        '4': 'Build soup error, the html is empty',
        '5': 'Can not save the image to your disk',
    }
    # NOTE(review): class-level attributes — every instance shares the same
    # image_links list; confirm that is intended before reusing this class.
    image_links = []
    image_count = 0

    def __init__(self):
        # Global default timeout so a stuck server cannot hang the spider.
        socket.setdefaulttimeout(20)

    def speak(self, name, content):
        """Print a console message tagged with the emitter's name."""
        print('[%s]%s' % (name, content))

    def openurl(self, url):
        """Open *url* with cookie handling and a randomly chosen User-Agent.

        Returns the response object.  On failure the error is logged via
        speak() and the original exception is re-raised.
        """
        cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        self.opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(self.opener)
        user_agents = [
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
            'Opera/9.25 (Windows NT 5.1; U; en)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
            'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
            "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
            "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
        ]
        # Pick a random agent so requests look less like a bot.
        agent = random.choice(user_agents)
        self.opener.addheaders = [("User-agent", agent), ("Accept", "*/*"), ('Referer', 'http://www.google.com')]
        try:
            res = self.opener.open(url)
        except Exception as e:
            # Bug fix: speak() takes (name, content); it used to be called
            # with a single argument, which raised TypeError and hid the real
            # error.  Re-raise the original exception instead of a bare
            # `raise Exception` so callers keep the failure details.
            self.speak(self.__class__.__name__, str(e) + url)
            raise
        else:
            return res
class Spider(BrowserBase):
    """Interactive scraper for a single douban.com book page."""

    name = 'Spider'

    def __init__(self):
        BrowserBase.__init__(self)
        # Keep prompting for a start-page URL until it can actually be fetched.
        while True:
            #book_page = 'http://book.douban.com/subject/1921890/'
            #book_page = 'http://book.douban.com/subject/4719162/'
            #book_page = 'http://book.douban.com/subject/3117898/'
            book_page = raw_input('book address:')
            try:
                self.html = self.openurl(book_page).read()
                self.url = book_page
            except Exception,e:
                print e
            else:
                break

    def get_info(self):
        """
        Parse the fetched page and return the book's metadata as a dict.
        """
        book = {}
        soup = BeautifulSoup(self.html)
        book['name'] = soup.h1.contents[1].string
        info_div = soup.findAll('div',id='info')[0]
        instrution = ''
        span = soup.find('span',{'class':'all hidden'})
        if span:
            # Collect the full description text, mapping missing fragments
            # ("None") to <br/> line breaks afterwards.
            for line in span.contents:
                try:
                    line.string
                except AttributeError:
                    continue
                else:
                    instrution += str(line.string)
            book['instrution'] = instrution.replace('None','<br/>')
        else:
            book['instrution'] = ''
        # The douban subject id is the 4-8 digit number embedded in the URL.
        reg = re.compile(r'(\d{4,8})')
        book['douban_id'] = int(reg.findall(self.url)[0])
        jpg_url = soup.find('a',{'class':'nbg'})['href']
        # The info block holds one "label: value" pair per <br />-separated
        # line; the labels are Chinese and must match the page verbatim.
        for line in str(info_div).split('<br />'):
            #print 'line',line
            soup = BeautifulSoup(line)
            if '作者' in line:  # authors
                author = []
                for a in soup.contents[0].findAll('a'):
                    author.append(a.string)
                book['author'] = join(author,'/')
            trans = []
            book['translator'] = ''
            if '译者' in line:  # translators
                for a in soup.contents[0].findAll('a'):
                    trans.append(a.string)
                book['translator'] = join(trans,'/')
            if '出版社' in line:  # publisher
                book['publish'] = soup.contents[1].string
            if '出版年' in line:  # publication date
                book['pub_date'] = soup.contents[1].string
            if '定价' in line:  # list price
                book['price'] = soup.contents[1].string
            if 'ISBN' in line:
                line = str(line)
                # ISBN-13 is the 13-digit number in the line.
                reg = re.compile(r'(\d{13})')
                book['isbn'] = int(reg.findall(line)[0])
            if '页数' in line:  # page count
                book['pages'] = int(soup.contents[1].string)
        self.download(jpg_url,book['isbn'])
        return book

    def download(self,jpg_url,douban_id):
        """
        Download the cover image and write a 120px thumbnail next to it.
        """
        root = os.path.normpath(os.path.dirname(__file__))
        file_path = '../static/books/%s.jpeg' %douban_id
        file_path = os.path.join(root,file_path)
        thumb_path = '../static/books/%s_120.jpeg'%douban_id
        thumb_path = os.path.join(root,thumb_path)
        urllib.urlretrieve(jpg_url,file_path)
        # Generate the thumbnail with PIL.
        im = Image.open(file_path)
        im.thumbnail((120,120),Image.ANTIALIAS)
        im.save(thumb_path,'jpeg')
# Script entry point: prompt for a douban book URL, scrape its metadata and
# cover image, then persist everything through the db helper module.
if __name__ == '__main__':
    #main()
    s = Spider()
    book = s.get_info()
    #print book
    db.insert_book(book['name'],book['author'],book['translator'],book['publish'],book['pub_date'],book['instrution'],book['price'],book['pages'],book['isbn'],book['douban_id'])
|
ric2b/Vivaldi-browser | chromium/chrome/common/extensions/api/PRESUBMIT.py | Python | bsd-3-clause | 869 | 0.009206 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/extensions/common.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API bui | lt into depot_tools.
"""
USE_PYTHON3 = True
import sys
def _CheckExterns(input_api, output_api):
original_sys_path = sys.path
join = input_api.os_path.join
src_root = input_api.change.RepositoryRoot()
try:
sys.path.append(join(src_root, 'extensions', 'common', 'api'))
from externs_checker import ExternsChecker
finally:
sys.path = original_sys_path
return ExternsChecker(input_api, output_api).RunChecks()
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit hook: run the externs consistency check when a CL is uploaded."""
    return _CheckExterns(input_api, output_api)
|
daniellowtw/MentalMaths | utility.py | Python | mit | 472 | 0.010593 | __author__ = 'Daniel'
from UserData import config
def get_integer_input(query="", default=None):
    """Prompt with *query* until the user types a numeric value.

    :param query: prompt text shown to the user
    :param default: value returned when the user submits an empty line
    :return: the entered value as an int, or *default* on empty input
    """
    while True:
        answer = input(query)
        # An empty line means "take the default" — but only when one exists.
        if answer == "" and default is not None:
            return default
        if answer.isnumeric():
            return int(answer)
def is_debug_mode():
    """Return True when the [Debug] Mode flag is enabled in the user config."""
    return config.getboolean("Debug","Mode")
__version__ | = "0.0.2" |
car3oon/saleor | saleor/search/backends/base.py | Python | bsd-3-clause | 8,616 | 0.001857 |
from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
class FilterError(Exception):
    """Raised when a queryset filter cannot be translated for the search backend."""
    pass
class FieldError(Exception):
    """Raised when filtering or searching is attempted on a non-indexed field."""
    pass
class BaseSearchQuery(object):
    """Translates a Django queryset plus a query string into backend search terms.

    Subclasses implement _process_lookup() and _connect_filters() for their
    specific search engine; the shared code here walks the queryset's WHERE
    tree and dispatches each leaf lookup to the backend.
    """

    # Boolean operator used to combine search terms when none is given.
    DEFAULT_OPERATOR = 'or'

    def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
        self.queryset = queryset
        self.query_string = query_string
        self.fields = fields
        self.operator = operator or self.DEFAULT_OPERATOR
        self.order_by_relevance = order_by_relevance

    def _get_filterable_field(self, field_attname):
        """Return the filterable search field matching *field_attname*, or None."""
        # Get field
        field = dict(
            (field.get_attname(self.queryset.model), field)
            for field in self.queryset.model.get_filterable_search_fields()
        ).get(field_attname, None)

        return field

    def _process_lookup(self, field, lookup, value):
        # Backend-specific translation of a single field lookup.
        raise NotImplementedError

    def _connect_filters(self, filters, connector, negated):
        # Backend-specific AND/OR/NOT combination of translated filters.
        raise NotImplementedError

    def _process_filter(self, field_attname, lookup, value):
        """Translate one ORM filter.

        Raises FieldError when the field is not indexed for filtering, and
        FilterError when the lookup type is not recognised by the backend.
        """
        # Get the field
        field = self._get_filterable_field(field_attname)

        if field is None:
            raise FieldError(
                'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
                field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
            )

        # Process the lookup
        result = self._process_lookup(field, lookup, value)

        if result is None:
            raise FilterError(
                'Could not apply filter on search results: "' + field_attname + '__' +
                lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '"" not recognised.'
            )

        return result

    def _get_filters_from_where_node(self, where_node):
        """Recursively convert a Django WHERE tree node into backend filters."""
        # Check if this is a leaf node
        if isinstance(where_node, Lookup):
            field_attname = where_node.lhs.target.attname
            lookup = where_node.lookup_name
            value = where_node.rhs

            # Ignore pointer fields that show up in specific page type queries
            if field_attname.endswith('_ptr_id'):
                return

            # Process the filter
            return self._process_filter(field_attname, lookup, value)

        elif isinstance(where_node, SubqueryConstraint):
            raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')

        elif isinstance(where_node, WhereNode):
            # Get child filters
            connector = where_node.connector
            child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
            # Drop children that produced no filter (e.g. ignored pointer fields).
            child_filters = [child_filter for child_filter in child_filters if child_filter]

            return self._connect_filters(child_filters, connector, where_node.negated)

        else:
            raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))

    def _get_filters_from_queryset(self):
        """Translate the whole queryset's WHERE clause into backend filters."""
        return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
    """Lazy, sliceable view over the hits of a search query.

    Subclasses implement _do_search() and _do_count(); results and counts
    are fetched on demand and memoised on the instance.  Slicing returns a
    new narrowed view without touching the backend.
    """

    def __init__(self, backend, query, prefetch_related=None):
        self.backend = backend
        self.query = query
        self.prefetch_related = prefetch_related
        self.start = 0
        self.stop = None
        self._results_cache = None
        self._count_cache = None
        self._score_field = None

    def _set_limits(self, start=None, stop=None):
        # Narrow the [start, stop) window relative to the current window.
        if stop is not None:
            absolute_stop = self.start + stop
            self.stop = absolute_stop if self.stop is None else min(self.stop, absolute_stop)
        if start is not None:
            absolute_start = self.start + start
            self.start = absolute_start if self.stop is None else min(self.stop, absolute_start)

    def _clone(self):
        # Copy the view (window included) but never the caches.
        duplicate = type(self)(self.backend, self.query, prefetch_related=self.prefetch_related)
        duplicate.start = self.start
        duplicate.stop = self.stop
        return duplicate

    def _do_search(self):
        raise NotImplementedError

    def _do_count(self):
        raise NotImplementedError

    def results(self):
        """Fetch the hits once and serve them from the cache afterwards."""
        if self._results_cache is None:
            self._results_cache = self._do_search()
        return self._results_cache

    def count(self):
        """Return the number of hits, reusing fetched results when possible."""
        if self._count_cache is None:
            cached = self._results_cache
            self._count_cache = len(cached) if cached is not None else self._do_count()
        return self._count_cache

    def __getitem__(self, key):
        narrowed = self._clone()

        if isinstance(key, slice):
            # Note: falsy bounds (None or 0) are treated as "unbounded".
            lower = int(key.start) if key.start else None
            upper = int(key.stop) if key.stop else None
            narrowed._set_limits(lower, upper)

            # Already-fetched results can be sliced locally.
            if self._results_cache is not None:
                narrowed._results_cache = self._results_cache[key]

            return narrowed

        if self._results_cache is not None:
            return self._results_cache[key]

        # Single index: fetch exactly one hit from the backend.
        narrowed.start = self.start + key
        narrowed.stop = self.start + key + 1
        return list(narrowed)[0]

    def __iter__(self):
        return iter(self.results())

    def __len__(self):
        return len(self.results())

    def __repr__(self):
        data = list(self[:21])
        if len(data) > 20:
            data[-1] = "...(remaining elements truncated)..."

        return '<SearchResults %r>' % data

    def annotate_score(self, field_name):
        """Return a clone that exposes each hit's relevance score as *field_name*."""
        scored = self._clone()
        scored._score_field = field_name
        return scored
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# # Model must be a class that is in the index
# if not class_is_indexed(model):
# return []
# Check that theres still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Che |
82Flex/DCRM | WEIPDCRM/apis/contenttype.py | Python | agpl-3.0 | 1,147 | 0 | # coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com>
This program is free software: you can redistribute it and/or modify
it under the terms o | f the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework import serializers, viewsets
from django.contrib.contenttypes.models import ContentType
class ContentTypeSerializer(serializers.ModelSerializer):
    """Serializes every field of Django's ContentType model."""

    class Meta:
        model = ContentType
        # Expose all model fields; nothing on ContentType is sensitive.
        exclude = []
class ContentTypeViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint listing Django content types."""

    queryset = ContentType.objects.all()
    serializer_class = ContentTypeSerializer
    # Content types are few; return the full list without pagination.
    pagination_class = None
|
RedHatQE/cfme_tests | cfme/intelligence/reports/menus.py | Python | gpl-2.0 | 6,600 | 0.001212 | # -*- coding: utf-8 -*-
"""Module handling report menus contents"""
from contextlib import contextmanager
import attr
from navmazing import NavigateToAttribute
from widgetastic.widget import Text
from widgetastic_patternfly import Button
from . import CloudIntelReportsView
from . import ReportsMultiBoxSelect
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from widgetastic_manageiq import FolderManager
from widgetastic_manageiq import ManageIQTree
class EditReportMenusView(CloudIntelReportsView):
title = Text("#explorer_title_text")
reports_tree = ManageIQTree("menu_roles_treebox")
# Buttons
save_button = Button("Save")
reset_button = Button("Reset")
default_button = Button("Default")
cancel_button = Button("Cancel")
commit_button = Button("Commit")
discard_button = Button("Discard")
manager = FolderManager(".//div[@id='folder_lists']/table")
report_select = ReportsMultiBoxSelect(
move_into="Move selected reports right",
move_from="Move selected reports left",
available_items="available_reports",
chosen_items="selected_reports"
)
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == 'Editing EVM Group "{}"'.format(self.context["object"].group) and
self.edit_report_menus.is_opened and
self.edit_report_menus.tree.currently_selected == [
"All EVM Groups",
self.context["object"].group
]
)
@attr.s
class ReportMenu(BaseEntity):
"""
This is a fake class mainly needed for navmazing navigation.
"""
group = None
def go_to_group(self, group_name):
self.group = group_name
view = navigate_to(self, "EditReportMenus")
assert view.is_displayed
return view
def get_folders(self, group):
"""Returns list of folders for given user group.
Args:
group: User group to check.
"""
view = self.go_to_group(group)
view.reports_tree.click_path("Top Level")
fields = view.manager.fields
view.discard_button.click()
return fields
def get_subfolders(self, group, folder):
"""Returns list of sub-folders for given user group and folder.
Args:
group: User group to check.
folder: Folder to read.
"""
view = self.go_to_group(group)
view.reports_tree.click_path("Top Level", folder)
fields = view.manager.fields
view.discard_button.click()
return fields
def add_folder(self, group, folder):
"""Adds a folder under top-level.
Args:
group: User group.
folder: Name of the new folder.
"""
with self.manage_folder() as top_level:
top_level.add(folder)
def add_subfolder(self, group, folder, subfolder):
"""Adds a subfolder under specified folder.
Args:
group: User group.
folder: Name of the folder.
subfolder: Name of the new subdfolder.
"""
with self.manage_folder(folder) as fldr:
fldr.add(subfolder)
def reset_to_default(self, group):
"""Clicks the `Default` button.
Args:
group: Group to set to Default
"""
view = self.go_to_group(group)
view.default_button.click()
view.save_button.click()
@contextmanager
def manage_subfolder(self, group, folder, subfolder):
"""Context manager to use when modifying the subfolder contents.
You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the
changes done inside the with block.
Args:
group: User group.
folder: Parent folder name.
subfolder: Subfolder name to manage.
Returns: Context-managed :py:class: `widgetastic_manageiq.MultiBoxSelect` instance
"""
view = self.go_to_group(group)
view.reports_tree.click_path("Top Level", folder, subfolder)
try:
yield view.report_select
except FolderManager._BailOut:
view.discard_button.click()
except Exception:
# In case of any exception, nothing will be saved
view.discard_button.click()
raise # And reraise the exception
else:
# If no exception happens, save!
view.commit_button.click()
view.save_button.click()
@contextmanager
def manage_folder(self, group, folder=None):
"""Context manager to use when modifying the folder contents.
You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the
changes done inside the with block. This context manager does not give the manager as a
value to the with block so you have to import and use the :py:class:`FolderManager` class
manually.
Args:
group: User group.
folder: Which folder to manage. If None, top-level will be managed.
Returns: Context-managed :py:class:`widgetastic_manageiq.FolderManager` instance
"""
view = self.go_to_group(group)
if folder is None:
view.reports_tree.click_path("Top Level")
else:
view.reports_tree.click_path("Top Level", folder)
try:
yield view.manager
except FolderManager._BailOut:
view.m | anager.discard()
except Exception:
# In case of any exception, nothing will be saved
view.manager.discard()
raise # And reraise the exception
else:
# If no exception happens, save!
view.manager.commit()
view.save_button.click()
@attr.s
class ReportMenusCollection(BaseCollection):
"""Collection object for the :py:class:'cfme.intelligence.reports.ReportMenu'."""
ENTITY = Rep | ortMenu
@navigator.register(ReportMenu)
class EditReportMenus(CFMENavigateStep):
VIEW = EditReportMenusView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self, *args, **kwargs):
self.view.edit_report_menus.tree.click_path(
"All EVM Groups",
self.obj.group
)
|
googleapis/python-compute | google/cloud/compute_v1/services/region_instance_group_managers/transports/base.py | Python | apache-2.0 | 13,869 | 0.001082 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class RegionInstanceGroupManagersTransport(abc.ABC):
"""Abstract transport class for RegionInstanceGroupManagers."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.abandon_instances: gapic_v1.method.wrap_method(
self.abandon_instances, default_timeout=None, client_info=client_info,
),
self.apply_updates_to_instances: gapic_v1.method.wrap_method(
self.apply_updates_to_instances,
default_timeout=None,
client_inf | o=client_info,
),
| self.create_instances: gapic_v1.method.wrap_method(
self.create_instances, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.delete_instances: gapic_v1.method.wrap_method(
self.delete_instances, default_timeout=None, client_info=client_info,
),
self.delete_per_instance_configs: gapic_v1.method.wrap_method(
self.delete_per_instance_configs,
default_timeout=None,
client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.list_errors: gapic_v1.method.wrap_method(
self.list_errors, default_timeout=None, client_info=client_info,
),
self.list_managed_instances: gapic_v1.method.wrap_method(
self.list_managed_instances,
default_timeout=None,
client_info=client_info,
),
self.list_per_instance_configs: gapic_v1.method.wrap_method(
self.list_per_instance_configs,
default_timeout=None,
client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch, default_timeout=None, client_info=client_info,
),
self.patch_per_instance_configs: gapic_v1.method.wrap_method(
self.patch_per_instance_configs,
default_timeout=None,
client_info=client_info,
),
self.recreate_instances: gapic_v1.method.wrap_method(
self.recreate_instances, default_timeout=None, client_info=client_info,
),
self.resize: gapic_v1.method.wrap_method(
self.resize, default_timeout=None, client_info=client_info,
),
self.set_instance_template: gapic_v1.method.wrap_method(
self.set_instance_template,
default_timeout=None,
client_info=client_info,
),
self.set_target_pools: gapic_v1.method.wrap_method(
self.set_target_pools, default_timeout=None, client_info=client_info,
),
self.update_per_instan |
hazelcast/hazelcast-python-client | hazelcast/protocol/codec/transactional_set_remove_codec.py | Python | apache-2.0 | 1,225 | 0.002449 | from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x100200
_REQUEST_MESSAGE_TYPE = 1049088
# hex: 0x100201
_RESPONSE_MESSAGE_TYPE = 1049089
_REQUEST_TXN_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_THREAD_ID_OFFSET = _REQUEST_TXN_ID_OFFSET + U | UID_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
def encode_request(name, txn_id, thread_id, item):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAG | E_TYPE)
FixSizedTypesCodec.encode_uuid(buf, _REQUEST_TXN_ID_OFFSET, txn_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, item, True)
return OutboundMessage(buf, False)
def decode_response(msg):
initial_frame = msg.next_frame()
return FixSizedTypesCodec.decode_boolean(initial_frame.buf, _RESPONSE_RESPONSE_OFFSET)
|
glenjarvis/decorator_training | src/answer01.py | Python | bsd-3-clause | 668 | 0.002994 | #!/usr/bin/env python
# Use the sample code in example_01.py. Create three functions named
# func1, func2, and func3.
#
# Make func1 print:
# "Hello World"
#
# Make func2 print:
# "It's nice to meet you"
#
# Make func3 print:
# "Howdeeeee"
# Put your co | de here:
# Now, make a new function called `using_functions`.
# Make it take three arguments (name the arguments as you see fit)
# Then, execute each of the arguments that you received.
# For example, if I used arguments 'a', 'b', 'c' (do | n't use those in
# your answer), my code would look like this:
#
# def using_functions(a, b, c):
# a()
#
# You are left with the exercise of calling all three functions.
|
rouault/mapnik | tests/python_tests/layer_test.py | Python | lgpl-2.1 | 745 | 0.024161 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import eq_
from utilities import run_all
import mapnik
# Map initialization
def test_layer_init():
l = mapnik.Layer('test')
eq_(l.name,'test')
eq_(l.srs,'+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
eq_( | l.envelope(),mapnik.Box2d())
eq_(l.clear_label_cache,False)
eq_(l.cache_features,False)
eq_(l.visible(1),True)
eq_(l.active,True)
eq_(l.datasource,None)
eq_(l.queryable,False)
eq_(l.minzoom,0.0)
eq_(l.maxzoom > 1e+6,True)
eq_(l.group_by,"")
eq_(l.maximum_extent,None)
eq_(l.buffer_size,None)
eq_(len(l.styles),0)
if __name__ == "__main__":
exit(run_all(eval(x) for x in dir() if x.startswith("tes | t_")))
|
lcpt/xc | verif/tests/loads/test_vector2d_point_load_global.py | Python | gpl-3.0 | 3,358 | 0.034277 | # -*- coding: utf-8 -*-
# Reference: Expresiones de la flecha el el Prontuario de
# Estructuras Metálicas del CEDEX. Apartado 3.3 Carga puntual sobre ménsula.
# ISBN: 84-7790-336-0
# url={https://books.google.ch/books?id=j88yAAAACAAJ},
'''vector2d_point_load_global verification test. Home made test.'''
import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
import math
__author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)"
__copyright__= "Copyright 2015, LCPT and AOO"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
E= 2e6 # Elastic modulus
L= 20 # Bar length.
h= 0.30 # Beam cross-section depth.
b= 0.2 # Beam cross-section width.
A= b*h # Cross section area.
I= b*h**3/12 # Inertia of the beam section in inches to the fourth power.
x= 0.5 # Relative abscissae where the punctual load is applied.
P= 1e3 # Transverse load.
n= 1e6 # Axial load.
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
# Problem type
modelSpace= predefined_spaces.StructuralMechanics2D(nodes)
vDisp= [0,0]
vReac1= [0,0]
vReac2= [0,0]
ptoAplic= geom.Pos2d(1+x*L*math.sqrt(2)/2,2+x*L*math.sqrt(2)/2) # Load application point.
nodes.defaultTag= 1 #First node number.
nod= nodes.newNodeXY(1,2)
nod= nodes.newNodeXY(1+L*math.sqrt(2)/2,2+L*math.sqrt(2)/2)
# Geometric transformation(s)
lin= modelSpace.newLinearCrdTransf("lin")
# Materials definition
scc= typical_materials.defElasticSection2d(preprocessor, "scc",A,E,I)
# Elements definition
elements= preprocessor.getElementHandler
elements.defaultTransformation= "lin"
elements.defaultMaterial= "scc"
elements.defaultTag= 1 #Tag for next element.
beam2d= elements.newElement("ElasticBeam2d",xc.ID([1,2]))
beam2d.h= h
# Constraints
constraints= preprocessor.getBoundaryCondHandler
modelSpace.fixNode000(1)
# Loads definition
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
#Load modulation.
ts= lPatterns.newTimeSeries("constant_ts","ts")
lPatterns.currentTimeSeries= "ts"
#Load case definition
lp0= lPatterns.newLoadPattern("default","0")
lPatterns.currentLoadPattern= "0"
mesh= feProblem.getDomain.getMesh
eIter= mesh.getElementIter
elem= eIter.next()
while not(elem is None):
crdTransf= elem.getCoordTransf
vIElem= crdTransf.getIVector
vJElem= crdTransf.getJVector
vCarga= n*vIElem-P*vJElem
elem.vector2dPointLoadGlobal(xc.Vector([ptoAplic.x,ptoAplic.y]),vCarga)
elem= eIter.next()
#We add the load case to domain.
lPatterns.addToDomain("0")
# Solution
analisis= predefined_solutions.simple_static_linear(feProblem)
result= | analisis.analyze(1)
nod2= nodes.getNode(2)
vDisp= nod2.getDisp
a= x*L
delta0= vDisp.dot(vIElem)
delta0Teor= (n*a/E/A)
ratio0= ((delta0-delta0Teor)/delta0Teor)
delta1= vDisp.dot(vJElem)
delta1Teor= (-P*a**2*(3*L-a)/6/E/I)
ratio1= ((delta1-delta1Teor)/delta1Teor)
# print "delta0= ",delta0
# print "delta0Teor= ",delta0Teor
# print "ratio0= ",ratio0
# print "delta1= ",delta1
# print "delta1Teor= ",delta1Teor
# print "ratio | 1= ",ratio1
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio0)<1e-10) & (abs(ratio1)<1e-11):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
|
RevansChen/online-judge | Codewars/7kyu/disemvowel-trolls/Python/test.py | Python | mit | 131 | 0.007634 | # Python - | 3.6.0
test.asse | rt_equals(
disemvowel('This website is for losers LOL!'),
'Ths wbst s fr lsrs LL!'
)
|
rafasis1986/EngineeringMidLevel | migrations/versions_/c05ed437b768_.py | Python | mit | 830 | 0.010843 | """empty message
Revision ID: c05ed437b768
Revises: 006a83e83b1a
Create Date: 2016-10-14 09:56:26.984816
"""
# revision identifiers, used by Alembic.
revision = 'c05ed437b768'
down_revision = '006a83e83b1a'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('requests_client_id_fkey', ' | requests', type_='foreignkey')
op.create_foreign_key(None, 'requests', 'users', ['client_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, ' | requests', type_='foreignkey')
op.create_foreign_key('requests_client_id_fkey', 'requests', 'clients', ['client_id'], ['id'])
### end Alembic commands ###
|
mbiokyle29/pipelines | seq/tasks.py | Python | mit | 7,972 | 0.01869 | from ruffus import *
from seq_pipe import utils
@collate(input_files, formatter("([^/]+)_[12].fastq$"), ["{path[0]}/{1[0]}_1.fastq", "{path[0]}/{1[0]}_2.fastq"])
def collate_files(input_files, output_files):
log.info("Collating paired fastq files: \n\t{} \n\t{}\n".format(input_files[0], input_files[1]))
@transform(collate_files, formatter("([^/]+)_[12].fastq$"), options.output+"{1[0]}.assembled.fastq", extras)
def pear_fastq_files(input_files, output_file, extras):
log.info("Starting pear run on %s and %s", input_files[0], input_files[1])
output_file = re.sub(r"\.assembled\.fastq", "", output_file)
args = ["pear", "-f", input_files[0], "-r", input_files[1], "-o", output_file]
utils.run_cmd("pear")
@transform(input_files, suffix(".fastq"), ".fastq", extras)
def upload_to_one_codex(input_file, output_file, extras):
args = ["onecodex", "upload", input_file]
log.info("uploading %s to One Codex", input_file)
utils.run_cmd(args, "One Codex")
# only bowtie
# rsem does this for you
@transform(rename_accepted_hits, suffix(".bam"),".sorted.bam", extras)
def sort_bam(input_file, output_file, extras):
log.info("Sorting %s ", input_file)
# hacky
output_file = re.sub(r"\.bam", "", output_file)
args = ["samtools-rs", "rocksort", "-@", "8", "-m", "16G", input_file, output_file]):
utils.run_cmd(args, "samtools rocksort")
# careful
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
@transform(sort_bam, suffix(".sorted.bam"), ".bed", options.output, extras)
def bam_to_bed(input_file, output_file, output, extras):
log.info("Converting %s to a bed file", input_file)
args = ["bamToBed", "-i", input_file ">", output_file]
utils.run_cmd(args, "bam_to_bed")
# now we can move sorted bam to output
file_name = os.path.basename(input_file)
new_name = os.path.join(output, file_name)
os.rename(input_file, new_name)
@transform(bam_to_bed, suffix(".bed"), ".bg", options.size, extras)
def bed_to_bg(input_file, output_file, size_file, extras):
log.info("Converting %s to a genome coverage file", input_file)
args = ["genomeCoverageBed", "-bg", "-split", "-i", input_file, "-g", size_file, ">", output_file]
utils.run_cmd(args, "bed_to_bg")
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
@transform(bed_to_bg, suffix(".bg"), ".bw", genome, options.output)
def bg_to_bw(input_file, output_file, genome, output):
log.info("Creating bigwig file from bg: %s", input_file)
command = "bedGraphToBigWig {} {} {}".format( input_file, size_file, output_filet)
if subprocess.call(command, shell=True):
log.warn("bg to bw conversion of %s failed, exiting", input_file)
extras.report_error("bg_to_bw","bg to bw conversion of {} failed".format(input_file))
raise SystemExit
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
@transform(input_files, suffix(".fastq"), ".sam", options, stats_file, extras)
def align_with_bowtie_two(input_file, output_file, options, stats_file, extras):
log.info("Running bowtie2 on %s", input_file)
# use poen explicitly to cpature STDERR, check still
args = ["bowtie2", "-t", "--no-unal", "-p", str(options.cores), "-x", options.index, input_file, "-S", output_file]
output = utils.run_cmd(args, "bowtie2", True)
# pass along to be saved
utils.bowtie_record_output(output, input_file, stats_file)
# call out to external bwtools here
@merge(bg_to_bw, os.path.join(options.output,"bigWigStats-"+time_stamp+".out"))
def bw_stats(input_files, output_file):
# we are going to call bwtool summary and bwtool distribution
# have to explicitly send stdout stuff like that
# what a program
summary = "bwtool summary 10000 -header -with-sum {} /dev/stdout"
dist = "bwtool distribution {} /dev/stdout"
for input_file in input_files:
log.info("Running bigwig stats on {}".format(input_file))
with open(output_file, "a+") as stats:
for command in [summary, dist]:
output = utils.run_cmd(command.format(os.path.abspath(input_file)).split(), command, True)
if command.startswith("bwtool summary"):
stats.write("#### bwtool summary for {}\n".format(input_file))
stats.write(output)
stats.write("####\n")
# filter zeros out
else:
output = output.rstrip()
output_clean = [line for line in output.split("\n") if line.split('\t')[1] != '0']
stats.write("#### bwtool distribution for {}\n".format(input_file))
stats.write("depth\tcount\n")
stats.write("\n".join(output_clean))
stats.write("\n####\n")
stats.write("\n\n")
@transform(input_files, formatter(), options.output+"{basename[0]}.genes.results", options, extras)
def rsem_align(input_file, output_file, options, extras):
mean_len = extras.get_mean_length(input_file)
output_file = output_file.replace(".genes.results", "")
log.info("Running rsem calc exp on %s", output_file)
command = ["rsem-calculate-expression", "-p", str(options.cores), "--calc-ci", input_file, options.index, output_file]
run_cmd(command)
@transform(rsem_align, suffix(".genes.results"), ".pdf", options)
def rsem_plot_model(input_file, output_file, options):
sample_name = input_file.replace("\.genes\.results", "")
command = ["rsem-plot-model", sample_name, output_file]
run_cmd(command)
@merge(rsem_align, "gene_exp.mtx", extras)
def rsem_generate_exp_matrix(input_files, matrix, extras):
sample_list = extras.gen_sample_list()
command = ["rsem-generate-data-matrix", sample_list, ">", matrix]
run_cmd(command)
@transform(generate_exp_matrix, suffix(".mtx"), ".diff", extras)
def rsem_run_ebseq(input_file, output_file, extras):
cond_str = extras.gen_cond_str()
command = ["rsem-run-ebseq", input_file, cond_str, output_file]
run_cmd(command)
@transform(run_ebseq, suffix(".diff"), ".sigdiff", options)
def rsem_fdr_correct(input_file, output_file, options):
command = ["rsem-control-fdr", input_file, str(options.fdr), output_file]
run_cmd(command)
@transform(write_excel_sheet, suffix(".xlsx"), ".email", options, input_files, extras)
def report_success(input_file, output_file, options, inputfiles, extras):
log.info("Sending email report")
# Create a text/plain message
email_body = []
email_body.append("Differential expression pipeline results:\n")
email_body.append("The following fastq files were used:")
for file in input_files:
email_body.append("- {}".format(file))
email_body.append("\nThe results (xlsx spreadsheet) and pipeline log are attatched")
email_body.append("Please direct any questions to kgmcchesney@wisc.edu")
# msg object
msg = MIMEMultipart()
# header stuff
# no one els | e cares but me!
root = "root@alpha-helix.oncology.wisc.edu"
subject = "Tophat DE pipeline Success report: {}".format(time.strftime | ("%d/%m/%Y"))
msg['Subject'] = subject
msg['From'] = root
msg['To'] = COMMASPACE.join(options.emails)
msg.attach( MIMEText("\n".join(email_body)) )
# attatch the files
for file in [input_file, log.handlers[0].baseFilename]:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(file,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(file))
msg.attach(part)
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP('localhost')
s.sendmail(root, options.emails, msg.as_string())
s.quit()
|
FeitianSmartcardReader/pssi | pssi/plugins/sim/plugin.py | Python | gpl-3.0 | 1,017 | 0.000984 | # -*- coding: utf-8 -*-
# -- plugin.py
# Functions required by every plugin
# Copyright © 2010 Eric Bourry & Julien Flaissy
# This file is part of PSSI (Python Simple Smartcard Interpreter).
# PSSI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the | License, or
# (at your option) any later version.
# PSSI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You | should have received a copy of the GNU General Public License
# along with PSSI. If not, see <http://www.gnu.org/licenses/>
import interpreters
import structures
def getClassByte():
return 0xA0
def getRootStructure():
return structures.structSIM
def getInterpretersTable():
return interpreters.interpretingFunctions
|
dariox2/CADL | test/testyida6b.py | Python | apache-2.0 | 4,901 | 0.007958 |
#
# test shuffle_batch - 6b
#
# generates a pair of files (color+bn)
# pending: make the tuple match
#
print("Loading tensorflow...")
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from libs import utils
import datetime
tf.set_random_seed(1)
def create_input_pipeline_yida(files1, files2, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=1, seed=None):
producer1 = tf.train.string_input_producer(
files1, capacity=len(files1), shuffle=False)
producer2 = tf.train.string_input_producer(
files2, capacity=len(files2), shuffle=False)
# We need something which can open the files and read its contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
# This will create another queue running which dequeues the previous queue.
keys1, vals1 = reader.read(producer1)
keys2, vals2 = reader.read(producer2)
# And then have to decode its contents as we know it is a jpeg image
imgs1 = tf.image.decode_jpeg(vals1, channels=3)
imgs2 = tf.image.decode_jpeg(vals2, channels=3)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs1.set_shape(shape)
imgs2.set_shape(shape)
# Next we'll centrally crop the image to the size of 100x100.
# This operation required explicit knowledge of the image's shape.
if shape[0] > shape[1]:
rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
int(crop_shape[1] / crop_factor)]
else:
rsz_shape = [int(crop_shape[0] / crop_factor),
int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
rszs1 = tf.image.resize_images(imgs1, rsz_shape[0], rsz_shape[1])
rszs2 = tf.image.resize_images(imgs2, rsz_shape[0], rsz_shape[1])
crops1 = (tf.image.resize_image_with_crop_or_pad(
rszs1, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs1)
crops2 = (tf.image.resize_image_with_crop_or_pad(
rszs2, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs2)
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when i | t randomly
# permutes the order.
min_after_dequeue = len(files1) // 5
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# | min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops1, crops2],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads,
#seed=seed,
)#shapes=(64,64,3))
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
def CELEByida(path):
fs = [os.path.join(path, f)
for f in os.listdir(path) if f.endswith('.jpg')]
fs=sorted(fs)
return fs
print("Loading celebrities...")
from libs.datasets import CELEB
files1 = CELEByida("../session-1/img_align_celeba/") # only 100
files2 = CELEByida("../session-1/img_align_celeba_n/") # only 100
from libs.dataset_utils import create_input_pipeline
batch_size = 8
n_epochs = 3
input_shape = [218, 178, 3]
crop_shape = [64, 64, 3]
crop_factor = 0.8
seed=15
batch1 = create_input_pipeline_yida(
files1=files1, files2=files2,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape,
seed=seed)
mntg=[]
sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
batres = sess.run(batch1)
batch_xs1=np.array(batres[0])
batch_xs2=np.array(batres[1])
for i in range(0,len(batch_xs1)):
img=batch_xs1[i] / 255.0
mntg.append(img)
img=batch_xs2[i] / 255.0
mntg.append(img)
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
m=utils.montage(mntg, saveto="montage_"+TID+".png")
# mntg[0]=color
# mntg[1]=b/n
plt.figure(figsize=(5, 5))
plt.imshow(m)
plt.show()
# eop
|
pantsbuild/pants | src/python/pants/option/options.py | Python | apache-2.0 | 18,439 | 0.002766 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
from typing import Iterable, Mapping, Sequence
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import warn_or_error
from pants.option.arg_splitter import ArgSplitter
from pants.option.config import Config
from pants.option.errors import ConfigValidationError
from pants.option.option_util import is_list_option
from pants.option.option_value_container import OptionValueContainer, OptionValueContainerBuilder
from pants.option.parser import Parser
from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION, ScopeInfo
from pants.util.memo import memoized_method
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
logger = logging.getLogger(__name__)
class Options:
"""The outward-facing API for interacting with options.
Supports option registration and fetching option values.
Examples:
The value in global scope of option '--foo-bar' (registered in global scope) will be selected
in the following order:
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_GLOBAL_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [GLOBAL] section of pants.toml.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in global scope) will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the PANTS_GLOBAL_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.toml.
- The value of the foo_bar key in the [compile] section of pants.toml.
- The value of the foo_bar key in the [GLOBAL] section of pants.toml.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in scope 'compile') will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.toml.
- The value of the foo_bar key in the [compile] section of pants.toml.
- The value of the foo_bar key in the [GLOBAL] section of pants.toml
(because of automatic config file fallback to that section).
- The hard-coded value provided at registration time.
- None.
"""
class DuplicateScopeError(Exception):
"""More than one registration occurred for the same scope."""
class AmbiguousPassthroughError(Exception):
"""More than one goal was passed along with passthrough args."""
@classmethod
def complete_scopes(cls, scope_infos: Iterable[ScopeInfo]) -> FrozenOrderedSet[ScopeInfo]:
"""Expand a set of scopes to include scopes they deprecate.
Also validates that scopes do not collide.
"""
ret: OrderedSet[ScopeInfo] = OrderedSet()
original_scopes: dict[str, ScopeInfo] = {}
for si in sorted(scope_infos, key=lambda _si: _si.scope):
if si.scope in original_scopes:
raise cls.DuplicateScopeError(
f"Scope `{si.scope}` claimed by {si}, was also claimed "
f"by {original_scopes[si.scope]}."
)
original_scopes[si.scope] = si
ret.add(si)
if si.deprecated_scope:
ret.add(dataclasses.replace(si, scope=si.deprecated_scope))
original_scopes[si.deprecated_scope] = si
return FrozenOrderedSet(ret)
@classmethod
def create(
cls,
env: Mapping[str, str],
config: Config,
known_scope_infos: Iterable[ScopeInfo],
args: Sequence[str],
bootstrap_option_values: OptionValueContainer | None = None,
allow_unknown_options: bool = False,
) -> Options:
"""Create an Options instance.
:param env: a dict of environment variables.
:param config: data from a config fil | e.
| :param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
:param allow_unknown_options: Whether to ignore or error on unknown cmd-line flags.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos, get_buildroot())
split_args = splitter.split_args(args)
if split_args.passthru and len(split_args.goals) > 1:
raise cls.AmbiguousPassthroughError(
f"Specifying multiple goals (in this case: {split_args.goals}) "
"along with passthrough args (args after `--`) is ambiguous.\n"
"Try either specifying only a single goal, or passing the passthrough args "
"directly to the relevant consumer via its associated flags."
)
if bootstrap_option_values:
spec_files = bootstrap_option_values.spec_files
if spec_files:
for spec_file in spec_files:
with open(spec_file) as f:
split_args.specs.extend(
[line for line in [line.strip() for line in f] if line]
)
parser_by_scope = {si.scope: Parser(env, config, si) for si in complete_known_scope_infos}
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(
builtin_goal=split_args.builtin_goal,
goals=split_args.goals,
unknown_goals=split_args.unknown_goals,
scope_to_flags=split_args.scope_to_flags,
specs=split_args.specs,
passthru=split_args.passthru,
parser_by_scope=parser_by_scope,
bootstrap_option_values=bootstrap_option_values,
known_scope_to_info=known_scope_to_info,
allow_unknown_options=allow_unknown_options,
)
def __init__(
self,
builtin_goal: str | None,
goals: list[str],
unknown_goals: list[str],
scope_to_flags: dict[str, list[str]],
specs: list[str],
passthru: list[str],
parser_by_scope: dict[str, Parser],
bootstrap_option_values: OptionValueContainer | None,
known_scope_to_info: dict[str, ScopeInfo],
allow_unknown_options: bool = False,
) -> None:
"""The low-level constructor for an Options instance.
Dependees should use `Options.create` instead.
"""
self._builtin_goal = builtin_goal
self._goals = goals
self._unknown_goals = unknown_goals
self._scope_to_flags = scope_to_flags
self._specs = specs
self._passthru = passthru
self._parser_by_scope = parser_by_scope
self._bootstrap_option_values = bootstrap_option_values
self._known_scope_to_info = known_scope_to_info
self._allow_unknown_options = allow_unknown_options
@proper |
delitamakanda/socialite | app/email.py | Python | mit | 678 | 0.007375 | from threading import Thread
from flask_mail import Message
from flask import render_template, current_app
from . import mail
from .decorators import async
@async
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, t | emplate, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=app.config['MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(t | emplate + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
bencord0/cloudmeta | manage.py | Python | agpl-3.0 | 252 | 0 | #!/usr/bin/env python
import os
import sys
if __name_ | _ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudmeta.settings")
from django.core.management import execute_from_command | _line
execute_from_command_line(sys.argv)
|
feroda/lessons-python4beginners | students/2016-09-04/federicofioriti/Epeople.py | Python | agpl-3.0 | 987 | 0.006079 | def main():
PEOPLE = insert_people()
sum_salary_all(PEOPLE)
list_people_by_city(PEOPLE)
def insert_people():
PEOPLE = []
while True:
NAMES = {}
NAMES["name"] = name = raw_input("Inserisci nome ")
NAMES["city"] = city = raw_input("Inseriscci citta ")
NAMES["salary"] = salary = int(raw_input("Inseriscci salario "))
PEOPLE.append(NAMES)
# print ("Name {name}, City {city}, Salary {salary} Annual {annual}".f | ormat(**NAMES))
while True:
a = raw_input("Vuoi continuare [Y/n]? ").upper()
if a in ["Y", "N"]:
break
if a == "N":
break
return PEOPLE
def sum_salary_all(list_people):
for p in list_people:
| sum_salary_single(p)
def sum_salary_single(list_people):
list_people['annual'] = list_people['salary'] * 13
def list_people_by_city(list_people):
list_city = list_people.sort()
if __name__ == '__main__':
main()
|
dhermes/gcloud-python | oslogin/google/cloud/oslogin_v1/proto/oslogin_pb2_grpc.py | Python | apache-2.0 | 7,621 | 0.003543 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.oslogin_v1.proto import (
common_pb2 as google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2,
)
from google.cloud.oslogin_v1.proto import (
oslogin_pb2 as google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class OsLoginServiceStub(object):
"""Cloud OS Login API
The Cloud OS Login API allows you to manage users and their associated SSH
public keys for logging into virtual machines on Google Cloud Platform.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.DeletePosixAccount = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/DeletePosixAccount",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeletePosixAccountRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/DeleteSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeleteSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetLoginProfile = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/GetLoginProfile",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.LoginProfile.FromString,
)
self.GetSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/GetSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString,
)
self.ImportSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/ImportSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyResponse.FromString,
)
self.UpdateSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/UpdateSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.UpdateSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString,
)
class OsLoginServiceServicer(object):
"""Cloud OS Login API
The Cloud OS Login API allows you to manage users and their associated SSH
public keys for logging into virtual machines on Google Cloud Platform.
"""
def DeletePosixAccount(self, request, context):
"""Deletes a POSIX account.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteSshPublicKey(self, request, context):
"""Deletes an SSH public key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetLoginProfile(self, request, context):
"""Retrieves the profile information used for logging in to a virtual machine
on Google Compute Engine.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetSshPublicKey(self, request, context):
"""Retrieves an SSH public key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ImportSshPublicKey(self, request, context):
"""Adds an SSH public key and returns the profile information. Default POSIX
account information is set when no username and UID exist as part of the
login profile.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateSshPublicKey(self, request, context):
"""Updates an SSH public key and returns the profile information. This method
supports patch semantics.
"""
context.set_code(grpc.StatusCod | e.UNIMPLEMENTED)
| context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_OsLoginServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"DeletePosixAccount": grpc.unary_unary_rpc_method_handler(
servicer.DeletePosixAccount,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeletePosixAccountRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"DeleteSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.DeleteSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeleteSshPublicKeyRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetLoginProfile": grpc.unary_unary_rpc_method_handler(
servicer.GetLoginProfile,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.LoginProfile.SerializeToString,
),
"GetSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.GetSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetSshPublicKeyRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.SerializeToString,
),
"ImportSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.ImportSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyResponse.SerializeToString,
),
"UpdateSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.UpdateSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.UpdateSshPublicKeyRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.oslogin.v1.OsLoginService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
|
caihaibin/Blog | externals/pygments/lexers/text.py | Python | mit | 54,336 | 0.001436 | # -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: 2006-2008 by Armin Ronacher, Georg Brandl,
Tim Hatch <tim@timhatch.com>,
Ronny Pfannschmidt,
Dennis Kaarsemaker,
Kumar Appaiah <akumar@ee.iitm.ac.in>,
Varun Hiremath <varunhiremath@gmail.com>,
Jeremy Thurgood,
Max Battcher <me@worldmaker.net>,
Kirill Simonov <xi@resolvent.net>.
:license: BSD, see LICENSE for more details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg']
filenames = ['*.ini', '*.cfg', '*.properties']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*?$', Comment),
(r'\[.*?\]$', Keyword),
(r'(.*?)(\s*)(=)(\s*)(.*?)$',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
*New in Pygments 0.7.*
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list']
filenames = ['sources.list']
mimetype = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
| bygroups(Keyword, Te | xt), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.split('\n'):
line = line.strip()
if not (line.startswith('#') or line.startswith('deb ') or
line.startswith('deb-src ') or not line):
return False
return True
class MakefileLexer(Lexer):
"""
Lexer for BSD and GNU make extensions (lenient enough to handle both in
the same file even).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
ins = []
lines = text.splitlines(True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
*New in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
(r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
bygroups(Keyword, Text), 'export'),
(r'export\s+', Keyword),
# assignment
(r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
# strings
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
#TODO: add paren handling (grr)
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Text),
],
'block-header': [
(r'[^,\\\n#]+', Number),
(r',', Punctuation),
(r'#.*?\n', Comment),
(r'\\\n', Text), # line continuation
(r'\\.', Text),
(r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
],
}
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'(Index|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace']
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
*New in Pygments 0.10.*
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'{', Operator),
(r'}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])', bygroups(Operator, Keyword, Name, Text,
Name, Operator, Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)', bygroups(Operator, Keyword, Name, Text,
Name, Operator, Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Conte |
HorvathLab/NGS | attic/readCounts/src/optparse_gui/__init__.py | Python | mit | 12,249 | 0.032574 | '''
A drop-in replacement for optparse ( "import optparse_gui as optparse" )
Provides an identical interface to optparse(.OptionParser),
But displays an automatically generated wx dialog in order to enter the
options/args, instead of parsing command line arguments
'''
import sys, os, os.path, fnmatch, types, time
import re, copy, StringIO, csv, glob
import math, optparse
from optparse import OptionGroup
from datetime import timedelta
__version__ = 0.1
__revision__ = '$Id: $'
def check_multichoice(option, opt, value):
if not value:
return value
for v in value.split(','):
if v not in option.multichoices:
choices = ", ".join(map(repr, option.multichoices))
raise optparse.OptionValueError(
"option %s: invalid choice: %r (choose one or more from %s)"
% (opt, value, choices))
return value
def check_file(option, opt, value):
value = value.strip('"')
if not value:
return value
value = os.path.expanduser(value)
value = os.path.expandvars(value)
value1 = glob.glob(value)
# value1 += glob.glob(value+'.gz')
# value1 += glob.glob(value+'.bz2')
if len(value1) > 1:
raise optparse.OptionValueError(
"option %s: Too many files selected: %s" % (opt, value))
if len(value1) == 0:
raise optparse.OptionValueError(
"option %s: File does not exist: %s" % (opt, value))
value = value1[0]
if option.filetypes:
match = False
for name,globlst in option.filetypes:
for gl in globlst.split(';'):
for cmp in ('','.gz','.bz2'):
if fnmatch.fnmatch(os.path.split(value)[1],gl+cmp):
match = True
break
if match:
break
if match:
break
if not match:
raise optparse.OptionValueError(
"option %s: File %s does not match required filetypes: %s" % (opt, value, ', '.join([ "%s (%s)"%(nm,ft) for nm,ft in option.filetypes])))
return value
def check_files(option, opt, ssv):
s = StringIO.StringIO(ssv)
rd = csv.reader(s,delimiter=' ',quotechar='"')
try:
files = iter(rd).next()
except StopIteration:
files = []
s.close()
files1 = []
for value in files:
value = os.path.expanduser(value)
value = os.path.expandvars(value)
gv = glob.glob(value)
# gv += glob.glob(value+'.gz')
# gv += glob.glob(value+'.bz2')
if len(gv) == 0 and '*' not in value and '?' not in value:
raise optparse.OptionValueError(
"option %s: File does not exist: %s" % (opt, value))
files1.extend(gv)
if len(files1) == 0 and ssv.strip():
raise optparse.OptionValueError(
"option %s: No files match pattern(s): %s" % (opt, ssv))
for value in files1:
if not os.path.isfile(value):
raise optparse.OptionValueError(
"option %s: File does not exist: %s" % (opt, value))
if option.filetypes:
match = False
for name,glb in option.filetypes:
for glbi in glb.split(';'):
for cmp in ('','.gz','.bz2'):
if fnmatch.fnmatch(os.path.split(value)[1],glbi+cmp):
match = True
break
if match:
break
if match:
break
if not match:
raise optparse.OptionValueError(
"option %s: File %s does not match required filetypes: %s" % (opt, value, ', '.join([ "%s (%s)"%(nm,ft) for nm,ft in option.filetypes])))
return files1
def check_savefile(option, opt, value):
value = value.strip('"')
if not option.notNone and not value:
return value
if os.path.exists(value) and not os.path.isfile(value):
raise optparse.OptionValueError(
"option %s: Can't overwrite path: %s" % (opt, value))
if option.filetypes:
match = False
for name,glb in option.filetypes:
for glbi in glb.split(';'):
if fnmatch.fnmatch(os.path.split(value)[1],glbi):
match = True
break
if match:
break
| if not match:
raise optparse.OptionValueError(
"option %s: File %s does not match required filetypes: %s" % (opt, value, ', '.join([ "%s (%s)"%(nm,ft) for nm,ft in option.filetypes])))
return value
def check_savedir(option, opt, value):
value = value.strip('"')
if not option.notNone and not value:
re | turn value
if os.path.exists(value) and not os.path.isdir(value):
raise optparse.OptionValueError(
"option %s: Can't remove path %s" % (opt, value))
return value
def check_dir(option, opt, value):
value = value.strip('"')
if not option.notNone and not value:
return value
if not os.path.exists(value):
raise optparse.OptionValueError(
"option %s: Does not exist %s" % (opt, value))
if not os.path.isdir(value):
raise optparse.OptionValueError(
"option %s: Not a directory %s" % (opt, value))
return value
class Option(optparse.Option):
ATTRS = optparse.Option.ATTRS + ['notNone','filetypes','name','text','multichoices','remember']
TYPES = optparse.Option.TYPES + ("password","file","savefile", "dir", "savedir", "files","multichoice")
TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER["file"] = check_file
TYPE_CHECKER["files"] = check_files
TYPE_CHECKER["savefile"] = check_savefile
TYPE_CHECKER["savedir"] = check_savedir
TYPE_CHECKER["dir"] = check_dir
TYPE_CHECKER["multichoice"] = check_multichoice
class OptionParser( optparse.OptionParser ):
def __init__(self, *args, **kwargs ):
kwargs['option_class'] = Option
if 'dotfilename' in kwargs:
self.dotfilename = kwargs['dotfilename']
del kwargs['dotfilename']
optparse.OptionParser.__init__( self, *args, **kwargs )
def check_values (self, values, args):
for option in self.option_list:
if (isinstance(option, Option) and
option.notNone and
(getattr(values,option.dest) == "" or
getattr(values,option.dest) == None)):
self.error("%s is empty" % option)
return (values, args)
def get_defaults(self):
values = {}
for (g,o) in self.iteropts():
if o.dest != None:
if o.default == optparse.NO_DEFAULT or \
o.default == None:
values[o.dest] = ''
else:
values[o.dest] = o.default
values['-args-'] = ''
return values
def iteropts(self):
for o in self.option_list:
yield (None,o)
for og in self.option_groups:
for o in og.option_list:
yield (og,o)
def grpopts(self):
from collections import defaultdict
d = defaultdict(list)
for (g,o) in self.iteropts():
d[g].append(o)
return d
class UserCancelledError( Exception ):
pass
class Progress(object):
def __init__(self,quiet=0):
self._quiet = 0
self.quiet(quiet)
def quiet(self,q):
oldq = self._quiet
if isinstance(q,bool):
self._quiet = 2*q;
else:
assert isinstance(q,int)
self._quiet = q
return oldq
def message(self,message):
if self._quiet >= 2:
return
self.initbar(message,nl=True)
def stage(self,message,max=None,min=None,elapsed=True):
self.elapsed = elapsed
self.max = None
self.min = 0
if max != None:
self.max = float(max)
if min != None:
self.min = float(min)
self.value = 0
if self._quiet >= 2:
return
self.start = time.time()
if self.max:
self.initprogressbar(message)
else:
self.initbar(message)
def update(self,increment=1,newvalue=None):
if self._quiet >= 1:
return
if self.max != None:
if newvalue != None:
self.value = newvalue
else:
self.value += increment
self.updateprogressbar(math.floor(1000*(self.value-self.min)/(self.max-self.min)))
else:
self.updatebar()
def done(self):
if self._quiet > |
wzyy2/RTTdev | bsp/simulator/rtconfig.py | Python | gpl-2.0 | 2,797 | 0.007151 | import os
# toolchains options
ARCH='sim'
#CROSS_TOOL='msvc' or 'gcc' or 'mingw'
#'msvc' and 'mingw' are both for windows
# 'gcc' is for linux
CROSS_TOOL='mingw'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path
if CROSS_TOOL == 'gcc' or CROSS_TOOL == 'clang-analyze':
CPU = 'posix'
PLATFORM = 'gcc'
EXEC_PATH = ''
elif CROSS_TOOL == 'mingw':
CPU = 'win32'
PLATFORM = 'mingw'
EXEC_PATH = r'D:\Program Files (x86)\CodeBlocks\MinGW\bin'
elif CROSS_TOOL == 'msvc':
CPU = 'win32'
PLATFORM = 'cl'
EXEC_PATH = ''
else:
print "bad CROSS TOOL!"
exit(1)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
#BUILD = ''
if PLATFORM == 'gcc':
# toolchains
PREFIX = ''
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -ffunction-sections -fdata-sections'
DEVICE = ' '
CFLAGS = DEVICE + ' -I/usr/include -w -D_REENTRANT'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
#LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-linux.map -lpthread'
LFLAGS = DEVICE + ' -Wl,-Map=rtthread-linux.map -pthread -T gcc.ld'
CPATH = ''
| LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -g -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = ''
elif PLATFORM == 'mingw':
# toolchains
PREFIX = ''
CC = PREFIX + 'gcc'
AS = | PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'exe'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -ffunction-sections -fdata-sections'
DEVICE = ' '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
DEFFILE_LFLAGS = DEVICE + ' -Wl,-Map=rtthread-win32.map,--output-def,rtthread.def -T mingw.ld '
LFLAGS = DEVICE + ' -Wl,-Map=rtthread-win32.map -T mingw.ld '
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -g -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = ''
elif PLATFORM == 'cl':
# toolchains
PREFIX = ''
TARGET_EXT = 'exe'
AS = PREFIX + 'cl'
CC = PREFIX + 'cl'
AR = PREFIX + 'cl'
LINK = PREFIX + 'cl'
AFLAGS = ''
CFLAGS = ''
LFLAGS = ''
if BUILD == 'debug':
CFLAGS += ' /MTd'
LFLAGS += ' /DEBUG'
else:
CFLAGS += ' /MT'
LFLAGS += ''
CFLAGS += ' /ZI /Od /W 3 /WL '
LFLAGS += ' /SUBSYSTEM:CONSOLE /MACHINE:X86 '
CPATH = ''
LPATH = ''
POST_ACTION = ''
|
tyler274/Recruitment-App | recruit_app/recruit/search.py | Python | bsd-3-clause | 196 | 0.005102 | import | flask_whooshalchemy as whooshalchemy
from models import HrApplication, HrApplicationComment
def register_search_models(app):
pass
# whooshalchemy.whoosh_index(app, HrApplication)
| |
johnbelamaric/themis | vendor/github.com/apache/thrift/test/py.tornado/test_suite.py | Python | apache-2.0 | 6,403 | 0.001718 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import glob
import os
import sys
import time
import unittest
basepath = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, basepath + '/gen-py.tornado')
sys.path.insert(0, glob.glob(os.path.join(basepath, '../../lib/py/build/lib*'))[0])
try:
__import__('tornado')
except ImportError:
print("module `tornado` not found, skipping test")
sys.exit(0)
from tornado import gen
from tornado.testing import AsyncTestCase, get_unused_port, gen_test
from thrift import TTornado
from thrift.Thrift import TApplicationException
from thrift.protocol import TBinaryProtocol
from ThriftTest import ThriftTest
from ThriftTest.ttypes import Xception, Xtruct
class TestHandler(object):
def __init__(self, test_instance):
self.test_instance = test_instance
def testVoid(self):
pass
def testString(self, s):
if s == 'unexpected_error':
raise Exception(s)
return s
def testByte(self, b):
return b
def testI16(self, i16):
return i16
def testI32(self, i32):
return i32
def testI64(self, i64):
return i64
def testDouble(self, dub):
return dub
def testBinary(self, thing):
return thing
def testStruct(self, thing):
return thing
def testException(self, s):
if s == 'Xception':
x = Xception()
x.errorCode = 1001
x.message = s
raise x
elif s == 'throw_undeclared':
raise ValueError('testing undeclared exception')
def testOneway(self, seconds):
start = time.time()
def fire_oneway():
end = time.time()
self.test_instance.stop((start, end, seconds))
self.test_instance.io_loop.add_timeout(
datetime.timedelta(seconds=seconds),
fire_oneway)
raise Exception('testing exception in oneway method')
def testNest(self, thing):
return thing
@gen.coroutine
def testMap(self, thing):
yield gen.moment
raise gen.Return(thing)
def testSet(self, thing):
return thing
def testList(self, thing):
return thing
def testEnum(self, thing):
return thing
def testTypedef(self, thing):
return thing
class ThriftTestCase(AsyncTestCase):
def setUp(self):
super(ThriftTestCase, self).setUp()
self.port = get_unused_port()
# server
self.handler = TestHandler(self)
self.processor = ThriftTest.Processor(self.handler)
self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
self.server = TTornado.TTornadoServer(self.processor, self.pfactory, io_loop=self.io_loop)
self.server.bind(self.port)
self.server.start(1)
# client
transport = TTornado.TTornadoStreamTransport('localhost', self.port, io_loop=self.io_loop)
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
self.io_loop.run_sync(transport.open)
self.client = ThriftTest.Client(transport, pfactory)
@gen_test
def test_void(self):
v = yield self.client.testVoid()
self.assertEqual(v, None)
@gen_test
def test_string(self):
v = yield self.client.testString('Python')
self.assertEqual(v, 'Python')
@gen_test
def test_byte(self):
v = yield self.client.testByte(63)
self.assertEqua | l(v, 63)
@gen_test
def test_i32(self):
v = yield self.client.testI32(-1)
self.assertEqual(v, -1)
v = yield self.client.testI32(0)
self.assertEqual(v, 0)
@gen_test
def test_i64(self):
v = yield self.client.testI64(-34359738368)
self.assertEqual(v, -34359738368)
@gen_test
def test_double(self):
v = yield self.client.testDouble(-5.235098235)
self.assertEqual(v, -5.235098235)
| @gen_test
def test_struct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = yield self.client.testStruct(x)
self.assertEqual(y.string_thing, "Zero")
self.assertEqual(y.byte_thing, 1)
self.assertEqual(y.i32_thing, -3)
self.assertEqual(y.i64_thing, -5)
@gen_test
def test_oneway(self):
self.client.testOneway(1)
v = yield self.client.testI32(-1)
self.assertEqual(v, -1)
@gen_test
def test_map(self):
"""
TestHandler.testMap is a coroutine, this test checks if gen.Return() from a coroutine works.
"""
expected = {1: 1}
res = yield self.client.testMap(expected)
self.assertEqual(res, expected)
@gen_test
def test_exception(self):
try:
yield self.client.testException('Xception')
except Xception as ex:
self.assertEqual(ex.errorCode, 1001)
self.assertEqual(ex.message, 'Xception')
else:
self.fail("should have gotten exception")
try:
yield self.client.testException('throw_undeclared')
except TApplicationException:
pass
else:
self.fail("should have gotten exception")
yield self.client.testException('Safe')
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(ThriftTestCase))
return suite
if __name__ == '__main__':
unittest.TestProgram(defaultTest='suite',
testRunner=unittest.TextTestRunner(verbosity=1))
|
baylee-d/cos.io | common/migrations/0047_auto_20161115_1743.py | Python | apache-2.0 | 1,717 | 0.00233 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 17:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0046_auto_20161115_1703'),
]
operations = [
migrations.AlterField(
model_name='custompage',
name='menu_order',
field=models.IntegerField(blank=True, default=1, help_text='The order this page should appear in the menu. The lower the number, the more left the page will appear. This is required for all pages where "Show in menus" is checked.'),
),
migrations.AlterField(
model_name='formpage',
name='menu_order',
field=models.IntegerField(blank=True, default=1, help_text='The order this page should appear in the menu. The lower the number, the more left the page will appear. This is required for all pages where "Show in menus" is checked.'),
),
migrations.AlterField(
model_name='newsindexpage',
name='menu_order',
field=models.IntegerField(blank=True, default=1, help_text='The order this page should appear in the menu. The lower the number, the | more left the page will appear. This is requ | ired for all pages where "Show in menus" is checked.'),
),
migrations.AlterField(
model_name='pagealias',
name='menu_order',
field=models.IntegerField(blank=True, default=1, help_text='The order this page should appear in the menu. The lower the number, the more left the page will appear. This is required for all pages where "Show in menus" is checked.'),
),
]
|
RIT-CS-Mentoring-Center-Queueing/mmcga_project | server/datagrams/user_stats.py | Python | mit | 2,044 | 0.003914 | ##
## File: user_stats.py
##
## Author: Schuyler Martin <sam8050@rit.edu>
##
## Description: Python class that defines a datagram for storing statistics
## on users
##
from datagrams.datagram import Datagram
class UserStats(Datagram):
'''
Class for storing statistics on a user
'''
def __init__(self, uid="", init_map=None):
'''
UserStat constructor, uses optional named parameters
:param: uid UID of user that these stats belong to
:param: init_map Dictionary that maps class attributes to values
This map, if it is passed in, will replace all attributes that
are seen in the dictionary. This is how we load an object from
JSON in the DB
'''
super().__init__(uid, init_map)
# number of questions a student has asked or a tutor has answered
self.q_count = 0
# number of times logged into the system
self.login_count = 0
# override attributes in the map
if (init_map | != None):
if ("q_count" in init_map):
self.q_count = init_map["q_count"]
if ("login_count" in init_map):
self.login_count = init_map["login_coun | t"]
def __str__(self):
'''
Converts to a string equivalent
'''
title = "User Stats for " + self.uid + "\n"
return title + super().__str__()
def stat_count(self, var_name):
'''
Returns the stats measure of a specific variable
:param: var_name Variable to fetch
:return: Current variable count
'''
return self.__dict__[var_name]
def stat_increment(self, var_name, value=1):
'''
Increments the stats measure of a specific variable
:param: var_name Variable to increment
:param: value Optional parameter, how much to increment by
:return: Current variable count
'''
self.__dict__[var_name] += value
return self.stat_count(var_name)
|
zemuvier/Python-courses | skype_bot2.py | Python | gpl-3.0 | 1,129 | 0.000886 | from skypebot import *
class Skype_Bot:
"""
This class handles communication with Skype via SkypeBot
"""
def __init__(self, plugins):
self.skype = Skypebot.Skype(Events=self)
self.skype.FriendlyName = "Skype Bot Levitan"
self.skype.Attach()
self.plugins = plugins
def AttachmentStatus(self, status):
if status == Skypebot.apiAttachAvailable:
self.skype.Attach()
def MessageStatus(self, msg, status):
print("INCOMING> %s" % msg.Body)
# msg.MarkAsSeen()
if status == Skypebot.cmsReceived:
for plugin in self.plugins:
r = plugin.plugin_process_request(msg)
if r['status']:
msg.Chat.SendMessage(r['message'])
def send(self, topic, message):
"""
Manual send to CONFERENCES to handle command line interfac | e
:param topic: topic of the conference (it's name)
:par | am message: thing to say
:return:
"""
for chat in self.skype.Chats:
if chat.Topic == topic:
chat.SendMessage(message)
|
hammerlab/immuno | immuno/mhc_common.py | Python | apache-2.0 | 4,443 | 0.006977 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
MHC_1_GENE_SET = set(["A", "B", "C", "E", "F", "G", "K", "L"])
MHC_2_GENE_SET = set(["DM", "DO", "DP", "DQ", "DR"])
def seq_to_str(obj):
"""
Given a sequence convert it to a comma separated string.
If, however, the argument is a single object, return its string
representation.
"""
if isinstance(obj, (unicode, str)):
return obj
elif isinstance(obj, (list, tuple)):
return ",".join([str(x) for x in obj])
else:
return str(obj)
def convert_str(obj):
"""
Given a string, convert it to an int or float if possible.
"""
if obj is None:
return obj
try:
try:
return int(obj)
except:
return float(obj)
except:
return str(obj)
def _parse_substring(hla, pred, max_len = None):
"""
Extract substring of letters for which predicate is True
"""
result = ""
pos = 0
if max_len is None:
max_len = len(hla)
else:
max_len = min(max_len, len(hla))
while pos < max_len and pred(hla[pos]):
result += hla[pos]
pos +=1
return result, hla[pos:]
def _parse_letters(hla, max_len = None):
return _parse_substring(hla, lambda c: c.isalpha(), max_len = max_len)
def _parse_numbers(hla, max_len = None):
return _parse_substring(hla, lambda c: c.isdigit(), max_len = max_len)
def _parse_not_numbers(hla, max_len = None):
return _parse_substring(hla, lambda c: not c.isdigit(), max_len = max_len)
def normalize_hla_allele_name(hla):
"""
HLA allele names can look like:
- HLA-A*03:02
- HLA-A02:03
- HLA-A:02:03
- HLA-A2
- A2
- A*03:02
- A02:02
- A:02:03
...should all be normalized to:
HLA-A*03:02
"""
original = hla
hla = hla.strip()
if hla.startswith("HLA-"):
hla = hla[4:]
# gene name is sequence of letters at start of HLA string
gene, hla = _parse_letters(hla) |
assert len(gene) > 0, "No HLA gene name given in %s" % original
assert len(hla) > 0, "Malformed HLA | type %s" % original
gene = gene.upper()
# skip initial separator
sep, hla = _parse_not_numbers(hla)
assert sep in ("", ":", "*"), \
"Malformed separator %s in HLA type %s" % (sep, original)
family, hla = _parse_numbers(hla, max_len = 2)
sep, hla = _parse_not_numbers(hla)
assert sep in ("", ":"), \
"Malformed separator %s in HLA type %s" % (sep, original)
allele, hla = _parse_numbers(hla)
assert len(hla) == 0, \
"Unexpected suffix %s in HLA type %s" % (hla, original)
if len(family) == 1:
family = "0" + family
if len(allele) == 0:
allele = "01"
elif len(allele) == 1:
allele = "0" + allele
return "HLA-%s*%s:%s" % (gene, family, allele )
def compact_hla_allele_name(hla):
long_name = normalize_hla_allele_name(hla)
# turn HLA-A*02:01 into A0201
return long_name[4:].replace("*", "").replace(":", "")
def mhc_class_from_normalized_allele_name(normalized_hla):
"""
Given a normalized HLA allele name, returns 1 or 2 (corresponding to the
MHC class).
Returns 1 for: HLA-A, HLA-B, HLA-C, HLA-E, HLA-F, HLA-G, HLA-K, HLA-L
Returns 2 for: HLA-DM, HLA-DO, HLA-DP, HLA-DQ, HLA-DR
"""
assert normalized_hla.startswith("HLA-") and all(
delim in normalized_hla for delim in set(["*", ":", "-"])), \
"Expected normalized HLA allele name, but received %s" % normalized_hla
gene_end_pos = normalized_hla.index("*")
gene = normalized_hla[4:gene_end_pos]
if gene in MHC_1_GENE_SET:
return 1
elif gene in MHC_2_GENE_SET:
return 2
raise ValueError(
"HLA gene %s is not a part of the MHC 1 or MHC 2 gene sets." % gene)
return 1
|
stvstnfrd/xblock-sdk | workbench/blocks.py | Python | apache-2.0 | 867 | 0.002307 | """An XBlock to use as a child when you don't care what child to show.
This code is in the Workbench layer.
"""
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from .util import make_safe_for_html
class DebuggingChildBlock(XBlock):
"""A simple gray box, to use as a child placeholder."""
def fallback_view(self, view_name, context=None): # pylint: disable=W0613
"""Provides a fallback view handler"""
frag = Fragment("<div class='debug_child'>%s<br>%s</div>" % (make_safe_for_html(repr(self)), view_name))
frag.add_css("""
.debug_child {
background-color: grey;
width: 300px;
| height: 100px;
margin: 10px;
padding: 5px 10px;
font-si | ze: 75%;
}
""")
return frag
|
lordzfc/wyborySam2014 | stats/migrations/0002_auto_20150919_1109.py | Python | mit | 419 | 0.002387 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('stats | ', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='vote',
name='election',
field=models.ForeignKey(blank=True, to='stats.Election', null=True), |
),
]
|
marcardioid/DailyProgrammer | solutions/232_Easy/solution.py | Python | mit | 427 | 0.004684 | d | ef is_palindrome(data):
if isinstance(data, list):
data = ''.join(c.lower() for c in ''.join(data) if c.isalpha())
if isinstance(data, str):
return "Palindrome" if data == data[::-1] else "Not a palindrome"
else:
return "Invalid input"
if __name__ == "__main__":
with open("input/input4.txt", "r") as file:
num, *lines = file.read().splitlines()
print(is_palindrome | (lines)) |
coberger/DIRAC | FrameworkSystem/Service/PlottingHandler.py | Python | gpl-3.0 | 2,415 | 0.038509 | """ Plotting Service generates graphs according to the client specifications
and data
"""
__RCSID__ = "$Id$"
import os
import hashlib
from types import DictType, ListType
from DIRAC import S_OK, S_ERROR, rootPath, gConfig, gLogger, gMonitor
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.FrameworkSystem.Service.PlotCache import gPlotCache
def initializePlottingHandler( serviceInfo ):
#Get data location
plottingSection = PathFinder.getServiceSection( "Framework/Plotting" )
dataPath = gConfig.getValue( "%s/DataLocation" % plottingSection, "data/graphs" )
dataPath = dataPath.strip()
if "/" != dataPath[0]:
dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
gLogger.info( "Data will be written into %s" % dataPath )
try:
os.makedirs( dataPath )
except:
pass
try:
testFile = "%s/plot__.test" % dataPath
fd = file( testFile, "w" )
fd.close()
os.unlink( testFile )
except IOError:
gLogger.fatal( "Can't write to %s" % dataPath )
return S_ERROR( "Data location is not writable" )
gPlotCache.setPlotsLocation( dataPath )
gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Plotting requests", "plots", gMonitor.OP_SUM )
return S_OK()
class PlottingHandler( RequestHandler ):
def __calculatePlotHash( self, data, metadata, subplotMetadata ):
m = hashlib.md5()
m.update( repr | ( {'Data':data, 'PlotMetadata':metadata, 'SubplotMetadata':subplotMetadata} ) )
return m.hexdigest()
types_generatePlot = [ [DictType, ListType], DictType ]
def export_generatePlot( self, data, plotMetadata, subplotMetadata = {} ):
""" Create a plot according to the client specification and return its name
"""
plotHash = self.__calculatePlotHash( data, plotMetadata, subplotMetadata )
result = gPlotCache.getPlot( plotHash, d | ata, plotMetadata, subplotMetadata )
if not result['OK']:
return result
return S_OK( result['Value']['plot'] )
def transfer_toClient( self, fileId, token, fileHelper ):
"""
Get graphs data
"""
retVal = gPlotCache.getPlotData( fileId )
if not retVal[ 'OK' ]:
return retVal
retVal = fileHelper.sendData( retVal[ 'Value' ] )
if not retVal[ 'OK' ]:
return retVal
fileHelper.sendEOF()
return S_OK()
|
cit563emef2dasdme/jklasjdf12nfasfdkl | scrape_google_scholar_from_bing.py | Python | mit | 1,084 | 0.001845 | import requests
from urllib.parse import parse_qs, urlparse
from lxml.html import fromstring
_HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/41.0.2272.76 Chrome/41.0.2272.76 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml'
}
# get results from search
query = {"q": "site:scholar.google.com \"From Mechanism to Mouse\" "}
url = "https://cn.bing.com/search"
html = requests.get(url, headers=_HEADERS, params=query)
print(html.request.headers)
print(html.url)
print(html.content)
tree = fromstring(html.content)
results = tree.xpath(".//*[@id='b_results']/li/div[1]/h2/a") |
print(len(results))
# grab the first link
link = results[0].get('href')
print(lin | k)
# parse the destination url from the querystring
qs = urlparse(link).query
parsed_qs = parse_qs(qs)
print(parsed_qs)
print(parsed_qs.get('user', []))
# as one list
links = []
for result in results:
link = result.get('href')
qs = urlparse(link).query
links.extend(parse_qs(qs).get('user', []))
print(links)
|
kleientertainment/ds_mod_tools | pkg/win32/mod_tools/exported/validate.py | Python | mit | 854 | 0.0363 | import zipfile, sys, os, glob
import xml.etree.ElementTree as ET
from clint.textui import progress
from collections import defaultdict
anim_map = defaultdict( list )
for zipfilename in progress.bar( glob.glob( "*.zip" ) ):
try:
with zipfile.ZipFile( zipfilename, "r" ) as zf:
root = ET.fr | omstring( zf.read( "animation.xml" ) )
for anim in root.findall( "anim" ):
animname = anim.attrib[ 'name' ]
rootname = | anim.attrib[ 'root' ]
key = ( animname, rootname )
anim_map[ key ].append( zipfilename )
except:
pass
invalid = False
for key, datalist in anim_map.iteritems():
if len( datalist ) > 1:
print key
print datalist
print
invalid = True
if invalid:
sys.exit( 255 )
|
texastribune/ox-scale | ox_scale/apps/scale/signals.py | Python | apache-2.0 | 538 | 0 | from django.contrib.auth.models import Group
from django.contrib.auth.signals import user_logged_in
def setup_user(sender, request, user, **kwargs):
"""
Make sure all users are in a common group and can log into the admin.
This makes setting up permissions in the crud admin easier.
"""
if not user.is_staff:
group = Group.objects.get(name='users') # XXX magic constant
user.groups.add(group)
user.is_staff | = True
user.save()
# TODO l | og
user_logged_in.connect(setup_user)
|
blckshrk/Weboob | modules/caissedepargne/browser.py | Python | agpl-3.0 | 4,017 | 0.002738 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <ht | tp://www.gnu.org/licenses/>.
from urlparse import urlsplit
from weboob.tools.browse | r import BaseBrowser, BrowserIncorrectPassword
from .pages import LoginPage, IndexPage, ErrorPage, UnavailablePage
__all__ = ['CaisseEpargne']
class CaisseEpargne(BaseBrowser):
DOMAIN = 'www.caisse-epargne.fr'
PROTOCOL = 'https'
CERTHASH = ['165faeb5bd1bad22bf52029e3c09bf540199402a1fa70aa19e9d5f92d562ff69', 'dfff27d6db1fcdf1cea3ab8e3c1ca4f97c971262e95be49f3385b40c97fe640c']
PAGES = {'https://[^/]+.caisse-epargne.fr/particuliers/ind_pauthpopup.aspx.*': LoginPage,
'https://[^/]+.caisse-epargne.fr/Portail.aspx': IndexPage,
'https://[^/]+.caisse-epargne.fr/login.aspx': ErrorPage,
'https://[^/]+.caisse-epargne.fr/Pages/logout.aspx.*': ErrorPage,
'https://[^/]+.caisse-epargne.fr/page_hs_dei_.*.aspx': UnavailablePage,
}
def __init__(self, nuser, *args, **kwargs):
self.nuser = nuser
BaseBrowser.__init__(self, *args, **kwargs)
def is_logged(self):
return self.page is not None and not self.is_on_page((LoginPage,ErrorPage))
def home(self):
if self.is_logged():
self.location(self.buildurl('/Portail.aspx'))
else:
self.login()
def login(self):
"""
Attempt to log in.
Note: this method does nothing if we are already logged in.
"""
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if self.is_logged():
return
self._ua_handlers['_cookies'].cookiejar.clear()
if not self.is_on_page(LoginPage):
self.location('https://www.caisse-epargne.fr/particuliers/ind_pauthpopup.aspx?mar=101®=&fctpopup=auth&cv=0', no_login=True)
self.page.login(self.username)
if not self.page.login2(self.nuser, self.password):
# perso
self.page.login3(self.password)
if not self.is_logged():
raise BrowserIncorrectPassword()
v = urlsplit(self.page.url)
self.DOMAIN = v.netloc
def get_accounts_list(self):
if self.is_on_page(IndexPage):
self.page.go_list()
else:
self.location(self.buildurl('/Portail.aspx'))
return self.page.get_list()
def get_account(self, id):
assert isinstance(id, basestring)
l = self.get_accounts_list()
for a in l:
if a.id == id:
return a
return None
def _get_history(self, info):
if self.is_on_page(IndexPage):
self.page.go_list()
else:
self.location(self.buildurl('/Portail.aspx'))
self.page.go_history(info)
while True:
assert self.is_on_page(IndexPage)
for tr in self.page.get_history():
yield tr
if not self.page.go_next():
return
def get_history(self, account):
return self._get_history(account._info)
def get_coming(self, account):
for info in account._card_links:
for tr in self._get_history(info):
yield tr
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.