repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ardi69/pyload-0.4.10 | pyload/plugin/hoster/LetitbitNet.py | Python | gpl-3.0 | 4,139 | 0.003866 | # -*- coding: utf-8 -*-
#
# API Documentation:
# http://api.letitbit.net/reg/static/api.pdf
#
# Test links:
# http://letitbit.net/download/07874.0b5709a7d3beee2408bb1f2eefce/random.bin.html
import re
import urlparse
from pyload.utils import json_loads, json_dumps
from pyload.network.RequestFactory import getURL
from pyload.plugin.captcha.ReCaptcha import ReCaptcha
from pyload.plugin.internal.SimpleHoster import SimpleHoster, secondsToMidnight
def api_response(url):
json_data = ["yw7XQy2v9", ["download/info", {"link": url}]]
api_rep = getURL("http://api.letitbit.net/json",
post={'r': json_dumps(json_data)})
return json_loads(api_rep)
def getInfo(urls):
for url in urls:
api_rep = api_ | response(url)
if api_rep['status'] == 'OK':
info = api_rep['data'][0]
yield (info['name'], info['size'], 2, url)
else:
yield (url, 0, 1, url)
class LetitbitNet(SimpleHoster):
__name = "LetitbitNet"
__type = "hoster"
__version | = "0.30"
__pattern = r'https?://(?:www\.)?(letitbit|shareflare)\.net/download/.+'
__config = [("use_premium", "bool", "Use premium account if available", True)]
__description = """Letitbit.net hoster plugin"""
__license = "GPLv3"
__authors = [("zoidberg", "zoidberg@mujmail.cz"),
("z00nx", "z00nx0@gmail.com")]
URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "letitbit.net")]
SECONDS_PATTERN = r'seconds\s*=\s*(\d+);'
CAPTCHA_CONTROL_FIELD = r'recaptcha_control_field\s=\s\'(.+?)\''
def setup(self):
self.resumeDownload = True
def handle_free(self, pyfile):
action, inputs = self.parseHtmlForm('id="ifree_form"')
if not action:
self.error(_("ifree_form"))
pyfile.size = float(inputs['sssize'])
self.logDebug(action, inputs)
inputs['desc'] = ""
self.html = self.load(urlparse.urljoin("http://letitbit.net/", action), post=inputs)
m = re.search(self.SECONDS_PATTERN, self.html)
seconds = int(m.group(1)) if m else 60
self.logDebug("Seconds found", seconds)
m = re.search(self.CAPTCHA_CONTROL_FIELD, self.html)
recaptcha_control_field = m.group(1)
self.logDebug("ReCaptcha control field found", recaptcha_control_field)
self.wait(seconds)
res = self.load("http://letitbit.net/ajax/download3.php", post=" ")
if res != '1':
self.error(_("Unknown response - ajax_check_url"))
self.logDebug(res)
recaptcha = ReCaptcha(self)
response, challenge = recaptcha.challenge()
post_data = {"recaptcha_challenge_field": challenge,
"recaptcha_response_field": response,
"recaptcha_control_field": recaptcha_control_field}
self.logDebug("Post data to send", post_data)
res = self.load("http://letitbit.net/ajax/check_recaptcha.php", post=post_data)
self.logDebug(res)
if not res:
self.invalidCaptcha()
if res == "error_free_download_blocked":
self.logWarning(_("Daily limit reached"))
self.wait(secondsToMidnight(gmt=2), True)
if res == "error_wrong_captcha":
self.invalidCaptcha()
self.retry()
elif res.startswith('['):
urls = json_loads(res)
elif res.startswith('http://'):
urls = [res]
else:
self.error(_("Unknown response - captcha check"))
self.link = urls[0]
def handle_premium(self, pyfile):
api_key = self.user
premium_key = self.account.getAccountData(self.user)['password']
json_data = [api_key, ["download/direct_links", {"pass": premium_key, "link": pyfile.url}]]
api_rep = self.load('http://api.letitbit.net/json', post={'r': json_dumps(json_data)})
self.logDebug("API Data: " + api_rep)
api_rep = json_loads(api_rep)
if api_rep['status'] == 'FAIL':
self.fail(api_rep['data'])
self.link = api_rep['data'][0][0]
|
xifle/home-assistant | tests/components/test_rfxtrx.py | Python | mit | 5,523 | 0 | """The tests for the Rfxtrx component."""
# pylint: disable=protected-access
import unittest
import pytest
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.components import rfxtrx as rfxtrx
from tests.common import get_test_home_assistant
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestRFXTRX(unittest.TestCase):
"""Test the Rfxtrx component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
rfxtrx.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx.RFX_DEVICES = {}
if rfxtrx.RFXOBJECT:
rfxtrx.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
self.assertEqual(len(rfxtrx.RFXOBJECT.sensors()), 2)
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}}))
self.hass.config.components.remove('rfxtrx')
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True,
'debug': True}}))
def test_in | valid_config(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {}
}))
self.assertFalse(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' + |
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'invalid_key': True}}))
def test_fire_event(self):
"""Test fire event."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'switch', {
'switch': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(rfxtrx.EVENT_BUTTON_PRESSED, record_event)
self.hass.block_till_done()
entity = rfxtrx.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.should_fire_event)
event = rfxtrx.get_rfx_object('0b1100cd0213c7f210010f51')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.block_till_done()
self.assertEqual(event.values['Command'], "On")
self.assertEqual('on', entity.state)
self.assertEqual(self.hass.states.get('switch.test').state, 'on')
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'switch.test', 'state': 'on'})
def test_fire_event_sensor(self):
"""Test fire event."""
self.assertTrue(setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0a520802060100ff0e0269': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen("signal_received", record_event)
self.hass.block_till_done()
event = rfxtrx.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.block_till_done()
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'sensor.test'})
|
javnik36/ZTMtoGTFS | bugs_checker_osm.py | Python | mit | 547 | 0.005484 | import re
osm | = open("stops.txt", 'r', encoding="utf-8")
bugs = open("BAD-STOPS.txt", 'r', encoding="u | tf-8")
still = open("BUGS-NOT-IN-OSM.txt", 'w')
bugi = []
for line in bugs:
line = line.split(' ')
bugi.append(line[0])
print(len(bugi))
for line in osm:
line = line.split(',')
if line[0].isnumeric():
stop_nr = line[0]
if stop_nr in bugi:
bugi.remove(stop_nr)
for item in bugi:
still.write(item)
still.write("\n")
osm.close()
bugs.close()
still.close()
print(len(bugi))
|
marshmallow-code/marshmallow-jsonapi | tests/conftest.py | Python | mit | 1,521 | 0 | import pytest
from tests.base import Author, Post, Comment, Keyword, fake
def make_author():
return Author(
id=fake.random_int(),
first_name=fake.first_name(),
last_name=fake.last_name(),
twitter=fake.domain_word(),
)
def make_post(with_comments=True, with_author=True, with_keywords=True):
comments = [make_comment() for _ in range(2)] if with_comments else []
keywords = [make_keyword() for _ in range(3)] if with_keywords else []
author = make_author() if with_author else None
return Post(
id=fake.random_int(),
title=fake.catch_phrase(),
author=author,
author_id=author.id if with_author else No | ne,
comments=comments,
keywords=keywords,
)
def make_comment(with_author=True):
author = make_author() if with_au | thor else None
return Comment(id=fake.random_int(), body=fake.bs(), author=author)
def make_keyword():
return Keyword(keyword=fake.domain_word())
@pytest.fixture()
def author():
return make_author()
@pytest.fixture()
def authors():
return [make_author() for _ in range(3)]
@pytest.fixture()
def comments():
return [make_comment() for _ in range(3)]
@pytest.fixture()
def post():
return make_post()
@pytest.fixture()
def post_with_null_comment():
return make_post(with_comments=False)
@pytest.fixture()
def post_with_null_author():
return make_post(with_author=False)
@pytest.fixture()
def posts():
return [make_post() for _ in range(3)]
|
jyejare/robottelo | pytest_plugins/testimony_markers.py | Python | gpl-3.0 | 5,447 | 0.002937 | import inspect
import re
import pytest
from robottelo.logging import collection_logger as logger
IMPORTANCE_LEVELS = []
def pytest_addoption(parser):
"""Add CLI options related to Testimony token based mark collection"""
parser.addoption(
'--importance',
help='Comma separated list of importance levels to include in test collection',
)
parser.addoption(
'--compone | nt',
help='Comma separated list of component names to include in test collection',
)
parser.addoption(
'--assignee',
help='Comma separated list of assignees to include in test collection',
)
def pytest_configure(config):
"""Register markers related to testimony tokens"""
for marker in [
'importance: CaseImportance tes | timony token, use --importance to filter',
'component: Component testimony token, use --component to filter',
'assignee: Assignee testimony token, use --assignee to filter',
]:
config.addinivalue_line("markers", marker)
component_regex = re.compile(
# To match :CaseComponent: FooBar
r'\s*:CaseComponent:\s*(?P<component>\S*)',
re.IGNORECASE,
)
importance_regex = re.compile(
# To match :CaseImportance: Critical
r'\s*:CaseImportance:\s*(?P<importance>\S*)',
re.IGNORECASE,
)
assignee_regex = re.compile(
# To match :Assignee: jsmith
r'\s*:Assignee:\s*(?P<assignee>\S*)',
re.IGNORECASE,
)
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(session, items, config):
"""Add markers for testimony tokens"""
# split the option string and handle no option, single option, multiple
# config.getoption(default) doesn't work like you think it does, hence or ''
importance = [i for i in (config.getoption('importance') or '').split(',') if i != '']
component = [c for c in (config.getoption('component') or '').split(',') if c != '']
assignee = [a for a in (config.getoption('assignee') or '').split(',') if a != '']
selected = []
deselected = []
logger.info('Processing test items to add testimony token markers')
for item in items:
if item.nodeid.startswith('tests/robottelo/'):
# Unit test, no testimony markers
continue
# apply the marks for importance, component, and assignee
# Find matches from docstrings starting at smallest scope
item_docstrings = [
d
for d in map(inspect.getdoc, (item.function, getattr(item, 'cls', None), item.module))
if d is not None
]
item_mark_names = [m.name for m in item.iter_markers()]
for docstring in item_docstrings:
# Add marker starting at smallest docstring scope
# only add the mark if it hasn't already been applied at a lower scope
doc_component = component_regex.findall(docstring)
if doc_component and 'component' not in item_mark_names:
item.add_marker(pytest.mark.component(doc_component[0]))
doc_importance = importance_regex.findall(docstring)
if doc_importance and 'importance' not in item_mark_names:
item.add_marker(pytest.mark.importance(doc_importance[0]))
doc_assignee = assignee_regex.findall(docstring)
if doc_assignee and 'assignee' not in item_mark_names:
item.add_marker(pytest.mark.assignee(doc_assignee[0]))
# exit early if no filters were passed
if importance or component or assignee:
# Filter test collection based on CLI options for filtering
# filters should be applied together
# such that --component Repository --importance Critical --assignee jsmith
# only collects tests which have all three of these marks
# https://github.com/pytest-dev/pytest/issues/1373 Will make this way easier
# testimony requires both importance and component, this will blow up if its forgotten
importance_marker = item.get_closest_marker('importance').args[0]
if importance and importance_marker not in importance:
logger.debug(
f'Deselected test {item.nodeid} due to "--importance {importance}",'
f'test has importance mark: {importance_marker}'
)
deselected.append(item)
continue
component_marker = item.get_closest_marker('component').args[0]
if component and component_marker not in component:
logger.debug(
f'Deselected test {item.nodeid} due to "--component {component}",'
f'test has component mark: {component_marker}'
)
deselected.append(item)
continue
assignee_marker = item.get_closest_marker('assignee').args[0]
if assignee and assignee_marker not in assignee:
logger.debug(
f'Deselected test {item.nodeid} due to "--assignee {assignee}",'
f'test has assignee mark: {assignee_marker}'
)
deselected.append(item)
continue
selected.append(item)
# selected will be empty if no filter option was passed, defaulting to full items list
items[:] = selected if deselected else items
config.hook.pytest_deselected(items=deselected)
|
RPGOne/Skynet | scikit-learn-0.18.1/examples/neural_networks/plot_mlp_training_curves.py | Python | bsd-3-clause | 3,692 | 0.001354 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling lear | ning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', ' | linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
|
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/misc/tests/test_doccer.py | Python | mit | 3,171 | 0.002523 | ''' Some tests for the documenting decorator and support functions '''
from __future__ import division, print_function, absolute_import
import sys
import pytest
from numpy.testing import assert_equal
from scipy.misc import doccer
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
docstring = \
"""Docstring
%(strtest1)s
%(strtest2)s
%(strtest3)s
"""
param_doc1 = \
"""Another test
with some indent"""
param_doc2 = \
"""Another test, one line"""
param_doc3 = \
""" Another test
with some indent"""
doc_dict = {'strtest1':param_doc1,
'strtest2':param_doc2,
'strtest3':param_doc3}
filled_docstring = \
"""Docstring
Another test
with some indent
Another test, one line
Another test
with some indent
"""
def test_unindent():
assert_equal(doccer.unindent_string(param_doc1), param_doc1)
assert_equal(doccer.unindent_string(param_doc2), param_doc2)
assert_equal(doccer.unindent_string(param_doc3), param_doc1)
def test_unindent_dict():
d2 = doccer.unindent_dict(doc_dict)
assert_equal(d2['strtest1'], doc_dict['strtest1'])
assert_equal(d2['strtest2'], doc_dict['strtest2'])
assert_equal(d2['strtest3'], doc_dict['strtest1'])
def test_docformat():
udd = doccer.unindent_dict(doc_dict)
formatted = doccer.docformat(docstring, udd)
assert_equal(formatted, filled_docstring)
single_doc = 'Single line doc %(strtest1)s'
formatted = doccer.docformat(single_doc, doc_dict)
# Note - initial indent of format string does not
# affect subsequent indent of inserted parameter
assert_equal(formatted, """Single line doc Another test
with some indent""")
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_decorator():
# with unindentation of parameters
decorator = doccer.filldoc(doc_dict, True)
@decorator
def func():
""" Docstring
%(strtest3)s
"""
assert_equal(func.__doc__, """ Docstring
Another test
with some indent
""")
# without unindentation of parameters
decorator = doccer.filldoc(doc_dict, False)
@decorator
def func():
""" Docstring
%(strtest3)s
"""
assert_equal(func.__doc__, """ Docstring
Another test
with some indent
""")
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_inherit_docstring_from():
class Foo(object):
def func(self):
'''Do something useful.'''
return
def func2(self):
'''Something else.'''
class Bar(Foo):
@doccer.inherit_docstring_from(Foo)
| def func(self):
'''%(super)sABC'''
return
@doccer.inherit_docstring_from(Foo)
def func2(self):
# No docstring.
return
assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC')
assert_equal(Bar.func2.__doc__, Foo.func2.__doc_ | _)
bar = Bar()
assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC')
assert_equal(bar.func2.__doc__, Foo.func2.__doc__)
|
nicfit/vexmpp | vexmpp/stream.py | Python | mit | 11,877 | 0.000168 | import os
import asyncio
import logging
from functools import partial
from collections import deque
from lxml import etree
from . import stanzas
from .stanzas import Iq
from .parser import Parser
from .utils import signalEvent
from .utils import benchmark as timedWait
from . import getLogger
log = getLogger(__name__)
if "VEX_TIMED_WAITS" in os.environ and int(os.environ["VEX_TIMED_WAITS"]):
from .metrics import ValueMet | ric
stream_wait_met = ValueMetric("stream:wait_time", type_=float)
else:
stream_wait_met = None
_ENFORCE_TIMEOUTS = bool("VEX_ENFORCE_TIMEOUTS" in os.environ and
int(os.environ["VEX_ENFORCE_TIMEOUTS"]))
class QueuedStanza:
def __init__(self, s):
self.task_set = set()
self.stanza = s
class ParserTask(asyncio.Task):
def __init__(self, stream, loop=None):
| super().__init__(self._run(), loop=loop)
self._parser = Parser()
self._data_queue = asyncio.Queue()
self._stream = stream
def parse(self, bytes_):
self._data_queue.put_nowait(bytes_)
def reset(self):
self._parser.reset()
async def _run(self):
while True:
try:
data = await self._data_queue.get()
elems = self._parser.parse(data)
for e in elems:
stanza = stanzas.makeStanza(e)
if log.getEffectiveLevel() <= logging.VERBOSE:
log.verbose("[STANZA IN]:\n%s" %
stanza.toXml(pprint=True).decode("utf-8"))
await self._stream._handleStanza(stanza)
except asyncio.CancelledError:
pass
except Exception as ex:
log.exception(ex)
class Stream(asyncio.Protocol):
"""Base class for XMPP streams."""
def __init__(self, creds, state_callbacks=None, mixins=None,
default_timeout=None):
self.creds = creds
self._transport = None
self._waiter_futures = []
self._tls_active = False
self._callbacks = state_callbacks
self._mixins = mixins or []
for mixin in self._mixins:
for name, obj in mixin._exports:
if name in self.__dict__:
raise ValueError("Mixin '%s' exports ambiguous "
"data named '%s'" % (str(mixin), name))
else:
# Add the symbol to the stream's namespace
self.__dict__[name] = obj
self._parser_task = ParserTask(self)
self.default_timeout = default_timeout
# Stream errors
self.error = None
self._stanza_queue = deque(maxlen=10)
@property
def connected(self):
if not self._transport:
return False
else:
if (getattr(self._transport, "_closing") and
self._transport._closing):
# SSL transport
return False
return True
@property
def tls_active(self):
return self._tls_active
@property
def jid(self):
return self.creds.jid
def close(self):
if self.connected:
self.send(b"</stream:stream>")
self._transport.close()
self._parser_task.cancel()
def send(self, data):
"""Send ``data`` which can be a vexmpp.stanza.Stanza,
lxml.etree.Element, a str, or bytes. The the case of bytes the
encoding MUST be utf-8 encoded (per XMPP specification).
In the case of Stanza and Element the Mixin.onSend callback is
invoked. Currently there is not a Mixin callback for strings or bytes.
"""
def _send(bytes_):
if not self._transport:
log.warn("Data send with disconnected transport")
return
self._transport.write(bytes_)
log.debug("[BYTES OUT]: %s", bytes_)
stanza = None
if isinstance(data, stanzas.Stanza):
stanza = data
raw_data = data.toXml()
elif isinstance(data, str):
raw_data = data.encode("utf-8")
elif isinstance(data, etree._Element):
stanza = stanzas.Stanza(xml=data)
raw_data = etree.tostring(data, encoding="utf-8")
elif isinstance(data, bytes):
raw_data = data
else:
raise ValueError("Unable to send type {}".format(type(data)))
if stanza and log.getEffectiveLevel() <= logging.VERBOSE:
log.verbose("[STANZA OUT]:\n%s" %
stanza.toXml(pprint=True).decode("utf-8"))
_send(raw_data)
if stanza:
for m in self._mixins:
hook = partial(m.onSend, self, stanza)
asyncio.ensure_future(self._runMixin(hook))
async def sendAndWaitIq(self, child_ns, to=None, child_name="query",
type="get", raise_on_error=False, timeout=None,
id_prefix=None):
iq = Iq(to=to, type=type, request=(child_name, child_ns),
id_prefix=id_prefix)
resp = await self.sendAndWait(iq, raise_on_error=raise_on_error,
timeout=timeout)
return resp
async def sendAndWait(self, stanza, raise_on_error=False, timeout=None):
if not stanza.id:
stanza.setId()
xpath = "/%s[@id='%s']" % (stanza.name, stanza.id)
self.send(stanza)
resp = await self.wait([(xpath, None)], timeout=timeout)
if resp.error is not None and raise_on_error:
raise resp.error
else:
return resp
async def negotiate(self, timeout=None):
raise NotImplementedError()
async def wait(self, xpaths, timeout=None):
"""``xpaths`` is a 2-tuple of the form (xpath, nsmap), or a list of
the same tuples to wait on a choice of matches. The first matched
stanza is returned. Passing a ``timeout`` argument will raise a
asyncio.TimeoutError if not matches are found."""
global stream_wait_met
if not isinstance(xpaths, list):
xpaths = [xpaths]
if timeout is None and self.default_timeout:
timeout = self.default_timeout
log.debug("Stream wait for %s [timeout=%s]" % (xpaths, timeout))
if _ENFORCE_TIMEOUTS and not timeout:
raise RuntimeError("Timeout not set error")
fut = _StreamWaitFuture(xpaths)
# Run thru queue. Note, once a tasklet has seen a stanza it is skipped
# by _StreamWaitFuture.matchStanza
for queued_stanza in self._stanza_queue:
matched = fut.matchStanza(queued_stanza)
if matched:
return queued_stanza.stanza
self._waiter_futures.append(fut)
try:
with timedWait() as timer_stat:
match = await asyncio.wait_for(fut, timeout)
if stream_wait_met:
stream_wait_met.update(timer_stat["total"])
log.debug("Stream wait - time: {:.3f} "
"min/max/avg: {:.6f}/{:.6f}/{:.6f}"
.format(stream_wait_met.value,
stream_wait_met.min, stream_wait_met.max,
stream_wait_met.average))
return match
except asyncio.TimeoutError as ex:
raise asyncio.TimeoutError(
"Timeout ({}s) while waiting for xpaths: {}"
.format(timeout, xpaths)) from ex
finally:
self._waiter_futures.remove(fut)
# asyncio.Protocol implementation
def connection_made(self, transport, tls=False):
log.debug("Connection_made: %s", transport)
self._transport = transport
self._tls_active = tls
signalEvent(self._callbacks, "connected", self, tls)
def starttls_made(self, transport):
self.connection_made(transport, tls=True)
async def _handleStanza(self, stanza):
if isinstance(stanza, stanzas.StreamError):
sig |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/support/models.py | Python | mit | 14,894 | 0.00141 | import logging
import re
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_fsm import FSMIntegerField
from model_utils import FieldTracker
from model_utils.models import TimeStampedModel
from waldur_core.core import models as core_models
from waldur_core.core.validators import validate_name, validate_template_syntax
from waldur_core.structure import models as structure_models
from . import managers
logger = logging.getLogger(__name__)
class Issue(
core_models.UuidMixin,
structure_models.StructureLoggableMixin,
core_models.BackendModelMixin,
TimeStampedModel,
core_models.StateMixin,
):
class Meta:
ordering = ['-created']
class Permissions:
customer_path = 'customer'
project_path = 'project'
backend_id = models.CharField(max_length=255, blank=True, null=True, unique=True)
key = models.CharField(max_length=255, blank=True)
type = models.CharField(max_length=255)
link = models.URLField(
max_length=255, help_text=_('Link to issue in support system.'), blank=True
)
summary = models.CharField(max_length=255)
description = models.TextField(blank=True)
deadline = models.DateTimeField(blank=True, null=True)
impact = models.CharField(max_length=255, blank=True)
status = models.CharField(max_length=255)
resolution = models.CharField(max_length=255, blank=True)
priority = models.CharField(max_length=255, blank=True)
caller = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='created_issues',
blank=True,
null=True,
help_text=_('Waldur user who has reported the issue.'),
on_delete=models.SET_NULL,
)
reporter = models.ForeignKey(
'SupportUser',
related_name='reported_issues',
blank=True,
null=True,
help_text=_(
'Help desk user who have created the issue that is reported by caller.'
),
on_delete=models.PROTECT,
)
assignee = models.ForeignKey(
'SupportUser',
related_name='issues',
blank=True,
null=True,
help_text=_('Help desk user who will implement the issue'),
on_delete=models.PROTECT,
)
customer = models.ForeignKey(
structure_models.Customer,
verbose_name=_('organization'),
related_name='issues',
blank=True,
null=True,
on_delete=models.CASCADE,
)
project = models.ForeignKey(
structure_models.Project,
related_name='issues',
blank=True,
null=True,
on_delete=models.CASCADE,
)
resource_content_type = models.ForeignKey(
on_delete=models.CASCADE, to=ContentType, null=True
)
resource_object_id = models.PositiveIntegerField(null=True)
resource = GenericForeignKey('resource_content_type', 'resource_object_id')
first_response_sla = models.DateTimeField(blank=True, null=True)
resolution_date = models.DateTimeField | (blank=True, null=True)
template = models.ForeignKey(
'Template',
related_name='issues',
blank=True,
null= | True,
on_delete=models.PROTECT,
)
feedback_request = models.BooleanField(
blank=True,
default=True,
help_text='Request feedback from the issue creator after resolution of the issue',
)
tracker = FieldTracker()
def get_description(self):
return self.description
@classmethod
def get_url_name(cls):
return 'support-issue'
@classmethod
def get_backend_fields(cls):
return super(Issue, cls).get_backend_fields() + (
'backend_id',
'key',
'type',
'link',
'summary',
'description',
'deadline',
'impact',
'status',
'resolution',
'priority',
'caller',
'reporter',
'assignee',
'customer',
'project',
'resource',
'first_response_sla',
)
def get_log_fields(self):
return (
'uuid',
'type',
'key',
'status',
'link',
'summary',
'reporter',
'caller',
'customer',
'project',
'resource',
)
@property
def resolved(self):
return IssueStatus.check_success_status(self.status)
def set_resolved(self):
self.status = (
IssueStatus.objects.filter(type=IssueStatus.Types.RESOLVED).first().name
)
self.state = Issue.States.OK
self.save()
def set_canceled(self):
self.status = (
IssueStatus.objects.filter(type=IssueStatus.Types.CANCELED).first().name
)
self.state = Issue.States.OK
self.save()
def __str__(self):
return '{}: {}'.format(self.key or '???', self.summary)
class Priority(
core_models.NameMixin, core_models.UuidMixin, core_models.UiDescribableMixin
):
backend_id = models.CharField(max_length=255, blank=True)
class Meta:
verbose_name = _('Priority')
verbose_name_plural = _('Priorities')
@classmethod
def get_url_name(cls):
return 'support-priority'
def __str__(self):
return self.name
class SupportUser(core_models.UuidMixin, core_models.NameMixin, models.Model):
    """A user account in the support backend, optionally linked to a local user."""

    class Meta:
        ordering = ['name']

    # Local user this backend account belongs to; optional because backend
    # users may have no counterpart in this system.
    user = models.ForeignKey(
        on_delete=models.CASCADE,
        to=settings.AUTH_USER_MODEL,
        related_name='+',
        blank=True,
        null=True,
    )
    # Identifier of the user in the support backend; unique when present.
    backend_id = models.CharField(max_length=255, blank=True, null=True, unique=True)
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as '
            'active. Unselect this instead of deleting accounts.'
        ),
    )
    objects = managers.SupportUserManager()

    @classmethod
    def get_url_name(cls):
        """Return the base name used when building URL routes for support users."""
        return 'support-user'

    def __str__(self):
        return self.name
class Comment(
    core_models.UuidMixin,
    core_models.BackendModelMixin,
    TimeStampedModel,
    core_models.StateMixin,
):
    """A comment on a support issue, synchronised with the support backend."""

    class Meta:
        ordering = ['-created']
        # One backend comment maps to at most one local comment per issue.
        unique_together = ('backend_id', 'issue')

    class Permissions:
        # Permission scoping is inherited from the parent issue.
        customer_path = 'issue__customer'
        project_path = 'issue__project'

    issue = models.ForeignKey(
        on_delete=models.CASCADE, to=Issue, related_name='comments'
    )
    author = models.ForeignKey(
        on_delete=models.CASCADE, to=SupportUser, related_name='comments'
    )
    description = models.TextField()
    # Public comments are visible to the issue caller, not only to staff.
    is_public = models.BooleanField(default=True)
    backend_id = models.CharField(max_length=255, blank=True, null=True)
    tracker = FieldTracker()
def clean_message(self, message):
"""
Extracts comment message from JIRA comment which contains user's info in its body.
"""
match = re.search(r'^(\[.*?\]\:\s)', message)
return message.replace(match.group(0), '') if match else message
    def prepare_message(self):
        """
        Prepends user info to the comment description to display comment author in JIRA.
        User info format - '[user.full_name user.civil_number]: '.
        """
        # Default to the support-user name when no local user is linked.
        prefix = self.author.name
        # User is optional
        user = self.author.user
        if user:
            # Prefer the local account's full name; fall back to username.
            prefix = user.full_name or user.username
            if user.civil_number:
                prefix += ' ' + user.civil_number
        return '[%s]: %s' % (prefix, self.description)
    def update_message(self, message):
        # Store the backend comment body with the author prefix stripped.
        self.description = self.clean_message(message)
    @classmethod
    def get_url_name(cls):
        """Return the base name used when building URL routes for comments."""
        return 'support-comment'
@classmethod
def get_backend_fields(cls):
return super(Comment, cls).get_backend_fi |
indevgr/django | django/views/debug.py | Python | bsd-3-clause | 46,822 | 0.001474 | from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.urls import Resolver404, resolve
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless | of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
class CallableSettingWrapper(object):
    """ Object to wrap callable appearing in settings
    * Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbidding to set attributes (#23070).
    """
    def __init__(self, callable_setting):
        # Keep a reference only; never invoke the wrapped callable.
        self._wrapped = callable_setting

    def __repr__(self):
        # Delegate so the debug page shows the callable's own repr.
        return repr(self._wrapped)
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    If the value is a dictionary, recursively cleanse the keys in
    that dictionary.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            # Sensitive key name: blot out the whole value.
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            # Recurse so nested sensitive keys get cleansed too.
            cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
        else:
            cleansed = value
    except TypeError:
        # If the key isn't regex-able, just return as-is.
        cleansed = value

    if callable(cleansed):
        # Wrap callables so the debug page neither calls them nor breaks
        # on them (#21345, #23070).
        cleansed = CallableSettingWrapper(cleansed)
    return cleansed
def get_safe_settings():
    """Return a dictionary of the settings module, with sensitive settings blurred out."""
    settings_dict = {}
    for k in dir(settings):
        # By Django convention, only upper-case names are settings.
        if k.isupper():
            settings_dict[k] = cleanse_setting(k, getattr(settings, k))
    return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    if request.is_ajax():
        # AJAX callers get a plain-text traceback instead of the HTML page.
        text = reporter.get_traceback_text()
        return HttpResponse(text, status=status_code, content_type='text/plain')
    else:
        html = reporter.get_traceback_html()
        return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
    # Instantiate the default filter for the first time and cache it.
    return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
    """Return the request's reporter filter, or the cached default one."""
    default_filter = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
    """
    Base for all exception reporter filter classes. All overridable hooks
    contain lenient default behaviors.
    """

    def get_post_parameters(self, request):
        """Return the request's POST parameters without any filtering."""
        if request is None:
            return {}
        return request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        """Return every local variable of *tb_frame* as (name, value) pairs."""
        frame_locals = tb_frame.f_locals
        return list(frame_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        # Only filter when DEBUG is off; debug pages already expose everything.
        return settings.DEBUG is False
    def get_cleansed_multivaluedict(self, request, multivaluedict):
        """
        Replaces the keys in a MultiValueDict marked as sensitive with stars.
        This mitigates leaking sensitive POST parameters if something like
        request.POST['nonexistent_key'] throws an exception (#21098).
        """
        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
        if self.is_active(request) and sensitive_post_parameters:
            # Copy before mutating so the request's own data stays intact.
            multivaluedict = multivaluedict.copy()
            for param in sensitive_post_parameters:
                if param in multivaluedict:
                    multivaluedict[param] = CLEANSED_SUBSTITUTE
        return multivaluedict
    def get_post_parameters(self, request):
        """
        Replaces the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                # Work on a copy so the original request.POST is untouched.
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST
    def cleanse_special_types(self, request, value):
        """Cleanse MultiValueDict values; report (not raise) evaluation errors."""
        try:
            # If value is lazy or a complex object of another kind, this check
            # might raise an exception. isinstance checks that lazy
            # MultiValueDicts will have a return value.
            is_multivalue_dict = isinstance(value, MultiValueDict)
        except Exception as e:
            # Never let a broken __repr__/lazy object crash the debug page.
            return '{!r} while evaluating {!r}'.format(e, value)

        if is_multivalue_dict:
            # Cleanse MultiValueDicts (request.POST is the one we usually care about)
            value = self.get_cleansed_multivaluedict(request, value)
        return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_type |
youfou/wxpy | wxpy/__init__.py | Python | mit | 2,313 | 0.002017 | #!/usr/bin/env python3
# coding: utf-8
"""
登陆微信::
# 导入模块
from wxpy import *
# 初始化机器人,扫码登陆
bot = Bot()
找到好友::
# 搜索名称含有 "游否" 的男性深圳好友
my_friend = bot.friends().search('游否', sex=MALE, city="深圳")[0]
发送消息::
# 发送文本给好友
my_friend.send('Hello WeChat!')
# 发送图片
my_friend.send_image('my_picture.jpg')
自动响应各类消息::
# 打印来自其他好友、群聊和公众号的消息
@bot.register()
def print_others(msg):
print(msg)
# 回复 my_friend 的消息 (优先匹配后注册的函数!)
@bot.register(my_friend)
def reply_my_friend(msg):
return 'received: {} ({})'.format(msg.text, msg.type)
# 自动接受新的好友请求
@bot.register(msg_types=FRIENDS)
def auto_accept_friends(msg):
# 接受好友请求
new_friend = msg.card.accept()
# 向新的好友发送消息
new_friend.send('哈哈,我自动接受了你的好友请求')
保持登陆/运行::
# 进入 Python 命令行、让程序保持运行
embed()
# 或者仅仅堵塞线程
# bot.join()
"""
import logging
import sys
from .api.bot import Bot
from .api.chats import Chat, Chats, Friend, Group, Groups, MP, Member, User
from .api.consts import ATTACHMENT, CARD, FRIENDS, MAP, NOTE, PICTURE, RECORDING, SHARING, SYSTEM, TEXT, VIDEO
from .api.consts import FEMALE, MALE
from .api.messages import Article, Message, Messages, SentMessage
from .exceptions import ResponseError
from .ext import Tuling, WeChatLoggingHandler, XiaoI, get_wechat_logger, sync_message_in_groups
from .utils import BaseRequest, detect_freq_limit, dont_raise_response_ | error, embed, ensure_one, mutual_friends
__title__ = 'wxpy'
__version__ = '0.3.9.8'
__author__ = 'Youfou'
__license__ = 'MIT'
__copyright__ = '2017, Youfou'
version_details = 'wxpy {ver} from | {path} (python {pv.major}.{pv.minor}.{pv.micro})'.format(
ver=__version__, path=__path__[0], pv=sys.version_info)
try:
    # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for very old Pythons without logging.NullHandler: a handler
    # that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Attach a no-op handler so importing the library never triggers the
# "No handlers could be found" warning when the app has no logging config.
logging.getLogger(__name__).addHandler(NullHandler())
|
Garrett-R/scikit-learn | doc/sphinxext/gen_rst.py | Python | bsd-3-clause | 38,936 | 0.000719 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
from textwrap import dedent
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """File-like object that duplicates every write/flush to two streams."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # Forward the payload to both sinks, first stream first.
        for stream in (self.file1, self.file2):
            stream.write(data)

    def flush(self):
        for stream in (self.file1, self.file2):
            stream.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = r | esp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'pl | ain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index

    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)

    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        # Depth counting handles nested blocks of the same tag pair.
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        sel = str_in[start_pos + 1:pos]
        return sel

    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        # Hand-rolled parser: searchindex.js is JS-like, not strict JSON,
        # so we walk ':' separators and dispatch on the value's first char.
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out

    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')

    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')

    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)

    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        """Download/parse the Sphinx search index and set up link resolution."""
        self.doc_url = doc_url
        self.relative = relative
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)

        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False

        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)

        self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = v |
ashwyn/eden-message_parser | modules/s3/s3import.py | Python | mit | 123,322 | 0.002092 | # -*- coding: utf-8 -*-
""" Resource Import Tools
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# @todo: remove all interactive error reporting out of the _private methods, and raise exceptions instead.
__all__ = ["S3Importer", "S3ImportJob", "S3ImportItem"]
import os
import sys
import cPickle
import tempfile
from datetime import datetime
from copy import deepcopy
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.serializers import json as jsons
from gluon.storage import Storage, Messages
from gluon.tools import callback
from s3utils import SQLTABLES3
from s3crud import S3CRUD
from s3xml import S3XML
from s3utils import s3_mark_required, s3_has_foreign_key, s3_get_foreign_key
# Module-wide debug switch (Python 2 codebase, hence the `print >>` syntax).
DEBUG = False
if DEBUG:
    print >> sys.stderr, "S3IMPORTER: DEBUG MODE"
    def _debug(m):
        print >> sys.stderr, m
else:
    # No-op in production so _debug(...) calls cost almost nothing.
    _debug = lambda m: None
# =============================================================================
class S3Importer(S3CRUD):
"""
Transformable formats (XML, JSON, CSV) import handler
"""
UPLOAD_TABLE_NAME = "s3_import_upload"
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply CRUD methods
@param r: the S3Request
@param attr: dictionary of parameters for the method handler
@returns: output object to send to the view
Known means of communicating with this module:
It expects a URL of the form: /prefix/name/import
It will interpret the http requests as follows:
GET will trigger the upload
POST will trigger either commits or display the import details
DELETE will trigger deletes
It will accept one of the following control vars:
item: to specify a single item in the import job
job: to specify a job
It should not receive both so job takes precedent over item
For CSV imports, the calling controller can add extra fields
to the upload form to add columns to each row in the CSV. To add
the extra fields, pass a named parameter "csv_extra_fields" to the
s3_rest_controller call (or the S3Request call, respectively):
|
s3_rest_controller(module, resourcename,
csv_extra_fields=[
dict(label="ColumnLabelInTheCSV",
| field=field_instance)
])
The Field instance "field" will be added to the upload form, and
the user input will be added to each row of the CSV under the
label as specified. If the "field" validator has options, the
input value will be translated into the option representation,
otherwise the value will be used as-is.
Note that the "label" in the dict is the column label in the CSV,
whereas the field label for the form is to be set in the Field
instance passed as "field".
You can add any arbitrary number of csv_extra_fields to the list.
Additionally, you may want to allow the user to choose whether
the import shall first remove all existing data in the target
table. To do so, pass a label for the "replace_option" to the
request:
s3_rest_controller(module, resourcename,
replace_option=T("Remove existing data before import"))
This will add the respective checkbox to the upload form.
You may also want to provide a link to download a CSV template from
the upload form. To do that, add the resource name to the request
attributes:
s3_rest_controller(module, resourcename,
csv_template="<resourcename>")
This will provide a link to:
- static/formats/s3csv/<controller>/<resourcename>.csv
at the top of the upload form.
"""
_debug("S3Importer.apply_method(%s)" % r)
# Messages
T = current.T
messages = self.messages = Messages(T)
messages.download_template = "Download Template"
messages.invalid_file_format = "Invalid File Format"
messages.unsupported_file_type = "Unsupported file type of %s"
messages.stylesheet_not_found = "No Stylesheet %s could be found to manage the import file."
messages.no_file = "No file submitted"
messages.file_open_error = "Unable to open the file %s"
messages.file_not_found = "The file to upload is missing"
messages.no_records_to_import = "No records to import"
messages.no_job_to_delete = "No job to delete, maybe it has already been deleted."
messages.title_job_read = "Details of the selected import job"
messages.title_job_list = "List of import items"
messages.file_uploaded = "Import file uploaded"
messages.upload_submit_btn = "Upload Data File"
messages.open_btn = "Open"
messages.view_btn = "View"
messages.delete_btn = "Delete"
messages.item_show_details = "Display Details"
messages.job_total_records = "Total records in the Import Job"
messages.job_records_selected = "Records selected"
messages.job_deleted = "Import job deleted"
messages.job_completed = "Job run on %s. With result of (%s)"
messages.import_file = "Import File"
messages.import_file_comment = "Upload a file formatted according to the Template."
messages.user_name = "User Name"
messages.commit_total_records_imported = "%s records imported"
messages.commit_total_records_ignored = "%s records ignored"
messages.commit_total_errors = "%s records in error"
try:
self.uploadTitle = current.response.s3.crud_strings[self.tablename].title_upload
except:
self.uploadTitle = T("Upload a %s import file" % r.function)
# @todo: correct to switch this off for the whole session?
current.session.s3.ocr_enabled = False
# Reset all errors/warnings
self.error = None
self.warning = None
# CSV upload configuration
if "csv_stylesheet" in attr:
self.csv_stylesheet = attr["csv_stylesheet"]
else:
self.csv_stylesheet = None
self.csv_extra_fields = None
self.csv_extra_data = None
# Environment
|
acsone/mozaik | mozaik_communication/models/__init__.py | Python | agpl-3.0 | 178 | 0 | # -*- coding: utf-8 - | *-
# Copyright 2016 ACSONE SA/NV (<http://a | csone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import mass_mailing_stats
|
apporc/nova | nova/scheduler/filters/disk_filter.py | Python | apache-2.0 | 3,099 | 0.001613 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class DiskFilter(filters.BaseHostFilter):
    """Disk Filter with over subscription flag.

    Passes hosts that have enough usable disk for the requested instance
    type, honouring the disk_allocation_ratio over-subscription factor.
    """

    def _get_disk_allocation_ratio(self, host_state, filter_properties):
        # Global ratio; AggregateDiskFilter overrides this per-aggregate.
        return CONF.disk_allocation_ratio

    @filters.compat_legacy_props
    def host_passes(self, host_state, filter_properties):
        """Filter based on disk usage."""
        instance_type = filter_properties.get('instance_type')
        # Requested disk in MB: root + ephemeral are in GB, swap already in MB.
        requested_disk = (1024 * (instance_type['root_gb'] +
                                  instance_type['ephemeral_gb']) +
                          instance_type['swap'])

        free_disk_mb = host_state.free_disk_mb
        total_usable_disk_mb = host_state.total_usable_disk_gb * 1024

        disk_allocation_ratio = self._get_disk_allocation_ratio(
            host_state, filter_properties)

        disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
        used_disk_mb = total_usable_disk_mb - free_disk_mb
        usable_disk_mb = disk_mb_limit - used_disk_mb

        # Idiom fix: `if not usable >= requested` rewritten as a direct `<`.
        if usable_disk_mb < requested_disk:
            LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
                      "usable disk, it only has %(usable_disk_mb)s MB usable "
                      "disk.", {'host_state': host_state,
                                'requested_disk': requested_disk,
                                'usable_disk_mb': usable_disk_mb})
            return False

        # Record the effective limit so the claim logic can enforce it.
        disk_gb_limit = disk_mb_limit / 1024
        host_state.limits['disk_gb'] = disk_gb_limit
        return True
class AggregateDiskFilter(DiskFilter):
    """AggregateDiskFilter with per-aggregate disk allocation ratio flag.

    Fall back to global disk_allocation_ratio if no per-aggregate setting
    found.
    """

    def _get_disk_allocation_ratio(self, host_state, filter_properties):
        # Gather 'disk_allocation_ratio' metadata from every aggregate the
        # host belongs to.
        aggregate_vals = utils.aggregate_values_from_key(
            host_state,
            'disk_allocation_ratio')
        try:
            ratio = utils.validate_num_values(
                aggregate_vals, CONF.disk_allocation_ratio, cast_to=float)
        except ValueError as e:
            # Malformed aggregate metadata: warn and use the global ratio.
            LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
            ratio = CONF.disk_allocation_ratio

        return ratio
|
zlsun/XX-Net | code/default/launcher/simple_i18n.py | Python | bsd-2-clause | 4,279 | 0.001168 | import locale
import os
import subprocess
import sys
class SimpleI18N:
    """Minimal gettext-style i18n helper.

    Loads ``messages.po`` files and substitutes ``{{ _( "key" ) }}``
    placeholders in template files with the translated strings.

    Fixes over the original implementation:
    * file handles in ``po_loader``/``_render`` are now closed (``with``);
    * bare ``except:`` clauses narrowed to ``except Exception``;
    * ``_render`` no longer loops forever on a malformed placeholder
      (the ``continue`` branches previously never advanced the cursor).
    """

    def __init__(self, lang=None):
        """Use *lang* if given, otherwise auto-detect (fallback en_US)."""
        if lang:
            self.lang = lang
        else:
            self.lang = self.get_os_language()
            if not self.lang:
                self.lang = "en_US"

    def get_os_language(self):
        """Best-effort detection of the OS language code (e.g. 'zh_CN')."""
        try:
            lang_code, code_page = locale.getdefaultlocale()
            # e.g. ('en_GB', 'cp1252'), en_US
            self.lang_code = lang_code
            return lang_code
        except Exception:
            # Mac can fail to run this
            pass

        if sys.platform == "darwin":
            try:
                oot = os.pipe()
                p = subprocess.Popen(
                    ["/usr/bin/defaults", 'read', 'NSGlobalDomain', 'AppleLanguages'],
                    stdout=oot[1])
                p.communicate()
                # NOTE(review): os.read returns bytes on Python 3 while the
                # 'in' checks in get_default_language_code_for_mac assume
                # str — confirm the target interpreter.
                lang_code = self.get_default_language_code_for_mac(
                    os.read(oot[0], 10000))
                self.lang_code = lang_code
                return lang_code
            except Exception:
                pass

        lang_code = 'Unknown'
        return lang_code

    def get_valid_languages(self):
        """Return the language codes shipped with translations."""
        # return ['de_DE', 'en_US', 'es_VE', 'fa_IR', 'ja_JP', 'zh_CN']
        return ['en_US', 'fa_IR', 'zh_CN']

    def get_default_language_code_for_mac(self, lang_code):
        """Map the raw `defaults read` output to a supported language code."""
        if 'zh' in lang_code:
            return 'zh_CN'
        elif 'en' in lang_code:
            return 'en_US'
        elif 'fa' in lang_code:
            return 'fa_IR'
        else:
            return 'Unknown'

    def po_loader(self, file):
        """Parse a .po file into a {msgid: msgstr} dict.

        Only a minimal subset of the PO format is supported: '#' comments
        are skipped and multi-line '"..."' continuations are concatenated.
        """
        po_dict = {}
        with open(file, "r") as fp:
            while True:
                line = fp.readline()
                if not line:
                    break
                if len(line) < 2:
                    continue
                if line.startswith("#"):
                    continue

                if line.startswith("msgid "):
                    key = line[7:-2]
                    value = ""
                    # Collect msgid continuation lines until msgstr starts.
                    while True:
                        line = fp.readline()
                        if not line:
                            break
                        if line.startswith("\""):
                            key += line[1:-2]
                        elif line.startswith("msgstr "):
                            value += line[8:-2]
                            break
                        else:
                            break
                    # Collect msgstr continuation lines.
                    while True:
                        line = fp.readline()
                        if not line:
                            break
                        if line.startswith("\""):
                            value += line[1:-2]
                        else:
                            break

                    if key == "":
                        continue
                    po_dict[key] = value
        return po_dict

    def _render(self, po_dict, file):
        """Substitute ``{{ _( "key" ) }}`` placeholders in *file*.

        Keys missing from *po_dict* (or mapped to an empty string) are
        emitted as the key itself. Malformed placeholders are printed for
        diagnosis, emitted verbatim, and skipped.
        """
        with open(file, "r") as fp:
            content = fp.read()

        out_arr = []
        cp = 0
        while True:
            bp = content.find("{{", cp)
            if bp == -1:
                break
            ep = content.find("}}", bp)
            if ep == -1:
                print(content[bp:])
                break

            def _skip_malformed():
                # Emit the malformed span verbatim and advance past it; the
                # original code looped forever here because cp never moved.
                out_arr.append(content[cp:ep + 2])

            b1p = content.find("_(", bp, ep)
            if b1p == -1:
                print(content[bp:])
                _skip_malformed()
                cp = ep + 2
                continue
            b2p = content.find("\"", b1p + 2, b1p + 4)
            if b2p == -1:
                print(content[bp:])
                _skip_malformed()
                cp = ep + 2
                continue
            e1p = content.find(")", ep - 2, ep)
            if e1p == -1:
                print(content[bp:])
                _skip_malformed()
                cp = ep + 2
                continue
            e2p = content.find("\"", e1p - 2, e1p)
            if e2p == -1:
                print(content[bp:])
                _skip_malformed()
                cp = ep + 2
                continue

            out_arr.append(content[cp:bp])
            key = content[b2p + 1:e2p]
            if po_dict.get(key, "") == "":
                out_arr.append(key)
            else:
                out_arr.append(po_dict[key])
            cp = ep + 2
        out_arr.append(content[cp:])
        return "".join(out_arr)

    def render(self, lang_path, template_file):
        """Render *template_file* with the messages.po of the current language.

        Falls back to an empty dictionary (keys shown as-is) when no .po
        file exists for the language.
        """
        po_file = os.path.join(lang_path, self.lang, "LC_MESSAGES", "messages.po")
        if not os.path.isfile(po_file):
            return self._render(dict(), template_file)
        else:
            po_dict = self.po_loader(po_file)
            return self._render(po_dict, template_file)
|
kaspermarstal/SimpleElastix | Examples/ImageGridManipulation/ImageGridManipulation.py | Python | apache-2.0 | 2,424 | 0.006601 | #!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | implied.
# See the License for the s | pecific language governing permissions and
# limitations under the License.
#
#=========================================================================
from __future__ import print_function
import SimpleITK as sitk
import sys

# Two input images are required on the command line.
if len(sys.argv) < 3:
    print("Usage: " + sys.argv[0] + " <input-1> <input-2>")
    sys.exit(1)

# Two vector images of the same pixel type and dimension are expected.
first_image = sitk.ReadImage(sys.argv[1])
second_image = sitk.ReadImage(sys.argv[2])

# Join the two N-D vector images into a single (N+1)-D image.
joined_image = sitk.JoinSeriesImageFilter().Execute(first_image, second_image)

# Extract the first three channels of the joined image (assuming RGB).
channel_selector = sitk.VectorIndexSelectionCastImageFilter()
channel1_image = channel_selector.Execute(joined_image, 0, sitk.sitkUInt8)
channel2_image = channel_selector.Execute(joined_image, 1, sitk.sitkUInt8)
channel3_image = channel_selector.Execute(joined_image, 2, sitk.sitkUInt8)

# Recompose the channels (should reproduce joined_image).
composed_image = sitk.ComposeImageFilter().Execute(
    channel1_image, channel2_image, channel3_image)

# Select a subregion three equivalent ways.
# 1) The image slicing operator:
sliced_image = composed_image[100:400, 100:400, 0]

# 2) ExtractImageFilter:
extractor = sitk.ExtractImageFilter()
extractor.SetSize([300, 300, 0])
extractor.SetIndex([100, 100, 0])
extracted_image = extractor.Execute(composed_image)

# 3) CropImageFilter (NOTE: unlike ExtractImageFilter, CropImageFilter cannot
# reduce dimensions, so cropped_image is three dimensional with a depth of 1).
cropper = sitk.CropImageFilter()
cropper.SetLowerBoundaryCropSize([100, 100, 0])
cropper.SetUpperBoundaryCropSize(
    [composed_image.GetWidth() - 400, composed_image.GetHeight() - 400, 1])
cropped_image = cropper.Execute(composed_image)
|
RedHatInsights/insights-core | insights/parsers/package_provides_java.py | Python | apache-2.0 | 3,087 | 0.002915 | """
PackageProvidesJava - command ``/bin/echo {java_command_package}``
==================================================================
.. warning::
This module is deprecated, please use
:mod:`insights.parsers.package_provides` instead.
This command reads the output of the pre-command:
``for jp in `/bin/ps auxwww | grep java | grep -v grep| awk '{print $11}' | sort -u`; do echo $jp `readlink -e $jp | xargs rpm -qf`; done``
This command looks for all versions of 'java' running and tries to find the
RPM packages which provide them. The running command and its package name
are stored as properties ``command`` and ``package`` of the object.
The reason why using above pre_command is that we need to recor | d the links
between running_java_command and package which provi | des the java command. In
``ps aux`` output, we can only get what java command starts a java
application, instead of java package. Through this way, when there is jdk
bug, we can detect whether a running java application will be affected.
Typical contents of the pre_command::
/usr/lib/jvm/jre/bin/java java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64
Parsed result::
self.command = "/usr/lib/jvm/jre/bin/java"
self.package = "java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64"
Examples:
>>> command_package = shared[PackageProvidesJava]
>>> command_package.command
'/usr/lib/jvm/jre/bin/java'
>>> command_package.package
'java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64'
Raises:
insights.parsers.ParseException: if there is no java application running
Raises:
insights.parsers.SkipException: if running java command is not provided by package installed through yum or rpm
"""
from insights import parser, CommandParser
from ..parsers import ParseException, SkipException
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.package_provides_java)
class PackageProvidesJava(CommandParser):
"""
.. warning::
This Combiner is deprecated, please use
:class:`insights.parsers.package_provides.PackageProvidesCommand` instead.
Parse the output of pre_command::
``for jp in `/bin/ps auxwww | grep java | grep -v grep| awk '{print $11}' | sort -u`; do echo "$jp `readlink -e $jp | xargs rpm -qf`"; done``.
Attributes:
command (str): The java command that starts application.
package (str): Java package that provides above java command.
"""
def parse_content(self, content):
deprecated(
PackageProvidesJava,
'Please use the :class:`insights.parsers.package_provides.PackageProvidesCommand` instead.'
)
if len(content) == 0:
raise ParseException("Error: ", 'there is not java application running')
l = content[0].split()
if len(l) != 2:
raise SkipException("Error: ",
'current running java command is not provided by package installed through yum or rpm')
self.command = l[0]
self.package = l[1]
|
airbnb/airflow | tests/test_utils/perf/perf_kit/sqlalchemy.py | Python | apache-2.0 | 8,062 | 0.001737 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import time
import traceback
from typing import Callable
from sqlalchemy import event
def _pretty_format_sql(text: str):
    """Return *text* syntax-highlighted for terminal display via pygments."""
    # Imported lazily so pygments is only required when highlighting is used.
    import pygments
    from pygments.formatters.terminal import TerminalFormatter
    from pygments.lexers.sql import SqlLexer

    highlighted = pygments.highlight(code=text, formatter=TerminalFormatter(), lexer=SqlLexer())
    return highlighted.rstrip()
class TraceQueries:
    """
    Tracking SQL queries in a code block.

    :param display_num: If True, displays the query number.
    :param display_time: If True, displays the query execution time.
    :param display_trace: If True, displays the simplified (one-line) stack trace
    :param display_sql: If True, displays the SQL statements
    :param display_parameters: If True, display SQL statement parameters
    :param print_fn: The function used to display the text. By default, ``builtins.print``
    """

    def __init__(
        self,
        *,
        display_num: bool = True,
        display_time: bool = True,
        display_trace: bool = True,
        display_sql: bool = False,
        display_parameters: bool = True,
        print_fn: Callable[[str], None] = print,
    ):
        self.display_num = display_num
        self.display_time = display_time
        self.display_trace = display_trace
        self.display_sql = display_sql
        self.display_parameters = display_parameters
        self.print_fn = print_fn
        self.query_count = 0

    def before_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):  # pylint: disable=unused-argument
        """
        Executed before cursor: push the query start time onto the
        connection and bump the query counter.
        """
        conn.info.setdefault("query_start_time", []).append(time.monotonic())
        self.query_count += 1

    def after_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):  # pylint: disable=unused-argument
        """
        Executed after cursor: pop the matching start time, format the
        requested fields and hand the line to ``print_fn``.
        """
        total = time.monotonic() - conn.info["query_start_time"].pop()
        # BUGFIX: the original code appended a fresh timestamp here that was
        # never popped again, leaking one stale entry into
        # conn.info["query_start_time"] for every executed query.
        # Single stack walk (the original walked the stack twice).
        stack = [f for f in traceback.extract_stack() if "sqlalchemy" not in f.filename]
        file_name = f"{stack[-1].filename}:{stack[-1].name}:{stack[-1].lineno}" if stack else ""
        stack_info = " > ".join(
            f"{f.filename.rpartition('/')[-1]}:{f.name}:{f.lineno}" for f in stack[-7:]
        )
        output_parts = []
        if self.display_num:
            output_parts.append(f"{self.query_count:>3}")
        if self.display_time:
            output_parts.append(f"{total:.5f}")
        if self.display_trace:
            output_parts.extend([f"{file_name}", f"{stack_info}"])
        if self.display_sql:
            sql_oneline = statement.replace("\n", " ")
            output_parts.append(f"{_pretty_format_sql(sql_oneline)}")
        if self.display_parameters:
            output_parts.append(f"{parameters}")
        self.print_fn(" | ".join(output_parts))

    def __enter__(self):
        import airflow.settings

        event.listen(airflow.settings.engine, "before_cursor_execute", self.before_cursor_execute)
        event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)

    def __exit__(self, type_, value, traceback):  # noqa pylint: disable=redefined-outer-name
        import airflow.settings

        event.remove(airflow.settings.engine, "before_cursor_execute", self.before_cursor_execute)
        event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)


# Lowercase alias so the tracker reads naturally as ``with trace_queries():``.
trace_queries = TraceQueries  # pylint: disable=invalid-name
class CountQueriesResult:
    """Mutable holder for the number of queries observed."""

    def __init__(self):
        # Number of queries seen so far.
        self.count = 0


class CountQueries:
    """
    Counts the number of queries sent to Airflow Database in a given context.

    Does not support multiple processes. When a new process is started in context, its queries will
    not be included.

    :param print_fn: The function used to display the text. By default, ``builtins.print``
    """

    def __init__(self, print_fn: Callable[[str], None] = print):
        self.result = CountQueriesResult()
        self.print_fn = print_fn

    def __enter__(self):
        import airflow.settings

        event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
        return self.result

    def __exit__(self, type_, value, traceback):  # noqa pylint: disable=redefined-outer-name
        import airflow.settings

        event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
        self.print_fn(f"Count SQL queries: {self.result.count}")

    def after_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):  # pylint: disable=unused-argument
        """Bump the counter; registered as a SQLAlchemy event hook."""
        self.result.count += 1


# Lowercase alias so the counter reads naturally as ``with count_queries():``.
count_queries = CountQueries  # pylint: disable=invalid-name
if __name__ == "__main__":
    # Example: run a DAG-file parse under both query tracing and counting.
    def case():
        """Parse a generated elastic DAG file with ``DagFileProcessor``."""
        import logging
        from unittest import mock

        from airflow.jobs.scheduler_job import DagFileProcessor

        # Configure the elastic_dag fixture through environment variables.
        with mock.patch.dict(
            "os.environ",
            {
                "PERF_DAGS_COUNT": "200",
                "PERF_TASKS_COUNT": "10",
                "PERF_START_AGO": "2d",
                "PERF_SCHEDULE_INTERVAL": "None",
                "PERF_SHAPE": "no_structure",
            },
        ):
            log = logging.getLogger(__name__)
            processor = DagFileProcessor(dag_ids=[], log=log)
            dag_file = os.path.join(os.path.dirname(__file__), os.path.pardir, "dags", "elastic_dag.py")
            processor.process_file(file_path=dag_file, callback_requests=[])

    # Both context managers are active while the example workload runs.
    with trace_queries(), count_queries():
        case()
|
the-blue-alliance/the-blue-alliance | src/backend/common/consts/media_tag.py | Python | mit | 1,026 | 0 | import enum
from typing import Dict, Optional, Set
@enum.unique
class MediaTag(enum.IntEnum):
    """Tags attached to media items.

    NOTE: ndb keys are based on these values -- never change them!
    """

    CHAIRMANS_VIDEO = 0
    CHAIRMANS_PRESENTATION = 1
    CHAIRMANS_ESSAY = 2


# Every known media tag.
MEDIA_TAGS: Set[MediaTag] = {t for t in MediaTag}

# Human-readable display names.
TAG_NAMES: Dict[MediaTag, str] = {
    MediaTag.CHAIRMANS_VIDEO: "Chairman's Video",
    MediaTag.CHAIRMANS_PRESENTATION: "Chairman's Presentation",
    MediaTag.CHAIRMANS_ESSAY: "Chairman's Essay",
}

# URL slugs used in routes.
TAG_URL_NAMES: Dict[MediaTag, str] = {
    MediaTag.CHAIRMANS_VIDEO: "chairmans_video",
    MediaTag.CHAIRMANS_PRESENTATION: "chairmans_presentation",
    MediaTag.CHAIRMANS_ESSAY: "chairmans_essay",
}

# Tags related to the Chairman's Award.
CHAIRMANS_TAGS: Set[MediaTag] = {
    MediaTag.CHAIRMANS_VIDEO,
    MediaTag.CHAIRMANS_PRESENTATION,
    MediaTag.CHAIRMANS_ESSAY,
}


def get_enum_from_url(url_name: str) -> Optional[MediaTag]:
    """Return the MediaTag whose URL slug is *url_name*, or None.

    Uses ``dict.get`` instead of the original redundant membership test
    followed by a second lookup.
    """
    inversed = {v: k for k, v in TAG_URL_NAMES.items()}
    return inversed.get(url_name)
magul/volontulo | backend/apps/volontulo/admin.py | Python | mit | 315 | 0 | # -*- coding: utf-8 -*-
"""
.. module:: admin
"""
from django.contrib import admin
from apps.volontulo.models import Of | fer
from apps.volontulo.models import Organization
from apps.volontulo.models import UserProfile
# Register the volunteering models with Django's default admin site.
for _model in (Offer, Organization, UserProfile):
    admin.site.register(_model)
|
sputnick-dev/weboob | weboob/browser/filters/standard.py | Python | agpl-3.0 | 24,256 | 0.00198 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import datetime
import re
import unicodedata
from decimal import Decimal, InvalidOperation
from itertools import islice
from collections import Iterator
from dateutil.parser import parse as parse_date
from weboob.capabilities.base import empty
from weboob.tools.compat import basestring
from weboob.exceptions import ParseError
from weboob.browser.url import URL
from weboob.tools.log import getLogger, DEBUG_FILTERS
class NoDefault(object):
def __repr__(self):
return 'NO_DEFAULT'
_NO_DEFAULT = NoDefault()
# Public names exported by ``from weboob.browser.filters.standard import *``.
__all__ = ['FilterError', 'ColumnNotFound', 'RegexpError', 'ItemNotFound',
           'Filter', 'Base', 'Env', 'TableCell', 'RawText',
           'CleanText', 'Lower', 'CleanDecimal', 'Field', 'Regexp', 'Map',
           'DateTime', 'Date', 'Time', 'DateGuesser', 'Duration',
           'MultiFilter', 'CombineDate', 'Format', 'Join', 'Type', 'Eval',
           'BrowserURL', 'Async', 'AsyncLoad']
class FilterError(ParseError):
    """Base exception for errors raised while applying a filter."""
    pass


class ColumnNotFound(FilterError):
    """Raised when a named table column cannot be found."""
    pass


class RegexpError(FilterError):
    """Raised when a regular expression fails to match the input."""
    pass


class ItemNotFound(FilterError):
    """Raised when a looked-up item is missing."""
    pass
class _Filter(object):
    """Base class for all filters.

    Stores an optional default value and a creation counter that preserves
    the declaration order of filter instances.
    """
    _creation_counter = 0

    def __init__(self, default=_NO_DEFAULT):
        self._key = None
        self._obj = None
        self.default = default
        # Remember declaration order of this instance, then advance the
        # class-wide counter.
        self._creation_counter = _Filter._creation_counter
        _Filter._creation_counter += 1

    def __or__(self, o):
        # ``some_filter | fallback``: attach a default value.
        self.default = o
        return self

    def __and__(self, o):
        # ``some_filter & OtherFilter``: chain filters, instantiating a
        # filter class on the fly if needed.
        if isinstance(o, type) and issubclass(o, _Filter):
            o = o()
        o.selector = self
        return o

    def default_or_raise(self, exception):
        """Return the default value if one was given, else raise *exception*."""
        if self.default is _NO_DEFAULT:
            raise exception
        return self.default

    def __str__(self):
        return self.__class__.__name__
def debug(*args):
    """
    A decorator function to provide some debug information
    in Filters.
    It prints by default the name of the Filter and the input value.
    """
    def wraper(function):
        def print_debug(self, value):
            logger = getLogger('b2filters')
            result = ''
            outputvalue = value
            if isinstance(value, list):
                # Lists may contain lxml elements; serialize them for display.
                from lxml import etree
                outputvalue = ''
                first = True
                for element in value:
                    if first:
                        first = False
                    else:
                        outputvalue += ', '
                    if isinstance(element, etree.ElementBase):
                        # NOTE: ``unicode`` is the Python 2 builtin.
                        outputvalue += "%s" % etree.tostring(element, encoding=unicode)
                    else:
                        outputvalue += "%r" % element
            # Prefix with the owning object's id and the attribute key, if set.
            if self._obj is not None:
                result += "%s" % self._obj._random_id
            if self._key is not None:
                result += ".%s" % self._key
            name = str(self)
            result += " %s(%r" % (name, outputvalue)
            # Append the filter's public, non-default keyword attributes.
            for arg in self.__dict__:
                if arg.startswith('_') or arg == u"selector":
                    continue
                if arg == u'default' and getattr(self, arg) == _NO_DEFAULT:
                    continue
                result += ", %s=%r" % (arg, getattr(self, arg))
            result += u')'
            logger.log(DEBUG_FILTERS, result)
            res = function(self, value)
            return res
        return print_debug
    return wraper
class Filter(_Filter):
    """
    Class used to filter on a HTML element given as call parameter to return
    matching elements.

    Filters can be chained, so the parameter supplied to the constructor may
    be an xpath selector string, another filter, a callable, or a plain value.
    """

    def __init__(self, selector=None, default=_NO_DEFAULT):
        super(Filter, self).__init__(default=default)
        self.selector = selector

    @classmethod
    def select(cls, selector, item, obj=None, key=None):
        """Resolve *selector* against *item* according to its type."""
        if isinstance(selector, basestring):
            # A plain string is treated as an xpath expression.
            return item.xpath(selector)
        if isinstance(selector, _Filter):
            # A chained filter: forward the debugging context, then apply it.
            selector._key = key
            selector._obj = obj
            return selector(item)
        if callable(selector):
            return selector(item)
        # Anything else is returned unchanged.
        return selector

    def __call__(self, item):
        selected = self.select(self.selector, item, key=self._key, obj=self._obj)
        return self.filter(selected)

    @debug()
    def filter(self, value):
        """
        This method has to be overridden by children classes.
        """
        raise NotImplementedError()
class _Selector(Filter):
    """Filter that simply returns the selected elements."""

    def filter(self, elements):
        if elements is None:
            return self.default_or_raise(ParseError('Element %r not found' % self.selector))
        return elements
class AsyncLoad(Filter):
    """Select a link from the item and open it asynchronously."""

    def __call__(self, item):
        url = self.select(self.selector, item, key=self._key, obj=self._obj)
        # No link selected means there is nothing to load.
        return item.page.browser.async_open(url) if url else None
class Async(_Filter):
    """Apply a selector to a page loaded asynchronously (see AsyncLoad)."""

    def __init__(self, name, selector=None):
        super(Async, self).__init__()
        self.selector = selector
        self.name = name

    def __and__(self, o):
        # Chaining instantiates a filter class on the fly and makes it the
        # selector applied to the asynchronously loaded page.
        if isinstance(o, type) and issubclass(o, _Filter):
            o = o()
        self.selector = o
        return self

    def __call__(self, item):
        loader = item.loaders[self.name]
        if loader is None:
            return None
        result = loader.result()
        assert result.page is not None, 'The loaded url %s hasn\'t been matched by an URL object' % result.url
        return self.selector(result.page.doc)
class Base(Filter):
    """
    Change the base element used in filters.

    >>> Base(Env('header'), CleanText('./h1')) # doctest: +SKIP
    """

    def __init__(self, base, selector=None, default=_NO_DEFAULT):
        super(Base, self).__init__(selector, default)
        self.base = base

    def __call__(self, item):
        # Resolve the new base element first, then apply the inner selector
        # relative to it.
        new_base = self.select(self.base, item, obj=self._obj, key=self._key)
        return self.selector(new_base)
class Env(_Filter):
    """
    Filter to get environment value of the item.

    It is used for example to get page parameters, or when there is a parse()
    method on ItemElement.
    """

    def __init__(self, name, default=_NO_DEFAULT):
        super(Env, self).__init__(default)
        self.name = name

    def __call__(self, item):
        try:
            # EAFP: a missing key falls back to the declared default
            # (or raises a ParseError when no default was given).
            return item.env[self.name]
        except KeyError:
            return self.default_or_raise(ParseError('Environment variable %s not found' % self.name))
class TableCell(_Filter):
"""
Used with TableElement, it get th | e cell value from its name.
For examp | le:
>>> from weboob.capabilities.bank import Transaction
>>> from weboob.browser.elements import TableElement, ItemElement
>>> class table(TableElement):
... head_xpath = '//table/thead/th'
... item_xpath = '//table/tbody/tr'
... col_date = u'Date'
... col_label = [u'Name', u'Label']
... class item(ItemElement):
... klass = Transaction
... obj_date = Date(TableCell('date'))
... obj_label = Cle |
kaplun/Invenio-OpenAIRE | modules/bibexport/lib/bibexport_method_fieldexporter_webinterface.py | Python | gpl-2.0 | 22,858 | 0.012162 | # -*- coding: utf-8 -*-
## $Id: webmessage_webinterface.py,v 1.13 2008/03/12 16:48:08 tibor Exp $
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""FieldExporter web interface"""
__revision__ = "$Id: webmessage_webinterface.py,v 1.13 2008/03/12 16:48:08 tibor Exp $"
__lastupdated__ = """$Date: 2008/03/12 16:48:08 $"""
import re
from invenio.webpage import page
from invenio.webinterface_handler import WebInterfaceDirectory, \
wash_urlargd
from invenio.urlutils import redirect_to_url
from invenio.config import CFG_SITE_URL, \
CFG_SITE_SECURE_URL
from invenio.dateutils import convert_datestruct_to_datetext, \
convert_datetext_to_datestruct
from invenio.messages import gettext_set_language
from invenio.bibexport_method_fieldexporter import get_css, \
get_navigation_menu, \
perform_request_edit_query, \
perform_request_edit_job, \
perform_request_jobs, \
perform_request_new_job, \
perform_request_save_job, \
perform_request_delete_jobs, \
perform_request_run_jobs, \
perform_request_job_queries, \
perform_request_new_query, \
perform_request_save_query, \
perform_request_delete_queries, \
perform_request_run_queries, \
perform_request_job_history, \
perform_request_job_results, \
perform_request_display_job_result, \
perform_request_download_job_result, \
AccessDeniedError
from invenio.bibexport_method_fieldexporter_dblayer import Job, \
Query, \
JobResult
from invenio.webuser import collect_user_info, \
page_not_authorized
from invenio.access_control_engine import acc_authorize_action
class WebInterfaceFieldExporterPages(WebInterfaceDirectory):
"""Defines the set of /fieldexporter pages."""
_exports = ["", "jobs", "edit_job",
"job_queries", "edit_query",
"job_results", "display_job_result", "download_job_result",
"history", "not_authorized"]
# constats containig URL to the pages
_EXPORT_URL = "%s/exporter" % (CFG_SITE_URL, )
_JOBS_URL = "%s/exporter/jobs" % (CFG_SITE_URL, )
_EDIT_JOB_URL = "%s/exporter/edit_job" % (CFG_SITE_URL, )
_EDIT_QUERY_URL = "%s/exporter/edit_query" % (CFG_SITE_U | RL, )
_JOB_QUERIES_URL = "%s/exporter/job_queries" % (CFG_SITE_URL, )
_JOB_HISTORY_URL = "%s/exporter/history" % (CFG_SITE_URL, )
_NOT_AUTHORIZED_URL = "%s/exporter/not_authorized" % (CFG_SITE_URL, )
_LOGIN_URL = "%s/youraccount/login" % (CFG_SITE_SECURE_URL,)
_NAVTRAIL_EXPORT = """<a href="/expor | ter" class="navtrail">Export</a>"""
    def index(self, req, form):
        """ The function called by default"""
        # Default landing page: send the user to the job history view.
        redirect_to_url(req, self._JOB_HISTORY_URL)

    # Requests to the directory root are served by the same handler.
    __call__ = index
    def jobs(self, req, form):
        """Displays all the jobs of a given user
        and allows creation, deletion and execution of jobs"""
        # NOTE(review): original indentation was lost; branch nesting below
        # is reconstructed from the control flow -- verify against upstream.
        argd = wash_urlargd(form, {
            "new_button": (str, ""),
            "run_button": (str, ""),
            "delete_button": (str, ""),
            "selected_jobs": (list, "")
        })
        # load the right message language
        language = argd["ln"]
        _ = gettext_set_language(language)
        self._check_user_credentials(req, language)
        user_id = self._get_user_id(req)
        try:
            # if the form is submitted through some of the buttons
            # we should perform the appropriate action
            if argd["new_button"]:
                # Creation: jump straight to the job editing page.
                self._redirect_to_page(req, self._EDIT_JOB_URL, language)
            elif argd["delete_button"]:
                job_ids = argd["selected_jobs"]
                perform_request_delete_jobs(job_ids = job_ids,
                                            user_id = user_id,
                                            language = language)
            elif argd["run_button"]:
                job_ids = argd["selected_jobs"]
                perform_request_run_jobs(job_ids = job_ids,
                                         user_id = user_id,
                                         language = language)
                # After running, show the results in the history view.
                self._redirect_to_page(req, self._JOB_HISTORY_URL, language)
            user_id = self._get_user_id(req)
            body = perform_request_jobs(user_id = user_id,
                                        language = language)
        except AccessDeniedError:
            self._redirect_to_not_authorised_page(req, language)
        return page(title = _("Export Job Overview"),
                    metaheaderadd = get_css(),
                    body = body,
                    req = req,
                    navmenuid = "fieldexporter",
                    titleprologue = get_navigation_menu(language),
                    navtrail = self._NAVTRAIL_EXPORT,
                    language = language)
def edit_job(self, req, form):
"""Edits an existing job or creates a new one"""
# Create an empty job and use its default values
# to init missing parameters
job = Job()
argd = wash_urlargd(form,
{"job_name": (str, job.get_name()),
"output_directory": (str, job.get_output_directory()),
"job_frequency": (int, job.get_frequency()),
"output_format": (int, job.get_output_format()),
"last_run": (str, convert_datestruct_to_datetext(job.get_last_run())),
"id": (int, job.get_id()),
"save_button": (str, ""),
"cancel_button": (str, ""),
"edit_queries_button": (str, "")
})
language = argd["ln"]
# load the right message language
_ = gettext_set_language(language)
self._check_user_credentials(req, language)
user_id = self._get_user_id(req)
job_id = argd["id"]
job = Job(job_id = job_id,
name = argd["job_name"],
frequency = argd["job_frequency"],
output_format = argd["output_format"],
last_run = convert_datetext_to_datestruct(argd["last_run"]),
output_directory = argd["output_directory"])
try:
if argd["cancel_button"]:
self._redirect_to_page(req, self._JOBS_URL, language)
elif argd["save_button"]:
perform_request_save_job(job = job,
|
QISKit/qiskit-sdk-py | qiskit/transpiler/fencedobjs.py | Python | apache-2.0 | 2,608 | 0.002301 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Fenced objects are wraps for raising TranspilerAccessError when they are modified."""
from .exceptions import TranspilerAccessError
class FencedObject():
    """Wrap *instance* and raise TranspilerAccessError whenever one of the
    names in *attributes_to_fence* is accessed on it."""

    def __init__(self, instance, attributes_to_fence):
        self._wrapped = instance
        self._attributes_to_fence = attributes_to_fence

    def __getattribute__(self, name):
        # ``object.__getattribute__`` is used throughout to avoid recursing
        # back into this override.
        object.__getattribute__(self, '_check_if_fenced')(name)
        return getattr(object.__getattribute__(self, '_wrapped'), name)

    def __getitem__(self, key):
        object.__getattribute__(self, '_check_if_fenced')('__getitem__')
        return object.__getattribute__(self, '_wrapped')[key]

    def __setitem__(self, key, value):
        object.__getattribute__(self, '_check_if_fenced')('__setitem__')
        object.__getattribute__(self, '_wrapped')[key] = value

    def _check_if_fenced(self, name):
        """
        Checks if the attribute name is in the list of attributes to protect. If so, raises
        TranspilerAccessError.

        Args:
            name (string): the attribute name to check

        Raises:
            TranspilerAccessError: when name is in the list of attributes to protect.
        """
        fenced_names = object.__getattribute__(self, '_attributes_to_fence')
        if name in fenced_names:
            raise TranspilerAccessError("The fenced %s has the property %s protected" %
                                        (type(object.__getattribute__(self, '_wrapped')), name))
class FencedPropertySet(FencedObject):
    """ A property set that cannot be written (via __setitem__) """
    def __init__(self, property_set_instance):
        # Only item assignment is fenced; reads pass through unchanged.
        super().__init__(property_set_instance, ['__setitem__'])


class FencedDAGCircuit(FencedObject):
    """ A dag circuit that cannot be modified (via remove_op_node) """
    # FIXME: add more fenced methods of the dag after dagcircuit rewrite
    def __init__(self, dag_circuit_instance):
        super().__init__(dag_circuit_instance, ['remove_op_node'])
|
publica-io/django-publica-pages | pages/migrations/0001_initial.py | Python | bsd-3-clause | 1,903 | 0.002627 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import entropy.mixins
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Page`` model with
    # authorship, publishing and template-selection fields.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('templates', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField(default=b'', blank=True)),
                ('title', models.CharField(max_length=255)),
                ('short_title', models.CharField(max_length=255, blank=True)),
                ('slug', models.SlugField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('enabled', models.BooleanField(default=False, db_index=True)),
                ('featured', models.BooleanField(default=False)),
                ('published_at', models.DateField(null=True)),
                ('created_by', models.ForeignKey(related_name='pages_page_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='pages_page_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('preview_template', models.ForeignKey(related_name='pages_page_preview_templates', blank=True, to='templates.Template', null=True)),
                ('template', models.ForeignKey(related_name='pages_page_templates', blank=True, to='templates.Template', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(entropy.mixins.BaseSlugMixin, models.Model),
        ),
    ]
|
be-cloud-be/horizon-addons | partner-contact/partner_contact_weight/models/res_partner.py | Python | agpl-3.0 | 534 | 0 | # -*- coding: utf-8 -*-
# Copyright 2016 Ursa Information Systems <http://ursainfosystems.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import fields, mo | dels
class ResPartner(models.Model):
_inherit = 'res.partner'
weight = fields.Float("Weight")
weight_uom = fields.Many2one(
"product.uom", "Weight UoM",
domain=lambda self: [('category_id', '=',
| self.env.ref('product.product_uom_categ_kgm').id)
]
)
|
MLAB-project/weewx | bin/weewx/drivers/ws23xx.py | Python | gpl-3.0 | 79,475 | 0.004328 | #!usr/bin/env python
#
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Kenneth Lavrsen for the Open2300 implementation:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/WebHome
# description of the station communication interface:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSAPI
# memory map:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSMemoryMap
#
# Thanks to Russell Stuart for the ws2300 python implementation:
# http://ace-host.stuart.id.au/russell/files/ws2300/
# and the map of the station memory:
# http://ace-host.stuart.id.au/russell/files/ws2300/memory_map_2300.txt
#
# This immplementation copies directly from Russell Stuart's implementation,
# but only the parts required to read from and write to the weather station.
"""Classes and functions for interfacing with WS-23xx weather stations.
LaCrosse made a number of stations in the 23xx series, including:
WS-2300, WS-2308, WS-2310, WS-2315, WS-2317, WS-2357
The stations were also sold as the TFA Matrix and TechnoLine 2350.
The WWVB receiver is located in the console.
To synchronize the console and sensors, press and hold the PLUS key for 2
seconds. When console is not synchronized no data will be received.
To do a factory reset, press and hold PRESSURE and WIND for 5 seconds.
A single bucket tip is 0.0204 in (0.518 mm).
The station has 175 history records. That is just over 7 days of data with
the default history recording interval of 60 minutes.
The station supports both wireless and wired communication between the
sensors and a station console. Wired connection updates data every 8 seconds.
Wireless connection updates data in 16 to 128 second intervals, depending on
wind speed and rain activity.
The connection type can be one of 0=cable, 3=lost, 15=wireless
sensor update frequency:
32 seconds when wind speed > 22.36 mph (wireless)
128 seconds when wind speed < 22.36 mph (wireless)
10 minutes (wireless after 5 failed attempts)
8 seconds (wired)
console update frequency:
15 seconds (pressure/temperature)
20 seconds (humidity)
It is possible to increase the rate of wireless updates:
http://www.wxforum.net/index.php?topic=2196.0
Sensors are connected by unshielded phone cables. RF interference can cause
random spikes in data, with one symptom being values of 25.5 m/s or 91.8 km/h
for the wind speed. Unfortunately those values are within the sensor limits
of 0-113 mph (50.52 m/s or 181.9 km/h). To reduce the number of spikes in
data, replace with shielded cables:
http://www.lavrsen.dk/sources/weather/windmod.htm
The station records wind speed and direction, but has no notion of gust.
The station calculates windchill and dewpoint.
The station has a serial connection to the computer.
This driver does not keep the serial port open for long periods. Instead, the
driver opens the serial port, reads data, then closes the port.
This driver polls the station. Use the polling_interval parameter to specify
how often to poll for data. If not specified, the polling interval will adapt
based on connection type and status.
USB-Serial Converters
With a USB-serial converter one can connect the station to a computer with
only USB ports, but not every converter will work properly. Perhaps the two
most common converters are based on the Prolific and FTDI chipsets. Many
people report better luck with the FTDI-based converters. Some converters
that use the Prolific chipset (PL2303) will work, but not all of them.
Known to work: ATEN UC-232A
Bounds checking
wind speed: 0-113 mph
wind direction: 0-360
humidity: 0-100
temperature: ok if not -22F and humidity is valid
dewpoint: ok if not -22F and humidity is valid
barometer: 25-35 inHg
rain rate: 0-10 in/hr
Discrepancies Between Implementations
As of December 2013, there are significant differences between the open2300,
wview, and ws2300 implementations. Current version numbers are as follows:
open2300 1.11
ws2300 1.8
wview 5.20.2
History Interval
The factory default is 60 minutes. The value stored in the console is one
less than the actual value (in minutes). So for the factory default of 60,
the console stores 59. The minimum interval is 1.
ws2300.py reports the actual value from the console, e.g., 59 when the
interval is 60. open2300 reports the interval, e.g., 60 when the interval
is 60. wview ignores the interval.
Detecting Bogus Sensor Values
wview queries the station 3 times for each sensor then accepts the value only
if the three values were close to each other.
open2300 sleeps 10 seconds if a wind measurement indicates invalid or overflow.
The ws2300.py implementation includes overflow and validity flags for values
from the wind sensors. It does not retry based on invalid or overflow.
Wind Speed
There is disagreement about how to calculate wind speed and how to determine
whether the wind speed is valid.
This driver introduces a WindConversion object that uses open2300/wview
decoding so that wind speeds match that of open2300/wview. ws2300 1.8
incorrectly uses bcd2num instead of bin2num. This bug is fixed in this driver.
The memory map indicates the following:
addr smpl description
0x527 0 Wind overflow flag: 0 = normal
0x528 0 Wind minimum code: 0=min, 1=--.-, 2=OFL
0x529 0 Windspeed: binary nibble 0 [m/s * 10]
0x52A 0 Windspeed: binary nibble 1 [m/s * 10]
0x52B 0 Windspeed: binary nibble 2 [m/s * 10]
0x52C 8 Wind Direction = nibble * 22.5 degrees
0x52D 8 Wind Direction 1 measurement ago
0x52E 9 Wind Direction 2 measurement ago
0x52F 8 Wind Direction 3 measurement ago
0x530 7 Wind Direction 4 measurement ago
0x531 7 Wind Direction 5 measurement ago
0x532 0
wview 5.20.2 implementation (wview apparently copied from open2300):
read 3 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
fail
} else {
dir = (x[2] >> 4) * 22.5
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0 * 2.23693629)
maxdir = dir
maxspeed = speed
}
open2300 1.10 implementation:
read 6 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
0x52a x[3]
0x52b x[4]
0x52c x[5]
if ((x[0] != 0x00) ||
((x[1] == | 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
sleep 10
} else {
dir = x[2] >> 4
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0)
dir0 = (x[2] >> 4) * 22.5
dir1 = (x[3] & 0xf) * 22.5
dir2 = (x[3] >> 4) * | 22.5
dir3 = (x[4] & 0xf) * 22.5
dir4 = (x[4] >> 4) * 22.5
dir5 = (x[5] & 0xf) * 22.5
}
ws2300.py 1.8 implementation:
read 1 nibble starting at 0x527
read 1 nibble starting at 0x528
read 4 nibble starting at 0x529
read 3 nibble starting at 0x529
read 1 nibble starting at 0x52c
read 1 nibble starting at 0x52d
read 1 nibble starting at 0x52e
read 1 nibble starting at 0x52f
read 1 nibble starting at 0x530
read 1 nibble starting at 0x531
0x527 overflow
0x528 validity
0x529 speed[0]
0x52a speed[1]
0x52b speed[2]
0x52c dir[0]
speed: ((x[2] * 100 + x[1] * 10 + x[0]) % 1000) / 10
velocity: (x[2] * 100 + x[1] * 10 + x[0]) / 10
dir = data[0] * 22.5
speed = (bcd2num(data) % 10**3 + 0) / 10**1
velocity = (bcd2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
bcd2num([a,b,c]) -> c*100+b*10+a
"""
# TODO: use pyserial instead of LinuxSerialPort
# TODO: put the __enter__ and __exit__ scaffolding on serial port, not Station
# FIXME: unless we can get setTime to work, just ignore the console clock
# FIXME: detect bogus wind speed/direction
# i see these when the wind instrument is disconnected:
# ws 26.399999
# wsh 21
# w0 135
from __future__ import with_statement
import syslog
import time
import string
import fcntl
import os
import select
import struct
import termios
import tty
import weeutil.weeutil
import weewx.drivers
import weewx.wxformulas
DRIVER_NAME = 'WS23xx'
DRIVER_VERSION = '0.26rc1'
def loader(config_dict, _):
return WS23xxDriver(config_dict=config_dict, **config_dict[DRIVER_NAME])
def configurator_loader(_):
return WS23xxConfigurator()
def confeditor_loader():
return WS23xxConfEditor()
DEFAULT_PORT |
jhs/strangle | test/testlibbind_ns_msg.py | Python | gpl-2.0 | 2,294 | 0.01918 | #!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNotice | Invalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(t | ype(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
|
ncoghlan/pip | pip/utils/__init__.py | Python | mit | 25,452 | 0.000118 | from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path', 'canonicalize_name',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
whi | le 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options) | )
)
else:
return response
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=4096):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
""" |
pytroll/pytroll-db | trolldb/hl_file.py | Python | gpl-3.0 | 7,220 | 0.00097 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2011, 2012, 2014, 2015.
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# This file is part of pytroll.
# Pytroll is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# Pytroll is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# pytroll. If not, see <http://www.gnu.org/licenses/>.
import pytroll_db as db
from sqlalchemy.orm.exc import NoResultFound
from datetime import datetime
import shapely
import numpy as np
from geoalchemy2.shape import from_shape
import logging
logger = logging.getLogger(__name__)
def area_def2boundary(area_def, boundary_id, session):
"""Convert a pyresample *area_def* to a db Boundary object
"""
# check if srid is there, otherwise add it
try:
new_srs = session.query(db.SpatialRefSys).filter_by(
proj4text=area_def.proj4_string).one()
except NoResultFound:
logger.debug("Can't find srid, adding it")
# add it
from osgeo import osr
srs = osr.SpatialReference()
srs.ImportFromProj4(area_def.proj4_string)
srs.SetProjCS(area_def.proj_id)
try:
srs.SetWellKnownGeogCS(area_def.proj_dict['ellps'])
except KeyError:
pass
wkt = srs.ExportToWkt()
last_srid = session.query(db.SpatialRefSys).order_by(
db.SpatialRefSys.srid.desc()).first().srid
new_srs = db.SpatialRefSys(srid=last_srid + 1,
auth_name="smhi",
auth_srid=last_srid + 1,
srtext=wkt,
proj4text=area_def.proj4_string)
session.add(new_srs)
# create the boundary, with the right srid
# lon_bound, lat_bound = area_def.get_boundary_lonlats()
# lons = np.concatenate((lon_bound.side1[:-1],
# lon_bound.side2[:-1],
# lon_bound.side3[:-1],
# lon_bound.side4[:-1]))
# lats = np.concatenate((lat_bound.side1[:-1],
# lat_bound.side2[:-1],
# lat_bound.side3[:-1],
# lat_bound.side4[:-1]))
corners = [(area_def.area_extent[0], area_def.area_extent[1]),
(area_def.area_extent[0], area_def.area_extent[3]),
(area_def.area_extent[2], area_def.area_extent[3]),
(area_def.area_extent[2], area_def.area_extent[1])]
poly = shapely.geometry.asPolygon(corners)
wkb = from_shape(poly, srid=new_srs.srid)
return db.Boundary(boundary_id, area_def.name, wkb)
class File(object):
def __init__(self, uid, dbm, filetype=None, fileformat=None):
self.uid = uid
self.dbm = dbm
try:
self._file = dbm.session.query(db.File).\
filter(db.File.uid == self.uid).one()
except NoResultFound:
self._file = self.dbm.create_file(self.uid,
file_type_name=filetype,
file_format_name=fileformat,
creation_time=datetime.utcnow())
self.dbm.save()
def add_bound(self, area_def):
# find if the boundary is already there
try:
bound = self.dbm.session.query(db.Boundary).filter(
db.Boundary.boundary_name == area_def.name).one()
except NoResultFound:
try:
bid = self.dbm.session.query(db.Boundary).order_by(
db.Boundary.boundary_id.desc()).first().boundary_id + 1
except AttributeError:
bid = 1
bound = area_def2boundary(area_def, bid, self.dbm.session)
self.dbm.session.add(bound)
self._file.boundary.append(bound)
self.dbm.save()
def __setitem__(self, key, val):
if key == "URIs":
uris = self.dbm.session.query(db.FileURI).\
filter(db.FileURI.uid == self.uid).all()
uri_vals = [i.uri for i in uris]
# adding new uris
for uri in val:
if uri not in uri_vals:
self.dbm.create_file_uri(uid=self.uid, URI=uri)
# deleting old uris
for uri, uri_obj in zip(uri_vals, uris):
if uri not in val:
self.dbm.session.delete(uri_obj)
elif key == "format":
fileformat = self.dbm.get_file_format(val)
self._file.file_format = fileformat
elif key == "type":
filetype = self.dbm.get_file_format(val)
self._file.file_type = filetype
elif key == "area":
self.add_bound(val)
elif key == "sub_satellite_track":
value = 'LINESTRING ('
for i, item in enumerate(val):
if i == 0:
value += '%s %s' % (item[0], item[1])
else:
value += ', %s %s' % (item[0], item[1])
value += ')'
wkt_o = shapely.wkt.loads(value)
p_track = self.dbm.get_parameter('sub_satellite_track')
try:
self.dbm.session.query(db.ParameterLinestring).join(db.Parameter).filter(
db.ParameterLinestring.uid == self.uid).filter(db.Parameter.parameter_name == key).one().data_value
except NoResultFound:
self.dbm.create_parameter_linestring(wkt_o,
uid=self.uid,
| parameter=p | _track)
else:
try:
self.dbm.session.query(db.ParameterValue).join(db.Parameter).filter(
db.ParameterValue.uid == self.uid).filter(db.Parameter.parameter_name == key).one().data_value
except NoResultFound:
self.dbm.create_parameter_value(uid=self.uid,
parameter_name=key,
data_value=val,
creation_time=datetime.utcnow())
self.dbm.save()
def __getitem__(self, key):
if key == "URIs":
return [i.uri for i in self.dbm.session.query(db.FileURI).filter(db.FileURI.uid == self.uid)]
elif key == "type":
return self._file.file_type.file_type_name
elif key == "format":
return self._file.file_format.file_format_name
else:
return self.dbm.session.query(db.ParameterValue).join(db.Parameter).filter(db.ParameterValue.uid == self.uid).filter(db.Parameter.parameter_name == key).one().data_value
|
CodigoSur/cyclope | cyclope/apps/feeds/migrations/0001_initial.py | Python | gpl-3.0 | 8,615 | 0.008358 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Feed'
db.create_table('feeds_feed', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=250, db_index=True)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique=True, max_length=50, populate_from=None, unique_with=(), db_index=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 2, 23, 17, 40, 5, 907597))),
('modification_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 2, 23, 17, 40, 5, 907653), auto_now=True, blank=True)),
('allow_comments', self.gf('django.db.models.fields.CharField')(default='SITE', max_length=4)),
('url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=250, db_index=True)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('feeds', ['Feed'])
def backwards(self, orm):
# Deleting model 'Feed'
db.delete_table('feeds_feed')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), |
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank' | : 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'feeds.feed': {
'Meta': {'object_name': 'Feed'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '4'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 23, 17, 40, 5, 907597)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 23, 17, 40, 5, 907653)', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'})
|
enjaz/enjaz | niqati/migrations/0002_add_categories.py | Python | agpl-3.0 | 1,163 | 0.003484 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_categories(apps, schema_editor):
Category = apps.get_model('niqati', 'Category')
# We are going to add categories only if none already exists.
if not Category.objects.exists():
Category.objects.create(label="Idea",
ar_label="فكرة",
po | ints=3)
Category.objects.create(label="Organizer",
ar_label="تنظي | م",
points=2)
Category.objects.create(label="Participation",
ar_label="مشاركة",
points=1)
def remove_categories(apps, schema_editor):
Category = apps.get_model('niqati', 'Category')
Category.objects.filter(label__in=["Idea", "Organizer", "Participation"]).delete()
class Migration(migrations.Migration):
dependencies = [
('niqati', '0001_initial'),
]
operations = [
migrations.RunPython(
add_categories,
reverse_code=remove_categories),
]
|
xiaoxq/apollo | modules/tools/restore_video_record/restore_video_record.py | Python | apache-2.0 | 9,788 | 0.002043 | #!/usr/bin/env python3
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
""" Restore record file by replacing its video frames with image frames. """
import datetime
import errno
import glob
import os
import shutil
import time
from absl import app
from absl import flags
from absl import logging
import cv2
from cyber.python.cyber_py3.record import RecordReader, RecordWriter
from modules.drivers.proto.sensor_image_pb2 import CompressedImage
flags.DEFINE_string('from_record', None, 'The source record file that needs to be restored.')
flags.DEFINE_string('to_record', None, 'The restored record file.')
# The compressed channels that have videos we need to decode
IMAGE_FRONT_6MM_CHANNEL = '/apollo/sensor/camera/front_6mm/image/compressed'
IMAGE_FRONT_12MM_CHANNEL = '/apollo/sensor/camera/front_12mm/image/compressed'
IMAGE_REAR_6MM_CHANNEL = '/apollo/sensor/camera/rear_6mm/image/compressed'
IMAGE_LEFT_FISHEYE_CHANNEL = '/apollo/sensor/camera/left_fisheye/image/compressed'
IMAGE_RIGHT_FISHEYE_CHANNEL = '/apollo/sensor/camera/right_fisheye/image/compressed'
VIDEO_FRONT_6MM_CHANNEL = '/apollo/sensor/camera/front_6mm/video/compressed'
VIDEO_FRONT_12MM_CHANNEL = '/apollo/sensor/camera/front_12mm/video/compressed'
VIDEO_REAR_6MM_CHANNEL = '/apollo/sensor/camera/rear_6mm/video/compressed'
VIDEO_LEFT_FISHEYE_CHANNEL = '/apollo/sensor/camera/left_fisheye/video/compressed'
VIDEO_RIGHT_FISHEYE_CHANNEL = '/apollo/sensor/camera/right_fisheye/video/compressed'
VIDEO_CHANNELS = [
IMAGE_FRONT_6MM_CHANNEL,
IMAGE_FRONT_12MM_CHANNEL,
IMAGE_REAR_6MM_CHANNEL,
IMAGE_LEFT_FISHEYE_CHANNEL,
IMAGE_RIGHT_FISHEYE_CHANNEL,
VIDEO_FRONT_6MM_CHANNEL,
VIDEO_FRONT_12MM_CHANNEL,
VIDEO_REAR_6MM_CHANNEL,
VIDEO_LEFT_FISHEYE_CHANNEL,
VIDEO_RIGHT_FISHEYE_CHANNEL,
]
VIDEO_IMAGE_MAP = {
IMAGE_FRONT_6MM_CHANNEL: IMAGE_FRONT_6MM_CHANNEL,
IMAGE_FRONT_12MM_CHANNEL: IMAGE_FRONT_12MM_CHANNEL,
IMAGE_REAR_6MM_CHANNEL: IMAGE_REAR_6MM_CHANNEL,
IMAGE_LEFT_FISHEYE_CHANNEL: IMAGE_LEFT_FISHEYE_CHANNEL,
IMAGE_RIGHT_FISHEYE_CHANNEL: IMAGE_RIGHT_FISHEYE_CHANNEL,
VIDEO_FRONT_6MM_CHANNEL: IMAGE_FRONT_6MM_CHANNEL,
VIDEO_FRONT_12MM_CHANNEL: IMAGE_FRONT_12MM_CHANNEL,
VIDEO_REAR_6MM_CHANNEL: IMAGE_REAR_6MM_CHANNEL,
VIDEO_LEFT_FISHEYE_CHANNEL: IMAGE_LEFT_FISHEYE_CHANNEL,
VIDEO_RIGHT_FISHEYE_CHANNEL: IMAGE_RIGHT_FISHEYE_CHANNEL,
}
class VideoConverter(object):
"""Convert video into images."""
def __init__(self, work_dir, topic):
# Initial type of video frames that defined in apollo video drive proto
# The initial frame has meta data information shared by the following tens of frames
self.initial_frame_type = 1
self.image_ids = []
self.first_initial_found = False
video_dir = os.path.join(work_dir, 'videos')
self.video_file = os.path.join(video_dir, '{}.h265'.format(topic))
self.image_dir = '{}_images'.format(self.video_file)
makedirs(video_dir)
makedirs(self.image_dir)
self.frame_writer = open(self.video_file, 'wb+')
def close_writer(self):
"""Close the video frames writer"""
self.frame_writer.close()
def write_frame(self, py_message):
"""Write video frames into binary format file"""
if not self.first_initial_found:
proto = image_message_to_proto(py_message)
if proto.frame_type != self.initial_frame_type:
return
self.first_initial_found = True
self.frame_writer.write(py_message.message)
self.image_ids.append(get_message_id(py_message.timestamp, py_message.topic))
def decode(self):
"""Decode video file into images"""
video_decoder_exe = '/apollo/bazel-bin/modules/drivers/video/tools/decode_video/video2jpg'
return_ | code = os.system('{} --input_video={} --output_dir={}'.format(
video_decoder_exe, self.video_file, self.image_dir))
if return_code != 0:
logging.error('Failed to execute video2jpg for video {}'.format(self.video_file))
return False
g | enerated_images = sorted(glob.glob('{}/*.jpg'.format(self.image_dir)))
if len(generated_images) != len(self.image_ids):
logging.error('Mismatch between original {} and generated frames {}'.format(
len(self.image_ids), len(generated_images)))
return False
for idx in range(len(generated_images)):
os.rename(generated_images[idx], os.path.join(self.image_dir, self.image_ids[idx]))
return True
def move_images(self, overall_image_dir):
"""Move self's images to overall image dir"""
for image_file in os.listdir(self.image_dir):
shutil.move(os.path.join(self.image_dir, image_file),
os.path.join(overall_image_dir, image_file))
def restore_record(input_record, output_record):
"""Entrance of processing."""
# Define working dirs that store intermediate results in the middle of processing
work_dir = 'restore_video_work_dir_{}'.format(
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S'))
# Decode videos
converters = {}
for topic in VIDEO_CHANNELS:
converters[topic] = VideoConverter(work_dir, topic)
reader = RecordReader(input_record)
for message in reader.read_messages():
if message.topic in VIDEO_CHANNELS:
converters[message.topic].write_frame(message)
image_dir = os.path.join(work_dir, 'images')
makedirs(image_dir)
for topic in VIDEO_CHANNELS:
converters[topic].close_writer()
converters[topic].decode()
converters[topic].move_images(image_dir)
# Restore target record file
writer = RecordWriter(0, 0)
writer.open(output_record)
topic_descs = {}
counter = 0
reader = RecordReader(input_record)
for message in reader.read_messages():
message_content = message.message
message_topic = message.topic
if message.topic in VIDEO_CHANNELS:
message_content = retrieve_image(image_dir, message)
message_topic = VIDEO_IMAGE_MAP[message.topic]
if not message_content:
continue
counter += 1
if counter % 1000 == 0:
logging.info('rewriting {} th message to record {}'.format(counter, output_record))
writer.write_message(message_topic, message_content, message.timestamp)
if message_topic not in topic_descs:
topic_descs[message_topic] = reader.get_protodesc(message_topic)
writer.write_channel(message_topic, message.data_type, topic_descs[message_topic])
writer.close()
logging.info('All Done, converted record: {}'.format(output_record))
def retrieve_image(image_dir, message):
"""Actually change the content of message from video bytes to image bytes"""
message_id = get_message_id(message.timestamp, message.topic)
message_path = os.path.join(image_dir, message_id)
if not os.path.exists(message_path):
logging.error('message {} not found in image dir'.format(message_id))
return None
img_bin = cv2.imread(message_path)
# Check by using NoneType explicitly to avoid ambitiousness
if img_bin is None:
logging.error('failed to read original message: {}'.format(message_path))
return None
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]
|
digwanderlust/pants | contrib/cpp/src/python/pants/contrib/cpp/tasks/cpp_compile.py | Python | apache-2.0 | 3,678 | 0.006797 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.build_environment import get_buildroot
from pants.base.workunit import WorkUnit
from pants.util.dirutil import safe_mkdir
from pants.contrib.cpp.tasks.cpp_task import CppTask
class CppCompile(CppTask):
"""Compiles object files from C++ sources."""
@classmethod
def register_options(cls, register):
super(CppCompile, cls).register_options(register)
register('--cc-options',
help='Append these options to the compiler command line.')
register('--cc-extensions',
default=['cc', 'cxx', 'cpp'],
help=('The list of extensions (without the .) to consider when '
'determining if a file is a C++ source file.'))
@classmethod
def product_types(cls):
return ['objs']
@property
def cache_target_dirs(self):
return True
def execute(s | elf):
"""Compile all sources in a given target to object files."""
def is_cc(source):
_, ext = os.path.splitext(source)
return ext[1:] in self.get_options().cc_exten | sions
targets = self.context.targets(self.is_cpp)
# Compile source files to objects.
with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
obj_mapping = self.context.products.get('objs')
for vt in invalidation_check.all_vts:
for source in vt.target.sources_relative_to_buildroot():
if is_cc(source):
if not vt.valid:
with self.context.new_workunit(name='cpp-compile', labels=[WorkUnit.MULTITOOL]):
# TODO: Parallelise the compilation.
# TODO: Only recompile source files that have changed since the
# object file was last written. Also use the output from
# gcc -M to track dependencies on headers.
self._compile(vt.target, vt.results_dir, source)
objpath = self._objpath(vt.target, vt.results_dir, source)
obj_mapping.add(vt.target, vt.results_dir).append(objpath)
def _objpath(self, target, results_dir, source):
abs_source_root = os.path.join(get_buildroot(), target.target_base)
abs_source = os.path.join(get_buildroot(), source)
rel_source = os.path.relpath(abs_source, abs_source_root)
root, _ = os.path.splitext(rel_source)
obj_name = root + '.o'
return os.path.join(results_dir, obj_name)
def _compile(self, target, results_dir, source):
"""Compile given source to an object file."""
obj = self._objpath(target, results_dir, source)
abs_source = os.path.join(get_buildroot(), source)
# TODO: include dir should include dependent work dir when headers are copied there.
include_dirs = []
for dep in target.dependencies:
if self.is_library(dep):
include_dirs.extend([os.path.join(get_buildroot(), dep.target_base)])
cmd = [self.cpp_toolchain.compiler]
cmd.extend(['-c'])
cmd.extend(('-I{0}'.format(i) for i in include_dirs))
cmd.extend(['-o' + obj, abs_source])
if self.get_options().cc_options != None:
cmd.extend([self.get_options().cc_options])
# TODO: submit_async_work with self.run_command, [(cmd)] as a Work object.
with self.context.new_workunit(name='cpp-compile', labels=[WorkUnit.COMPILER]) as workunit:
self.run_command(cmd, workunit)
self.context.log.info('Built c++ object: {0}'.format(obj))
|
maxrbc/daedalus | scripts/make_features_quatification_table.py | Python | bsd-3-clause | 1,805 | 0.060942 | #!/usr/bin/env python
from __future__ import division
import argparse
import os
import shutil as sh
import daedalus.lib.utils as du
#make_quantification_table
def parse_argument():
parser = argparse.ArgumentParser()
parser.add_argument("-i",help = "",required = True , dest = "features_path" , )
pars | er.add_argument("-n",help = "total number of features from file to read",default = 99999, dest = "features" , type = int)
parser.add_argument("--accuracy",help = "will stop reading features at this 'accuracy score'",default = 2,dest = "accuracy" , type = str)
parser.add_argument("-o",help | = "The filename to where the results will be written to. ", default = ".", dest = "working_path", type = str)
parser.add_argument("-f",help = "force override of the output files if they exist", action='store_true' , dest = "override")
parser.add_argument("-v",help = " To run verbose form. Useful for debuging ", action= "store_true",dest = "verbose")
opt = parser.parse_args()
return opt
def main():
opt = parse_argument()
#making workspace for the script to work on
if not os.path.exists(opt.working_path):
os.mkdir(opt.working_path)
elif opt.override:
sh.rmtree(opt.working_path)
os.mkdir(opt.working_path)
else:
raise OSError(" File already exist!")
if os.path.exists(opt.features_path):
du.quantify_occurences_through_table(
opt.features_path,
os.listdir(opt.features_path),
opt.accuracy,
opt.features,
opt.working_path
)
elif type([]) is type(opt.features_path.split(",")):
du.quantify_occurences_through_table(
".",
opt.features_path.split(","),
opt.accuracy,
opt.features,
opt.working_path
)
else:
raise OSError( " Dude! you are not giving me a list of files or a folder path!")
if __name__ == "__main__":
main()
|
gollum23/oidoypaz | web/context_processors.py | Python | gpl-2.0 | 369 | 0 | # -*- coding: utf-8 -*-
| from .models import Slider, Category, | SoundLibrary
def slider(request):
slides = Slider.objects.all()
return {'slider': slides}
def category(request):
categories = Category.objects.all()
return {'category_menu': categories}
def fonoteca(request):
library = SoundLibrary.objects.all()
return {'library': library}
|
su27/qcloud_cos_py3 | docs/source/conf.py | Python | mit | 9,669 | 0.006205 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Qcloud COS SDK for Python 3 documentation build configuration file, created by
# cookiecutter pipproject
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Qcloud COS SDK for Python 3'
copyright = '2016, Dan Su'
author = 'Dan Su'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'Qcloud COS SDK for Python 3 v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# | html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document name | s to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Qcloud COS SDK for Python 3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Qcloud COS SDK for Python 3.tex', 'Qcloud COS SDK for Python 3 Documentation',
'Dan Su', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addre |
aerler/WRF-Projects | src/projects/GreatLakes/GRW_settings.py | Python | gpl-3.0 | 33,466 | 0.019841 | '''
Created on Sep 4, 2016
This module contains a meta data for HGS simulations for the GRW and a wrapper to load them.
@author: Andre R. Erler, GPL v3
'''
import numpy as np
from collections import OrderedDict
from collections import namedtuple
# internal imports
import hgs.HGS as hgs # need to prevent name collisions here
from hgs.PGMN import getWellName
import projects.WSC_basins as wsc
from projects.GreatLakes.WRF_experiments import WRF_exps
from geodata.misc import ArgumentError, DatasetError
# imports from HGS_settings
import projects.HGS_settings as default
project_name = 'GRW'
project_prefix = 'grw_omafra'
conservation_authority = 'GRCA'
# some parameters
gage_scalefactors = 1. # default scalefactor for discharge plots
main_gage = 'Brantford' # gage station(see station_list for proper HGS/WSC names)
main_basin = 'GRW' # main basin name
main_grid = 'grw2' # climate data grid
binary_grid = 'grw3' # grid used to interpolated binary output
# project folders
project_folder = '{:s}/{:s}/'.format(hgs.root_folder,project_name) # the dataset root folder
project_folder_pattern = '{PROJECT_FOLDER:s}/{GRID:s}/{EXPERIMENT:s}/{CLIM_DIR:s}/{TASK:s}/'
station_file_v1 = '{PREFIX:s}o.hydrograph.Station_{STATION:s}.dat' # general HGS naming convention
station_file_v2 = '{PREFIX:s}o.hydrograph.{WSC_ID0:s}.dat' # general HGS naming convention
# mapping of WSC station names to HGS hydrograph names
Station = namedtuple('Station', ('HGS','WSC','ylim'),)
station_list = OrderedDict() # this is the main gage of the GRW
station_list['Grand River at Brantford'] = Station(HGS='Station_GR_Brantford',WSC='Grand River_Brantford',ylim=150)
station_list['Nith River at Canning'] = Station(HGS='Station_Nith_River_near_Canning_(moved_upstrea',WSC='Nith River_Canning',ylim=35 )
station_list['Grand River at Marsville'] = Station(HGS='Station_GR_Marsville_(near_it)',WSC='Grand River_Marsville',ylim=30)
station_list['Conestogo River at Glen Allan'] = Station(HGS='Station_Conestogo_River_at_Glen_Allan',WSC='Conestogo River_Glen Allan',ylim=20)
station_list['Speed River at Guelph'] = Station(HGS='Station_Speed_River_near_Guelph(moved_North)',WSC='Speed River_Guelph',ylim=20)
station_list['Whiteman\'s Cr. at Mt. Vernon'] = Station(HGS='Station_Station_Whitemans_Creek_near_Mt_Vernon',WSC='Whitemans Creek_Mount Vernon',ylim=12)
station_list['Fairchild River at Brantford'] = Station(HGS='Station_Fairchild_Creek_near_Brantford',WSC='Fairchild Creek_Brantford',ylim=12 )
# look-up tables for WSC/HGS station name conversion
WSC_station_list = {stn.WSC:stn.HGS for stn in list(station_list.values())}
HGS_station_list = {stn.HGS:stn.WSC for stn in list(station_list.values())}
# short names for gages in this basin and their HGS/WSC names
station_list_etal = dict(**station_list) # not ordered
station_list_etal['Brantford'] = station_list['Grand River at Brantford'] # alias
# list of groundwater observation wells
grca_wells = ['W0000023_1', 'W0000024_2', 'W0000024_4', 'W0000046_1',
'W0000065_4', 'W0000306_1', 'W0000307_1', 'W0000309_2', 'W0000309_3',
'W0000347_2', 'W0000347_3', 'W0000421_1', 'W0000423_1', 'W0000424_1',
'W0000425_1', 'W0000427_1', 'W0000428_1', 'W0000476_1', ]
sorted_grca_wells = ['W0000347-2','W0000307-1','W0000306-1','W0000347-3','W0000421-1','W0000046-1',
'W0000065-4','W0000427-1','W0000024-2','W0000023-1','W0000309-2','W0000024-4',
'W0000423-1','W0000428-1','W0000476-1','W0000309-3','W0000425-1','W0000424-1',]
# N.B.: 'W0000035_5' and 'W0000426_1' are missing in the GRCA dataset
other_wells = ['W0000003_1', 'W0000022_1', 'W0000178_1', 'W0000477_1', 'W0000478_1',]
# plotting parameters for HGS simulations
hgs_plotargs = dict()
# stream observations
hgs_plotargs['Observations'] = dict(color='#959595') # gray
hgs_plotargs['Obs.'] = dict(color='#959595') # gray
hgs_plotargs['WSC Obs.'] = dict(color='#959595') # gray
hgs_plotargs['WSC'] = dict(color='#959595') # gray
# NRCan forcing, different model versions
hgs_plotargs['HGS (V1)'] = dict(color='green') #, linewidth=3)
hgs_plotargs['NRCan'] = dict(color='green')
hgs_plotargs['NRCan, L21'] = dict(color='green')
hgs_plotargs['NRCan (V1)'] = dict(color='gray', linestyle='--')
hgs_plotargs['NRCan (V2)'] = dict(color='gray')
hgs_plotargs['NRCan (V2k)'] = dict(color='#AAA2D8') # purple
hgs_plotargs['NRCan (V2f)'] = dict(color='red') # red
hgs_plotargs['NRCan (V3f)'] = dict(color='magenta')
hgs_plotargs['NRCan (V3s)'] = dict(color='black')
hgs_plotargs['NRCan (V3w)'] = dict(color='green')
hgs_plotargs['NRCan (V3m2)'] = dict(color='blue')
hgs_plotargs['NRCan (V3m3)'] = dict(color='purple')
hgs_plotargs['NRCan (V3m4)'] = dict(color='red')
hgs_plotargs['NRCan (V3m5)'] = dict(color='green')
hgs_plotargs['NRCan (V3)'] = dict(color='green')
hgs_plotargs['V3 (Prairies)'] = dict(color='#AAA2D8')
hgs_plotargs['V3 (Maritime)'] = dict(color='#62A1C6')
hgs_plotargs['V3 (Ephemeral)'] = dict(color='#E24B34')
hgs_plotargs['NRCan (Prairies)'] = dict(color='cyan')
hgs_plotargs['NRCan (Ephemeral)'] = dict(color='coral')
hgs_plotargs['NRCan (hires)'] = dict(color='magenta')
# SnoDAS
hgs_plotargs['SnoDAS'] = dict(color='#62A1C6') # blue
hgs_plotargs['RF-BC'] = dict(color='#E24B34') # red
# Landuse scenarios
hgs_plotargs['GRCA'] = dict(color='green')
hgs_plotargs['LU 2000'] = dict(color='#62A1C6')
hgs_plotargs['LU 2055'] = dict(color='#AAA2D8')
hgs_plotargs['LU 2095'] = dict(color='#E24B34')
# Temporal Aggregation
hgs_plotargs['Steady-State (V1)'] = dict(color='#AAA2D8') # purple
hgs_plotargs['Periodic (V1)'] = dict(color='green') #
hgs_plotargs['Steady-State (V2)'] = dict(color='#E24B34') # red
hgs_plotargs['Periodic (V2)'] = dict(color='coral')
hgs_plotargs['Transient'] = dict(color='#62A1C6') # blue
hgs_plotargs['Steady-State'] = dict(color='#AAA2D8') # purple
hgs_plotargs['Periodic'] = dict(color='#E24B34') # red
hgs_plotargs['Normals'] = dict(color='green') #
hgs_plotargs['Monthly'] = dict(color='#62A1C6') # blue
hgs_plotargs['Daily'] = dict(color='#E24B34') # red
# WRF forcing
hgs_plotargs['WRF 10km'] = dict(color='#62A1C6') # blue
hgs_plotargs['WRF 30km'] = dict(color='#E24B34') # red
hgs_plotargs['WRF 90km'] = dict(color='#AAA2D8') # purple
hgs_plotargs['WRF (AABC)'] = dict(color='#62A1C6') # blue
hgs_plotargs['WRF (Delta)'] = dict(color='#E24B34') # red
# hgs_plotargs['WRF G 10km'] = dict(color='#62A1C6') # blue
# hgs_plotargs['WRF G 30km'] = dict(color='#E24B34') # red
# hgs_plotargs['WRF G 90km'] = dict(color='#AAA2D8') # purple
# hgs_plotargs['WRF T 10km'] = dict(color='#62A1C6') # blue
# hgs_plotargs['WRF T 30km'] = dict(color='#E24B34') # red
# hgs_plotargs['WRF T 90km'] = dict(color='#AAA2D8') # purple
hgs_plotargs['WRF T 10km'] = dict(color='#E24B34') # red
hgs_plotargs['WRF G 10km'] = dict(color='#62A1C6') # blue
hgs_plotargs['WRF T 30km'] = dict(color='#E24B34') # red
hgs_plotargs['WRF G 30km'] = dict(color='#62A1C6') # blue
hgs_plotargs['WRF T 90km'] = dict(color='#E24B34') # red
hgs_plotargs['WRF G 90km'] = dict(color='#62A1C6') # blue
hgs_plo | targs['WRF T'] = dict(color='#E24B34') # red
hgs_plotargs['WRF G'] = dict(color='#62A1C6') # blue
hgs_plotargs['T Ensemble'] = dict(color='#E24B34') # red
hgs_plotargs['G Ensemble'] = | dict(color='#62A1C6') # blue
hgs_plotargs['ERAI-T'] = dict(color='#E24B34') # red
hgs_plotargs['ERAI-G'] = dict(color='#62A1C6') # blue
hgs_plotargs['T Mean'] = dict(color='red') # red
hgs_plotargs['G Mean'] = dict(color='black') # blue |
trawick/edurepo | src/edurepo/edurepo/settings.py | Python | apache-2.0 | 6,323 | 0.000949 | """
Django settings for edurepo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
import stat
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open('settings.cfg'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.get('secret', 'key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config.get('debugging', 'DEBUG') == 'True'
ALLOWED_HOSTS = [x for x in config.get('deployment', 'ALLOWED_HOSTS') if x != '']
have_google_oauth2 = config.get('auth', 'have_google_oauth2') == 'True'
if have_google_oauth2:
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config.get('auth', 'OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config.get('auth', 'OAUTH2_SECRET')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'bootstrap3',
'django_nose',
'repo',
'resources',
'teachers',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
TEMPLATES = [
{
'BACKE | ND': 'django.template.backend | s.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
'django.contrib.auth.context_processors.auth',
],
'debug': config.get('debugging', 'TEMPLATE_DEBUG') == 'True',
},
},
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
ROOT_URLCONF = 'edurepo.urls'
WSGI_APPLICATION = 'edurepo.wsgi.application'
MOUNTED_AT = config.get('deployment', 'MOUNTED_AT')
LOGIN_URL = MOUNTED_AT + '/login/google/'
SOCIAL_AUTH_LOGIN_URL = MOUNTED_AT + '/login/'
# SOCIAL_AUTH_LOGIN_REDIRECT_URL = <TBD>
# SOCIAL_AUTH_LOGIN_ERROR_URL = <TBD>
SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage'
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config.get('database', 'NAME'),
'USER': config.get('database', 'USER'),
'PASSWORD': config.get('database', 'PASSWORD'),
'HOST': 'localhost'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TASTYPIE_DEFAULT_FORMATS = ['json']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
if config.get('deployment', 'set_static_root') == 'True':
STATIC_ROOT = config.get('deployment', 'STATIC_ROOT')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
LOG_DIRECTORY = config.get('logging', 'DIRECTORY')
GLOBAL_LOG_LEVEL = config.get('logging', 'GLOBAL_LEVEL')
def group_writable_file_handler(filename, mode='a', encoding=None):
    """Return a ``logging.FileHandler`` whose backing file is group writable.

    If *filename* does not exist yet it is created first and the group
    write bit is added to its permissions.  The chmod is only attempted
    on files we create ourselves: we are guaranteed to be the owner of a
    file we just created, and only the owner may change its mode.
    """
    if not os.path.exists(filename):
        # Touch the file so that we own it, then grant group write access.
        with open(filename, 'a'):
            pass
        os.chmod(filename, os.stat(filename).st_mode | stat.S_IWGRP)
    return logging.FileHandler(filename, mode, encoding)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
# what do the log records look like?
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
# what can we do with the log records?
'handlers': {
'file': {
'()': group_writable_file_handler,
'level': 'DEBUG',
'formatter': 'verbose',
'filename': os.path.join(LOG_DIRECTORY, 'edjective.log'),
'mode': 'a',
'encoding': 'utf-8'
}
},
'loggers': {
# catch-all logger:
'django': {
'handlers': ['file'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'': {
'handlers': ['file'],
'level': GLOBAL_LOG_LEVEL,
}
}
}
# Set SNI_API_BREAKAGE to True iff all of the following are true:
# . The app is hosted on a system that relies on SNI to support SSL, on port
# 443.
# . Non-SSL is also supported, on port 80.
# . Some interesting clients, including Python requests running in the app,
# don't work with SNI for whatever reason.
# (If they aren't all true, it may still be broken, but there's no workaround
# in the code.)
SNI_API_BREAKAGE = True
|
tinybike/grapple | grapple/grapple.py | Python | mit | 24,929 | 0.002166 | #!/usr/bin/env python
"""Ripple ledger extractor.
Grapple extracts the ledger from rippled via websocket. It starts at the
current ledger index, and walks backwards until it reaches the genesis ledger.
The genesis ledger index is set by default to 152370.
If you have previously run Grapple, data will only be collected from the
current ledger to the maximum ledger index previously recorded. Just set
the "full" flag if you prefer to re-download the entire ledger.
Data can be collected from a local or a remote rippled instance. If you have
a local rippled instance running that has downloaded all or most of the ledger,
I strongly recommend doing local data collection. Fetching data from Ripple's
public websocket is very slow!
Grapple also resamples each currency pair's price time series to create
"Open-Hi-Lo-Close" time series. These resampled datasets can be useful for
statistical tests, technical market analysis, or simply for drawing charts.
Grapple is designed to integrate with PostgreSQL, using connection information
in config.py. By default, it assumes that your database is located on
localhost (127.0.0.1), and that your database's name, password, username and
are all "grapple".
(While this is certainly not a secure setup, it may be convenient for people
who install Grapple via pip, and do not wish to edit its source code.)
Usage as a Python module:
from grapple import Grapple
grapple = Grapple()
grapple.download()
The Grapple constructor accepts the following keyword arguments:
socket_url (str):
rippled websocket URL (default="ws://127.0.0.1:6006/")
full (bool):
True if downloading the full ledger (starting from the current ledger
and walking back to the genesis ledger). False if the download should
stop at the last current ledger (i.e., the last time grapple was run).
(default=True)
genesis (int):
Genesis ledger index and download halting point. (default=152370)
quiet (bool):
If True, suppress console output. (default=True)
resampling_frequencies (tuple):
Resampling frequencies, using pandas frequency codes. If None, then
resampling is disabled. (default=('D',) or daily)
Usage as a script:
python grapple.py [-flags]
Optional flags:
-w, --websocket [websocket url]:
Specify the rippled websocket url. (default=ws://127.0.0.1:6006/)
-p, --public:
Use Ripple Labs' public websocket, wss://s1.ripple.com:51233.
-f, --full:
Download the full Ripple ledger. Automatic on first run.
-g, --genesis [ledger index]:
Genesis ledger index and download halting point.
-q, --quiet:
Suppress command line output.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
try:
import cdecimal
sys.modules["decimal"] = cdecimal
except:
pass
import os
import getopt
import json
import websocket
from decimal import Decimal, getcontext, ROUND_HALF_EVEN
from contextlib import contextmanager
# Third-party imports.  The ``pandas a | s pd`` / ``lambda x : x |`` forms in
# the original text were extraction artifacts and are restored here.
import pandas as pd
import pandas.io.sql as psql
import numpy as np
import psycopg2 as db
import psycopg2.extensions as ext
# Python 3 compatibility
from six.moves import xrange as range
_IS_PYTHON_3 = sys.version_info[0] == 3
identity = lambda x : x
if _IS_PYTHON_3:
u = identity
else:
import codecs
def u(string):
return codecs.unicode_escape_decode(string)[0]
from config import *
getcontext().rounding = ROUND_HALF_EVEN
# Postgres connection
if not os.environ.get("CONTINUOUS_INTEGRATION"):
conn = db.connect(POSTGRES_CONNECTION_STRING)
conn.set_isolation_level(ext.ISOLATION_LEVEL_READ_COMMITTED)
class Grapple(object):
    def __init__(self, socket_url="ws://127.0.0.1:6006/", full=False,
                 genesis=152370, quiet=True, resampling_frequencies=('D',)):
        """
        Args:
            socket_url (str): rippled websocket URL (default="ws://127.0.0.1:6006/")
            full (bool): True if downloading the full ledger (starting from the
                         current ledger and walking back to the genesis ledger).
                         False if the download should stop at the last current
                         ledger (the last time grapple was run). (default=False)
            genesis (int): Genesis block index. If full=True, this is where the
                           download ends; otherwise, this value is ignored.
                           (default=152370)
            quiet (bool): If True, suppress console output. (default=True)
            resampling_frequencies (tuple): Resampling frequencies, using pandas
                                            frequency codes. If None, then
                                            resampling is disabled.
                                            (default=('D',) or daily)
        """
        self.full = full
        self.socket_url = socket_url
        self.start_date = None
        # Ledger index at which the backwards walk stops.
        self.halt = genesis
        # Websocket connection; set by the connect/download machinery.
        self.socket = None
        self.ledger_current_index = None
        self.ledgers_to_read = None
        self.updates = 0
        self.markets = []
        self.quiet = quiet
        self.resampling_frequencies = resampling_frequencies
def get_current_index(self, retry=False):
try:
if self.socket is not None:
self.socket.send(json.dumps({'command': 'ledger_current'}))
data = json.loads(self.socket.recv())
if data and data['status'] == 'success':
if 'result' in data and 'ledger_current_index' in data['result']:
self.ledger_current_index = data['result']['ledger_current_index']
if not self.quiet:
print("Current ledger index:", self.ledger_current_index)
else:
self.get_current_index(retry=True)
except Exception as e:
if not self.quiet:
print(e)
if retry:
return
self.get_current_index(retry=True)
    def get_tx(self, tx_hash, data):
        """Fetch a single transaction over the websocket.

        ``data`` is the enclosing ledger response; its close_time is
        attached to the result.  Returns ``(tx_result, options)`` on
        success and ``(False, False)`` on any failure.
        """
        try:
            if self.socket is not None:
                self.socket.send(json.dumps({
                    'command': 'tx',
                    'transaction': tx_hash,
                }))
                tx_data = self.socket.recv()
                tx_data = json.loads(tx_data)
                if tx_data['status'] == 'success' and 'result' in tx_data:
                    options = {
                        'ledger_time': data['result']['ledger']['close_time'],
                        'tx_hash': tx_hash,
                    }
                    return tx_data['result'], options
        except Exception as exc:
            if not self.quiet:
                print(exc)
        # Any error path (no socket, bad response, exception) falls through here.
        return False, False
def parse_tx(self, tx, accepted, ledger_time=None, tx_hash=None):
stored_tx_count = 0
if tx['TransactionType'] == 'Payment' and 'meta' in tx and tx['meta']['TransactionResult'] == 'tesSUCCESS':
for affected_node in tx['meta']['AffectedNodes']:
if 'ModifiedNode' in affected_node:
node = affected_node['ModifiedNode']
elif 'DeletedNode' in affected_node:
node = affected_node['DeletedNode']
else:
continue
is_offer = node['LedgerEntryType'] == 'Offer'
has_prev = 'PreviousFields' in node
if has_prev:
has_pays = 'TakerPays' in node['PreviousFields']
has_gets = 'TakerGets' in node['PreviousFields']
if is_offer and has_prev and has_pays and has_gets:
previous = node['PreviousFields']
final = node['FinalFields']
adjust_xrp = 10**6
if 'currency' in final['TakerGets']:
gets = {
'currency': final['TakerGets']['currency'],
'amount': Decimal(previous['TakerGets']['value']) - Decimal(final['TakerGets']['value']),
'issuer' |
factorlibre/stock-logistics-warehouse | stock_available/__openerp__.py | Python | agpl-3.0 | 483 | 0 | # -*- coding: utf-8 -*-
# © 2014 Numérigraphe SARL
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Odoo/OpenERP module manifest for the "Stock available to promise" addon.
{
    'name': 'Stock available to promise',
    'version': '8.0.3.1.0',
    "author": u"Numérigraphe,Odoo Community Association (OCA)",
    'category': 'Warehouse',
    'depends': ['stock'],
    'license': 'AGPL-3',
    # View files loaded when the module is installed.
    'data': [
        'views/product_template_view.xml',
        'views/product_product_view.xml',
        'views/res_config_view.xml',
    ]
}
|
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/rigify/rigs/pitchipoy/limbs/arm.py | Python | gpl-3.0 | 3,863 | 0.017085 | #====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
from ....utils import MetarigError
from ....utils import create_widget, copy_bone
from ....utils import strip_org
from .limb_utils import *
from ..super_widgets import create_hand_widget
from rna_prop_ui import rna_idprop_ui_prop_get
def create_arm( cls, bones ):
    """Build the IK control chain for a rigify arm.

    Creates the IK hand control, parents the IK mechanism bones to it,
    sets up the damped-track/stretch constraint stack, exposes an
    ``IK_Strertch`` custom property (NOTE: the misspelling is the
    established property name that existing rigs and drivers reference,
    so it must not be renamed), wires a driver from that property to the
    limit-scale constraint, and attaches the hand widget.

    Returns the (mutated) ``bones`` dictionary with the terminal IK
    control recorded under ``bones['ik']['ctrl']['terminal']``.
    """
    org_bones = cls.org_bones
    bpy.ops.object.mode_set(mode='EDIT')
    eb = cls.obj.data.edit_bones
    ctrl = get_bone_name( org_bones[2], 'ctrl', 'ik' )
    # Create IK arm control
    ctrl = copy_bone( cls.obj, org_bones[2], ctrl )
    # clear parent (so that rigify will parent to root)
    eb[ ctrl ].parent = None
    eb[ ctrl ].use_connect = False
    # Parent
    eb[ bones['ik']['mch_target'] ].parent = eb[ ctrl ]
    eb[ bones['ik']['mch_target'] ].use_connect = False
    # Set up constraints
    # Constrain mch target bone to the ik control and mch stretch
    make_constraint( cls, bones['ik']['mch_target'], {
        'constraint'  : 'COPY_LOCATION',
        'subtarget'   : bones['ik']['mch_str'],
        'head_tail'   : 1.0
    })
    # Constrain mch ik stretch bone to the ik control
    make_constraint( cls, bones['ik']['mch_str'], {
        'constraint'  : 'DAMPED_TRACK',
        'subtarget'   : ctrl,
    })
    make_constraint( cls, bones['ik']['mch_str'], {
        'constraint'  : 'STRETCH_TO',
        'subtarget'   : ctrl,
    })
    # Cap stretching at 5% so the limb cannot scale without bound.
    make_constraint( cls, bones['ik']['mch_str'], {
        'constraint'  : 'LIMIT_SCALE',
        'use_min_y'   : True,
        'use_max_y'   : True,
        'max_y'       : 1.05,
        'owner_space' : 'LOCAL'
    })
    pb = cls.obj.pose.bones
    # Modify rotation mode for ik and tweak controls
    pb[bones['ik']['ctrl']['limb']].rotation_mode = 'ZXY'
    for b in bones['tweak']['ctrl']:
        pb[b].rotation_mode = 'ZXY'
    # Create ik/fk switch property
    pb_parent = pb[ bones['parent'] ]
    pb_parent['IK_Strertch'] = 1.0
    prop = rna_idprop_ui_prop_get( pb_parent, 'IK_Strertch', create=True )
    prop["min"] = 0.0
    prop["max"] = 1.0
    prop["soft_min"] = 0.0
    prop["soft_max"] = 1.0
    prop["description"] = 'IK Stretch'
    # Add driver to limit scale constraint influence
    b = bones['ik']['mch_str']
    drv = pb[b].constraints[-1].driver_add("influence").driver
    drv.type = 'SUM'
    var = drv.variables.new()
    var.name = prop.name
    var.type = "SINGLE_PROP"
    var.targets[0].id = cls.obj
    var.targets[0].data_path = \
        pb_parent.path_from_id() + '[' + '"' + prop.name + '"' + ']'
    drv_modifier = cls.obj.animation_data.drivers[-1].modifiers[0]
    # Linear remap: influence = 1.0 - property value.
    drv_modifier.mode            = 'POLYNOMIAL'
    drv_modifier.poly_order      = 1
    drv_modifier.coefficients[0] = 1.0
    drv_modifier.coefficients[1] = -1.0
    # Create hand widget
    create_hand_widget(cls.obj, ctrl, bone_transform_name=None)
    bones['ik']['ctrl']['terminal'] = [ ctrl ]
    return bones
|
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/backend_address_pool_paged.py | Python | mit | 982 | 0.001018 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class BackendAddressPoolPaged(Paged):
    """
    A paging container for iterating over a list of :class:`BackendAddressPool <azure.mgmt.network.v2017_11_01.models.BackendAddressPool>` objects.
    """
    # Serialization map consumed by the msrest ``Paged`` base class.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[BackendAddressPool]'}
    }
    def __init__(self, *args, **kwargs):
        super(BackendAddressPoolPaged, self).__init__(*args, **kwargs)
|
Heufneutje/txircd | txircd/modules/rfc/pingpong.py | Python | bsd-3-clause | 5,686 | 0.033415 | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implementer
from typing import Any, Callable, Dict, List, Optional, Tuple
@implementer(IPlugin, IModuleData)
class PingPong(ModuleData):
	"""Core module implementing client and server PING/PONG keepalives."""
	name = "PingPong"
	core = True
	def actions(self) -> List[Tuple[str, int, Callable]]:
		return [ ("pinguser", 10, self.pingUser),
			("pingserver", 10, self.pingServer) ]
	def userCommands(self) -> List[Tuple[str, int, Command]]:
		return [ ("PING", 1, UserPing(self.ircd)),
			("PONG", 1, UserPong()) ]
	def serverCommands(self) -> List[Tuple[str, int, Command]]:
		return [ ("PING", 1, ServerPing(self.ircd)),
			("PONG", 1, ServerPong(self.ircd)) ]
	def pingUser(self, user: "IRCUser") -> None:
		# First ping interval for this user: initialize both timestamps.
		if "pingtime" not in user.cache or "pongtime" not in user.cache:
			user.cache["pingtime"] = now()
			user.cache["pongtime"] = now()
		pingTime = user.cache["pingtime"]
		pongTime = user.cache["pongtime"]
		# No PONG received since the last PING: the connection timed out.
		if pongTime < pingTime:
			self.ircd.log.debug("User {user.uuid} pinged out (last pong time '{pongTime}' was less than last ping time '{pingTime}' at the next ping interval)", user=user, pongTime=pongTime, pingTime=pingTime)
			user.disconnect("Ping timeout")
			return
		# Recent client activity counts as liveness; skip pinging an active user.
		if user.idleSince > user.cache["pongtime"]:
			user.cache["pingtime"] = now()
			user.cache["pongtime"] = now()
			return
		user.sendMessage("PING", self.ircd.name, to=None, prefix=None)
		user.cache["pingtime"] = now()
	def pingServer(self, server: "IRCServer") -> None:
		# Same timeout protocol as pingUser, applied to a server link.
		if "pingtime" not in server.cache or "pongtime" not in server.cache:
			server.cache["pingtime"] = now()
			server.cache["pongtime"] = now()
		pingTime = server.cache["pingtime"]
		pongTime = server.cache["pongtime"]
		if pongTime < pingTime:
			self.ircd.log.debug("Server {server.serverID} pinged out (last pong time '{pongTime}' was less than last ping time '{pingTime}' at the next ping interval)", server=server, pongTime=pongTime, pingTime=pingTime)
			server.disconnect("Ping timeout")
			return
		server.sendMessage("PING", self.ircd.serverID, server.serverID, prefix=self.ircd.serverID)
		server.cache["pingtime"] = now()
@implementer(ICommand)
class UserPing(Command):
	"""Answers a client's PING with a PONG carrying the same payload."""
	resetsIdleTime = False
	forRegistered = None
	def __init__(self, ircd):
		self.ircd = ircd
	def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
		if params:
			return { "data": params[0] }
		user.sendSingleError("PingCmd", irc.ERR_NEEDMOREPARAMS, "PING", "Not enough parameters")
		return None
	def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
		user.sendMessage("PONG", data["data"], to=self.ircd.name)
		return True
@implementer(ICommand)
class UserPong(Command):
	"""Records the timestamp of a client's PONG reply."""
	resetsIdleTime = False
	forRegistered = None
	def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
		if params:
			return { "data": params[0] }
		user.sendSingleError("PongCmd", irc.ERR_NEEDMOREPARAMS, "PONG", "Not enough parameters")
		return None
	def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
		user.cache["pongtime"] = now()
		return True
@implementer(ICommand)
class ServerPing(Command):
	"""Server-to-server PING: answered locally or relayed towards its target."""
	forRegistered = None
	def __init__(self, ircd):
		self.ircd = ircd
	def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
		if len(params) != 2:
			return None
		# params[0] is the source server ID, params[1] the destination.
		# A reference to a server that recently quit is not a protocol error;
		# flag it so execute() can acknowledge the command as a no-op.
		if params[0] != server.serverID and params[0] not in self.ircd.servers:
			if params[0] in self.ircd.recentlyQuitServers:
				return {
					"lostserver": True
				}
			return None
		if params[1] != self.ircd.serverID and params[1] not in self.ircd.servers:
			if params[1] in self.ircd.recentlyQuitServers:
				return {
					"lostserver": True
				}
			return None
		return {
			"prefix": prefix,
			"source": params[0],
			"dest": params[1]
		}
	def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
		if "lostserver" in data:
			return True
		if data["dest"] == self.ircd.serverID:
			# We are the destination: answer the source with a PONG.
			server.sendMessage("PONG", data["dest"], data["source"], prefix=data["prefix"])
			return True
		# Otherwise relay the PING one hop towards its destination.
		self.ircd.servers[data["dest"]].sendMessage("PING", data["source"], data["dest"], prefix=data["prefix"])
		return True
@implementer(ICommand)
class ServerPong(Command):
	"""Server-to-server PONG: records link liveness locally or relays onward."""
	forRegistered = None
	def __init__(self, ircd):
		self.ircd = ircd
	def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
		if len(params) != 2:
			return None
		# Same validation as ServerPing.parseParams: tolerate references to
		# servers that quit while the PONG was in flight.
		if params[0] != server.serverID and params[0] not in self.ircd.servers:
			if params[0] in self.ircd.recentlyQuitServers:
				return {
					"lostserver": True
				}
			return None
		if params[1] != self.ircd.serverID and params[1] not in self.ircd.servers:
			if params[1] in self.ircd.recentlyQuitServers:
				return {
					"lostserver": True
				}
			return None
		return {
			"prefix": prefix,
			"source": params[0],
			"dest": params[1]
		}
	def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
		if "lostserver" in data:
			return True
		if data["dest"] == self.ircd.serverID:
			# PONG addressed to us: update the pong timestamp of whichever
			# server link originated it.
			if data["source"] == server.serverID:
				server.cache["pongtime"] = now()
			else:
				self.ircd.servers[data["source"]].cache["pongtime"] = now()
			return True
		self.ircd.servers[data["dest"]].sendMessage("PONG", data["source"], data["dest"], prefix=data["prefix"])
		return True
pingpong = PingPong() |
luotao1/Paddle | python/paddle/distributed/fleet/meta_parallel/tensor_parallel.py | Python | apache-2.0 | 1,803 | 0.000555 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.dygraph.layers import Layer
from .meta_parallel_base import MetaParallelBase
from ..utils.hybrid_parallel_util import broadcast_dp_parameters
from ..utils.hybrid_parallel_util import broadcast_input_data
from ..utils.hybrid_parallel_util import broadcast_mp_parameters, broadcast_sharding_parameters
from ..utils.log_util import logger
__all__ = []
class TensorParallel(MetaParallelBase):
    """Meta-parallel wrapper implementing tensor (model) parallelism.

    Broadcasts model-parallel, sharding and data-parallel parameters when
    the model is prepared, and broadcasts input data before each forward
    pass so every rank in the hybrid communication group sees consistent
    inputs.
    """

    def __init__(self, layers, hcg, **kwargs):
        super(TensorParallel, self).__init__(layers, hcg, **kwargs)

    def _prepare_for_model(self):
        logger.info("start broadcast mp parameters")
        broadcast_mp_parameters(self._layers, self._hcg)

        # Sharding parameters only exist when sharding parallelism is enabled.
        if self._hcg.get_sharding_parallel_world_size() > 1:
            logger.info("start broadcast sharding parameters")
            broadcast_sharding_parameters(self._layers, self._hcg)

        logger.info("start broadcast dp parameters")
        broadcast_dp_parameters(self._layers, self._hcg)

        logger.info("mp's parameters is ready")

    def _pre_forward(self, *inputs, **kwargs):
        logger.debug("mp start broadcast input data")
        return broadcast_input_data(self._hcg, *inputs, **kwargs)
|
glymehrvrd/CppYCM | listeners/highlight_problems.py | Python | mit | 902 | 0 | import sublime
import sublime_plugin
import json
from threading import Thread
from ..lib.ycmd_handler import server
from ..lib.utils import *
from ..lib.msgs import MsgTemplates
class CppYCMHighlightProblemsListener(sublime_plugin.EventListener):
    """Sublime Text listener re-running problem highlighting for C++ views."""

    def on_selection_modified_async(self, view):
        # Only act on real (non-scratch) C++ views.
        if not is_cpp(view) or view.is_scratch():
            return
        # Not work in st3, output panel wouldn't call this callback
        # from ..commands.highlight_problems import output_panel
        # if output_panel and (view.id() == output_panel.id()):
        #     sublime.message_dialog('match!')
        # update_statusbar(view)

    def on_post_save_async(self, view):
        if not is_cpp(view) or view.is_scratch():
            return
        # run highlight problems command
        if check_highlight_on_save():
            view.window().run_command('cppycm_highlight_problems')
|
tabalinas/jsgrid-django | clients/urls.py | Python | mit | 243 | 0.004115 | from django.conf.urls import url
from . import views
from .views import Clients
# URL routes: an index page plus a collection endpoint and a per-client
# endpoint (addressed by numeric client_id), both served by the Clients view.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^api/?$', Clients.as_view()),
    url(r'^api/(?P<client_id>[0-9]+)/?$', Clients.as_view()),
]
] |
shl198/Pipeline | VariantCall/Human_GATK_RNA_vari_call.py | Python | mit | 7,218 | 0.015517 | """
this file does variant calling for RNAseq
"""
#============= import required packages =================
import os
import sys
import subprocess
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # disable buffer
from Modules.f00_Message import Message
from Modules.f01_list_trim_fq import list_files_human,Trimmomatic
from Modules.f02_aligner_command import STAR2Pass
from Modules.f03_samtools import sam2bam_sort
from Modules.f07_picard import markduplicates,addReadGroup
from Modules.f08_GATK import *
from Modules.p01_FileProcess import remove,get_parameters,rg_bams
#============= define some parameters ===================
"""these parameters and read group names are different for
different samples, should only change this part for
running pipeline
"""
# Load all pipeline settings from the parameter file given on the command line.
parFile = sys.argv[1]
param = get_parameters(parFile)
thread = param['thread']
email = param['email']
startMessage = param['startMessage']
endMessage = param['endMessage']
ref_fa = param['refSequence']
file_path = param['filePath']
starDb = param['alignerDb']
trim = param['trim']
phred = param['phred']
picard = param['picard']
trimmomatic = param['trimmomatic']
trimmoAdapter = param['trimmoAdapter']
# Known-sites resources for GATK recalibration/realignment.
gold_snp = param['dbSNP']
phaseINDEL = param['phase1INDEL']
gold_indel = param['MillINDEL']
omni = param['omni']
hapmap = param['hapMap']
gatk = param['gatk']
read_group = param['readGroup']
organism = param['organism']
##***************** Part 0. Build index file for bwa and GATK ******
##***************** Part I. Preprocess ============================
#======== 1. map and dedupping =====================================
#======== (0) enter the directory ========================
os.chdir(file_path)
Message(startMessage,email)
#======== (1) read files ================================
fastqFiles = list_files_human(file_path)
if trim == 'True':
fastqFiles = Trimmomatic(trimmomatic,fastqFiles,phred,trimmoAdapter)
sys.stdout.write('list file succeed\n')
sys.stdout.write('fastqFiles is: {fq}\n'.format(fq=fastqFiles))
#======== (2) align using 2 pass STAR ====================
try:
map_sams= STAR2Pass(fastqFiles,starDb,ref_fa,thread)
sys.stdout.write('align succeed\n')
sys.stdout.write('map_sams is: {map}\n'.format(map=map_sams))
except:
sys.stdout.write('align failed\n')
Message('align failed',email)
sys.exit(1)
#======== 2. Add read groups, sort,mark duplicates, and create index
#======== (1) sort and add group =========================
try:
sort_bams = sam2bam_sort(map_sams,thread)
sys.stdout.write('sort bam succeed\n')
sys.stdout.write('sort_bams is: {bam}\n'.format(bam=sort_bams))
except:
sys.stdout.write('sort bam failed\n')
Message('sort bam failed',email)
sys.exit(1)
try:
group_bams = addReadGroup(picard,sort_bams,read_group)
sys.stdout.write('add group succeed\n')
sys.stdout.write('group_bams is: {group}\n'.format(group=group_bams))
except:
sys.stdout.write('add group failed\n')
Message('add group failed',email)
sys.exit(1)
#======== (2) mark duplicates ============================
try:
dedup_bams = markduplicates(picard,group_bams)
sys.stdout.write('mark duplicate succeed\n')
sys.stdout.write('dedup_bams is: {dedup}\n'.format(dedup=dedup_bams))
remove(group_bams)
except:
sys.stdout.write('mark duplicate failed\n')
Message('mark duplicate failed',email)
sys.exit(1)
#======== 3. Split 'N' Trim and reassign mapping qualiteies
try:
split_bams = splitN(gatk,dedup_bams,ref_fa)
sys.stdout.write('split N succeed\n')
sys.stdout.write('split N is: {N}\n'.format(N=split_bams))
remove(dedup_bams)
except:
sys.stdout.write('split N failed\n')
Message('split N failed',email)
sys.exit(1)
#======== 4. Indel realignment ===========================
#======== (1) generate intervals =========================
try:
interval = RealignerTargetCreator(gatk,split_bams,ref_fa,thread,phaseINDEL,gold_indel)
sys.stdout.write('RealignerTarget Creator succeed\n')
sys.stdout.write('interval is: {int}\n'.format(int=interval))
except:
sys.stdout.write('RealignerTarget Creator failed\n')
Message('RealignerTarget Creator failed',email)
sys.exit(1)
#======== (2) realignment of target intervals ============
try:
realign_bams = IndelRealigner(gatk,split_bams,ref_fa,interval,phaseINDEL,gold_indel)
sys.stdout.write('IndelRealigner succeed\n')
sys.stdout.write('realign bams is: {reali}\n'.format(reali=realign_bams))
remove(split_bams)
except:
sys.stdout.write('IndelRealigner failed\n')
Message('IndelRealigner failed',email)
sys.exit(1)
#======== 5. Base quality recalibration =================
roundNum = 1
try:
recal_bams = BaseRecalibrator(gatk,realign_bams,ref_fa,gold_snp,
gold_indel,roundNum,thread)
sys.stdout.write('recalibration succeed\n')
sys.stdout.write('recal_bams is: {recal}\n'.format(recal=recal_bams))
except:
sys.stdout.write('recalibration failed\n')
Message('recalibration failed',email)
sys.exit(1)
#======== !!! merge lanes for the same sample ============
roundNum = '1'
if len(recal_bams) !=1:
try:
merged_bams = rg_bams(read_group,recal_bams)
sys.stdout.write('merge_bams is: {mer}\n'.format(mer=merged_bams))
remove(recal_bams)
except:
sys.stdout.write('merge failed\n')
Message('merge failed',email)
sys.exit(1)
try:
dedup_files = markduplicates(picard,merged_bams)
sys.stdout.write('dedup_files is: {dedup}\n'.format(dedup=dedup_files))
remove(merged_bams)
except:
sys.stdout.write('mark duplicate merged failed\n')
Message('mark uplicate merged failed',email)
sys.exit(1)
try:
interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,thread,
phaseINDEL,gold_indel)
realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,interval,
phaseINDEL,gold_indel)
remove(dedup_files)
sys.stdout.write('realign_bams is: {reali}\n'.format(reali=realign_bams))
sys.stdout.write('merge lanes succeed\n')
except:
sys.stdout.write('realign merged failed\n')
Message('realign merged failed',email)
sys.exit(1)
else:
realign_bams = recal_bams
#======== 6. Variant Calling =============================
try:
vcf_files = HaplotypeCaller_RNA_VCF(gatk,realign_bams,ref_fa,thread)
sys.stdout.write('1 round call succeed\n')
sys.stdout.write('vcf_files is: {vcf}\n'.format(vcf=vcf_files))
remove(realign_bams)
except:
sys.stdout.write('1 round call failed\n')
Message('1 round call failed',email)
sys.exit(1)
#======== 7. Variant filtering ===========================
try:
gold_varis = RNA_Vari_Filter(gatk,vcf_files,ref_fa)
sys.stdout.write('variant filter succeed\n')
sys.stdout.write('gold_varis is: {gold}\n'.format(gold=gold_varis))
sys.stdout.write('job finished succeessfully\n')
except:
sys.stdout.write('vairant filter failed')
Message('variant filter failed',email)
sys.exit(1)
Message(endMessage,email)
|
rhyolight/nupic.son | app/soc/mapreduce/process_org_apps.py | Python | apache-2.0 | 2,473 | 0.007683 | # Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI org proposal processing mapreduce."""
from mapreduce import context
from mapreduce import operation

from melange.request import links

from soc.logic import org_app as org_app_logic

# MapReduce requires import of processed model classes.
# pylint: disable=unused-import
from soc.models.site import Site
from soc.models.org_app_survey import OrgAppSurvey

from soc.modules.gci.models.program import GCIProgram
from soc.modules.gci.views.helper import url_names as gci_url_names
# pylint: enable=unused-import
class MapreduceRequestData(object):
  """Minimal stand-in for a RequestData object.

  Carries just the ``program`` and ``site`` attributes that the mapreduce
  handlers need.
  """

  def __init__(self, program, site):
    self.program, self.site = program, site
def process(org_app):
  """Map function finalizing one pre-accepted/pre-rejected org application.

  Reads the target program from the mapper params, skips records that do
  not belong to that program's survey, flips 'pre-accepted'/'pre-rejected'
  statuses to their final values and yields a mapreduce counter increment
  describing the outcome.
  """
  ctx = context.get()
  params = ctx.mapreduce_spec.mapper.params
  program_type = params['program_type']
  program_key_str = params['program_key']
  # now the script is used only for GCI
  if program_type != 'gci':
    return
  program = GCIProgram.get_by_key_name(program_key_str)
  survey_query = OrgAppSurvey.all(keys_only=True).filter('program', program)
  survey_key = survey_query.get()
  # We can skip the survey records not belonging to the given program.
  if org_app.survey.key() != survey_key:
    return
  # TODO(daniel): create a MapReduce/Task RequestData
  data = MapreduceRequestData(program, Site.get_by_key_name('site'))
  absolute_url = links.ABSOLUTE_LINKER.program(
      program, gci_url_names.CREATE_GCI_ORG_PROFILE)
  if org_app.status == 'pre-accepted':
    org_app_logic.setStatus(data, org_app, 'accepted', absolute_url)
    yield operation.counters.Increment("proposals_accepted")
  elif org_app.status == 'pre-rejected':
    org_app_logic.setStatus(data, org_app, 'rejected', absolute_url)
    yield operation.counters.Increment("proposals_rejected")
  else:
    yield operation.counters.Increment("proposals_ignored")
|
spreadflow/spreadflow-core | spreadflow_core/proc.py | Python | mit | 2,588 | 0.000386 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from twisted.internet import defer, task
from twisted.logger import Logger, LogLevel
class SyntheticSource(object):
    """A source emitting a fixed list of ``(delay, item)`` pairs.

    On attach every item is scheduled on the reactor to be sent to the
    scheduler after its associated delay.  Items arriving on the input
    port are passed through unchanged.
    """

    def __init__(self, items):
        self.items = items

    def attach(self, scheduler, reactor):
        for delay, message in self.items:
            reactor.callLater(delay, scheduler.send, message, self)

    def detach(self):
        pass

    def __call__(self, item, send):
        # Forward incoming items unchanged, tagged with this port.
        send(item, self)
class DebugLog(object):
    """
    A minimal processor which simply logs every item received and passes
    it through unchanged.
    """

    log = Logger()

    def __init__(self, message='Item received: {item}', level='debug'):
        # Resolve the textual level name ('debug', 'info', ...) once.
        self.level = LogLevel.levelWithName(level)
        self.message = message

    def __call__(self, item, send):
        self.log.emit(self.level, self.message, item=item)
        send(item, self)
class Duplicator(object):
    """Processor mirroring every item onto a secondary output port.

    A deep copy of each item is emitted on ``out_duplicate`` before the
    original item is passed through on the default port.
    """

    def __init__(self):
        # Sentinel object identifying the duplicate output port.
        self.out_duplicate = object()

    def __call__(self, item, send):
        duplicate = copy.deepcopy(item)
        send(duplicate, self.out_duplicate)
        send(item, self)
class Sleep(object):
    """
    A processor which delays every incoming message by a fixed amount.

    While a message is being delayed no other message is processed, so
    place a throttle in front of this component to avoid queue overflow.
    """

    # Factory producing a deferred that fires after a delay; set by attach().
    sleep = None

    def __init__(self, delay):
        """Record the delay (seconds) applied to each message."""
        self.delay = delay

    def attach(self, scheduler, reactor):
        """Bind the delay factory to the given reactor."""
        def _sleep(delay):
            return task.deferLater(reactor, delay, lambda: self)
        self.sleep = _sleep

    def detach(self):
        """Drop the reactor binding established by attach()."""
        self.sleep = None

    @defer.inlineCallbacks
    def __call__(self, item, send):
        """Wait for the configured delay, then forward the item."""
        assert self.sleep, 'Must call attach() before'
        yield self.sleep(self.delay)
        send(item, self)
class Throttle(object):
    """
    A processor which forwards at most one incoming message per period.

    Messages arriving before the period has elapsed are silently dropped.
    """

    # Timestamp of the last forwarded message (managed by attach()).
    last = None
    # Clock callable borrowed from the reactor (managed by attach()).
    now = None

    def __init__(self, delay, initial=0):
        """Configure the throttle period and the initial head start."""
        self.delay = delay
        self.initial = initial

    def attach(self, scheduler, reactor):
        """Adopt the reactor clock and prime the last-forwarded timestamp."""
        self.now = reactor.seconds
        self.last = self.now() - self.delay + self.initial

    def detach(self):
        """Forget the reactor clock and the recorded timestamp."""
        self.now = None
        self.last = None

    def __call__(self, item, send):
        """Forward the item only if a full period has passed; else drop it."""
        assert self.now, 'Must call attach() before'
        current = self.now()
        if current - self.last < self.delay:
            return
        self.last = current
        send(item, self)
|
xtuyaowu/jtyd_python_spider | browser_interface/browser/BrowserFactory.py | Python | mit | 225 | 0 | # -*- coding: utf-8 -*-
import sys
sys.path.append('../browser_interface/browser')
class BrowserFactory(object):
    """Factory that builds browser driver objects by name.

    The convention is that module ``<type>`` defines a class also named
    ``<type>``; ``create`` imports the module and instantiates that class.
    """

    def create(self, type, *args, **kwargs):
        """Import module *type* and return an instance of its *type* class.

        :param type: name of both the module and the class inside it
            (parameter shadows the builtin; kept for backward compatibility).
        """
        # __import__ executes arbitrary module code; only call with
        # trusted, internally-defined browser names.
        return getattr(__import__(type), type)(*args, **kwargs)
invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/test_range.py | Python | apache-2.0 | 17,941 | 0.000334 | # Python test set -- built-in functions
import test.support, unittest
import sys
import pickle
import itertools
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python reference implementation of ``range`` iteration.

    Yields the same values as ``iter(range(start, stop, step))``; used to
    cross-check the builtin (3-argument form only).
    """
    if (start - stop) // step < 0:
        # The range is non-empty: shift stop onto the first value past the
        # end that is congruent to start modulo step, so that a simple
        # inequality test terminates the loop exactly.
        stop += (start - stop) % step
        value = start
        while value != stop:
            yield value
            value += step
def pyrange_reversed(start, stop, step):
    """Pure-Python reference for ``reversed(range(start, stop, step))``.

    Aligns *stop* onto the arithmetic progression of *start*, then walks
    the progression backwards by delegating to ``pyrange``.
    """
    aligned_stop = stop + (start - stop) % step
    return pyrange(aligned_stop - step, start - step, -step)
class RangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
self.fail('{}: wrong element at position {};'
'expected {}, got {}'.format(test_id, i, y, x))
    def test_range(self):
        """Spot-check list(range(...)) results and argument validation."""
        self.assertEqual(list(range(3)), [0, 1, 2])
        self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
        self.assertEqual(list(range(0)), [])
        self.assertEqual(list(range(-3)), [])
        self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
        self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
        a = 10
        b = 100
        c = 50
        self.assertEqual(list(range(a, a+2)), [a, a+1])
        self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
        self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
        seq = list(range(a, b, c))
        self.assertIn(a, seq)
        self.assertNotIn(b, seq)
        self.assertEqual(len(seq), 2)
        seq = list(range(b, a, -c))
        self.assertIn(b, seq)
        self.assertNotIn(a, seq)
        self.assertEqual(len(seq), 2)
        seq = list(range(-a, -b, -c))
        self.assertIn(-a, seq)
        self.assertNotIn(-b, seq)
        self.assertEqual(len(seq), 2)
        # Wrong argument counts, a zero step, and non-integer arguments
        # must raise TypeError/ValueError.
        self.assertRaises(TypeError, range)
        self.assertRaises(TypeError, range, 1, 2, 3, 4)
        self.assertRaises(ValueError, range, 1, 2, 0)
        self.assertRaises(TypeError, range, 0.0, 2, 1)
        self.assertRaises(TypeError, range, 1, 2.0, 1)
        self.assertRaises(TypeError, range, 1, 2, 1.0)
        self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
        self.assertRaises(TypeError, range, 0, "spam")
        self.assertRaises(TypeError, range, 0, 42, "spam")
        # Lengths at/near sys.maxsize are still reported exactly by len().
        self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2)
        r = range(-sys.maxsize, sys.maxsize, 2)
        self.assertEqual(len(r), sys.maxsize)
    def test_large_operands(self):
        """Ranges built from operands beyond sys.maxsize behave correctly."""
        x = range(10**20, 10**20+10, 3)
        self.assertEqual(len(x), 4)
        self.assertEqual(len(list(x)), 4)
        x = range(10**20+10, 10**20, 3)
        self.assertEqual(len(x), 0)
        self.assertEqual(len(list(x)), 0)
        x = range(10**20, 10**20+10, -3)
        self.assertEqual(len(x), 0)
        self.assertEqual(len(list(x)), 0)
        x = range(10**20+10, 10**20, -3)
        self.assertEqual(len(x), 4)
        self.assertEqual(len(list(x)), 4)
        # Now test range() with longs
        self.assertEqual(list(range(-2**100)), [])
        self.assertEqual(list(range(0, -2**100)), [])
        self.assertEqual(list(range(0, 2**100, -1)), [])
        self.assertEqual(list(range(0, 2**100, -1)), [])
        a = int(10 * sys.maxsize)
        b = int(100 * sys.maxsize)
        c = int(50 * sys.maxsize)
        self.assertEqual(list(range(a, a+2)), [a, a+1])
        self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
        self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
        seq = list(range(a, b, c))
        self.assertIn(a, seq)
        self.assertNotIn(b, seq)
        self.assertEqual(len(seq), 2)
        self.assertEqual(seq[0], a)
        self.assertEqual(seq[-1], a+c)
        seq = list(range(b, a, -c))
        self.assertIn(b, seq)
        self.assertNotIn(a, seq)
        self.assertEqual(len(seq), 2)
        self.assertEqual(seq[0], b)
        self.assertEqual(seq[-1], b-c)
        seq = list(range(-a, -b, -c))
        self.assertIn(-a, seq)
        self.assertNotIn(-b, seq)
        self.assertEqual(len(seq), 2)
        self.assertEqual(seq[0], -a)
        self.assertEqual(seq[-1], -a-c)
def test_large_range(self):
# Check long ranges (len > sys.maxsize)
# len() is expected to fail due to limitations of the __len__ protocol
def _range_len(x):
try:
length = len(x)
except OverflowError:
step = x[1] - x[0]
length = 1 + ((x[-1] - x[0]) // step)
return length
a = -sys.maxsize
b = sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = 2 * sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = sys.maxsize**10
c = 2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises | (OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = sys.maxsize**10
b = 0
c = -2*sys.maxsize
| expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
def test_invalid_invocation(self):
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
a = int(10 * sys.maxsize)
self.assertRaises(ValueError, range, a, a + 1, int(0))
self.assertRaises(TypeError, range, 1., 1., 1.)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeErr |
AunShiLord/Tensor-analysis | build/lib/tensor-analysis/arraypy.py | Python | mit | 47,289 | 0.000508 | # -*- coding: utf-8 -*-
from sympy import Symbol
from sympy.matrices import Matrix, MatrixSymbol
from copy import copy
from itertools import permutations
from sympy.core.basic import Basic
"""
Module "arraypy" describes tensor and it's bases - Multidimentional arrays.
Module consists of Arraypy class, TensorArray class and converting functions:
list2arraypy, matrix2arraypy, list2tensor, matrix2tensor.
"""
class Arraypy(Basic):
"""
N-dimentional array.
Parameters
==========
self._dims - tuple, array dimension.
self._rank - Length of self._dims, rank of array.
self._sparse - boolean variable. True means that array is sparse.
self._name - custom _name of the element or default value of the element
self._start_index - first, starting index.
self._end_index - last, maximum index.
self._loop_size - Counts number of elements in array.
self._output - dictionary. Dictionary key is an element index.
Dictionary value - is array value.
self._current_index - current index (used in iterator)
self._iterator_index_count - count of indices (in iterator)
index - list, represent current index in calculating process.
[0,0,0]; [0,0,1]; [0,0,2] etc (for 3-dim array).
"""
def __init__(self, *arg):
"""
Class constructor
Creates n-dimensional array.
Input:
*arg - custom list of arguments. It could be:
-Array dimension
-Name of the Symbol element
-Default element
-Sparse
-Custom range of dimensions
"""
# main variables declaration
self._name = '0'
self._sparse = False
self._dims = [1]
self._start_index = [0]
self._end_index = [1]
j = 0
# --recognition of constructor arguments--
for i in arg:
# for arguments of type: a = Arraypy( (3,3) )
if isinstance(i, (tuple)):
self._dims = i
self._start_index = [0 for j in range(len(self._dims))]
self._end_index = [j for j in self._dims]
# for arguments of type: a = Arraypy( 3 )
if isinstance(i, int):
self._dims[0] = i
self._start_index = [0]
self._end_index = [i]
# for string arguments
if isinstance(i, str):
i = i.strip()
# a = Arraypy ('sparse')
if i == 'sparse':
self._sparse = True
# a = Arraypy ('0..3, 1..4')
elif len(i.split('..')) != 1:
self._dims = i
# splitting the string by commas ','. Length of resulted
# list will be the rank of array.
# '0..3, 1..4' -> ['0..3' , '1..4']
temp = self._dims.split(',')
self._rank = len(temp)
self._dims = []
k = 0
for temp_str in temp:
# splitting every k-th string by '..'. Resulted digits
# will be a start index and end index.
# Difference between end index and start index
# will be dimension
# ['0..3'] -> [['0'], ['3']]
temp[k] = temp_str.split('..')
if len(temp[k]) != 2:
raise SyntaxError('Wrong argument syntax')
# cleaning from spaces. If resulted string is digit,
# then converting it to integer.
# [['0'], ['3']] -> [[0], [3]]
for j in range(2):
temp[k][j] = temp[k][j].strip()
if temp[k][j].isdigit() is False:
raise TypeError('Wrong type')
temp[k][j] = int(temp[k][j])
self | ._dims.append(temp[k][1] - temp[k][0] + 1)
k += 1
self._dims = tuple(self._dims)
self._start_index = [temp[k][0] for k in range(self._rank)]
self._end_index = [
temp[k][1] + 1
for k in range(sel | f._rank)]
# a = Arraypy('Py')
else:
self._name = i
# for list arguments
if isinstance(i, (list)):
# a = Arraypy( [2, 4, 1] )
# first list element - rank
# second list element - length of every dimension
# third list element - start index
if isinstance(i[0], int):
if len(i) != 3:
raise TypeError('This argument must be lenght of 3')
for j in i:
if not isinstance(j, int):
raise TypeError(
'All list elements must be the same type (tuple or int)')
if i[0] < 1 or i[1] < 1:
raise ValueError(
'_rank and length of each dimensions must be greater than 0')
self._rank = i[0]
self._dims = tuple([i[1] for j in range(i[0])])
self._start_index = tuple([i[2] for j in range(i[0])])
self._end_index = tuple([i[2] + i[1] for j in range(i[0])])
# a = Arraypy( [(0, 3), (1, 4)] )
elif isinstance(i[0], tuple):
self._dims = []
self._start_index = []
self._end_index = []
for j in i:
if not isinstance(j, tuple):
raise TypeError(
'All list elements must be the same type (tuple or int)')
if len(j) != 2:
raise TypeError('Every tuple must be size of 2')
if j[0] > j[1]:
raise ValueError(
'Right border must be greater than left border')
self._start_index.append(j[0])
self._end_index.append(j[1] + 1)
self._dims.append(j[1] - j[0] + 1)
self._start_index = tuple(self._start_index)
self._end_index = tuple(self._end_index)
# rank - length of tuple with dimensions
self._rank = len(self._dims)
self._output = {}
# check if self._name is not digit (except '0')
if self._name[0].isdigit():
if self._name.isdigit() and self._name == '0':
self._name = int(self._name)
else:
raise ValueError('Element name cant start from digits')
# index - is an index of current array element
index = [self._start_index[i] for i in range(self._rank)]
# counting number of elements in array(equals to number of loops),
# which is product of every self._dims element
self._loop_size = self._dims[0]
for i in range(1, self._rank):
self._loop_size *= self._dims[i]
# --setting elements value to dictionary self._output--
if not (self._sparse and self._name == 0):
for i in range(self._loop_size):
if isinstance(self._name, str):
self._output[tuple(index)] = Symbol(
self._name + str(list(index)))
else:
self._output[tuple(index)] = self._name
index = self.next_index(index)
self._dims = tuple(self._dims)
self._start_index = tuple(self._start_index)
self._end_index = tuple(self._end_index)
def __add__(self, other):
"""Overload operator '+'. Returns new Arraypy instance, per elemental
sum of two Arraypy instances. Both arrays must have the same shape and
start index.
Examples
========
>> |
tonghuashuai/42qu-notepad | lib/_tornado.py | Python | mit | 3,187 | 0.00502 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import Cookie
import base64
import calendar
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import httplib
import logging
import mimetypes
import os.path
import re
import stat
import sys
import time
import types
import urllib
import urlparse
import uuid
from tornado import web
from tornado.web import HTTPError, utf8
from tld_name import tld_name
from tornado import escape
from tornado import locale
from tornado import stack_context
from tornado import template
def set_cookie(self, name, value, domain=None, expires=None, path='/',
               expires_days=None, **kwargs):
    """Sets the given cookie name/value with the given options.

    Unlike stock Tornado, the domain defaults to the top-level domain of
    the request host, so cookies are shared across subdomains.

    Additional keyword arguments are set on the Cookie.Morsel
    directly.
    See http://docs.python.org/library/cookie.html#morsel-objects
    for available attributes.
    """
    if domain is None:
        domain = '.%s'%tld_name(self.request.host)
    name = escape.native_str(name)
    value = escape.native_str(value)
    if re.search(r"[\x00-\x20]", name + value):
        # Don't let us accidentally inject bad stuff
        raise ValueError("Invalid cookie %r: %r" % (name, value))
    if not hasattr(self, "_new_cookie"):
        self._new_cookie = Cookie.SimpleCookie()
    if name in self._new_cookie:
        del self._new_cookie[name]
    self._new_cookie[name] = value
    morsel = self._new_cookie[name]
    if domain:
        morsel["domain"] = domain
    if expires_days is not None and not expires:
        expires = datetime.datetime.utcnow() + datetime.timedelta(
            days=expires_days)
    if expires:
        if type(expires) is not str:
            # datetime-like value: render it as an RFC 1123 date string.
            timestamp = calendar.timegm(expires.utctimetuple())
            expires = email.utils.formatdate(
                timestamp, localtime=False, usegmt=True
            )
    else:
        # No expiry requested: default to a far-future persistent cookie.
        expires = 'Tue, 01 Jan 2030 00:00:00 GMT'
    morsel['expires'] = expires
    if path:
        morsel["path"] = path
    for k, v in kwargs.iteritems():
        if k == 'max_age':
            # The Morsel attribute is spelled with a dash.
            k = 'max-age'
        morsel[k] = v
web.RequestHandler.set_cookie = set_cookie
def clear_cookie(self, name, path='/', domain=None):
    """Deletes the cookie with the given name.

    Clearing works by re-setting the cookie to an empty value with an
    expiry date in the past.
    """
    past_date = 'Tue, 01 Jun 2000 00:00:00 GMT'
    self.set_cookie(name, value='', path=path, expires=past_date, domain=domain)
web.RequestHandler.clear_cookie = clear_cookie
#from model._db import SQLSTORE, mc
from os import getpid
PID = str(getpid()).ljust(7)
#logging.warn("PID:%s", PID)
def _init(self, *args, **kwds):
    """No-op init; installed as web.RequestHandler.init below."""
    pass
web.RequestHandler.init = _init
def redirect(self, url, permanent=False):
    """Sends a redirect to the given (optionally relative) URL.

    Uses status 301 for permanent redirects and 302 otherwise; the
    response is finished immediately.
    """
    if self._headers_written:
        raise Exception('Cannot redirect after headers have been written')
    if permanent:
        status = 301
    else:
        status = 302
    self.set_status(status)
    self.set_header('Location', url)
    self.finish()
web.RequestHandler.redirect = redirect
def xsrf_form_html(self):
    """Return a hidden <input> carrying the handler's XSRF token."""
    token = self.xsrf_token
    return '<input type="hidden" name="_xsrf" value="%s">' % token
web.RequestHandler.xsrf_form_html = property(xsrf_form_html)
|
LaetitiaPapaxanthos/UnionCom | PrimeDual.py | Python | mit | 2,943 | 0.036357 | # import os
import random
import torch
import numpy as np
def cor_pairs_match_Adam(Kx, Kz, N, params, p1, p2, epo, device):
    """Match rows of two kernel matrices with an Adam-optimised primal-dual scheme.

    Optimises a soft correspondence matrix F (with simplex-style constraints
    enforced through multipliers Mu/Lambda and slack S) so that F aligns the
    scaled kernel Kx with F Kz F^T, then returns the hard assignment.

    :param Kx, Kz: numpy kernel matrices (m x m and n x n).
    :param N: normalisation constant applied to both kernels.
    :param params: options object with epoch_pd, rho, epsilon, delay, epoch_total.
    :param p1, p2: initial scaling; the scale factor starts at sqrt(p2/p1).
    :param epo: current outer epoch (for progress printing only).
    :param device: torch device string, e.g. 'cpu' or 'cuda'.
    :returns: numpy array of length m; pairs[i] is the matched column for row i.
    """
    print("use device:", device)
    Kx = Kx / N
    Kz = Kz / N
    Kx = torch.from_numpy(Kx).float().to(device)
    Kz = torch.from_numpy(Kz).float().to(device)
    m = np.shape(Kx)[0]
    n = np.shape(Kz)[0]
    # Primal variable F and the constant one-vectors used in the constraints.
    F = np.zeros((m,n))
    F = torch.from_numpy(F).float().to(device)
    Im = np.ones((m,1))
    Im = torch.from_numpy(Im).float().to(device)
    In = np.ones((n,1))
    In = torch.from_numpy(In).float().to(device)
    # Dual variables and slack for the row/column-sum constraints.
    Lambda = np.zeros((n,1))
    Lambda = torch.from_numpy(Lambda).float().to(device)
    Mu = np.zeros((m,1))
    Mu = torch.from_numpy(Mu).float().to(device)
    S = np.zeros((n,1))
    S = torch.from_numpy(S).float().to(device)
    a = np.sqrt(p2/p1)
    # Adam hyper-parameters (first/second moment decay and epsilon).
    pho1 = 0.9
    pho2 = 0.999
    delta = 10e-8
    Fst_moment = np.zeros((m,n))
    Fst_moment = torch.from_numpy(Fst_moment).float().to(device)
    Snd_moment = np.zeros((m,n))
    Snd_moment = torch.from_numpy(Snd_moment).float().to(device)
    i=0
    while(i<params.epoch_pd):
        # Gradient of the augmented Lagrangian w.r.t. F.
        grad = 2*torch.mm(F, torch.mm(Kz, torch.mm(torch.t(F), torch.mm(F, torch.t(Kz))))) \
            + 2*torch.mm(F, torch.mm(torch.t(Kz), torch.mm(torch.t(F), torch.mm(F, Kz)))) \
            - 2*a*torch.mm(torch.t(Kx), torch.mm(F, Kz)) - 2*a*torch.mm(Kx, torch.mm(F,torch.t(Kz))) + torch.mm(Mu, torch.t(In)) \
            + torch.mm(Im, torch.t(Lambda)) + params.rho*(torch.mm(F, torch.mm(In, torch.t(In))) - torch.mm(Im, torch.t(In)) \
            + torch.mm(Im, torch.mm(torch.t(Im), F)) + torch.mm(Im, torch.t(S-In)))
        i += 1
        # Adam update with bias-corrected moments.
        Fst_moment = pho1*Fst_moment + (1-pho1)*grad
        Snd_moment = pho2*Snd_moment + (1-pho2)*grad*grad
        hat_Fst_moment = Fst_moment/(1-np.power(pho1,i))
        hat_Snd_moment = Snd_moment/(1-np.power(pho2,i))
        grad = hat_Fst_moment/(torch.sqrt(hat_Snd_moment)+delta)
        # Projected (non-negative) relaxed update of F.
        F_tmp = F - grad
        F_tmp[F_tmp<0]=0
        F = (1-params.epsilon)*F + params.epsilon*F_tmp
        # Slack update, also projected onto the non-negative orthant.
        grad_s = Lambda + params.rho*(torch.mm(torch.t(F), Im) - In + S)
        s_tmp = S - grad_s
        s_tmp[s_tmp<0]=0
        S = (1-params.epsilon)*S + params.epsilon*s_tmp
        # Dual ascent on the row-sum and column-sum constraints.
        Mu = Mu + params.epsilon*(torch.mm(F,In) - Im)
        Lambda = Lambda + params.epsilon*(torch.mm(torch.t(F), Im) - In + S)
        #### if scaling factor a changes too fast, we can delay the update of speed.
        if i>=params.delay:
            # grad_a = 2*a*torch.mm(torch.t(Kx), Kx) - torch.mm(torch.t(Kx), torch.mm(F, torch.mm(Kz, torch.t(F)))) - \
            #     torch.mm(F, torch.mm(torch.t(Kz), torch.mm(torch.t(F), Kx)))
            # a = a - params.epsilon_a*grad_a
            # a = torch.mean(a).to(device)
            # Closed-form least-squares update of the scale factor.
            a = torch.trace(torch.mm(torch.t(Kx), torch.mm(torch.mm(F, Kz), torch.t(F)))) / \
                torch.trace(torch.mm(torch.t(Kx), Kx))
        norm2 = torch.norm(a*Kx - torch.mm(torch.mm(F, Kz), torch.t(F)))
        if (i+1) % 500 == 0:
            print("[{:d}/{}] [{:d}/{:d}]".format(epo+1, params.epoch_total, i+1,params.epoch_pd), norm2.data.item(), \
                "alpha: {:4f}".format(a))
    # Hard assignment: for each row pick the column with the largest weight.
    F = F.cpu().numpy()
    pairs = np.zeros(m)
    for i in range(m):
        pairs[i] = np.argsort(F[i])[-1]
    return pairs
BrechtBa/mpcpy | examples/simple_space_heating_mpc.py | Python | gpl-3.0 | 11,279 | 0.006916 | #!/usr/bin/env python
################################################################################
# Copyright 2015 Brecht Baeten
# This file is part of mpcpy.
#
# mpcpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpcpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpcpy. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import pyomo.environ as pyomo
import mpcpy
# Disturbances
time = np.arange(0.,24.01*3600.,3600.)
dst = {
'time': time,
'T_am': 5 + 2*np.sin(2*np.pi*time/24./3600.)+273.15,
'Q_flow_so': 500 + 500*np.sin(2*np.pi*time/24./3600.),
'p_el': 0.2 + 0.05*np.sin(2*np.pi*time/24./3600.),
'Q_flow_hp_max': 5000*np.ones_like(time),
'T_in_min': 20*np.ones_like(time)+273.15,
'T_em_max': 30*np.ones_like(time)+273.15
}
disturbances = mpcpy.Disturbances(dst, periodic=False)
# test
print(disturbances(1800))
print(disturbances(24.5 * 3600)) # extrapolation
# Emulator
class Emulator(mpcpy.Emulator):
    """
    A custom system emulator.

    Forward-Euler simulation of a two-state thermal model: the emission
    system temperature T_em and the indoor temperature T_in.
    """
    def simulate(self, starttime, stoptime, input):
        """Simulate from starttime to stoptime with a 60 s Euler step.

        :param input: dict with 'time', 'T_am', 'Q_flow_so' and 'Q_flow_hp'
            arrays; values are linearly interpolated onto the solver grid.
        :returns: dict of result arrays keyed by signal name.
        """
        dt = 60
        # np.float was removed from numpy; the builtin float is equivalent.
        time = np.arange(starttime, stoptime+dt, dt, dtype=float)
        # initialize from the last emulated state
        T_em = np.ones_like(time)*self.res['T_em'][-1]
        T_in = np.ones_like(time)*self.res['T_in'][-1]
        # interpolate inputs onto the solver grid
        Q_flow_hp = np.interp(time, input['time'], input['Q_flow_hp'])
        Q_flow_so = np.interp(time, input['time'], input['Q_flow_so'])
        T_am = np.interp(time, input['time'], input['T_am'])
        for i,t in enumerate(time[:-1]):
            # C_em dT_em/dt = Q_flow_hp - UA_em_in*(T_em-T_in)
            T_em[i+1] = T_em[i] + (Q_flow_hp[i] - self.parameters['UA_em_in']*(T_em[i]-T_in[i]))*dt/self.parameters['C_em']
            # C_in dT_in/dt = Q_flow_so - UA_em_in*(T_in-T_em) - UA_in_am*(T_in-T_am)
            # BUG FIX: the indoor state must be scaled by C_in (as in the
            # equation above and the controller model), not by C_em.
            T_in[i+1] = T_in[i] + (Q_flow_so[i] - self.parameters['UA_em_in']*(T_in[i]-T_em[i]) - self.parameters['UA_in_am']*(T_in[i]-T_am[i]))*dt/self.parameters['C_in']
        # create and return a results dict
        res = {
            'time': time,
            'Q_flow_hp':Q_flow_hp,
            'Q_flow_so':Q_flow_so,
            'T_em':T_em,
            'T_in':T_in,
            'T_am':T_am,
        }
        return res
# Emulator parameters and initial conditions:
emulator_parameters = {
'C_em': 10e6,
'C_in': 5e6,
'UA_in_am': 200,
'UA_em_in': 1600
}
emulator_initial_conditions = {
'T_em': 22+273.15,
'T_in': 21+273.15
}
emulator = Emulator(['T_am','Q_flow_so','Q_flow_hp'],parameters=emulator_parameters,initial_conditions=emulator_initial_conditions)
emulator.initialize()
# test
inp = {
'time': [0., 3600., 7200.],
'T_am': [273.15, 274.15, 275.15],
'Q_flow_so': [500., 400., 300.],
'Q_flow_hp': [4000., 4000., 4000.]
}
emulator(np.arange(0., 7201., 1200.), inp)
print(emulator.res['time'])
print(emulator.res['T_em'])
# State estimation
class StateestimationPerfect(mpcpy.Stateestimation):
    """
    Perfect-knowledge state estimation.

    Reads the states straight from the emulator results instead of
    reconstructing them from measurements.
    """
    def stateestimation(self, time):
        """Return the emulated T_in and T_em interpolated at *time*."""
        results = self.emulator.res
        return {
            'T_in': np.interp(time, results['time'], results['T_in']),
            'T_em': np.interp(time, results['time'], results['T_em']),
        }
stateestimation = StateestimationPerfect(emulator)
# test
print(stateestimation(0))
# Prediction
prediction = mpcpy.Prediction(disturbances)
# test
print(prediction([0., 1800., 3600.]))
# Control
class LinearProgram(mpcpy.Control):
def formulation(self):
"""
formulates the abstract optimal control problem
"""
model = pyomo.AbstractModel()
# sets
model.i = pyomo.Set() # initialize=range(len(time)-1)
model.ip = pyomo.Set() # initialize=range(len(time))
# parameters
model.time = pyomo.Param(model.ip)
model.UA_em_in = pyomo.Param(initialize=800.)
model.UA_in_am = pyomo.Param(initialize=200.)
model.C_in = pyomo.Param(initialize=5.0e6)
model.C_em = pyomo.Param(initialize=20.0e6)
model.T_in_ini = pyomo.Param(initialize=21.+273.15)
model.T_em_ini = pyomo.Param(initialize=22.+273.15)
model.T_in_min = pyomo.Param(initialize=20.+273.15)
model.T_in_max = pyomo.Param(initialize=24.+273.15)
model.T_am = pyomo.Param(model.i, initialize=0.+273.15)
model.Q_flow_so = pyomo.Param(model.i, initialize=0.)
# variables
model.T_in = pyomo.Var(model.ip,domain=pyomo.Reals, initialize=20.+273.15)
model.T_em = pyomo.Var(model.ip,domain=pyomo.Reals,initialize=20.+273.15)
model.T_in_min_slack = pyomo.Var(model.ip,domain=pyomo.NonNegativeReals, initialize=0)
model.T_in_max_slack = pyomo.Var(model.ip,domain=pyomo.NonNegativeReals, initialize=0)
model.Q_flow_hp = pyomo.Var(model.i,domain=pyomo.NonNegativeReals,bounds=(0.,10000.),initialize=0.)
# constraints
model.state_T_em = pyomo.Constraint(
model.i,
rule=lambda model,i: model.C_em*(model.T_em[i+1]-model.T_em[i])/(model.time[i+1]-model.time[i]) == \
model.Q_flow_hp[i] \
- model.UA_em_in*(model.T_em[i]-model.T_in[i])
)
model.ini_T_em = pyomo.Constraint(rule=lambda model: model.T_em[0] == model.T_em_ini)
model.state_T_in = pyomo.Constraint(
model.i,
rule=lambda model,i: model.C_in*(model.T_in[i+1]-model.T_in[i])/(model.time[i+1]-model.time[i]) == \
model.Q_flow_so[i] \
- model.UA_em_in*(model.T_in[i]-model.T_em[i]) \
- model.UA_in_am*(model.T_in[i]-model.T_am[i])
)
model.ini_T_in = pyomo.Constraint(rule=lambda model: model.T_in[0] == model.T_in_ini)
# soft constraints
model.constraint_T_in_min_slack = pyomo.Constraint(
model.ip,
rule=lambda model,i: model.T_in_min_slack[i] >= model.T_in_min-model.T_in[i]
)
model.constraint_T_in_max_slack = pyomo.Constraint(
model.ip,
rule=lambda model,i: model.T_in_max_slack[i] >= model.T_in[i]-model.T_in_max
)
# a large number
L = 1e6
# objective
model.objective = pyomo.Objective(
rule=lambda model: sum(model.Q_flow_hp[i]*(model.time[i+1]-model.time[i])/3600/1000 for i in model.i) \
+sum(model.T_in_min_slack[i]*(model.time[i+1]-model.time[i])/3600 for i in model.i)*L\
+sum(model.T_in_max_slack[i]*(model.time[i+1]-model.time[i])/3600 for i in model.i)*L\
)
self.model = model
def solution(self, sta, pre):
"""
instanciate the optimal control problem, solve it and return a solution dictionary
"""
ip = np.arange(len(pre['time']))
data = {
None: {
'i': {None: ip[:-1]},
'ip': {None: ip},
'time': {(i,): v for i, v in enumerate(pre['time'])},
'T_am': {(i,): pre['T_am'][i] for i in ip[:-1]},
'Q_flow_so': {(i,): pre['Q_flow_so'][i] for i in ip[:-1]},
'T_em_ini': {None: sta['T_em']},
'T_in_ini': {None: sta['T_in']},
'C_em': {None: self.parameters['C_em']},
'C_in': {None: self.parameters['C_in']},
'UA_em_in': {None: self.parameters['UA_em_in']},
'UA_in_am': {None: self.par |
teoreteetik/api-snippets | sync/rest/maps/retrieve-map-item/retrieve-map-item.6.x.py | Python | mit | 471 | 0 | # | Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)

# Fetch the "steph_curry" item from the "Players" sync map.
map_item = client.sync \
    .services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .sync_maps("Players") \
    .sync_map_items("steph_curry") \
    .fetch()

print(map_item.data)
|
manasi24/tempest | tempest/scenario/test_large_ops.py | Python | apache-2.0 | 5,090 | 0.000393 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestLargeOpsScenario(manager.ScenarioTest):

    """
    Test large operations.

    This test below:
    * Spin up multiple instances in one nova call, and repeat three times
    * as a regular user
    * TODO: same thing for cinder
    """

    @classmethod
    def skip_checks(cls):
        """Skip the whole class unless multiple instances are requested."""
        super(TestLargeOpsScenario, cls).skip_checks()
        if CONF.scenario.large_ops_number < 1:
            raise cls.skipException("large_ops_number not set to multiple "
                                    "instances")

    @classmethod
    def setup_credentials(cls):
        # These scenarios do not exercise networking.
        cls.set_network_resources()
        super(TestLargeOpsScenario, cls).setup_credentials()

    @classmethod
    def resource_setup(cls):
        super(TestLargeOpsScenario, cls).resource_setup()
        # list of cleanup calls to be executed in reverse order
        cls._cleanup_resources = []

    @classmethod
    def resource_cleanup(cls):
        """Run queued cleanup calls in reverse registration order."""
        while cls._cleanup_resources:
            function, args, kwargs = cls._cleanup_resources.pop(-1)
            try:
                function(*args, **kwargs)
            except lib_exc.NotFound:
                # The resource is already gone; nothing left to clean up.
                pass
        super(TestLargeOpsScenario, cls).resource_cleanup()

    @classmethod
    def addCleanupClass(cls, function, *arguments, **keywordArguments):
        """Queue a cleanup call to be run during resource_cleanup()."""
        cls._cleanup_resources.append((function, arguments, keywordArguments))

    def _wait_for_server_status(self, status):
        """Block until every booted server reaches *status*."""
        for server in self.servers:
            # Make sure nova list keeps working throughout the build process
            self.servers_client.list_servers()
            waiters.wait_for_server_status(self.servers_client,
                                           server['id'], status)

    def nova_boot(self):
        """Boot large_ops_number servers in one call and wait for ACTIVE."""
        name = data_utils.rand_name('scenario-server')
        flavor_id = CONF.compute.flavor_ref
        # Explicitly create secgroup to avoid cleanup at the end of testcases.
        # Since no traffic is tested, we don't need to actually add rules to
        # secgroup
        secgroup = self.security_groups_client.create_security_group(
            name='secgroup-%s' % name, description='secgroup-desc-%s' % name)
        self.addCleanupClass(self.security_groups_client.delete_security_group,
                             secgroup['id'])
        create_kwargs = {
            'min_count': CONF.scenario.large_ops_number,
            'security_groups': [{'name': secgroup['name']}]
        }
        network = self.get_tenant_network()
        create_kwargs = fixed_network.set_networks_kwarg(network,
                                                         create_kwargs)
        #self.servers_client.create_server(
        self.create_server(
            name,
            '',
            flavor_id,
            **create_kwargs)
        # needed because of bug 1199788
        params = {'name': name}
        server_list = self.servers_client.list_servers(**params)
        self.servers = server_list['servers']
        for server in self.servers:
            # after deleting all servers - wait for all servers to clear
            # before cleanup continues
            self.addCleanupClass(self.servers_client.
                                 wait_for_server_termination,
                                 server['id'])
        for server in self.servers:
            self.addCleanupClass(self.servers_client.delete_server,
                                 server['id'])
        self._wait_for_server_status('ACTIVE')

    def _large_ops_scenario(self):
        #self.glance_image_create()
        self.nova_boot()

    @test.idempotent_id('14ba0e78-2ed9-4d17-9659-a48f4756ecb3')
    @test.services('compute', 'image')
    def test_large_ops_scenario_1(self):
        self._large_ops_scenario()

    @test.idempotent_id('b9b79b88-32aa-42db-8f8f-dcc8f4b4ccfe')
    @test.services('compute', 'image')
    def test_large_ops_scenario_2(self):
        self._large_ops_scenario()

    @test.idempotent_id('3aab7e82-2de3-419a-9da1-9f3a070668fb')
    @test.services('compute', 'image')
    def test_large_ops_scenario_3(self):
        self._large_ops_scenario()
|
PuZZleDucK/pixelated-user-agent | service/test/functional/features/steps/attachments.py | Python | agpl-3.0 | 2,007 | 0.000997 | #
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from email.mime.application import MIMEApplication
from time import sleep
from leap.mail.mail import Message
from common import *
from test.support.integration import MailBuilder
from behave import given
from crochet import wait_for
from uuid import uuid4
from email.MIMEMultipart import MIMEMultipart
from email.mime.text import MIMEText
@given(u'I have a mail with an attachment in my inbox')
def add_mail_with_attachment_impl(context):
    """Seed the inbox with one attachment-bearing mail; remember its subject."""
    topic = 'Hi! This the subject %s' % uuid4()
    load_mail_into_soledad(context, build_mail_with_attachment(topic))
    context.last_subject = topic
def build_mail_with_attachment(subject):
    """Return a multipart mail: a utf-8 text body plus one .txt attachment."""
    message = MIMEMultipart()
    message['Subject'] = subject
    body = MIMEText(u'a utf8 message', _charset='utf-8')
    message.attach(body)
    payload = MIMEApplication('pretend to be binary attachment data')
    payload.add_header('Content-Disposition', 'attachment', filename='filename.txt')
    message.attach(payload)
    return message
@wait_for(timeout=10.0)
def load_mail_into_soledad(context, mail):
    # crochet's wait_for blocks until the returned deferred fires (10 s cap),
    # so the mail is persisted to INBOX before the scenario continues.
    return context.client.mail_store.add_mail('INBOX', mail.as_string())
@then(u'I see the mail has an attachment')
def step_impl(context):
    """Assert exactly one attachment is rendered in the open mail view."""
    rendered = find_elements_by_css_selector(context, '.attachmentsArea li')
    assert 1 == len(rendered)
MAPSuio/spring-challenge16 | frengers/generate.py | Python | mit | 734 | 0.001362 | from random import choice, shuffle
# Generate 8000 unique, shuffled "meet"/"friends" event lines between
# distinct people drawn from names.txt, and print them to stdout.
# Fixes: dataset-separator corruption around the dedup check, the invalid
# 'ro' open mode (rejected by Python 3), and the unclosed file handle.
with open('names.txt', 'r') as names_fd:
    names = [name.strip() for name in names_fd.readlines()]

# Weighted event pool: "meet" 7/10 of the time, "friends" 3/10.
events = ["meet"] * 7 + ["friends"] * 3

entries = set()
while len(entries) < 8000:
    event = choice(events)
    person_a = choice(names)
    person_b = choice(names)
    if person_a == person_b:
        continue  # an event needs two distinct people
    case = event + " " + person_a + " " + person_b
    if case not in entries:
        # Treat "A B" and "B A" as the same pair: only keep one direction.
        equivalent = event + " " + person_b + " " + person_a
        if equivalent not in entries:
            entries.add(case)

result = list(entries)
shuffle(result)
for entry in result:
    print(entry)
|
wette/netSLS | network_emulator/rpc_server.py | Python | apache-2.0 | 3,616 | 0.000553 | """
Copyright 2015 Malte Splietker
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import traceback
import gevent
import gevent.wsgi
import gevent.queue
from tinyrpc import MethodNotFoundError, ServerError
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.wsgi import WsgiServerTransport
from tinyrpc.server.gevent import RPCServerGreenlets
from tinyrpc.dispatch import RPCDispatcher
import configuration
import utils
logger = logging.getLogger(__name__)
class RPCServer(object):
    """Provides an interface via JSON-RPC.

    This class creates a JSON-RPC server that dispatches calls to @public
    methods of the given interface.
    """

    def __init__(self, interface):
        """
        Args:
            interface: Interface to provide.
        """
        self.__dispatcher = RPCLoggingDispatcher()
        transport = WsgiServerTransport(queue_class=gevent.queue.Queue)
        # Listen on all interfaces at the configured RPC port.
        # log=None silences the WSGI server's per-request logging.
        # (Fixes tokens corrupted by dataset separators in the original.)
        self._wsgi_server = gevent.wsgi.WSGIServer(
            ('', configuration.get_rpc_server_port()),
            transport.handle,
            log=None)
        gevent.spawn(self._wsgi_server.serve_forever)
        self.__server = RPCServerGreenlets(
            transport,
            JSONRPCProtocol(),
            self.__dispatcher
        )
        # register interface's public functions
        self.__dispatcher.register_instance(interface, "")

    def serve_forever(self):
        """Starts the RPC server and serves forever."""
        logger.info("RPC server started listening on 0.0.0.0:{}".format(
            configuration.get_rpc_server_port()))
        try:
            self.__server.serve_forever()
        except gevent.hub.LoopExit:
            # FIXME: Right now this exception seems to be expected in this situation. Maybe have another look...
            pass

    def stop(self):
        """Stops the RPC server."""
        self._wsgi_server.stop()
class RPCLoggingDispatcher(RPCDispatcher):
    """A modified version of RPCDispatcher which logs errors on the server side."""

    def _dispatch(self, request):
        """Look up and invoke the requested method, logging every failure.

        Unknown methods answer MethodNotFoundError, exceptions raised by the
        handler are logged and returned to the caller, and any other internal
        failure is masked as a generic ServerError so clients learn nothing
        about server internals.
        """
        try:
            try:
                method = self.get_method(request.method)
            except KeyError as e:
                logger.error("RPC method not found: {}".format(request.method))
                return request.error_respond(MethodNotFoundError(e))
            # we found the method
            try:
                result = method(*request.args, **request.kwargs)
            except Exception as e:
                # an error occurred within the method, return it
                logger.error("RPC method {} failed:\n{}".format(
                    request.method, utils.indent(traceback.format_exc(), 2)))
                return request.error_respond(e)
            # respond with result
            return request.respond(result)
        except Exception as e:
            logger.error("RPC method {} failed unexpectedly:\n{}".format(
                request.method, utils.indent(traceback.format_exc(), 2)))
            # unexpected error, do not let client know what happened
            return request.error_respond(ServerError())
|
bokunimowakaru/Wi-SUN_EnergyMeter | user_conf.py | Python | mit | 486 | 0.005464 | # | coding: UTF-8
#
# user_conf.py
#
# Smart power meter (Wi-SUN) user settings
#
# Copyright(C) 2016 pi@blue-black.ink
#
SEM_ROUTEB_ID = '00000000000000000000000000000000'  # Route-B ID (placeholder zeros)
SEM_PASSWORD = 'XXXXXXXXXXXX'  # Route-B password (placeholder)
SEM_INTERVAL = 20 # instantaneous-power polling interval [s]
SEM_DURATION = 6 # active-scan duration (normally no need to change)
SOCK_UDP = '192.168.0.255' # UDP socket IP address (broadcast)
SOCK_PORT = 1024 # UDP port number (0 = OFF)
|
tstone2077/django-bases | site/main/views.py | Python | gpl-3.0 | 214 | 0.028037 | from django.shortcuts import render_to_response
from django.temp | late imp | ort RequestContext
def about(request):
    """Render the static "about" page with the nav state set to 'about'."""
    ctx = RequestContext(request, {'active': 'about'})
    return render_to_response('about.html', context_instance=ctx)
|
hddn/studentsdb | students/signals.py | Python | mit | 1,010 | 0.00297 | # -*- coding: utf-8 -*-
import logging
from django.db.models.signals import post_save, post_delete
from django.dispatch import rec | eiver
from .models import Student
@receiver(post_save, sender=Student)
def log_student_updated_added_event(sender, **kwargs):
    """Writes info about an updated or newly added student into the log file.

    Fixes a dataset-separator corruption in the getLogger call.
    """
    logger = logging.getLogger(__name__)
    student = kwargs['instance']
    # post_save passes created=True only on the initial insert.
    if kwargs['created']:
        logger.info("Student added: {} {} (ID: {:d})".format(student.first_name, student.last_name, student.id))
    else:
        logger.info("Student updated: {} {} (ID: {:d})".format(student.first_name, student.last_name, student.id))
@receiver(post_delete, sender=Student)
def log_student_deleted_event(sender, **kwargs):
    """Writes info about a deleted student into the log file."""
    removed = kwargs['instance']
    log = logging.getLogger(__name__)
    log.info("Student deleted: {} {} (ID: {:d})".format(
        removed.first_name, removed.last_name, removed.id))
|
winxos/python | cache_grab.py | Python | mit | 1,468 | 0.042244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# grab cache file to user path
# used for game factory, grab cached files
# winxos 2014-02-22
import os,sys
import fnmatch
import getpass #get user name
import shutil #copy
from time import sleep
# IE cache directory of the current user (Windows-only path).
# NOTE(review): '\L', '\M', '\W', '\I' are not escape sequences today, but a
# raw string r'AppData\...' would be safer — confirm before changing.
rootdir = os.path.join('C:/Users',getpass.getuser(),'AppData\Local\Microsoft\Windows\INetCache\IE')
def copy_file(file, newpath):
    """Copy a cached file into <script dir>/newpath, dropping the cache suffix.

    IE cache entries carry a "[1]"-style marker before the extension; the
    last three characters of the stem are stripped to recover the name.
    """
    target_dir = os.path.join(sys.path[0], newpath)  # absolute destination dir
    if not os.path.exists(target_dir):  # create it on first use
        os.mkdir(target_dir)
    stem, ext = os.path.splitext(os.path.basename(file))
    shutil.copy(file, os.path.join(target_dir, stem[0:-3] + ext))
def get_cachefile(name, newpath):
    """Scan the IE cache for files matching pattern ``name``; copy into newpath.

    Retries up to 6 times, sleeping 30 s between scans, so files still being
    written to the cache can be picked up on a later pass. Fixes tokens
    corrupted by dataset separators and the "successed" typo in the message.
    """
    for i in range(6):  # max try times
        flag = False
        for parent, dirnames, filenames in os.walk(rootdir):
            for file in filenames:
                if fnmatch.fnmatch(file, name):  # glob-style match
                    copy_file(os.path.join(parent, file), newpath)
                    flag = True
        if flag:
            print("grab files succeeded.")
            return
        sleep(30)  # wait for the browser to finish caching
    print("grab files failed.")
from threading import Thread
#grab files
def grab_file(name, newpath):
    """Start an asynchronous grab of cached files matching ``name``."""
    stem, ext = os.path.splitext(name)
    # cache entries carry extra characters between stem and extension
    pattern = stem + '*' + ext
    worker = Thread(target=get_cachefile, args=(pattern, newpath))
    worker.start()  # fetch asynchronously
if __name__ =='__main__':
grab_file("*.jpg","tmp_jpgs") |
sony/nnabla | python/test/utils/test_graph_converters/test_batch_normalization_folding.py | Python | apache-2.0 | 2,857 | 0.0014 | # Copyright 2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
import nnabla as nn
import nnabla.experimental.graph_converters as GC
from nnabla.ext_utils import get_extension_context
from nbla_test_utils import list_context
from .ref_graphs.lenets import bn_lenet, bn_folding_lenet, bn_opp_lenet
from .ref_graphs.resnets import (small_bn_resnet,
small_bn_folding_resnet,
small_bn_opp_resnet)
ctxs = list_context('Convolution') # proxy to switch the context
batch_size = 1
lenet_ref = bn_folding_lenet
resnet_ref = small_bn_folding_resnet
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('test', [True])
@pytest.mark.parametrize('w_bias', [True])
@pytest.mark.parametrize('channel_last', [True])
@pytest.mark.parametrize('graph_ref, graph_act, opposite',
                         [(resnet_ref, small_bn_resnet, False),
                          (resnet_ref, small_bn_opp_resnet, True)])
def test_batch_normalization_folding(ctx, func_name, seed, test, w_bias,
                                     channel_last, graph_ref, graph_act, opposite):
    """BN folding yields a graph matching the reference in structure and value.

    Fixes dataset-separator corruption in the skip message and the
    channel_last conditional; drops the non-idiomatic `== True` comparisons.
    """
    from .graph_converter_test_utils import structure_tester, value_tester

    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip(
            'ChannelLast conversion is only supported in cuDNN context.')

    with nn.context_scope(ctx):
        # Random number
        np.random.seed(seed)
        rng = np.random.RandomState(seed)

        # Graph: NHWC input when channel_last, NCHW otherwise.
        x_data = rng.randn(batch_size, 32, 32, 3) if channel_last else rng.randn(
            batch_size, 3, 32, 32)
        x = nn.Variable.from_numpy_array(x_data)
        y_tgt = graph_act(x, test=test, w_bias=w_bias,
                          channel_last=channel_last)

        # FunctionModifier
        modifiers = []
        modifiers.append(GC.BatchNormalizationFoldingModifier(
            opposite, channel_last))
        y_act = GC.GraphConverter(modifiers).convert(y_tgt)

        # Ref Graph
        y_ref = graph_ref(x, test=test, channel_last=channel_last)

        # Test
        structure_tester(y_ref, y_act)
        value_tester(y_tgt, y_act, rtol=6e-02, atol=5e-02)
|
Kalimaha/fake_data_crud_service | test_data_crud_services/resources/test_book.py | Python | mit | 338 | 0.002959 | book_1 = {
"ISBN": "0436203057",
"authors": [
"Howard Marks"
],
"genre": "non-fic | tion",
"pages": 466,
"title": "Mr. Nice"
}
book_ | 2 = {
"ISBN": "0439708184",
"authors": [
"J. K. Rowling"
],
"genre": "fiction",
"pages": 322,
"title": "Harry Potter and the Sorcerer's Stone"
} |
jimmyraywv/cloud-custodian | tests/test_sqs.py | Python | apache-2.0 | 10,409 | 0.001537 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest, functional
from botocore.exceptions import ClientError
import json, time
class TestSqsAction(BaseTest):
    @functional
    def test_sqs_delete(self):
        """A policy 'delete' action removes the queue (purging then fails)."""
        session_factory = self.replay_flight_data(
            'test_sqs_delete')
        client = session_factory().client('sqs')
        client.create_queue(QueueName='test-sqs')
        queue_url = client.get_queue_url(QueueName='test-sqs')['QueueUrl']
        p = self.load_policy({
            'name': 'sqs-delete',
            'resource': 'sqs',
            'filters': [{'QueueUrl': queue_url}],
            'actions': [
                {'type': 'delete'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # The queue is gone, so purge must raise against the deleted URL.
        self.assertRaises(
            ClientError,
            client.purge_queue, QueueUrl=queue_url)
    @functional
    def test_sqs_set_encryption(self):
        """set-encryption attaches the aliased KMS key to the queue."""
        session_factory = self.replay_flight_data(
            'test_sqs_set_encryption')
        client_sqs = session_factory().client('sqs')
        client_sqs.create_queue(QueueName='sqs-test')
        queue_url = client_sqs.get_queue_url(QueueName='sqs-test')['QueueUrl']
        self.addCleanup(client_sqs.delete_queue, QueueUrl=queue_url)
        client_kms = session_factory().client('kms')
        key_id = client_kms.create_key(Description='West SQS encryption key')['KeyMetadata']['KeyId']
        client_kms.create_alias(
            AliasName='alias/new-key-test-sqs',
            TargetKeyId=key_id)
        self.addCleanup(client_kms.disable_key, KeyId=key_id)
        p = self.load_policy({
            'name': 'sqs-delete',
            'resource': 'sqs',
            'filters': [{'QueueUrl': queue_url}],
            'actions': [
                {'type': 'set-encryption',
                 'key': 'new-key-test-sqs'}]},
            session_factory=session_factory)
        resources = p.run()
        check_master_key = client_sqs.get_queue_attributes(
            QueueUrl=queue_url,
            AttributeNames=['All'])['Attributes']['KmsMasterKeyId']
        # Expected key id comes from the recorded flight data, not live AWS.
        self.assertEqual(check_master_key, 'c4816d44-73c3-4eed-a7cc-d52a74fa3294')
    @functional
    def test_sqs_remove_matched(self):
        """remove-statements with 'matched' strips only cross-account statements."""
        session_factory = self.replay_flight_data('test_sqs_remove_matched')
        client = session_factory().client('sqs')
        name = 'test-sqs-remove-matched-1'
        queue_url = client.create_queue(QueueName=name)['QueueUrl']
        self.addCleanup(client.delete_queue, QueueUrl=queue_url)
        client.set_queue_attributes(
            QueueUrl=queue_url,
            Attributes={'Policy':json.dumps({
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Sid": "SpecificAllow",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": "arn:aws:iam::123456789012:root"
                        },
                        "Action": [
                            "sqs:Subscribe"
                        ]
                    },
                    {
                        "Sid": "Public",
                        "Effect": "Allow",
                        "Principal": "*",
                        "Action": [
                            "sqs:GetqueueAttributes"
                        ]
                    }
                ]
            })}
        )
        p = self.load_policy({
            'name': 'sqs-rm-matched',
            'resource': 'sqs',
            'filters': [
                {'QueueUrl': queue_url},
                {'type': 'cross-account',
                 'whitelist': ["123456789012"]}
            ],
            'actions': [
                {'type': 'remove-statements',
                 'statement_ids': 'matched'}]
            },
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual([r['QueueUrl'] for r in resources], [queue_url])
        # Only the whitelisted-account statement should survive the removal.
        data = json.loads(client.get_queue_attributes(QueueUrl=resources[0]['QueueUrl'], AttributeNames=['Policy'])['Attributes']['Policy'])
        self.assertEqual(
            [s['Sid'] for s in data.get('Statement', ())],
            ['SpecificAllow'])
    @functional
    def test_sqs_remove_named(self):
        """remove-statements with an explicit sid list drops just that statement."""
        session_factory = self.replay_flight_data('test_sqs_remove_named')
        client = session_factory().client('sqs')
        name = 'test-sqs-remove-named'
        queue_url = client.create_queue(QueueName=name)['QueueUrl']
        self.addCleanup(client.delete_queue, QueueUrl=queue_url)
        client.set_queue_attributes(
            QueueUrl=queue_url,
            Attributes={'Policy':json.dumps({
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Sid": "SpecificAllow",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": "arn:aws:iam::644160558196:root"
                        },
                        "Action": ["sqs:Subscribe"]
                    },
                    {
                        "Sid": "RemoveMe",
                        "Effect": "Allow",
                        "Principal": "*",
                        "Action": ["sqs:GetqueueAttributes"]
                    }
                ]
            })}
        )
        p = self.load_policy({
            'name': 'sqs-rm-named',
            'resource': 'sqs',
            'filters': [{'QueueUrl': queue_url}],
            'actions': [
                {'type': 'remove-statements',
                 'statement_ids': ['RemoveMe']}]
            },
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # The named sid must be gone from the resulting queue policy.
        data = json.loads(client.get_queue_attributes(QueueUrl=resources[0]['QueueUrl'], AttributeNames=['Policy'])['Attributes']['Policy'])
        self.assertTrue('RemoveMe' not in [s['Sid'] for s in data.get('Statement', ())])
@functional
def test_sqs_mark_for_op(self):
session_factory = self.replay_flight_data('test_sqs_mark_for_op')
client = session_factory().client('sqs')
name = 'test-sqs'
queue_url = client.create_queue(Que | ueName=name)['QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
p = self.load_policy({
'name': 'sqs-mark-for-op',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'mark-for-op',
'tag': 'tag-for-op',
'op': 'delete',
'days': 1}]
},
ses | sion_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags_after_run = client.list_queue_tags(
QueueUrl=queue_url).get('Tags', {})
self.assertTrue("tag-for-op" in tags_after_run)
@functional
def test_sqs_tag(self):
session_factory = self.replay_flight_data('test_sqs_tags')
client = session_factory().client('sqs')
name = 'test-sqs'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
p = self.load_policy({
'name': 'sqs-mark-for-op',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'tag',
'key': 'tag- |
r0h4n/ceph-integration | tendrl/ceph_integration/objects/rbd/atoms/delete/__init__.py | Python | lgpl-2.1 | 2,691 | 0 | from tendrl.ceph_integration.manager.exceptions import \
RequestStateError
from tendrl.ceph_integration.manager.rbd_crud import RbdCrud
from tendrl.ceph_integration.objects.rbd import Rbd
from tendrl.commons.event import Event
from tendrl.commons.message import Message
from tendrl.commons import objects
class Delete(objects.BaseAtom):
    """Atom that deletes an RBD from a pool and removes its etcd entry.

    Fixes tokens corrupted by dataset separators in the failure message
    concatenation and the `return False` statement.
    """
    obj = Rbd

    def __init__(self, *args, **kwargs):
        super(Delete, self).__init__(*args, **kwargs)

    def run(self):
        """Delete the RBD; return True on success, False on request failure."""
        pool_id = self.parameters['Rbd.pool_id']
        rbd_name = self.parameters['Rbd.name']
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Deleting rbd %s on pool %s" %
                    (self.parameters['Rbd.name'],
                     self.parameters['Rbd.pool_id'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        crud = RbdCrud()
        resp = crud.delete_rbd(
            pool_id,
            rbd_name
        )
        try:
            # Block until the ceph request settles; raises on a bad state.
            crud.sync_request_status(resp['request'])
        except RequestStateError as ex:
            Event(
                Message(
                    priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Failed to delete rbd %s."
                                   " Error: %s" % (self.parameters['Rbd.name'],
                                                   ex)
                    },
                    job_id=self.parameters['job_id'],
                    flow_id=self.parameters['flow_id'],
                    cluster_id=NS.tendrl_context.integration_id,
                )
            )
            return False
        # Drop the RBD subtree from etcd now that ceph has removed it.
        NS._int.wclient.delete(
            "clusters/%s/Pools/%s/Rbds/%s" % (
                NS.tendrl_context.integration_id,
                self.parameters['Rbd.pool_id'],
                self.parameters['Rbd.name']
            ),
            recursive=True
        )
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Deleted rbd %s on pool-id %s" %
                    (self.parameters['Rbd.name'],
                     self.parameters['Rbd.pool_id'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return True
|
thombashi/typepy | typepy/checker/_nan.py | Python | mit | 826 | 0.002421 | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from ._checker import CheckerFactory, TypeCheckerBase, TypeCheckerDelegator
from ._common import isnan, isstring
class NanTypeCheckerStrictLevel0(TypeCheckerBase):
    """Strict-level-0 NaN checker: the value (or its conversion) is NaN."""

    def is_instance(self):
        # True when the raw value is already NaN.
        return isnan(self._value)

    def is_valid_after_convert(self, converted_value):
        # True when conversion produced NaN.
        return isnan(converted_value)
class NanTypeCheckerStrictLevel1(NanTypeCheckerStrictLevel0):
    """Strict-level-1 NaN checker: additionally rejects string values.

    Fixes a dataset-separator corruption before the return statement.
    """

    def is_exclude_instance(self):
        # Strings such as "nan" must not be treated as NaN at this level.
        return isstring(self._value)
_factory = CheckerFactory(
checker_mapping={0: NanTypeCheckerStrictLevel0, 1: NanTypeCheckerStrictLevel1}
)
class NanTypeChecker(TypeCheckerDelegator):
    """Public NaN type checker delegating to the strict-level checker factory.

    Fixes a dataset-separator corruption in the strict_level keyword.
    """

    def __init__(self, value, strict_level):
        super().__init__(value=value, checker_factory=_factory, strict_level=strict_level)
|
ttm/pnud4 | flask/auxiliar.py | Python | unlicense | 5,873 | 0.011588 | #-*- coding: utf8 -*-
from SPARQLWrapper import SPARQLWrapper, JSON
from configuracao import *
import string, networkx as x, nltk as k
import __builtin__
stemmer = k.stem.RSLPStemmer()
def fazRedeAmizades():
    """Build the foaf:knows friendship graph; publish it as __builtin__.g.

    Removed the misleading no-op `global SPARQLWrapper` declaration — the
    function never rebinds that name.
    """
    q="""SELECT ?a ?b ?aname ?bname
    WHERE {
    ?a foaf:knows ?b .
    }"""
    sparql=SPARQLWrapper(URL_ENDPOINT_)
    sparql.setQuery(PREFIX+q)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    g=x.Graph()
    # One undirected edge per foaf:knows binding.
    for amizade in results["results"]["bindings"]:
        nome1=amizade["a"]["value"]
        nome2=amizade["b"]["value"]
        g.add_edge(nome1,nome2)
    __builtin__.g=g
def fazRedeInteracao():
    """Build the directed comment-interaction graph; publish it as __builtin__.d.

    An edge runs from the article's author to each commenter; repeated
    interactions accumulate in the edge weight.
    """
    q="""SELECT ?participante1 ?participante2 ?aname ?bname
    WHERE {
    ?comentario dc:type tsioc:Comment.
    ?participante1 ops:performsParticipation ?comentario.
    ?participante1 foaf:name ?aname.
    ?artigo sioc:has_reply ?comentario.
    ?participante2 ops:performsParticipation ?artigo.
    ?participante2 foaf:name ?bname.
    }"""
    sparql=SPARQLWrapper(URL_ENDPOINT_)
    sparql.setQuery(PREFIX+q)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    grafo = x.DiGraph()
    for binding in results["results"]["bindings"]:
        destino = binding["participante1"]["value"]
        origem = binding["participante2"]["value"]
        if (origem, destino) in grafo.edges():
            # repeated interaction: bump the existing edge weight
            grafo[origem][destino]["weight"] += 1
        else:
            grafo.add_edge(origem, destino, weight=1.)
    __builtin__.d = grafo
def fazBoW():
    """Build the site-wide bag of words from all comments and articles.

    Publishes the top-400 stems as __builtin__.radicais_escolhidos and the
    full frequency distribution as __builtin__.bow. Fixes tokens corrupted
    by dataset separators in the SPARQLWrapper call and results lookup.
    """
    q="SELECT ?cbody ?titulo ?abody WHERE \
    {?foo ops:performsParticipation ?participacao.\
    OPTIONAL { ?participacao schema:articleBody ?abody. }\
    OPTIONAL {?participacao dc:title ?titulo . }\
    OPTIONAL {?participacao schema:text ?cbody .}}"
    sparql=SPARQLWrapper(URL_ENDPOINT_)
    sparql.setQuery(PREFIX+q)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    msgs_=results["results"]["bindings"]
    # Drop stress-test posts and known developer-community boilerplate.
    msgs=[mm for mm in msgs_ if ("titulo" not in mm.keys()) or
            (("teste de stress" not in mm["titulo"]["value"].lower())
            or ("cbody" not in mm.keys() or ("comunidade de desenvolvedores e nesse caso, quanto mais"
            not in mm["cbody"]["value"].lower())))]
    textos1=[i["cbody"]["value"] for i in msgs if "cbody" in i.keys()]
    textos2=[i["abody"]["value"] for i in msgs if "abody" in i.keys()]
    textos=textos1+textos2
    # Build the bag of words: lowercase, strip punctuation, stem,
    # drop stopwords and pure numbers.
    texto=string.join(textos).lower()
    texto_= ''.join(ch for ch in texto if ch not in EXCLUDE)
    texto__=texto_.split()
    texto___=[stemmer.stem(pp) for pp in texto__ if (pp not in STOPWORDS) and (not pp.isdigit())]
    fdist=k.FreqDist(texto___)
    radicais_escolhidos=fdist.keys()[:400]
    __builtin__.radicais_escolhidos=radicais_escolhidos
    __builtin__.bow=fdist
def fazBoWs():
    """Build a per-user bag of words; publish the dict as __builtin__.bows.

    Requires fazBoW() to have run first so the portal-wide vocabulary
    (radicais_escolhidos) is available.
    """
    # fetch all contributors
    q="""SELECT DISTINCT ?participante
    WHERE {
    ?foo dc:contributor ?participante .
    }"""
    sparql=SPARQLWrapper(URL_ENDPOINT_)
    sparql.setQuery(PREFIX+q)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    participantes_=results["results"]["bindings"]
    participantes=[i["participante"]["value"] for i in participantes_]
    # start the per-participant loop
    if "radicais_escolhidos" not in dir(__builtin__):
        print(u"rode BoW antes, para saber do vocabulário geral do portal")
    else:
        radicais_escolhidos=__builtin__.radicais_escolhidos
    bows={}
    for participante in participantes:
        # fetch all of this user's comments
        # and article bodies
        q="""SELECT DISTINCT ?abody ?cbody
        WHERE {
        <%s> ops:performsParticipation ?participacao.
        OPTIONAL { ?participacao schema:articleBody ?abody. }
        OPTIONAL { ?participacao schema:text ?cbody. }
        OPTIONAL {?comentario dc:title ?titulo . }
        }"""%(participante,)
        sparql = SPARQLWrapper("http://localhost:82/participabr/query")
        sparql.setQuery(PREFIX+q)
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
        results_=results["results"]["bindings"]
        results__=[mm for mm in results_ if ("titulo" not in mm.keys()) or
                (("teste de stress" not in mm["titulo"]["value"].lower())
                or ("cbody" not in mm.keys() or ("comunidade de desenvolvedores e nesse caso, quanto mais"
                not in mm["cbody"]["value"].lower())))]
        textos1=[i["cbody"]["value"] for i in results__ if "cbody" in i.keys()]
        textos2=[i["abody"]["value"] for i in results__ if "abody" in i.keys()]
        textos=textos1+textos2
        # build the BoW and store it in a dict
        texto=string.join(textos).lower()
        texto_= ''.join(ch for ch in texto if ch not in EXCLUDE)
        texto__=texto_.split()
        texto___=[stemmer.stem(pp) for pp in texto__ if pp not in STOPWORDS]
        fdist=k.FreqDist(texto___)
        ocorrencias=[fdist[i] for i in radicais_escolhidos]
        bows[participante]=(fdist,ocorrencias)
    __builtin__.bows=bows
def fazBoWsC():
    """Build a bag of words for each community (stub).

    Two per-community bags are planned: the mean of each participant's bag,
    and a bag built from all of the community's texts. Only the guard is
    implemented; the steps below are TODO comments.
    """
    if "bows" not in dir(__builtin__):
        return "execute fazBoWs() primeiro"
    # TODO: fetch the participants of each community
    # TODO: average their bags of words
    # TODO: fetch the text associated with each community
    # TODO: build its bag of words
|
gdsfactory/gdsfactory | gdsfactory/simulation/gtidy3d/tests/test_results.py | Python | mit | 855 | 0 | import gdsfactory as gf
import gdsfactory.simulation.gtidy3d as gt
from gdsfactory.config import CONFIG
from gdsfactory.simulation.gtidy3d.get_results import get_results
# def test_results_run(data_regression) -> None:
# """Run simulations and checks local results."""
# component = gf.components.straight(length=3)
#     sim = gt.get_simulation(component=component, is_3d=False)
# dirpath = CONFIG["sparameters"]
# r = get_results(sim=sim, dirpath=dirpath, overwrite=True).result()
# if data_regression:
# data_regression.check(r.monitor_data)
if __name__ == "__main__":
    # Ad-hoc manual run (fixes a dataset-separator corruption in the
    # dirpath keyword argument).
    # test_results_run(None)
    component = gf.components.straight(length=3)
    sim = gt.get_simulation(component=component, is_3d=False)
    dirpath = CONFIG["sparameters"]
    # Kick off the tidy3d run and block on the result future.
    r = get_results(sim=sim, dirpath=dirpath, overwrite=True).result()
|
Saldenisov/QY_itegrating_sphere | views/__init__.py | Python | mit | 46 | 0.021739 | from .ui import *
from | .windows_views import | * |
LLNL/spack | var/spack/repos/builtin/packages/py-sentry-sdk/package.py | Python | lgpl-2.1 | 1,517 | 0.000659 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySentrySdk(PythonPackage):
    """The new Python SDK for Sentry.io"""

    # Fixes tokens corrupted by dataset separators in the py-flask and
    # py-celery depends_on lines.
    homepage = "https://github.com/getsentry/sentry-python"
    pypi = "sentry-sdk/sentry-sdk-0.17.6.tar.gz"

    version('0.17.6', sha256='1a086486ff9da15791f294f6e9915eb3747d161ef64dee2d038a4d0b4a369b24')

    depends_on('python@2.7,3.4:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-urllib3@1.10.0:', type=('build', 'run'))
    depends_on('py-certifi', type=('build', 'run'))
    depends_on('py-flask@0.11:', type=('build', 'run'))
    depends_on('py-bottle@0.12.13:', type=('build', 'run'))
    depends_on('py-falcon@1.4:', type=('build', 'run'))
    depends_on('py-django@1.8:', type=('build', 'run'))
    depends_on('py-sanic@0.8:', type=('build', 'run'))
    depends_on('py-celery@3:', type=('build', 'run'))
    depends_on('py-apache-beam@2.12:', type=('build', 'run'))
    depends_on('py-rq@0.6:', type=('build', 'run'))
    depends_on('py-aiohttp@3.5:', type=('build', 'run'))
    depends_on('py-tornado@5:', type=('build', 'run'))
    depends_on('py-sqlalchemy@1.2:', type=('build', 'run'))
    depends_on('py-pyspark@2.4.4:', type=('build', 'run'))
    depends_on('py-pure-eval', type=('build', 'run'))
    depends_on('py-chalice@1.16.0:', type=('build', 'run'))
|
Pyc0kw4k/djangocms-portfolio | djangocms_portfolio/models.py | Python | apache-2.0 | 2,018 | 0.002973 | # coding: utf-8
# Django db
from django.db import models
# Fields (Requirements)
from djangocms_text_ckeditor.fields import HTMLField
#Filer (Requirements)
from filer.fields.image import FilerImageField
from filer.fields.folder import FilerFolderField
# Taggit (Requirements)
from taggit.managers import TaggableManager
# Reverse
from django.core.urlresolvers import reverse
# I18N
from django.utils.translation import ugettext as _
class CategoryWork(models.Model):
    """A category grouping portfolio works.

    Fixes tokens corrupted by dataset separators in the base-class name
    and the Meta verbose_name line.
    """
    title = models.CharField(_('Title'), max_length=255)
    slug = models.SlugField()

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')

    def __str__(self):
        return self.title
class Work(models.Model):
    """A single portfolio work entry with gallery and navigation helpers."""
    title = models.CharField(_('Title'), max_length=200)
    slug = models.SlugField()
    pub_date = models.DateTimeField(_('Published on'), auto_now_add=True)
    description = HTMLField(_('Work Description'))
    category = models.ForeignKey(CategoryWork, verbose_name=_('Category'))
    tags = TaggableManager()
    client = models.CharField(_('Client'), max_length=255, null=True, blank=True)
    location = models.CharField(_('Location'), max_length=255, null=True, blank=True)
    head_picture = FilerImageField(verbose_name=_("Head"))
    folder = FilerFolderField(verbose_name=_('Gallery Folder'), null=True, blank=True)

    class Meta:
        verbose_name = _('Work')
        verbose_name_plural = _('Works')

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('work_detail', args=[self.slug])

    def get_next_work(self):
        """URL of the work with the next primary key, or None if absent.

        NOTE(review): relies on contiguous primary keys; a gap (deleted
        work) makes this return None even when later works exist.
        """
        try:
            next_work = Work.objects.get(pk=self.pk + 1)
        except Work.DoesNotExist:
            # Was a bare `except:` (swallowed everything, even
            # KeyboardInterrupt); keep the None contract for missing rows.
            return None
        return reverse('work_detail', args=[next_work.slug])

    def get_previous_work(self):
        """URL of the work with the previous primary key, or None if absent."""
        try:
            previous_work = Work.objects.get(pk=self.pk - 1)
        except Work.DoesNotExist:
            return None
        return reverse('work_detail', args=[previous_work.slug])
|
vienno/flashback | setup.py | Python | mit | 515 | 0 | from distutils.core import setup
# NOTE(review): install_requires is a setuptools feature; with
# distutils.core.setup it is silently ignored — consider importing setup
# from setuptools instead. (This edit fixes tokens corrupted by dataset
# separators in author_email and the beautifulsoup4 requirement.)
setup(
    name='flashback',
    packages=['flashback'],
    version='0.4',
    description='The handiest Flashback scraper in the game',
    author='Robin Linderborg',
    author_email='robin.linderborg@gmail.com',
    install_requires=[
        'beautifulsoup4==4.4.1',
        'requests==2.8.0'
    ],
    url='https://github.com/miroli/flashback',
    download_url='https://github.com/miroli/flashback/tarball/0.4',
    keywords=['flashback', 'scraping'],
    classifiers=[],
)
|
didrocks/snapcraft | snapcraft/tests/test_config.py | Python | gpl-3.0 | 3,065 | 0 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fixtures
from snapcraft import (
config,
tests
)
from snapcraft.tests import fixture_setup
def create_config_from_string(content):
    """Write *content* verbatim to the snapcraft config save path."""
    target = config.Config.save_path()
    with open(target, 'w') as fh:
        fh.write(content)
class TestConfig(tests.TestCase):
    """Reading, writing and clearing config files at the real save path.

    Fixes tokens corrupted by dataset separators in test_save_one_option
    and test_clear_preserver_other_sections.
    """

    def test_non_existing_file_succeeds(self):
        conf = config.Config()
        self.assertEqual([], conf.parser.sections())
        self.assertTrue(conf.is_empty())

    def test_existing_file(self):
        existing_conf = config.Config()
        existing_conf.set('foo', 'bar')
        existing_conf.save()
        # Check we find and use the existing conf
        conf = config.Config()
        self.assertEqual('bar', conf.get('foo'))
        self.assertFalse(conf.is_empty())

    def test_irrelevant_sections_are_ignored(self):
        create_config_from_string('''[example.com]\nfoo=bar''')
        conf = config.Config()
        self.assertEqual(None, conf.get('foo'))

    def test_section_from_url(self):
        create_config_from_string('''[example.com]\nfoo=bar''')
        self.useFixture(fixtures.EnvironmentVariable(
            'UBUNTU_SSO_API_ROOT_URL', 'http://example.com/api/v2'))
        conf = config.Config()
        self.assertEqual('bar', conf.get('foo'))

    def test_save_one_option(self):
        conf = config.Config()
        conf.set('bar', 'baz')
        conf.save()
        new_conf = config.Config()
        self.assertEqual('baz', new_conf.get('bar'))

    def test_clear_preserver_other_sections(self):
        create_config_from_string('''[keep_me]\nfoo=bar\n''')
        conf = config.Config()
        conf.set('bar', 'baz')
        self.assertEqual('baz', conf.get('bar'))
        conf.clear()
        conf.save()
        new_conf = config.Config()
        self.assertEqual(None, new_conf.get('bar'))
        # Peeking behind the curtains
        self.assertEqual('bar', new_conf.parser.get('keep_me', 'foo'))
        self.assertTrue(conf.is_empty())
class TestOptions(tests.TestCase):
    """Behaviour of individual config options."""
    def setUp(self):
        super().setUp()
        # Isolate every test in its own temporary config directory.
        self.useFixture(fixture_setup.TempConfig(self.path))
    def create_config(self, **kwargs):
        # Helper: build a Config pre-populated with the given key/value options.
        conf = config.Config()
        for k, v in kwargs.items():
            conf.set(k, v)
        return conf
    def test_string(self):
        conf = self.create_config(foo='bar')
        self.assertEqual('bar', conf.get('foo'))
|
dwt/BayesianNetworks | fluent.py | Python | mit | 46,253 | 0.014554 | #!/usr/bin/env python3
# encoding: utf8
# license: ISC (MIT/BSD compatible) https://choosealicense.com/licenses/isc/
# This library is principally created for python 3. However python 2 support may be doable and is welcomed.
"""Use python in a more object oriented, saner and shorter way.
# WARNING
First: A word of warning. This library is an experiment. It is based on a wrapper that aggressively
wraps anything it comes in contact with and tries to stay invisible from then on (apart from adding methods).
However this means that this library is probably quite unsuitable for use in bigger projects. Why?
Because the wrapper will spread in your runtime image like a virus, 'infecting' more and more objects
causing strange side effects. That being said, this library is perfect for short scripts and especially
'one of' shell commands. Use it's power wisely!
# Introduction
This library is heavily inspired by jQuery and underscore / lodash in the javascript world. Or you
could say that it is inspired by SmallTalk and in extension Ruby and how they deal with collections
and how to work with them.
In JS the problem is that the standard library sucks very badly and is missing many of the
most important convenience methods. Python is better in this regard, in that it has (almost) all
those methods available somewhere. BUT: quite a lot of them are available on the wrong object or
are free methods where they really should be methods. Examples: `str.join` really should be on iterable.
`map`, `zip`, `filter` should really be on iterable. Part of this problem comes from the design
choice of the python language, to provide a strange kind of minimal duck typing interface with the __*__
methods that the free methods like `map`, `zip`, `filter` then use. This however has the unfortunate
side effect in that writing python code using these methods often requires the reader to mentally skip
back and forth in a line to parse what it does. While this is not too bad for simple usage of these
functions, it becomes a nightmare if longer statements are built up from them.
Don't believe me? Try to parse this simple example as fast as you can:
>>> map(print, map(str.upper, sys.stdin.read().split('\n')))
How many backtrackings did you have to do? To me this code means, finding out that it starts in the
middle at `sys.stdin.read().split('\n')`, then I have to backtrack to `map(str.upper, …)`, then to
`map(print, …)`. Then while writing, I have to make sure that the number of parens at the end are
correct, which is something I usually have to use editor support for as it's quite hard to accurately
identify where the matching paren is.
The problem with this? This is hard! Hard to write, as it doesn't follow the way I think about this
statement. Literally, this means I usually write these statements from the inside out and wrap them
using my editor as I write them. As demonstrated above, it's also hard to read - requireing quite a
bit of backtracking.
So, what's the problem you say? Just don't do it, it's not pythonic you say! Well, Python has two
main workarounds available for this mess. One is to use list comprehension / generator
statements like this:
>>> [print(line.upper()) for line in sys.stdin.read().split('\n')]
This is clearly better. Now you only have to skip back and forth once instead of twice Yay! Win!
To me that is not a good workaround. Sure it's nice to easily be able to create generators this
way, but it still requires of me to find where the statement starts and to backtrack to the beginning
to see what is happening. Oh, but they support filtering too!
>>> [print(line.upper()) for line in sys.stdin.read().split('\n') if line.upper().startswith('FNORD')]
Well, this is little better. For one thing, this doesn't solve the backtracking problem, but more
importantly, if the filtering has to be done on the processed version (here artificially on
`line.upper().startswith()`) then the operation has to be applied twice - which sucks because you have to write it twice, but also because it is computed twice.
The solution? Nest them!
[print(line) for line in (line.upper() for line in sys.stdin.read().split('\n')) if line.startswith('FNORD')]
Do you start seing the problem?
Compare it to this:
>>> for line in sys.stdin.read().split('\n'):
>>> uppercased = line.upper()
>>> if uppercased.startswith('FNORD'):
>>> print(uppercased)
Almost all my complaints are gone. It reads and writes almost completely in order it is computed.
Easy to read, easy to write - but one drawback. It's not an expression - it's a bunch of statements.
Which means that it's not easily combinable and abstractable with higher order methods or generators.
Also (to complain on a high level), you had to invent two variable names `line` and `uppercased`.
While that is not bad, especially if they explain what is going on - in this case it's not really
helping _and_ (drummroll) it requires some backtracking and buildup of mental state to read. Oh well.
Of course you can use explaining variables to untangle the mess of using higher order functions too:
Consider this code:
>>> cross_product_of_dependency_labels = \
>>> set(map(frozenset, itertools.product(*map(attrgetter('_labels'), dependencies))))
That certainly is hard to read (and write). Pulling out explaining variables, makes it better. Like so:
>>> labels = map(attrgetter('_labels'), dependencies)
>>> cross_product_of_dependency_labels = set(map(frozenset, itertools.product(*labels)))
Better, but still hard to read. Sure, those explaining variables are nice and sometimes
essential to understand the code. - but it does take up space in lines, and space in my head
while parsing this code. The question would be - is this really easier to read than something
like this?
>>> cross_product_of_dependency_labels = _(dependencies) \
>>> .map(_.each._labels) \
>>> .star_call(itertools.product) \
>>> .map(frozenset) \
>>> .call(set)
Sure you are not used to this at first, but consider the advantages. The intermediate variable
names are abstracted away - the data flows through the methods completely naturally. No jumping
back and forth to parse this at all. It just reads and writes exactly in the order it is computed.
What I think that I want to accomplish, I can write down directly in order. Oh, and I don't have
to keep track of extra closing parantheses at the end of the expression.
So what is the essence of all of this?
Python is an object oriented language - but it doesn't really use what object orientation has tought
us about how we can work with collections and higher order methods in the languages that came before it
(especially SmallTalk, but more recently also Ruby). Why can't I make those beautiful fluent call chains
that SmallTalk could do 20 years ago in Python today?
Well, now you can.
# Features
To enable this style of coding this library has some features that might not be so obvious at first.
## Aggressive (specialized) wrapping
The most important entry point for this library is the function `wrap` or the perhaps preferable and
shorter alias `_`:
>>> _(something)
>>> # or
>>> wrap(something)
`wrap` is a factory function that returns a subclass of Wrapper, the basic and main object of this library.
This does two things: First it ensures that every attribute access, item access or method call off of
the wrapped object will also return a wrapped object. This means that once you wrap something, unless
you unwrap it explicitly via `.unwrap` or `._` it stays wrapped - pretty much no matter what you do
with it. The second thing this does is that it returns a subclass of Wrapper that has a specialized set
of methods depending on the type of what is wrapped. I envision this to expand in the future, but right
now the most usefull wrappers are: Iterable, where we add all the python collection functions (map,
filter, zip, reduce, …) as well as a good batch of methods from itertools and a few extras for good
measure. Callable, where we add `.curry()` and `.compose()` and Text, where most of the regex methods
are added.
## Imports |
emory-libraries/eulcommon | eulcommon/djangoextras/formfields.py | Python | apache-2.0 | 8,195 | 0.002318 | # file eulcommon/djangoextras/formfields.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Custom generic form fields for use with Django forms.
----
'''
import re
from django.core.validators import RegexValidator
from django.forms import CharField, ChoiceField
from django.forms.widgets import Select, TextInput, Widget
from django.utils.safestring import mark_safe
# regular expression to validate and parse W3C dates
# (format check only: months 00-19 and days 00-39 pass the character classes,
# so this does not guarantee a valid calendar date)
W3C_DATE_RE = re.compile(r'^(?P<year>\d{4})(?:-(?P<month>[0-1]\d)(?:-(?P<day>[0-3]\d))?)?$')
# Django validator raising error code 'invalid' when the pattern does not match.
validate_w3c_date = RegexValidator(W3C_DATE_RE,
    u'Enter a valid W3C date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
    'invalid')
class W3CDateWidget(Widget):
    '''Multi-part date widget that generates three text input boxes for year,
    month, and day. Expects and generates dates in any of these W3C formats,
    depending on which fields are filled in: YYYY-MM-DD, YYYY-MM, or YYYY.
    '''
    # based in part on SelectDateWidget from django.forms.extras.widgets
    # Sub-field name templates; '%s' is filled with the base field name.
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'
    def value_from_datadict(self, data, files, name):
        '''Generate a single value from multi-part form data. Constructs a W3C
        date based on values that are set, leaving out day and month if they are
        not present.
        :param data: dictionary of data submitted by the form
        :param files: - unused
        :param name: base name of the form field
        :returns: string value
        '''
        y = data.get(self.year_field % name)
        m = data.get(self.month_field % name)
        d = data.get(self.day_field % name)
        # The untouched placeholder text counts as an empty field.
        if y == 'YYYY':
            y = ''
        if m == 'MM':
            m = ''
        if d == 'DD':
            d = ''
        # Build the narrowest W3C form the submitted parts allow.
        date = y
        if m:
            date += '-%s' % m
            if d:
                date += '-%s' % d
        return date
    # TODO: split out logic so it is easier to extend and customize display
    def render(self, name, value, attrs=None):
        '''Render the widget as HTML inputs for display on a form.
        :param name: form field base name
        :param value: date value
        :param attrs: - unused
        :returns: HTML text with three inputs for year/month/day
        '''
        # expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
        year, month, day = 'YYYY', 'MM', 'DD'
        if value:
            # use the regular expression to pull out year, month, and day values
            # if regular expression does not match, inputs will be empty
            match = W3C_DATE_RE.match(value)
            if match:
                date_parts = match.groupdict()
                year = date_parts['year']
                month = date_parts['month']
                day = date_parts['day']
        # The inline onClick clears the placeholder text on first focus.
        year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
        month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
        day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
        # display widget fields in YYYY-MM-DD order to match W3C date format,
        # and putting required field(s) on the left
        output = [year_html, month_html, day_html]
        return mark_safe(u' / \n'.join(output))
    def create_textinput(self, name, field, value, **extra_attrs):
        '''Generate and render a :class:`django.forms.widgets.TextInput` for
        a single year, month, or day input.
        If size is specified in the extra attributes, it will also be used to
        set the maximum length of the field.
        :param name: base name of the input field
        :param field: pattern for this field (used with name to generate input name)
        :param value: initial value for the field
        :param extra_attrs: any extra widget attributes
        :returns: rendered HTML output for the text input
        '''
        # TODO: move id-generation logic out for re-use
        if 'id' in self.attrs:
            id_ = self.attrs['id']
        else:
            id_ = 'id_%s' % name
        # use size to set maximum length
        if 'size' in extra_attrs:
            extra_attrs['maxlength'] = extra_attrs['size']
        local_attrs = self.build_attrs(id=field % id_, **extra_attrs)
        txtinput = TextInput()
        return txtinput.render(field % name, value, local_attrs)
class W3CDateField(CharField):
    '''W3C date field that uses a :class:`~eulcore.django.forms.fields.W3CDateWidget`
    for presentation and uses a simple regular expression to do basic validation
    on the input (but does not actually test that it is a valid date).
    '''
    widget = W3CDateWidget
    # Overrides CharField's 'invalid' message to list the accepted formats.
    default_error_messages = {
        'invalid': u'Enter a date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
    }
    # Pattern-level validation only; see W3C_DATE_RE.
    default_validators = [validate_w3c_date]
class DynamicSelect(Select):
    '''A :class:`~django.forms.widgets.Select` widget whose choices are not
    static, but instead generated dynamically when referenced.

    :param choices: callable; this will be called to generate choices each
        time they are referenced.
    '''

    def __init__(self, attrs=None, choices=None):
        # Skip right over Select and go to its parents. Select just sets
        # self.choices, which will break since it's a property here.
        super(DynamicSelect, self).__init__(attrs)
        if choices is None:
            choices = lambda: ()
        self._choices = choices

    def _get_choices(self):
        # Invoke the callable so choices are always fresh.
        return self._choices()

    def _set_choices(self, choices):
        self._choices = choices

    choices = property(_get_choices, _set_choices)
class DynamicChoiceField(ChoiceField):
    '''A :class:`django.forms.ChoiceField` whose choices are not static, but
    instead generated dynamically when referenced.

    :param choices: callable; this will be called to generate choices each
        time they are referenced
    '''
    widget = DynamicSelect

    def __init__(self, choices=None, widget=None, *args, **kwargs):
        # ChoiceField.__init__ tries to set static choices, which won't
        # work since our choices are dynamic, so we're going to have to skip
        # over it.
        # First normalize our choices
        if choices is None:
            choices = lambda: ()
        self._choices = choices
        # Then normalize our widget, constructing it with our choices
        # function if we need to construct it.
        if widget is None:
            widget = self.widget
        if isinstance(widget, type):
            widget = widget(choices=self._choices)
        # Now call super.__init__(), but bypass ChoiceField.
        # ChoiceField just sets static choices manually and then calls its
        # own super. We don't have static choices, so ChoiceField.__init__()
        # would break if we called it. Skip over ChoiceField and go straight
        # to *its* super.__init__().
        super(ChoiceField, self).__init__(widget=widget, *args, **kwargs)

    def _get_choices(self):
        return self._choices()

    def _set_choices(self, choices):
        # if choices is updated, update the widget choice callable also
        self._choices = choices
        self.widget._choices = self._choices

    # Reconstructed: the source was truncated mid-line here; mirrors
    # DynamicSelect.choices above.
    choices = property(_get_choices, _set_choices)
googleapis/python-aiplatform | samples/snippets/feature_store_service/create_featurestore_sample.py | Python | apache-2.0 | 2,289 | 0.002184 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create a featurestore resource to contain entity types and features.
# See https://cloud.google.com/vertex-ai/docs/featurestore/setup before running
# the code snippet
# [START aiplatform_create_featurestore_sample]
from google.cloud import aiplatform
def create_featurestore_sample(
    project: str,
    featurestore_id: str,
    fixed_node_count: int = 1,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
    timeout: int = 300,
):
    """Create a Vertex AI Featurestore and block until the LRO completes."""
    # The AI Platform services require regional API endpoints, which need to be
    # in the same region or multi-region overlap with the Feature Store location.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.FeaturestoreServiceClient(client_options=client_options)
    parent = f"projects/{project}/locations/{location}"
    create_featurestore_request = aiplatform.gapic.CreateFeaturestoreRequest(
        parent=parent,
        featurestore_id=featurestore_id,
        featurestore=aiplatform.gapic.Featurestore(
            online_serving_config=aiplatform.gapic.Featurestore.OnlineServingConfig(
                fixed_node_count=fixed_node_count,
            ),
        ),
    )
    # create_featurestore returns a long-running operation; wait for the result.
    lro_response = client.create_featurestore(request=create_featurestore_request)
    print("Long running operation:", lro_response.operation.name)
    create_featurestore_response = lro_response.result(timeout=timeout)
    print("create_featurestore_response:", create_featurestore_response)

# [END aiplatform_create_featurestore_sample]
|
odie5533/Python-RTSP | sdpp.py | Python | gpl-3.0 | 6,434 | 0.004352 | # Pythonic SDP/SDPPLIN Parser
# SDP = Session Description Protocol
#
# Copyright (C) 2008 David Bern
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
def _parse_sdpplin_line(item):
""" Returns a (name,value) tuple when given an Sdpplin attribute
e.g. AvgPacketSize:integer;744 => (AvgPacketSize,744)
"""
name = item.split(':')[0]
value = item[len(name)+ 1:]
if value.find(';') != -1:
#type = value.split(';')[0]
#value = value[len(type) + 1:]
type, sep, value = value.partition(';')
if type == 'integer':
value = int(value)
if type == 'buffer':
value = base64.b64decode(value[1:-1])
if type == 'string':
value = value[1:-1]
return name, value
class SDPMediaDesc:
    """Holds the (a)ttribute and (b)andwidth values for one SDP media section."""
    def __init__(self):
        # Fresh lists per instance; 'a=' and 'b=' lines are appended in order.
        self.a, self.b = [], []
class SDPParser:
    """Parse Session Description Protocol (SDP) data.

    Pass a full CRLF-separated blob to the constructor, or feed lines to
    :meth:`parseLine` in order -- 'a='/'b=' lines after an 'm=' line attach
    to that media description.
    """
    def __init__(self, data=None):
        self.v = []
        self.o = []
        self.s = []
        self.i = []
        self.t = []
        self.a = []
        self.media_descriptions = []
        self.last_desc = None
        # Convenience aliases filled in while parsing.
        self.protocol_version = None
        self.session_name = None
        self.session_desc = None
        self.start_time = None
        self.stop_time = None
        if data is None:
            return
        for line in [l for l in data.split('\r\n') if l]:
            self.parseLine(line)

    def saveSDP(self, filename):
        """Write the parsed session back out to *filename* (not finished)."""
        with open(filename, 'w') as f:
            for field in ['v', 'o', 's', 'i', 't', 'a']:
                for val in getattr(self, field):
                    f.write('%s=%s\r\n' % (field, val))
            for mdesc in self.media_descriptions:
                for field in ['a', 'b']:
                    for val in getattr(mdesc, field):
                        f.write('%s=%s\r\n' % (field, val))
                f.write('\r\n')

    def parseLine(self, line):
        """Parse one SDP line of the form ``<type>=<value>``.

        Lines must be parsed in order, as an 'm=' line makes the following
        'a='/'b=' lines describe that media section.
        """
        field = line[0]
        value = line[2:].strip()
        if field == 'v':
            self.v.append(value)
            self.protocol_version = value
        elif field == 'o':
            self.o.append(value)
        elif field == 's':  # Session Name
            self.s.append(value)
            self.session_name = value
        elif field == 'i':  # Session Description
            self.i.append(value)
            self.session_desc = value
        elif field == 't':  # Timing: "<start> <stop>"
            # Original code built the values with py2 backticks and then
            # discarded them; store the raw string values instead.
            self.t.append(value)
            parts = value.split(' ')
            if len(parts) >= 2:
                self.start_time, self.stop_time = parts[0], parts[1]
        elif field == 'a':
            if self.last_desc is None:
                # Appends to the session attributes
                self.a.append(value)
            else:
                # or to the media description attributes
                self.last_desc.a.append(value)
        elif field == 'm':
            self.last_desc = SDPMediaDesc()
            self.media_descriptions.append(self.last_desc)
        elif field == 'b':
            self.last_desc.b.append(value)
        else:
            # Need to add email and phone
            raise TypeError('Unknown type: %s' % field)
class SdpplinMediaDesc(SDPMediaDesc):
    """ Extends the SDPMediaDesc by providing dictionary-style access to
    the sdpplin variables.
    e.g. instead of media_desc.a[7] returning "MaxBitRate:integer;64083"
    media_desc["MaxBitRate"] returns an integer 64083
    """
    def __iter__(self):
        # Iterates over attribute names (dict-like).
        for key in self.attributes:
            yield key
    def items(self):
        # (name, value) pairs, mirroring dict.items().
        return [(key,self.attributes[key]) for key in self.attributes]
    def __getitem__(self, name):
        return self.attributes[name]
    def __init__(self, media_desc):
        # Wraps an already-parsed SDPMediaDesc; deliberately does not call
        # SDPMediaDesc.__init__ because a/b are shared from media_desc.
        self.a = media_desc.a
        self.b = media_desc.b
        self.attributes = {}
        self.duration = None
        for item in media_desc.a:
            name, value = _parse_sdpplin_line(item)
            if name == 'control':
                # e.g. "streamid=0" -> attributes['streamid'] = 0
                self.attributes[value.split('=')[0]] = int(value.split('=')[1])
            if name == 'length':
                # length is in seconds; duration is stored in milliseconds
                self.duration = int(float(value.split('=')[1]) * 1000)
            self.attributes[name] = value
class Sdpplin(SDPParser):
    """ Extends the SDPParser by providing dictionary-style access to
    the sdpplin variables.
    e.g. instead of sdp.a[1] returning "StreamCount:integer;2"
    sdp["StreamCount"] returns 2
    """
    def __init__(self, data):
        # NOTE: does not call SDPParser.__init__; it parses via a private
        # SDPParser instance and keeps only the decoded attributes/streams.
        self.attributes = {}
        self.streams = []
        sdp = SDPParser(data)
        # Adds attributes to self
        for item in sdp.a:
            name, value = _parse_sdpplin_line(item)
            if name in ['Title', 'Author', 'Copyright']:
                # These fields carry trailing NUL terminators; strip them.
                value = value.strip(chr(0))
            self.attributes[name] = value
        # Adds SdpplinMediaDesc to streams[] for each SDPMediaDesc
        for media_desc in sdp.media_descriptions:
            sdpplin_media_desc = SdpplinMediaDesc(media_desc)
            self.streams.append(sdpplin_media_desc)
    def __iter__(self):
        # Iterates over session attribute names (dict-like).
        for key in self.attributes:
            yield key
    def items(self):
        return [(key,self.attributes[key]) for key in self.attributes]
    def __getitem__(self, name):
        return self.attributes[name]
|
MillerCMBLabUSC/lab_analysis | apps/4f_model/OldCode/thermo.py | Python | gpl-2.0 | 3,192 | 0.024436 | import scipy.integrate as intg
import numpy as np
#Physical Constants
#Everything is in MKS units
#Planck constant [J/s]
h = 6.6261e-34
#Boltzmann constant [J/K]
kB = 1.3806e-23
#Speed of light [m/s]
c = 299792458.0
#Pi
PI = 3.14159265
#Vacuum Permitivity
eps0 = 8.85e-12
#Resistivity of the mirror
rho=2.417e-8
GHz = 10 ** 9
Tcmb = 2.725
#Calculates the grey-body spectral radiance for a given temp and emis.
def bbSpec(freq,temp,emis):
    """Grey-body spectral radiance at freq [Hz] and temp [K].

    emis may be a constant emissivity or a callable emis(freq).
    """
    # Bose-Einstein photon occupation number.
    occ = 1.0/(np.exp(h*freq/(temp*kB)) - 1)
    if callable(emis):
        e = emis(freq)
    else:
        e = emis
    return (2*e*h*freq**3)/(c**2)* occ
#Calculates total black body power for a given temp and emis multiplied by the optical throughput.
def weightedSpec(freq,temp,emis):
    # Single-mode throughput A*Omega = lambda^2 [m^2 sr].
    AOmega = (c/freq)**2
    return (AOmega*bbSpec(freq,temp,emis))
def bbPower(temp, emis, f1,f2):
    # Band-integrated single-mode blackbody power [W] between f1 and f2 [Hz];
    # the 0.5 factor selects a single polarization.
    power = .5*intg.quad(lambda x: weightedSpec(x,temp,emis), f1, f2)[0]
    return power
def powFromSpec(freqs, spec):
    """Integrate a spectrum over its frequency grid (trapezoidal rule)."""
    return np.trapz(spec, x=freqs)
#Spillover efficiency
def spillEff(D, F, waistFact, freq):
    # Gaussian-beam spillover efficiency for aperture diameter D [m],
    # focal ratio F, waist factor, and frequency [Hz].
    return 1. - np.exp((-np.power(np.pi,2)/2.)*np.power((D/(waistFact*F*(c/freq))),2))
def powFrac(T1, T2, f1, f2):
    """Ratio of in-band blackbody powers at T1 vs T2 (0 when T1 is zero)."""
    return 0 if T1 == 0 else bbPower(T1, 1.0, f1, f2) / bbPower(T2, 1.0, f1, f2)
def getLambdaOptCoeff(chi):
    # Frequency-independent part of getLambdaOpt for incidence angle chi [rad].
    geom = (1 / np.cos(chi) - np.cos(chi))
    return - 2 * geom * np.sqrt(4 * PI * eps0 * rho )
def getLambdaOpt(nu, chi):
    # Mirror ohmic-loss coefficient at frequency nu [Hz] and incidence angle
    # chi [rad]; equals getLambdaOptCoeff(chi) * sqrt(nu).
    geom = (1 / np.cos(chi) - np.cos(chi))
    return - 2 * geom * np.sqrt(4 * PI * eps0 * rho * nu)
def aniPowSpec(emissivity, freq, temp=None):
    """dP/dT spectral density used for the Kcmb conversion (defaults to Tcmb)."""
    if temp is None:  # identity test is the correct idiom (was `== None`)
        temp = Tcmb
    occ = 1.0/(np.exp(h*freq/(temp*kB)) - 1)
    return ((h**2)/kB)*emissivity*(occ**2)*((freq**2)/(temp**2))*np.exp((h*freq)/(kB*temp))
def dPdT(elements, det):
    """Conversion from Power on detector to Kcmb"""
    # Cumulative efficiency of every element after the first at frequency f.
    # NOTE(review): relies on the Python 2 builtin `reduce`; under Python 3
    # this needs functools.reduce.
    totalEff = lambda f : reduce((lambda x,y : x * y), map(lambda e : e.Eff(f), elements[1:]))
    # print "Total Efficiency: %e"%totalEff(det.band_center)
    return intg.quad(lambda x: aniPowSpec(totalEff(x), x, Tcmb), det.flo, det.fhi)[0]
#***** Public Methods *****
def lamb(freq, index=None):
    """Convert from frequency [Hz] to wavelength [m] in a medium of refractive index (default 1)."""
    if index is None:  # identity test is the correct idiom (was `== None`)
        index = 1.
    return c/(freq*index)
def dielectricLoss( lossTan, thickness, index, freq, atmScatter=0):
    """Dielectric loss coefficient with thickness [m] and freq [Hz]"""
    # NOTE(review): `lamb` expects Hz but is passed freq/GHz here, and
    # `atmScatter` is unused -- confirm intended units before relying on this.
    return 1.0 - np.exp((-2*PI*index*lossTan*thickness)/lamb(freq/GHz))
if __name__=="__main__":
bc = 145 * GHz
fbw = .276
flo = bc * (1 - fbw / 2)
fhi = bc * (1 + fbw / 2)
T = Tcmb
#Exact
occ = lambda nu : 1./(np.exp(h * nu / (T * kB)) - 1)
aniSpec = lambda nu : 2 * h**2 * nu **2 / (kB * T**2) * occ(nu)**2 * np.exp(h * nu / (kB * T))
factor1 = intg.quad(aniSpec, flo, fhi)[0]
cumEff = .3638
factor2 = 2 * kB * (fhi - flo)
print factor1 * pW
print factor2 * pW
print factor2 / factor1
# freqs = np.linspace(flo, fhi, 30)
# plt.plot(freqs, aniSpec(freqs) / (2 * kB))
## plt.plot(freqs, [2 * kB for f in freqs])
# plt.show()
|
graphql-python/graphql-core | tests/utilities/test_type_from_ast.py | Python | mit | 1,371 | 0 | from pytest import raises
from graphql.language import parse_type, TypeNode
from graphql.type import GraphQLList, GraphQLNonNull, GraphQLObjectType
from graphql.utilities import type_from_ast
from ..validation.harness import test_schema
def describe_type_from_ast():
    """pytest-describe suite for graphql.utilities.type_from_ast.

    The nested function names form the collected test ids, so they must
    not be renamed.
    """
    def for_named_type_node():
        node = parse_type("Cat")
        type_for_node = type_from_ast(test_schema, node)
        assert isinstance(type_for_node, GraphQLObjectType)
        assert type_for_node.name == "Cat"
    def for_list_type_node():
        node = parse_type("[Cat]")
        type_for_node = type_from_ast(test_schema, node)
        assert isinstance(type_for_node, GraphQLList)
        of_type = type_for_node.of_type
        assert isinstance(of_type, GraphQLObjectType)
        assert of_type.name == "Cat"
    def for_non_null_type_node():
        node = parse_type("Cat!")
        type_for_node = type_from_ast(test_schema, node)
        assert isinstance(type_for_node, GraphQLNonNull)
        of_type = type_for_node.of_type
        assert isinstance(of_type, GraphQLObjectType)
        assert of_type.name == "Cat"
    def for_unspecified_type_node():
        # A bare TypeNode matches no known subtype, so the lookup must raise.
        node = TypeNode()
        with raises(TypeError) as exc_info:
            type_from_ast(test_schema, node)
        msg = str(exc_info.value)
        assert msg == "Unexpected type node: <TypeNode instance>."
|
pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/siggraph_asia/spiderman/down_spidey_interp.py | Python | lgpl-3.0 | 4,445 | 0.028796 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
from hpp.gepetto import PathPlayer
import down_spidey_path as path_planner
#~ import hrp2_model as model
import time
tp = path_planner
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "spiderman"
urdfSuffix = ""
srdfSuffix = ""
#~ V0list = tp.V0list
#~ Vimplist = tp.Vimplist
base_joint_xyz_limits = tp.base_joint_xyz_limits
fullBody = FullBody ()
robot = fullBody.client.basic.robot
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", base_joint_xyz_limits)
#psf = ProblemSolver(fullBody); rr = Viewer (psf); gui = rr.client.gui
ps = path_planner.ProblemSolver( fullBody )
r = path_planner.Viewer (ps, viewerClient=path_planner.r.client)
rr = r
#~ psf = tp.ProblemSolver( fullBody ); rr = tp.Viewer (psf); gui = rr.client.gui
pp = PathPlayer (fullBody.client.basic, rr); pp.speed = 0.6
q_0 = fullBody.getCurrentConfig(); rr(q_0)
rLegId = 'RFoot'
lLegId = 'LFoot'
rarmId = 'RHand'
larmId = 'LHand'
rfoot = 'SpidermanRFootSphere'
lfoot = 'SpidermanLFootSphere'
lHand = 'SpidermanLHandSphere'
rHand = 'SpidermanRHandSphere'
nbSamples = 50000; x = 0.03; y = 0.08
fullBody.addLimb(rLegId,'RThigh_rx','SpidermanRFootSphere',[0,0,0],[0,0,1], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
fullBody.addLimb(lLegId,'LThigh_rx','SpidermanLFootSphere',[0,0,0],[0,0,1], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
#~ fullBody.addLimb(rarmId,'RHumerus_rx','SpidermanRHandSphere',[0,0,0],[0,-1,0], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
#~ fullBody.addLimb(larmId,'LHumerus_rx','SpidermanLHandSphere',[0,0,0],[0,1,0], x, y, nbSamples, "EFORT_Normal", 0.01,"_6_DOF")
#~
fullBody.runLimbSampleAnalysis(rLegId, "jointLimitsDistance", True)
fullBody.runLimbSampleAnalysis(lLegId, "jointLimitsDistance", True)
limbsCOMConstraints = { rLegId : {'file': "spiderman/RL_com.ineq", 'effector' : rfoot},
lLegId : {'file': "spiderman/LL_com.ineq", 'effector' : rHand},
rarmId : {'file': "spiderman/RA_com.ineq", 'effector' : rHand},
larmId : {'file': "spiderman/LA_com.ineq", 'effector' : lHand} }
ps = path_planner.ProblemSolver( fullBody )
r = path_planner.Viewer (ps, viewerClient=path_planner.r.client)
#~ fullBody.setJointBounds ("base_joint_xyz", [-1,3, -1, 1, 0, 2.2])
fullBody.setJointBounds ("base_joint_xyz", [-1,3, -1, 1, 0, 6])
pp = PathPlayer (fullBody.client.basic, r)
from plan_execute import a, b, c, d, e, init_plan_execute
init_plan_execute(fullBody, r, path_planner, pp)
q_0 = fullBody.getCurrentConfig();
q_init = fullBody.getCurrentConfig(); q_init[0:7] = path_planner.q_init[0:7]
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = path_planner.q_goal[0:7]
#~ fullBody.setCurrentConfig (q_init)
#~ q_init = [
#~ -0.05, -0.82, 0.55, 1.0, 0.0 , 0.0, 0.0, # Free flyer 0-6
#~ 0.0, 0.0, 0.0, 0.0, | # CHEST HEAD 7-10
#~ 0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17, # LARM 11-17
#~ 0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17, # RARM 18-24
#~ 0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # LLEG | 25-30
#~ 0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # RLEG 31-36
#~ ]; r (q_init)
fullBody.setCurrentConfig (q_goal)
q_goal = fullBody.generateContacts(q_goal, [0,0,1])
q_init = fullBody.generateContacts(q_init, [0,0,1])
#~ fullBody.setStartState(q_init,[rLegId,lLegId,rarmId]) #,rarmId,larmId])
#~ fullBody.setStartState(q_init,[rLegId,lLegId,larmId, rarmId]) #,rarmId,larmId])
fullBody.setStartState(q_init,[rLegId,lLegId]) #,rarmId,larmId])
fullBody.setEndState(q_goal,[rLegId,lLegId])#,rarmId,larmId])
configs = d(0.005); e()
fb = fullBody
from bezier_traj import *
fullBody = fb
init_bezier_traj(fullBody, r, pp, configs, limbsCOMConstraints)
#~ AFTER loading obstacles
#~ test_ineq(0,{ rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : 'RLEG_JOINT5'}}, 1000, [1,0,0,1])
#~ test_ineq(0,{ lLegId : {'file': "hrp2/LL_com.ineq", 'effector' : 'LLEG_JOINT5'}}, 1000, [0,0,1,1])
#~ gen(0,1)
|
woodymit/millstone_accidental_source | genome_designer/debug/2014_07_11_dep_mpileup_analysis.py | Python | mit | 2,582 | 0.002711 | """
Script to manually bin alignments against position. The idea is to manually
look for mutations that freebayes might miss because of the low probability
of the events that we are dealing with.
"""
import os
import sys

# Setup Django environment before importing any project modules: the project
# root must be on sys.path and DJANGO_SETTINGS_MODULE must point at settings.
# (Restored two lines garbled by stray " | " separators.)
sys.path.append(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

from main.models import *
from utils import generate_safe_filename_prefix_from_label
from utils.bam_utils import filter_bam_file_by_row
from utils.samtools_utils import run_mpileup
def get_pEVOL_bam_path(bam_path):
    """Return the companion ``.pEVOL_only.bam`` path for *bam_path*."""
    root, _ext = os.path.splitext(bam_path)
    return root + '.pEVOL_only.bam'
def filter_pEVOL_reads():
    """For every sample alignment in the project, write a sibling BAM that
    contains only the reads aligned against the pEVOL-bipA reference.
    """
    project = Project.objects.get(uid='9a5fc6ec')
    alignments = ExperimentSampleToAlignment.objects.filter(
        alignment_group__reference_genome__project=project)

    def _aligned_to_pEVOL(sam_line):
        # parts[2] is the 3rd SAM column. NOTE(review): the original named it
        # rnext_col, but column 3 is RNAME per the SAM spec -- confirm intent.
        return sam_line.split('\t')[2] == 'pEVOL-bipA'

    for alignment in alignments:
        source_bam = get_dataset_with_type(
            alignment, Dataset.TYPE.BWA_ALIGN).get_absolute_location()
        filter_bam_file_by_row(
            source_bam, _aligned_to_pEVOL, get_pEVOL_bam_path(source_bam))
def generate_mpileups(idx_range=None):
    # Write a full mpileup for the pEVOL-only BAM of each sample alignment in
    # the project (assumes filter_pEVOL_reads() already produced those BAMs --
    # confirm it has been run).
    #
    # idx_range: optional collection of 0-based indices; when given (and
    # non-empty), only the sample alignments at those positions are processed,
    # which makes it easy to resume or parallelize runs.
    output_dir = '/dep_data/2014_07_11_pileup_analysis'
    p = Project.objects.get(uid='9a5fc6ec')
    sample_alignments = ExperimentSampleToAlignment.objects.filter(
        alignment_group__reference_genome__project=p)
    for idx, sa in enumerate(sample_alignments):
        if idx_range and not idx in idx_range:
            continue
        print 'Running %d of %d: %s' % (
            idx + 1, len(sample_alignments), sa.experiment_sample.label)
        bam_dataset = get_dataset_with_type(sa, Dataset.TYPE.BWA_ALIGN)
        bam_path = bam_dataset.get_absolute_location()
        pEVOL_bam_path = get_pEVOL_bam_path(bam_path)
        ref_genome_fasta_location = get_dataset_with_type(
            sa.alignment_group.reference_genome,
            Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
        # Output name combines the human label (sanitized) with the uid to
        # guarantee uniqueness.
        output_filename = generate_safe_filename_prefix_from_label(
            sa.experiment_sample.label + '_' + sa.uid) + '.mpileup'
        output_path = os.path.join(output_dir, output_filename)
        run_mpileup(pEVOL_bam_path, ref_genome_fasta_location, output_path,
                coverage_only=False)
def main():
    """Entry point: regenerate mpileups for the samples at indices 6 and 7."""
    # filter_pEVOL_reads()  # one-time step, presumably already run -- confirm
    generate_mpileups([6,7])
if __name__ == '__main__':
    main()
|
sonic182/libsasscompiler | test_djangoapp/django_app/settings.py | Python | mit | 4,154 | 0.000963 | """
Django settings for django_app project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eerdox*%*06dsv5hez(k+-gc26)3ft%+x!3=%+o&wc*16l_5!a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', '1') == '1'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pipeline',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_app.urls'
# Django template engine configuration. (Restored the 'OPTIONS' key, which was
# garbled by a stray " | " separator.)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# In-memory SQLite keeps the test app self-contained -- no DB server needed.
# (Restored the DATABASES name, which was garbled by a stray " | " separator.)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "test_styles"),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
PIPELINE = {
'PIPELINE_ENABLED': True,
'STYLESHEETS': {
'test_scss': {
'source_filenames': ('tests/test1.scss',),
'output_filename': 'css/test_scss.css',
},
'test_sass': {
'source_filenames': ('tests/test1.sass',),
'output_filename': 'css/test_sass.css',
},
}
}
# not testing compressors so not required
PIPELINE['CSS_COMPRESSOR'] = 'pipeline.compressors.NoopCompressor'
PIPELINE['JS_COMPRESSOR'] = 'pipeline.compressors.NoopCompressor'
PIPELINE['COMPILERS'] = (
'libsasscompiler.LibSassCompiler',
)
|
Raulios/django-blog | core/migrations/0005_auto_20160120_1634.py | Python | mit | 458 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-20 16:34
from __future__ import unicode_literals

# Restored the models import, which was garbled by a stray " | " separator.
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefine Post.categories as a plain
    # ManyToManyField pointing at core.Category.
    dependencies = [
        ('core', '0004_auto_20160120_1633'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='categories',
            field=models.ManyToManyField(to='core.Category'),
        ),
    ]
jiangzhuo/kbengine | kbe/src/lib/python/Lib/test/test_codecs.py | Python | lgpl-3.0 | 111,483 | 0.001408 | import codecs
import contextlib
import io
import locale
import sys
import unittest
import warnings
import encodings
from test import support
if sys.platform == 'win32':
VISTA_OR_LATER = (sys.getwindowsversion().major >= 6)
else:
VISTA_OR_LATER = False
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
    """Build a helper bound to *self* (a TestCase) that asserts *coder*
    maps ``data`` to ``(expected, len(data))``."""
    def check(data, expected):
        result = coder(data)
        self.assertEqual(result, (expected, len(data)))
    return check
class Queue(object):
    """
    queue: write bytes at one end, read bytes from the other end
    """
    # Fixed: the sized-read branch was garbled by a stray " | " splitting
    # `self` into `se | lf`.
    def __init__(self, buffer):
        self._buffer = buffer

    def write(self, chars):
        # Append at the tail.
        self._buffer += chars

    def read(self, size=-1):
        # Consume from the head; a negative size drains the whole queue.
        if size < 0:
            s = self._buffer
            self._buffer = self._buffer[:0]  # make empty, preserving bytes/str type
            return s
        else:
            s = self._buffer[:size]
            self._buffer = self._buffer[size:]
            return s
class MixInCheckStateHandling:
    """Mixin verifying that incremental codecs survive a getstate()/setstate()
    round-trip at every possible split point of the input.

    Fixed: ``d.getstate()`` was garbled by a stray " | " separator.
    """

    def check_state_handling_decode(self, encoding, u, s):
        # Split s at every index, decode the first part, snapshot the decoder,
        # and check that a fresh decoder restored from the snapshot finishes
        # the job identically.
        for i in range(len(s)+1):
            d = codecs.getincrementaldecoder(encoding)()
            part1 = d.decode(s[:i])
            state = d.getstate()
            self.assertIsInstance(state[1], int)
            # Check that the condition stated in the documentation for
            # IncrementalDecoder.getstate() holds
            if not state[1]:
                # reset decoder to the default state without anything buffered
                d.setstate((state[0][:0], 0))
                # Feeding the previous input may not produce any output
                self.assertTrue(not d.decode(state[0]))
                # The decoder must return to the same state
                self.assertEqual(state, d.getstate())
            # Create a new decoder and set it to the state
            # we extracted from the old one
            d = codecs.getincrementaldecoder(encoding)()
            d.setstate(state)
            part2 = d.decode(s[i:], True)
            self.assertEqual(u, part1+part2)

    def check_state_handling_encode(self, encoding, u, s):
        # Same round-trip check for the encoding direction.
        for i in range(len(u)+1):
            d = codecs.getincrementalencoder(encoding)()
            part1 = d.encode(u[:i])
            state = d.getstate()
            d = codecs.getincrementalencoder(encoding)()
            d.setstate(state)
            part2 = d.encode(u[i:], True)
            self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using a incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", "\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*"\u3042" + lineend)
vwo.append((i*200+200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in lineends:
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
# Issue #8260: Test readline() followed by read()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
# Issue #16636: Test readline() followed by readlines()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
# Test read() followed by read()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
# Issue #12446: Test read() followed by readlines()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertE |
luzfcb/cookiecutter_django_test | config/settings/local.py | Python | mit | 2,250 | 0.000444 | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='ou5wlwg8d0d%61j)37q2#_&@#)-&pse40c#dezo)7t4-cvu8_3')
# Mail settings
# ------------------------------------------------------------------------------
# Local dev mail: talk to a localhost debugging server, defaulting to the
# console backend so emails print to stdout. (Restored the backend path,
# which was garbled by a stray " | " separator.)
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
# Local-memory cache: per-process and wiped on restart -- fine for dev.
# (Restored the 'LOCATION' key, which was garbled by a stray " | " separator.)
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
qsantos/spyce | gspyce/__main__.py | Python | gpl-3.0 | 73 | 0 | #!/usr/bin/env python
import gspyce.simulation

# Launch the simulation GUI when run as ``python -m gspyce``.
# (Restored the call, which was garbled by stray " | " separators.)
gspyce.simulation.main()
|
googleapis/python-dialogflow | samples/generated_samples/dialogflow_v2_generated_environments_delete_environment_async.py | Python | apache-2.0 | 1,441 | 0.000694 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteEnvironment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Environments_DeleteEnvironment_async]
from google.cloud import dialogflow_v2
async def sample_delete_environment():
    """Generated sample: delete a Dialogflow ES environment.

    ``"name_value"`` is a placeholder -- replace it with the full environment
    resource name (e.g. ``projects/<project>/agent/environments/<env>``,
    per the DeleteEnvironmentRequest docs) before running.
    """
    # Create a client
    client = dialogflow_v2.EnvironmentsAsyncClient()
    # Initialize request argument(s)
    request = dialogflow_v2.DeleteEnvironmentRequest(
        name="name_value",
    )
    # Make the request (no response body; raises on error)
    await client.delete_environment(request=request)
# [END dialogflow_v2_generated_Environments_DeleteEnvironment_async]
|
kmoocdev2/edx-platform | openedx/core/djangoapps/auth_exchange/views.py | Python | agpl-3.0 | 8,123 | 0.002585 | # -*- coding: utf-8 -*-
"""
Views to support exchange of authentication credentials.
The following are currently implemented:
1. AccessTokenExchangeView:
3rd party (social-auth) OAuth 2.0 access token -> 1st party (open-edx) OAuth 2.0 access token
2. LoginWithAccessTokenView:
1st party (open-edx) OAuth 2.0 access token -> session cookie
"""
# pylint: disable=abstract-method
import django.contrib.auth as auth
import social_django.utils as social_utils
from django.conf import settings
from django.contrib.auth import login
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from edx_oauth2_provider.constants import SCOPE_VALUE_DICT
from oauth2_provider import models as dot_models
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.base import TokenView as DOTAccessTokenView
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from provider import constants
from provider.oauth2.views import AccessTokenView as DOPAccessTokenView
from rest_framework import permissions
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.auth_exchange.forms import AccessTokenExchangeForm
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
class AccessTokenExchangeBase(APIView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token.

    Fixed: the form construction in post() was garbled by a stray " | "
    separator splitting the ``request=`` keyword.
    """
    @method_decorator(csrf_exempt)
    @method_decorator(social_utils.psa("social:complete"))
    def dispatch(self, *args, **kwargs):
        return super(AccessTokenExchangeBase, self).dispatch(*args, **kwargs)

    def get(self, request, _backend):  # pylint: disable=arguments-differ
        """
        Pass through GET requests without the _backend
        """
        return super(AccessTokenExchangeBase, self).get(request)

    def post(self, request, _backend):  # pylint: disable=arguments-differ
        """
        Handle POST requests to get a first-party access token.
        """
        form = AccessTokenExchangeForm(request=request, oauth2_adapter=self.oauth2_adapter, data=request.POST)  # pylint: disable=no-member
        if not form.is_valid():
            return self.error_response(form.errors)  # pylint: disable=no-member
        user = form.cleaned_data["user"]
        scope = form.cleaned_data["scope"]
        client = form.cleaned_data["client"]

        return self.exchange_access_token(request, user, scope, client)

    def exchange_access_token(self, request, user, scope, client):
        """
        Exchange third party credentials for an edx access token, and return a
        serialized access token response.
        """
        if constants.SINGLE_ACCESS_TOKEN:
            # Reuse one token per user when configured to do so.
            edx_access_token = self.get_access_token(request, user, scope, client)  # pylint: disable=no-member
        else:
            edx_access_token = self.create_access_token(request, user, scope, client)
        return self.access_token_response(edx_access_token)  # pylint: disable=no-member
class DOPAccessTokenExchangeView(AccessTokenExchangeBase, DOPAccessTokenView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token. Uses django-oauth2-provider (DOP) to manage access
    tokens.
    """
    # Handed to AccessTokenExchangeForm by the base class's post().
    oauth2_adapter = adapters.DOPAdapter()
class DOTAccessTokenExchangeView(AccessTokenExchangeBase, DOTAccessTokenView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token. Uses django-oauth-toolkit (DOT) to manage access
    tokens.
    """
    # Handed to AccessTokenExchangeForm by the base class's post().
    oauth2_adapter = adapters.DOTAdapter()
    def get(self, request, _backend):
        # Token exchange is POST-only for DOT; reject GET explicitly.
        return Response(status=400, data={
            'error': 'invalid_request',
            'error_description': 'Only POST requests allowed.',
        })
    def get_access_token(self, request, user, scope, client):
        """
        TODO: MA-2122: Reusing access tokens is not yet supported for DOT.
        Just return a new access token.
        """
        return self.create_access_token(request, user, scope, client)
    def create_access_token(self, request, user, scope, client):
        """
        Create and return a new access token.
        """
        # NOTE: despite the name, _days is seconds-per-day; it converts the
        # OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS setting into seconds for expires_in.
        _days = 24 * 60 * 60
        token_generator = BearerToken(
            expires_in=settings.OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS * _days,
            request_validator=oauth2_settings.OAUTH2_VALIDATOR_CLASS(),
        )
        self._populate_create_access_token_request(request, user, scope, client)
        return token_generator.create_token(request, refresh_token=True)
    def access_token_response(self, token):
        """
        Wrap an access token in an appropriate response
        """
        return Response(data=token)
    def _populate_create_access_token_request(self, request, user, scope, client):
        """
        django-oauth-toolkit expects certain non-standard attributes to
        be present on the request object. This function modifies the
        request object to match these expectations
        """
        request.user = user
        # Map the scope value through SCOPE_VALUE_DICT (edx_oauth2_provider
        # constant) to the named form DOT expects.
        request.scopes = [SCOPE_VALUE_DICT[scope]]
        request.client = client
        request.state = None
        request.refresh_token = None
        request.extra_credentials = None
        request.grant_type = client.authorization_grant_type
    def error_response(self, form_errors, **kwargs):
        """
        Return an error response consisting of the errors in the form
        """
        return Response(status=400, data=form_errors, **kwargs)
class LoginWithAccessTokenView(APIView):
    """
    View for exchanging an access token for session cookies
    """
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
    permission_classes = (permissions.IsAuthenticated,)
    @staticmethod
    def _get_path_of_arbitrary_backend_for_user(user):
        """
        Return the path to the first found authentication backend that recognizes the given user.
        """
        # Returns None implicitly if no backend recognizes the user.
        for backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = auth.load_backend(backend_path)
            if backend.get_user(user.id):
                return backend_path
    @staticmethod
    def _is_grant_password(access_token):
        """
        Check if the access token provided is DOT based and has password type grant.
        """
        token_query = dot_models.AccessToken.objects.select_related('user')
        dot_token = token_query.filter(token=access_token).first()
        if dot_token and dot_token.application.authorization_grant_type == dot_models.Application.GRANT_PASSWORD:
            return True
        return False
    @method_decorator(csrf_exempt)
    def post(self, request):
        """
        Handler for the POST method to this view.
        """
        # The django login method stores the user's id in request.session[SESSION_KEY] and the
        # path to the user's authentication backend in request.session[BACKEND_SESSION_KEY].
        # The login method assumes the backend path had been previously stored in request.user.backend
        # in the 'authenticate' call. However, not all authentication providers do so.
        # So we explicitly populate the request.user.backend field here.
        if not hasattr(request.user, 'backend'):
            request.user.backend = self._get_path_of_arbitrary_backend_for_user(request.user)
        # NOTE(review): the grant-type restriction below is commented out in
        # this fork, so ANY valid access token (not just DOT password-grant
        # tokens) can mint a session cookie. Upstream edx-platform enforces
        # this check -- confirm disabling it is intentional.
        #if not self._is_grant_password(request.auth):
        #    raise AuthenticationFailed({
        #        u'error_code': u'non_supported_token',
        #        u'developer_message': u'Only support DOT type access token with grant type password. '
        #    })
        login(request, request.user)  # login generates and stores the user's cookies in the session
        return HttpResponse(status=204)  # cookies stored in the session are returned with the response
|
marcioweck/PSSLib | reference/deap/doc/code/tutorials/part_4/4_4_Using_Cpp_NSGA.py | Python | lgpl-3.0 | 4,997 | 0.005003 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import random
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import cTools
import sortingnetwork as sn
INPUTS = 6
def evalEvoSN(individual, dimension):
    """Fitness of a network: (sorting errors, number of wires, depth)."""
    net = sn.SortingNetwork(dimension, individual)
    return net.assess(), net.length, net.depth
def genWire(dimension):
    """Random comparator wire: a pair of line indices in [0, dimension)."""
    first = random.randrange(dimension)
    second = random.randrange(dimension)
    return (first, second)
def genNetwork(dimension, min_size, max_size):
    """Random network: between min_size and max_size random wires."""
    size = random.randint(min_size, max_size)
    network = []
    for _ in xrange(size):
        network.append(genWire(dimension))
    return network
def mutWire(individual, dimension, indpb):
    """In place: each wire is independently replaced with probability indpb."""
    for pos in range(len(individual)):
        if random.random() < indpb:
            individual[pos] = genWire(dimension)
def mutAddWire(individual, dimension):
    """In place: insert one new random wire at a random position."""
    where = random.randint(0, len(individual))
    individual.insert(where, genWire(dimension))
def mutDelWire(individual):
    """In place: remove one randomly chosen wire."""
    individual.pop(random.randrange(len(individual)))
# Minimize all three objectives: sorting errors, network length, and depth.
# (Restored `toolbox` and `min_size=`, both garbled by stray " | " separators.)
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()

# Gene initializer
toolbox.register("network", genNetwork, dimension=INPUTS, min_size=9, max_size=12)

# Structure initializers
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.network)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

toolbox.register("evaluate", evalEvoSN, dimension=INPUTS)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", mutWire, dimension=INPUTS, indpb=0.05)
toolbox.register("addwire", mutAddWire, dimension=INPUTS)
toolbox.register("delwire", mutDelWire)
# C++-accelerated NSGA-II selection from deap.cTools.
toolbox.register("select", cTools.selNSGA2)
def main():
    """Evolve sorting networks with NSGA-II; return (population, stats, hof)."""
    random.seed(64)
    population = toolbox.population(n=300)
    hof = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("Avg", tools.mean)
    stats.register("Std", tools.std)
    stats.register("Min", min)
    stats.register("Max", max)
    # crossover / wire-mutation / wire-add / wire-delete probabilities, and
    # the number of generations.
    CXPB, MUTPB, ADDPB, DELPB, NGEN = 0.5, 0.2, 0.01, 0.01, 40
    # Evaluate every individual
    fitnesses = toolbox.map(toolbox.evaluate, population)
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit
    hof.update(population)
    stats.update(population)
    # Begin the evolution
    for g in xrange(NGEN):
        print "-- Generation %i --" % g
        offspring = [toolbox.clone(ind) for ind in population]
        # Apply crossover and mutation
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
        # Note here that we have a different scheme of mutation than in the
        # original algorithm: we apply 3 different mutations subsequently.
        for ind in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(ind)
                del ind.fitness.values
            if random.random() < ADDPB:
                toolbox.addwire(ind)
                del ind.fitness.values
            if random.random() < DELPB:
                toolbox.delwire(ind)
                del ind.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        print " Evaluated %i individuals" % len(invalid_ind)
        # (mu+lambda) style: next population selected from parents + offspring.
        population = toolbox.select(population+offspring, len(offspring))
        hof.update(population)
        stats.update(population)
        print " Min %s" % stats.Min[0][-1][0]
        print " Max %s" % stats.Max[0][-1][0]
        print " Avg %s" % stats.Avg[0][-1][0]
        print " Std %s" % stats.Std[0][-1][0]
    best_network = sn.SortingNetwork(INPUTS, hof[0])
    print best_network
    print best_network.draw()
    print "%i errors, length %i, depth %i" % hof[0].fitness.values
    return population, stats, hof
if __name__ == "__main__":
main()
|
mbohlool/client-python | kubernetes/test/test_v1_role_binding_list.py | Python | apache-2.0 | 971 | 0.004119 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kuber | netes.client.models.v1_role_binding_list import V1RoleBindingList
class TestV1RoleBindingList(unittest.TestCase):
    """ V1RoleBindingList unit test stubs """
    # Auto-generated by swagger-codegen; the actual assertion is still a stub.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1RoleBindingList(self):
        """
        Test V1RoleBindingList
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_role_binding_list.V1RoleBindingList()
        pass
if __name__ == '__main__':
unittest.main()
|
operasoftware/dragonfly-build-tools | df2/showconfig.py | Python | apache-2.0 | 486 | 0.026749 | import sys
i | mport json
import os
def showconfig(args):
    """Serialize the loaded configuration to stdout as indented JSON."""
    stream = sys.stdout
    json.dump(args.config, stream, indent=1)
def configdoc(args):
    # Print the bundled CONFIGDOC file (documents all config options) found
    # under args.root_path.
    with open(os.path.join(args.root_path, "CONFIGDOC"), "r") as f:
        print f.read()
def setup_subparser(subparsers, config):
    """Register the 'showconfig' and 'configdoc' sub-commands on *subparsers*.

    Fixed: ``subparsers.add_parser`` was garbled by a stray " | " separator.
    """
    subp = subparsers.add_parser('showconfig', help="Show the config file.")
    subp.set_defaults(func=showconfig)
    subp = subparsers.add_parser('configdoc', help="Show the config options.")
    subp.set_defaults(func=configdoc)
|
sebcourtois/pypeline-tool-devkit | pytaya/core/transform.py | Python | gpl-3.0 | 6,643 | 0.002559 |
import maya.cmds as mc
import pymel.core as pm
from pytd.util.logutils import logMsg
from pytaya.util.sysutils import argsToPyNode
def matchTransform(obj, target, **kwargs):
    """Match *obj*'s transform to *target*'s.

    Keyword args:
        attributeToMatch/atm: "trs" (default), "rpvt", "spvt", or any
            combination of the letters "t", "r", "s".
        objectSpace/os: match in object space instead of world space
            (required for "s"; world-space scale matching is rejected).
        preserveChild/pc: temporarily reparent obj's transform children so
            they are not dragged along while obj moves, then reparent them.

    Fixed: two tokens (`False`, `oChildren, oParent`) garbled by stray
    " | " separators.
    """
    logMsg(log='all')
    bPreserveChild = kwargs.pop('preserveChild', kwargs.pop('pc', False))
    sAttr = kwargs.pop('attributeToMatch', kwargs.pop('atm', 'trs'))
    bObjSpace = kwargs.get('objectSpace', kwargs.get('os', False))
    (oObj, oTarget) = argsToPyNode(obj, target)
    oChildren = None
    if bPreserveChild:
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)
    sAttrList = list(sAttr)
    if sAttr == "trs":
        matchTRS(oObj, oTarget, **kwargs)
    elif sAttr == "rpvt":
        matchRotatePivot(oObj, oTarget, **kwargs)
    elif sAttr == "spvt":
        matchScalePivot(oObj, oTarget, **kwargs)
    else:
        # Per-letter dispatch for arbitrary combinations like "tr" or "s".
        for sAttr in sAttrList:
            if sAttr == "t":
                matchPos(oObj, oTarget, **kwargs)
            elif sAttr == "r":
                matchRot(oObj, oTarget, **kwargs)
            elif sAttr == "s":
                if bObjSpace == True:
                    matchScl(oObj, oTarget, **kwargs)
                else:
                    logMsg('scale cannot be matched in world space !!', log='all')
            else:
                logMsg("'%s' not a valid attribute to match !!" % sAttr, log='all')
    if oChildren:
        pm.parent(oChildren, oObj)
def matchPos(obj, target, **kwargs):
    # Snap obj's translation to target's, in world space (default) or object
    # space ('objectSpace'/'os'). 'preserveChild'/'pc' temporarily reparents
    # obj's transform children so they are not dragged along (Maya's parent
    # command preserves world transforms by default -- confirm), then puts
    # them back under obj.
    logMsg(log='all')
    bPreserveChild = kwargs.get('preserveChild', kwargs.get('pc', False))
    bObjSpace = kwargs.get('objectSpace', kwargs.get('os', False))
    (oObj, oTarget) = argsToPyNode(obj, target)
    sSpace = 'world'
    if bObjSpace == True:
        sSpace = "object"
    # Query the target translation in the requested space.
    fPosVec = mc.xform(oTarget.name(), q=True, ws=not bObjSpace, os=bObjSpace, t=True)
    oChildren = None
    if bPreserveChild:
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)
    mc.xform(oObj.name(), ws=not bObjSpace, os=bObjSpace, t=fPosVec)
    logMsg("'%s' translate %s to %s" % (sSpace, oObj, oTarget), log='all')
    if oChildren:
        pm.parent(oChildren, oObj)
def matchRot(obj, target, **kwargs):
    """Match only the rotation of `obj` to `target`.

    Implemented by matching the full transform (matchTRS) and then
    restoring the original world translation and local scale, so that
    rotation is the only net change.
    """
    logMsg(log='all')
    bPreserveChild = kwargs.pop('preserveChild', kwargs.pop('pc', False))
    bObjSpace = kwargs.get('objectSpace', kwargs.get('os', False))

    (oObj, oTarget) = argsToPyNode(obj, target)

    sSpace = 'world'
    if bObjSpace == True:
        sSpace = "object"

    # remember the current position and scale so they can be restored
    objWorldPos = mc.xform(oObj.name(), q=True, ws=True, t=True)
    objScale = mc.xform(oObj.name(), q=True, r=True, s=True)

    oChildren = None
    if bPreserveChild:
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)

    matchTRS(oObj, oTarget, logMsg=False, **kwargs)
    logMsg("'%s' rotate %s to %s" % (sSpace, oObj, oTarget), log='all')

    if oChildren:
        pm.parent(oChildren, oObj)

    # undo the translation/scale part of the matched matrix
    # NOTE(review): children are reparented before this restore, so they
    # follow the restore move — presumably intended; confirm with callers.
    mc.xform(oObj.name(), ws=True, t=objWorldPos)
    mc.xform(oObj.name(), s=objScale)
def matchScl(obj, target, **kwargs):
    """Match the (object-space, relative) scale of `obj` to `target`."""
    logMsg(log='all')
    bPreserveChild = kwargs.get('preserveChild', kwargs.get('pc', False))

    (oObj, oTarget) = argsToPyNode(obj, target)

    # query the target's relative scale values
    fScaleVec = mc.xform(oTarget.name(), q=True, r=True, s=True)

    oChildren = None
    if bPreserveChild:
        # temporarily re-home children so they keep their placement
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)

    mc.xform(oObj.name(), s=fScaleVec)
    logMsg("'object' scale %s to %s" % (oObj, oTarget), log='all')

    if oChildren:
        pm.parent(oChildren, oObj)
def matchTRS(obj, target, **kwargs):
    """Match the full transform of `obj` to `target` by copying its
    transformation matrix (translate + rotate + scale in one xform call).

    Keyword args: preserveChild/pc, objectSpace/os as in matchTransform;
    logMsg=False silences the log line (used by matchRot).
    """
    logMsg(log='all')
    bPreserveChild = kwargs.get('preserveChild', kwargs.get('pc', False))
    bObjSpace = kwargs.get('objectSpace', kwargs.get('os', False))
    bLog = kwargs.get('logMsg', True)

    (oObj, oTarget) = argsToPyNode(obj, target)

    sSpace = 'world'
    if bObjSpace == True:
        sSpace = "object"

    # full 4x4 matrix of the target in the requested space
    targetMtx = mc.xform(oTarget.name(), q=True, ws=not bObjSpace, os=bObjSpace, m=True)

    oChildren = None
    if bPreserveChild:
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)

    mc.xform(oObj.name(), m=targetMtx, ws=not bObjSpace, os=bObjSpace)
    if bLog:
        logMsg("'%s' transform %s to %s" % (sSpace, oObj, oTarget), log='all')

    if oChildren:
        pm.parent(oChildren, oObj)
def matchRotatePivot(obj, target, **kwargs):
    """World-space translate `obj` onto `target`'s rotate pivot."""
    logMsg(log='all')
    bPreserveChild = kwargs.get('preserveChild', kwargs.get('pc', False))

    (oObj, oTarget) = argsToPyNode(obj, target)

    # world-space rotate-pivot position of the target
    fPosVec = mc.xform(oTarget.name(), q=True, ws=True, rp=True)

    oChildren = None
    if bPreserveChild:
        # temporarily re-home children so they keep their placement
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)

    mc.xform(oObj.name(), ws=True, t=fPosVec)
    logMsg("'world' translate %s to %s's rotate pivot" % (oObj, oTarget), log='all')

    if oChildren:
        pm.parent(oChildren, oObj)
def matchScalePivot(obj, target, **kwargs):
    """World-space translate `obj` onto `target`'s scale pivot."""
    logMsg(log='all')
    bPreserveChild = kwargs.get('preserveChild', kwargs.get('pc', False))

    (oObj, oTarget) = argsToPyNode(obj, target)

    # world-space scale-pivot position of the target
    fPosVec = mc.xform(oTarget.name(), q=True, ws=True, sp=True)

    oChildren = None
    if bPreserveChild:
        # temporarily re-home children so they keep their placement
        oChildren = oObj.getChildren(typ='transform')
        oParent = oObj.getParent()
        if oChildren:
            if oParent:
                pm.parent(oChildren, oParent)
            else:
                pm.parent(oChildren, world=True)

    mc.xform(oObj.name(), ws=True, t=fPosVec)
    logMsg("'world' translate %s to %s's scale pivot" % (oObj, oTarget), log='all')

    if oChildren:
        pm.parent(oChildren, oObj)
|
comicxmz001/LeetCode | Python/35. Search Insert Position.py | Python | mit | 973 | 0.045221 | class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
O(logn)
"""
low = 0
high = len(nums) - 1
if target <= nums[low]:
| return low
if target > nums[high]:
return high+1
while low < high:
mid = (low + high) // 2
# print low, high, mid
if nums[mid] < target <= nums[mid+1]:
return mid+1
if nums[mid] >= target:
high = mid
else:
low = mid
def searchInsert1(self, nums, target):
"""
| :type nums: List[int]
:type target: int
:rtype: int
60ms O(n)
"""
index = 0
for num in nums:
if num < target:
index += 1
return index
if __name__ == '__main__':
    # Quick manual check: expected insertion index of 7 in [1, 3, 5, 6] is 4.
    # NOTE: the bare `print` statement makes this file Python 2 only.
    target = 7
    nums = [1,3,5,6]
    # nums = [1]
    print Solution().searchInsert(nums,target)
qedsoftware/commcare-hq | corehq/apps/accounting/migrations/0031_merge.py | Python | bsd-3-clause | 324 | 0 | # -*- coding: utf-8 -*-
from __future__ import unic | ode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration.

    Joins two diverged migration branches of the 'accounting' app
    (0027 and 0030) without performing any schema or data changes.
    """

    dependencies = [
        ('accounting', '0027_more_prbac_bootstrap'),
        ('accounting', '0030_remove_softwareplan_visibility_trial_internal'),
    ]

    operations = [
    ]
|
zlsun/XX-Net | code/default/x_tunnel/local/heroku_front/check_ip.py | Python | bsd-2-clause | 4,521 | 0.004424 | #!/usr/bin/env python2
# coding:utf-8
import sys
import os
import threading

# Resolve the XX-Net directory layout relative to this file and extend
# sys.path so the bundled python27 libraries and project modules import.
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, os.pardir))
data_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))
module_data_path = os.path.join(data_path, 'x_tunnel')
python_path = os.path.abspath( os.path.join(root_path, 'python27', '1.0'))
sys.path.append(root_path)

noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)

# Platform-specific bundled libraries.
if sys.platform == "win32":
    win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
    sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
    linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
    sys.path.append(linux_lib)
elif sys.platform == "darwin":
    darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
    sys.path.append(darwin_lib)
    # macOS system-Python extras location
    extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
    sys.path.append(extra_lib)

import xlog
logger = xlog.getLogger("check_ip")
logger.set_buffer(500)

from front_base.openssl_wrap import SSLContext
from front_base.host_manager import HostManagerBase
from front_base.connect_creator import ConnectCreator
from front_base.check_ip import CheckIp
from config import Config
class CheckAllIp(object):
    """Check every IP listed in good_ip.txt against the heroku front and
    append the working ones to data/x_tunnel/heroku_checked_ip.txt.
    """

    def __init__(self):
        config_path = os.path.join(module_data_path, "heroku_front.json")
        config = Config(config_path)
        openssl_context = SSLContext(logger)
        host_manager = HostManagerBase()
        connect_creator = ConnectCreator(logger, config, openssl_context, host_manager,
                                         debug=True)
        self.check_ip = CheckIp(logger, config, connect_creator)

        # lock serializes access to both file handles across worker threads
        self.lock = threading.Lock()
        self.in_fd = open("good_ip.txt", "r")
        self.out_fd = open(
            os.path.join(module_data_path, "heroku_checked_ip.txt"),
            "w"
        )

    def get_ip(self):
        """Return the next candidate IP; raise when the input is exhausted."""
        with self.lock:
            while True:
                line = self.in_fd.readline()
                if not line:
                    # no more lines: signal the worker to stop
                    raise Exception()
                try:
                    ip = line.split()[0]
                    return ip
                except:
                    # blank/malformed line: skip it
                    continue

    def write_ip(self, ip, host, handshake):
        """Append one good-IP record in the XX-Net ip-list format."""
        with self.lock:
            self.out_fd.write("%s %s gws %d 0 0\n" % (ip, host, handshake))
            self.out_fd.flush()

    def checker(self):
        """Worker loop: pull IPs until exhausted and record the good ones."""
        while True:
            try:
                ip = self.get_ip()
            except Exception as e:
                xlog.info("no ip left")
                return

            try:
                res = self.check_ip.check_ip(ip)
            except Exception as e:
                xlog.warn("check except:%r", e)
                continue

            if not res or not res.ok:
                xlog.debug("ip:%s fail", ip)
                continue

            self.write_ip(ip, res.domain, res.handshake_time)

    def run(self):
        """Run 10 checker workers concurrently; return when all finish.

        Bug fix: the original called Thread.run(), which executes the
        checker synchronously in the calling thread, one worker after
        another — no concurrency at all. start() launches real threads;
        join() preserves the original "returns when checking is done"
        behaviour for the caller.
        """
        workers = [threading.Thread(target=self.checker) for _ in range(10)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
def check_all():
    """Batch-check all IPs from good_ip.txt, then terminate the process.

    Calls exit(0) on purpose, so any code after the call site (e.g. the
    single-IP logic in __main__) never runs.
    """
    check = CheckAllIp()
    check.run()
    exit(0)
def check_one(ip, top_domain, wait_time):
    """Check a single `ip` against the heroku front and print the result.

    ip: the IP address to test.
    top_domain: optional host/domain to test with (None = checker default).
    wait_time: forwarded to CheckIp.check_ip — presumably a pre-connect
        delay; confirm semantics in front_base.check_ip.
    """
    config_path = os.path.join(module_data_path, "heroku_front.json")
    config = Config(config_path)
    openssl_context = SSLContext(logger)
    host_manager = HostManagerBase()
    connect_creator = ConnectCreator(logger, config, openssl_context, host_manager,
                                     debug=True)
    check_ip = CheckIp(logger, config, connect_creator)

    res = check_ip.check_ip(ip, host=top_domain, wait_time=wait_time)
    if not res:
        print("connect fail")
    elif res.ok:
        print("success, domain:%s handshake:%d" % (res.domain, res.handshake_time))
    else:
        print("not support")
if __name__ == "__main__":
    # NOTE(review): check_all() calls exit(0), so everything below it is
    # currently unreachable — presumably a development toggle; comment the
    # call out to use the single-IP mode driven by argv.
    check_all()

    # case 1: only ip
    # case 2: ip + domain
    #    connect use domain
    if len(sys.argv) > 1:
        ip = sys.argv[1]
    else:
        ip = "54.225.129.54"
        print("Usage: check_ip.py [ip] [top_domain] [wait_time=0]")

    print("test ip:%s" % ip)

    if len(sys.argv) > 2:
        top_domain = sys.argv[2]
    else:
        top_domain = None

    if len(sys.argv) > 3:
        wait_time = int(sys.argv[3])
    else:
        wait_time = 0

    check_one(ip, top_domain, wait_time)
hh-italian-group/h-tautau | Studies/python/ttbarGenStudies_cfg.py | Python | gpl-2.0 | 8,374 | 0.019226 | ## @package patTuple
# Configuration file to produce PAT-tuples and ROOT-tuples for X->HH->bbTauTau analysis.
#
# \author Claudio Caputo
#
# Copyright 2015
#
# This file is part of X->HH->bbTauTau.
#
# X->HH->bbTauTau is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# X->HH->bbTauTau is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with X->HH->bbTauTau. If not, see <http://www.gnu.org/licenses/>.
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing

# Command-line options for cmsRun (e.g. cmsRun cfg.py isData=True).
options = VarParsing('analysis')
options.register ('isData',
                  False,
                  VarParsing.multiplicity.singleton,
                  VarParsing.varType.bool,
                  "Include Sim. Default: False")
options.register ('sampleType',
                  'Fall15MC',
                  VarParsing.multiplicity.singleton,
                  VarParsing.varType.string,
                  "Indicates the sample type: Spring15MC, Run2015B, Run2015C, Run2015D")
options.register ('computeHT',
                  'False',
                  VarParsing.multiplicity.singleton,
                  VarParsing.varType.bool,
                  "Compute HT variable and HT binning")
options.parseArguments()
process = cms.Process("USER")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.Geometry.GeometryRecoDB_cff")
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#from Configuration.AlCa.GlobalTag import GlobalTag
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag

runOnData = options.isData

# Latest JEC: pick the conditions global tag matching data vs MC.
if runOnData:
    process.GlobalTag.globaltag = '76X_dataRun2_16Dec2015_v0'
    isMC = False
    #process.source.lumisToProcess = LumiList.LumiList(filename = '../json/Cert_13TeV_16Dec2015ReReco_Collisions15_25ns_JSON.txt').getVLuminosityBlockRange()
else:
    process.GlobalTag.globaltag = '76X_mcRun2_asymptotic_RunIIFall15DR76_v1'
    isMC = True

## Events to process
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )

# Candidate input datasets; only one of these is wired into process.source.
inputSignal_v2 = cms.untracked.vstring("file:768F5AFB-D771-E511-9ABD-B499BAABD280.root")
DYSample = cms.untracked.vstring("/store/user/ccaputo/HHbbtautau/Run2/DYSample_forHT.root")
TTBar = cms.untracked.vstring('/store/mc/RunIIFall15MiniAODv2/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12_ext4-v1/00000/0007DBD0-2ED2-E511-AD0D-20CF3019DEF5.root')
SyncSignal = cms.untracked.vstring('/store/mc/RunIIFall15MiniAODv2/SUSYGluGluToHToTauTau_M-160_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/50000/B2FF8F77-3DB8-E511-B743-001E6757F1D4.root')
Radion300 = cms.untracked.vstring('/store/mc/RunIIFall15MiniAODv2/GluGluToRadionToHHTo2B2Tau_M-300_narrow_13TeV-madgraph/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/20000/821F9C9F-28B8-E511-93CF-003048D2BF3E.root')
SyncSignal_fewEvents = cms.untracked.vstring(("root://xrootd.unl.edu//store/mc/RunIIFall15MiniAODv2/SUSYGluGluToHToTauTau_M-160_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/50000/E0B9088F-3DB8-E511-AFFD-001EC9ADCD52.root",
                                              "root://xrootd.unl.edu//store/mc/RunIIFall15MiniAODv2/SUSYGluGluToHToTauTau_M-160_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/70000/EAD78CAB-33B8-E511-8E8E-20CF3027A5BF.root"))
##inputSignal_v2 = cms.untracked.vstring(
##    '/store/mc/RunIISpring15MiniAODv2/SUSYGluGluToHToTauTau_M-160_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/40000/10563B6E-D871-E511-9513-B499BAABD280.root')

## Input files
process.source = cms.Source("PoolSource",
                            fileNames = Radion300#,
                            #eventsToProcess = cms.untracked.VEventRange('1:449465','1:475952')
                            )
## Output file
from PhysicsTools.PatAlgos.patEventContent_cff import patEventContent

# Previous branch-selection logic (skimmedBranches), kept for reference.
# from h-tautau.Production.skimmedBranches_cff import *
#
# if options.computeHT and not options.isData:
#     skimmedBranches = cms.untracked.vstring(BaseMCBranches+
#                                             ['keep LHEEventProduct_externalLHEProducer__LHE'])
# if not options.computeHT and not options.isData:
#     skimmedBranches = cms.untracked.vstring(BaseMCBranches)
#
# if options.isData:
#     skimmedBranches = cms.untracked.vstring(BaseDATABranches)

# Event content kept for the ttbar generator-level studies: drop everything
# except packed/pruned gen particles, gen jets, generator info, jets and MET.
ttbarStudiesBrances = cms.untracked.vstring(['drop *',
                                             'keep patPackedCandidates_packedPFCandidates__PAT',
                                             'keep patPackedGenParticles_packedGenParticles__PAT',
                                             'keep recoGenJets_slimmedGenJets__PAT',
                                             'keep recoGenParticles_prunedGenParticles__PAT',
                                             'keep GenEventInfoProduct_generator__SIM',
                                             'keep patJets_*__PAT',
                                             'keep patMETs_*__PAT']
                                            )

DropAllBranches = cms.untracked.vstring(['drop *'])
process.load("RecoMET.METProducers.METSignificance_cfi")
process.load("RecoMET.METProducers.METSignificanceParams_cfi")

# Loose event skim keeping events with the objects needed downstream.
process.bbttSkim = cms.EDFilter("SkimFilterMiniAOD",
                                vertexSrc = cms.untracked.InputTag('offlineSlimmedPrimaryVertices'),
                                muonSrc = cms.untracked.InputTag('slimmedMuons'),
                                electronSrc=cms.untracked.InputTag("slimmedElectrons"),
                                tauSrc = cms.untracked.InputTag("slimmedTaus")
                                )

# Generator-level ttbar analyzer that fills the output tree.
process.ttbarAnalyzer = cms.EDAnalyzer("TTBarGenAnalyzer",
                                       pruned = cms.InputTag('prunedGenParticles'),
                                       packed = cms.InputTag('packedGenParticles'),
                                       genJet = cms.InputTag('slimmedGenJets'),
                                       isSignal = cms.bool(True),
                                       )

# Dump of the gen-particle listing for the first event (debug aid).
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.printTree = cms.EDAnalyzer("ParticleListDrawer",
                                   maxEventsToPrint = cms.untracked.int32(1),
                                   printVertex = cms.untracked.bool(False),
                                   printOnlyHardInteraction = cms.untracked.bool(False), # Print only status=3 particles. This will not work for Pythia8, which does not have any such particles.
                                   src = cms.InputTag("prunedGenParticles")
                                   )

##------------
#-------------
# Output ROOT file
#-------------
process.TFileService = cms.Service("TFileService", fileName = cms.string("tree.root") )

process.p = cms.Path(
    # process.METSignificance*
    # process.egmGsfElectronIDSequence*
    # process.electronMVAValueMapProducer*
    process.bbttSkim*
    process.printTree*
    process.ttbarAnalyzer
    #(process.syncNtupler_mutau + process.syncNtupler_etau)
    )
process.out = cms.OutputModule("PoolOutputModule",
compressionLevel = cms.untracked.int32(4),
compressionAlgorithm = cms.untracked.string('LZMA'),
eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
fileName = cms.untracked.string('microAOD.root'),
SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring('p') ),
#outputCommands = skimmedBranches,
#outputCommands = HTBinBranches,
outputCommands = ttbarStudiesBrances,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.