| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
tendermint/tmsp | example/python3/app.py | Python | apache-2.0 | 2,169 | 0.001844 | import sys
from abci.wire import hex2bytes, decode_big_endian, encode_big_endian
from abci.server import ABCIServer
from abci.reader import BytesBuffer
class CounterApplication():
def __init__(self):
sys.exit("The python example is out of date. Upgrading the Python examples is currently left as an exercise to you.")
self.hashCount = 0
self.txCount = 0
self.serial = False
def echo(self, msg):
return msg, 0
def info(self):
return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0
def set_option(self, key, value):
if key == "serial" and value == "on":
self.serial = True
return 0
def deliver_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue != self.txCount:
return None, 6
self.txCount += 1
return None, 0
def check_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue < self.txCount:
return 6
return 0
def commit(self):
self.hashCount += 1
if self.txCount == 0:
return "", 0
h = encode_big_endian(self.txCount, 8)
h.reverse()
        return h.decode(), 0
def add_listener(self):
return 0
def rm_listener(self):
        return 0
def event(self):
return
if __name__ == '__main__':
l = len(sys.argv)
if l == 1:
port = 26658
elif l == 2:
port = int(sys.argv[1])
else:
print("too many arguments")
quit()
print('ABCI Demo APP (Python)')
app = CounterApplication()
server = ABCIServer(app, port)
server.main_loop()
|
mohamed-mamdouh95/pedestrainTracker | darkflow/net/flow.py | Python | gpl-3.0 | 4,587 | 0.005668 | import os
import time
import numpy as np
import tensorflow as tf
import pickle
train_stats = (
'Training statistics: \n'
'\tLearning rate : {}\n'
'\tBatch size : {}\n'
    '\tEpoch number  : {}\n'
'\tBackup every : {}'
)
def _save_ckpt(self, step, loss_profile):
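    # Persists two artifacts per checkpoint: the pickled loss history
    # ('<model>-<step>.profile') and the TensorFlow weights written by
    # self.saver, both placed under the FLAGS.backup directory.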
file = '{}-{}{}'
    model = self.meta['name']
profile = file.format(model, step, '.profile')
profile = os.path.join(self.FLAGS.backup, profile)
with open(profile, 'wb') as profile_ckpt:
pickle.dump(loss_profile, profile_ckpt)
ckpt = file.format(model, step, '')
ckpt = os.path.join(self.FLAGS.backup, ckpt)
self.say('Checkpoint at step {}'.format(step))
self.saver.save(self.sess, ckpt)
def train(self):
loss_ph = self.framework.placeholders
loss_mva = None; profile = list()
batches = self.framework.shuffle()
loss_op = self.framework.loss
for i, (x_batch, datum) in enumerate(batches):
if not i: self.say(train_stats.format(
self.FLAGS.lr, self.FLAGS.batch,
self.FLAGS.epoch, self.FLAGS.save
))
feed_dict = {
loss_ph[key]: datum[key]
for key in loss_ph }
feed_dict[self.inp] = x_batch
feed_dict.update(self.feed)
fetches = [self.train_op, loss_op, self.summary_op]
fetched = self.sess.run(fetches, feed_dict)
loss = fetched[1]
if loss_mva is None: loss_mva = loss
loss_mva = .9 * loss_mva + .1 * loss
step_now = self.FLAGS.load + i + 1
self.writer.add_summary(fetched[2], step_now)
form = 'step {} - loss {} - moving ave loss {}'
self.say(form.format(step_now, loss, loss_mva))
profile += [(loss, loss_mva)]
ckpt = (i+1) % (self.FLAGS.save // self.FLAGS.batch)
args = [step_now, profile]
if not ckpt: _save_ckpt(self, *args)
if ckpt: _save_ckpt(self, *args)
def return_predict(self, im):
assert isinstance(im, np.ndarray), \
'Image is not a np.ndarray'
h, w, _ = im.shape
im = self.framework.resize_input(im)
this_inp = np.expand_dims(im, 0)
feed_dict = {self.inp : this_inp}
out = self.sess.run(self.out, feed_dict)[0]
boxes = self.framework.findboxes(out)
threshold = self.FLAGS.threshold
boxesInfo = list()
for box in boxes:
tmpBox = self.framework.process_box(box, h, w, threshold)
if tmpBox is None:
continue
boxesInfo.append({
"label": tmpBox[4],
"confidence": tmpBox[6],
"topleft": {
"x": tmpBox[0],
"y": tmpBox[2]},
"bottomright": {
"x": tmpBox[1],
"y": tmpBox[3]}
})
return boxesInfo
import math
def predict(self):
inp_path = self.FLAGS.test
all_inps = os.listdir(inp_path)
all_inps = [i for i in all_inps if self.framework.is_inp(i)]
if not all_inps:
msg = 'Failed to find any test files in {} .'
exit('Error: {}'.format(msg.format(inp_path)))
batch = min(self.FLAGS.batch, len(all_inps))
# predict in batches
n_batch = int(math.ceil(len(all_inps) / batch))
for j in range(n_batch):
from_idx = j * batch
to_idx = min(from_idx + batch, len(all_inps))
# collect images input in the batch
inp_feed = list(); new_all = list()
this_batch = all_inps[from_idx:to_idx]
for inp in this_batch:
new_all += [inp]
this_inp = os.path.join(inp_path, inp)
this_inp = self.framework.preprocess(this_inp)
expanded = np.expand_dims(this_inp, 0)
inp_feed.append(expanded)
this_batch = new_all
# Feed to the net
feed_dict = {self.inp : np.concatenate(inp_feed, 0)}
self.say('Forwarding {} inputs ...'.format(len(inp_feed)))
start = time.time()
out = self.sess.run(self.out, feed_dict)
stop = time.time(); last = stop - start
self.say('Total time = {}s / {} inps = {} ips'.format(
last, len(inp_feed), len(inp_feed) / last))
# Post processing
self.say('Post processing {} inputs ...'.format(len(inp_feed)))
start = time.time()
for i, prediction in enumerate(out):
self.framework.postprocess(prediction,
os.path.join(inp_path, this_batch[i]))
stop = time.time(); last = stop - start
# Timing
self.say('Total time = {}s / {} inps = {} ips'.format(
last, len(inp_feed), len(inp_feed) / last)) |
LuminosoInsight/luminoso-api-client-python | luminoso_api/v5_client.py | Python | mit | 20,872 | 0.000096 | """
Provides the LuminosoClient object, a wrapper for making
properly-authenticated requests to the Luminoso REST API.
"""
import json
import logging
import os
import requests
import time
from getpass import getpass
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from urllib.parse import urlparse
from .v5_constants import URL_BASE
from .errors import (LuminosoError, LuminosoAuthError, LuminosoClientError,
LuminosoServerError, LuminosoTimeoutError)
from .version import VERSION
logger = logging.getLogger(__name__)
class LuminosoClient(object):
"""
A tool for making authenticated requests to the Luminoso API version 5.
A LuminosoClient is a thin wrapper around the API documented at
https://daylight.luminoso.com/api/v5/. As such, you interact with it by
calling its methods that correspond to HTTP methods: `.get(url)`,
`.post(url)`, `.put(url)`, `.patch(url)`, and `.delete(url)`.
These URLs are relative to a 'base URL' for the LuminosoClient. For
example, you can make requests for a specific project by creating a
LuminosoClient for
`https://daylight.luminoso.com/api/v5/projects/<project_id>`.
Methods take parameters as keyword arguments, and encode them in the
appropriate way for the request, which is described in the individual
documentation for each method.
The easiest way to create a LuminosoClient is using the
`LuminosoClient.connect()` static method.
In addition to the base URL, the LuminosoClient has a `root_url`,
pointing to the root of the API, such as
https://daylight.luminoso.com/api/v5. This is used, for example, as a
starting point for the `client_for_path` method: when it gets a path
starting with `/`, it will go back to the `root_url` instead of adding to
the existing URL.
"""
_URL_BASE = URL_BASE
def __init__(self, session, url, user_agent_suffix=None, timeout=None):
"""
Create a LuminosoClient given an existing Session object that has a
_TokenAuth object as its .auth attribute.
It is probably easier to call LuminosoClient.connect() to handle
the authentication for you.
"""
self.session = session
self.timeout = timeout
self.url = ensure_trailing_slash(url)
# Don't warn this time; warning happened in connect()
self.root_url = self.get_root_url(url, warn=False)
# Calculate the full user agent suffix, but also store the suffix so it
# can be preserved by client_for_path().
self._user_agent_suffix = user_agent_suffix
self.user_agent = 'LuminosoClient/' + VERSION
if user_agent_suffix is not None:
self.user_agent += ' ' + user_agent_suffix
def __repr__(self):
return '<LuminosoClient for %s>' % self.url
@classmethod
def connect(cls, url=None, token_file=None, token=None,
user_agent_suffix=None, timeout=None):
"""
Returns an object that makes requests to the API, authenticated
with a saved or specified long-lived token, at URLs beginning with
`url`.
If no URL is specified, or if the specified URL is a path such as
'/projects' without a scheme and domain, the client will default to
https://daylight.luminoso.com/api/v5/.
If neither token nor token_file are specified, the client will look
for a token in $HOME/.luminoso/tokens.json. The file should contain
a single json dictionary of the format
`{'root_url': 'token', 'root_url2': 'token2', ...}`.
Requests made with this client will have the user agent
"LuminosoClient" and the version number. You can optionally pass a
string to be appended to this, though for most uses of the client this
is unnecessary.
"""
if url is None:
url = '/'
if url.startswith('http'):
root_url = cls.get_root_url(url)
else:
url = cls._URL_BASE + '/' + url.lstrip('/')
root_url = cls._URL_BASE
if token is None:
token_file = token_file or get_token_filename()
try:
with open(token_file) as tf:
token_dict = json.load(tf)
except FileNotFoundError:
raise LuminosoAuthError('No token file at %s' % token_file)
netloc = urlparse(root_url).netloc
try:
token = token_dict[netloc]
        except KeyError:
            # Some code to help people transition from using URLs with
# "analytics" to URLs with "daylight" by looking for a token
# with the old URL and using it if it exists
legacy_netloc = netloc.replace('daylight', 'analytics')
if legacy_netloc in token_dict:
logger.warning('Using token for legacy domain %s; saving it'
' for %s', legacy_netloc, netloc)
token = token_dict[legacy_netloc]
cls.save_token(token, domain=netloc,
token_file=token_file)
else:
raise LuminosoAuthError('No token stored for %s' % root_url)
session = requests.session()
session.auth = _TokenAuth(token)
# By default, requests will only retry things like connection timeouts,
# not any server responses. We use urllib3's Retry class to say that,
# if a call failed specifically on a 429 ("too many requests"), wait a
# full second and try again. (Technically it tries again immediately,
# but then it gets another 429 and tries again at twice the backoff
# factor.) The total retries is 10, which is 256 seconds (four
# minutes, 16 seconds; or a cumulative wait of 8.5 minutes).
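        # (Concretely, with backoff_factor=.5 that schedule is roughly
        # 1, 2, 4, ..., 256 seconds between successive 429 retries.)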
retry_strategy = Retry(total=10, backoff_factor=.5,
status_forcelist=[429])
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("https://", adapter)
session.mount("http://", adapter)
return cls(session, url, user_agent_suffix=user_agent_suffix,
timeout=timeout)
@classmethod
def save_token(cls, token=None, domain='daylight.luminoso.com',
token_file=None):
"""
Take a long-lived API token and store it to a local file. Long-lived
tokens *should* be retrieved through the UI and specified as the
`token` argument to this method. As a dispreferred alternative, if no
token is specified, you will be prompted for a username and password
and a new token will be created and saved.
Other optional arguments are the domain for which the token is valid
and the file in which to store the token.
"""
# Make this as friendly as possible: turn any of
# "daylight.luminoso.com", "daylight.luminoso.com/api/v5", or
# "https://daylight.luminoso.com/" into just the domain
if '://' in domain:
parsed = urlparse(domain)
domain = parsed.netloc
protocol = parsed.scheme
else:
domain = domain.split('/')[0]
protocol = None
if token is None:
if domain == 'daylight.luminoso.com':
protocol = 'https'
while protocol is None:
prompt = input('Use https? (y/n, default=y): ').lower()
if not prompt or prompt.startswith('y'):
protocol = 'https'
elif prompt.startswith('n'):
protocol = 'http'
url = f'{protocol}://{domain}/'
username = input('Username: ')
password = getpass('Password: ')
session = requests.session()
headers = {'user-agent': f'LuminosoClient/{VERSION} save_token()',
'Content-Type': 'application/json'}
temp_token_resp = session.post(
url.rstrip('/') + '/api/v5/login/', headers=he |
Luindil/Glassure | glassure/core/optimization.py | Python | mit | 19,018 | 0.005784 | # -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
import lmfit
from . import Pattern
from .calc import calculate_fr, calculate_gr_raw, calculate_sq, calculate_sq_raw, calculate_normalization_factor_raw, \
fit_normalization_factor
from .utility import convert_density_to_atoms_per_cubic_angstrom, calculate_incoherent_scattering, \
calculate_f_mean_squared, calculate_f_squared_mean
from .utility import extrapolate_to_zero_poly
from .soller_correction import SollerCorrection
__all__ = ['optimize_sq', 'optimize_density', 'optimize_incoherent_container_scattering',
'optimize_soller_dac']
def optimize_sq(sq_pattern, r_cutoff, iterations, atomic_density, use_modification_fcn=False,
attenuation_factor=1, fcn_callback=None, callback_period=2):
"""
Performs an optimization of the structure factor based on an r_cutoff value as described in Eggert et al. 2002 PRB,
65, 174105. This basically does back and forward transforms between S(Q) and f(r) until the region below the
r_cutoff value is a flat line without any oscillations.
:param sq_pattern:
original S(Q)
:param r_cutoff:
cutoff value below which there is no signal expected (below the first peak in g(r))
:param iterations:
number of back and forward transforms
:param atomic_density:
density in atoms/A^3
:param use_modification_fcn:
Whether or not to use the Lorch modification function during the Fourier transform.
Warning: When using the Lorch modification function usually more iterations are needed to get to the
wanted result.
:param attenuation_factor:
Sometimes the initial change during back and forward transformations results in a run
away, by setting the attenuation factor to higher than one can help for this situation, it basically reduces
the amount of change during each iteration.
:param fcn_callback:
Function which will be called at an iteration period defined by the callback_period parameter.
The function should take 3 arguments: sq_pattern, fr_pattern and gr_pattern. Additionally the function
should return a boolean value, where True continues the optimization and False will stop the optimization
procedure
:param callback_period:
determines how frequently the fcn_callback will be called.
:return:
optimized S(Q) pattern
"""
r = np.arange(0, r_cutoff, 0.02)
sq_pattern = deepcopy(sq_pattern)
for iteration in range(iterations):
fr_pattern = calculate_fr(sq_pattern, r, use_modification_fcn)
q, sq_int = sq_pattern.data
r, fr_int = fr_pattern.data
delta_fr = fr_int + 4 * np.pi * r * atomic_density
in_integral = np.array(np.sin(np.outer(q.T, r))) * delta_fr
integral = np.trapz(in_integral, r) / attenuation_factor
sq_optimized = sq_int * (1 - 1. / q * integral)
sq_pattern = Pattern(q, sq_optimized)
if fcn_callback is not None and iteration % callback_period == 0:
fr_pattern = calculate_fr(sq_pattern, use_modification_fcn=use_modification_fcn)
gr_pattern = calculate_gr_raw(fr_pattern, atomic_density)
fcn_callback(sq_pattern, fr_pattern, gr_pattern)
return sq_pattern
def optimize_density(data_pattern, background_pattern, initial_background_scaling, composition,
initial_density, background_min, background_max, density_min, density_max,
iterations, r_cutoff, use_modification_fcn=False, extrapolation_cutoff=None,
r_step=0.01, fcn_callback=None):
"""
Performs an optimization of the background scaling and density using a figure of merit function defined by the low
r region in F(r) as described in Eggert et al. (2002) PRB, 65, 174105.
:param data_pattern: raw data pattern in Q space (A^-1)
:param background_pattern: raw background pattern in Q space (A^-1)
:param initial_background_scaling:
start value for the background scaling optimization
    :param composition: composition of the sample as a dictionary with elements as keys and abundances as values
:param initial_density: start value for the density optimization in g/cm^3
:param background_min: minimum value for the background scaling
:param background_max: maximum value for the background scaling
    :param density_min: minimum value for the density
:param density_max: maximum value for the density
:param iterations: number of iterations of S(Q) (see optimize_sq(...) prior to calculating chi2
:param r_cutoff: cutoff value below which there is no signal expected (below the first peak in g(r))
:param use_modification_fcn:
Whether or not to use the Lorch modification function during the Fourier transform.
Warning: When using the Lorch modification function usually more iterations are needed
to get to the wanted result. Default is False.
:param extrapolation_cutoff:
Determines up to which q value the S(Q) will be extrapolated to zero. The default
(None), will use the minimum q value plus 0.2 A^-1
:param r_step: Step size for the r-space for calculating f(r) during each iteration. Defaults to
0.01.
:param fcn_callback: Function which will be called after each iteration. The function should take 4
arguments: iteration number, chi2, density, and background scaling. Additionally the
function should return a boolean value, where True continues the optimization and False
will stop the optimization procedure
:return: (tuple) - density, density standard error, background scaling, background scaling standard error
"""
params = lmfit.Parameters()
params.add("density", value=initial_density, min=density_min, max=density_max)
params.add("background_scaling", value=initial_background_scaling, min=background_min, max=background_max)
r = np.arange(0, r_cutoff + r_step / 2., r_step)
def optimization_fcn(params, extrapolation_max, r, r_cutoff, use_modification_fcn):
density = params['density'].value
atomic_density = convert_density_to_atoms_per_cubic_angstrom(composition, density)
background_pattern.scaling = params['background_scaling'].value
sq = calculate_sq(data_pattern - background_pattern, density, composition)
extrapolation_max = extrapolation_max or np.min(sq._x[0]) + 0.2
sq = extrapolate_to_zero_poly(sq, extrapolation_max)
sq_optimized = optimize_sq(sq, r_cutoff, iterations, atomic_density, use_modification_fcn)
fr = calculate_fr(sq_optimized, r=r, use_modification_fcn=use_modification_fcn)
min_r, min_fr = fr.data
output = (min_fr + 4 * np.pi * atomic_density * min_r) ** 2 * r_step
if fcn_callback is not None:
if not fcn_callback(optimization_fcn.iteration,
np.sum(output),
density,
params['background_scaling'].value):
return None
optimization_fcn.iteration += 1
return output
optimization_fcn.iteration = 1
lmfit.minimize(optimization_fcn, params, args=(extrapolation_cutoff, r, r_cutoff, use_modification_fcn))
lmfit.report_fit(params)
return params['density'].value, params['density'].stderr, \
params['background_scaling'].value, params['background_scaling'].stderr
def optimize_incoherent_container_scattering(sample_pattern, sample_density, sample_composition, container_composition,
r_cutoff, initial_content=10, use_extrapolation=True,
extrapolation_q_max=None, callback_fcn=None):
|
cberzan/django-anger | testdata/good_migration.py | Python | mit | 60,530 | 0.008161 | # -*- coding: utf-8 -*-
"""
A fairly complicated migration taken from the real world, mangled so as not to
disclose much about the meaning of the models / fields.
"""
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass # dummy
def backwards(self, orm):
pass # dummy
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
'app_beta.model01': {
'Meta': {'object_name': 'Model01'},
'field001': ('django.db.models.fields.CharField', [], {'default': "'blabla'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'field002': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'field003': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'field004': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
'app_beta.model02': {
'Meta': {'object_name': 'Model02'},
'field005': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'field006': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'field007': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'field008': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'field009': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'field010': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'app_beta.model03': {
'Meta': {'object_name': 'Model03', '_ormbases': ['app_beta.Model07']},
'field011': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['app_beta.Model07']", 'unique': 'True', 'primary_key': 'True'})
},
'app_beta.model04': {
'Meta': {'object_name': 'Model04'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'field012': ('django.db.models.fields.DateField', [], {}),
'field013': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'field014': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'field015': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'app_beta.model05': {
'Meta': {'ordering': "['asdf', 'qwer']", 'unique_together': "(['content_type', 'object_id'],)", 'object_name': 'Model05'},
'field016': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'qwer': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'asdf': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'field017': ('django.db.models.fields.PositiveIntegerField', [], {}),
'field018': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'app_beta.model06': {
'Meta': {'ordering': "('dpdpd', '-qppqwewje')", 'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'Model06'},
'qppqwewje': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'field019': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'field020': ('django.db.models.fields.PositiveIntegerField', [], {}),
'dpdpd': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'field021': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'app_beta.model07': {
'Meta': {'ordering': "('-aerowerowe',)", 'object_name': 'Model07'},
'field022': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'field023': ('django.db.models.fields.TextField', [], {}),
'field024': ('django.db.models.fields.PositiveIntegerField', [], {}),
'aerowerowe': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'app_beta.model07a': {
'Meta': {'object_name': 'Model07a'},
            'field025': ('django.db.models.fields.TextField', [], {}),
'field026': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'field027': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '160'}),
'field028': ('django.db.models.fields.CharField', [], {'max_length': '160'})
},
'app_beta.model08': {
'Meta': {'object_name': 'Model08'},
'field029': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'field030': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'field032': ('django.db.models.fields.BooleanField', |
mathLab/RBniCS | src/python/rbnics/backends/basic/wrapping/function_copy.py | Python | lgpl-3.0 | 168 | 0 | # Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
def function_copy(function):
pass
|
dmaidaniuk/ozark | test/jsr223/src/main/webapp/WEB-INF/views/index.py | Python | apache-2.0 | 54 | 0.018519 | 'Hello ' + models['mvc'].encoders.html(models['name']) |
alessandro-sena/slearning-stackoverflow | fix_probs.py | Python | bsd-2-clause | 965 | 0.004145 | import competition_utilities as cu
import numpy as np
import features
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import *
import sys
from sklearn.neighbors import KNeighborsClassifier
probs_file = sys.argv[1]
submission_file = sys.argv[2]
train_file = "train-sample.csv"
full_train_file = "train.csv"
test_file = "public_leaderboard.csv"
def main():
f = open(probs_file, 'r')
lines = f.readlines()
probs = []
for line in lines:
probs.append(np.array([float(x) for x in line.split(',')]))
print("Calculating priors and updating posteriors")
probs = np.array(probs)
new_priors = cu.get_priors(full_train_file)
old_priors = cu.get_priors(train_file)
probs = cu.cap_and_update_priors(old_priors, probs, new_priors, 0.001)
print("Saving submission to %s" % submission_file)
cu.write_submission(submission_file, probs)
f.close()
if __name__=="__main__":
main()
|
cargocult/rowan-python | rowan/db.py | Python | mit | 1,715 | 0.001749 | import rowan.controllers.base as base
# ----------------------------------------------------------------------------
class MongoDBMiddleware(base.Wrapper):
"""
Wraps another controller, setting up the mongo database before
delegation. The database is housed in the .db.mongo property of the
request.
"""
@classmethod
def import_dependencies(cls):
global pymongo
import pymongo
    def __init__(self, controller,
server="localhost", port=27017, db="test"):
super(MongoDBMiddleware, self).__init__(controller)
self.server = server
self.port = port
        self.db = db
self.connection = pymongo.Connection(self.server, self.port)
def __call__(self, request):
with request.set(db__mongo=self.connection[self.db]):
return self.controller(request)
# ----------------------------------------------------------------------------
class SQLAlchemyMiddleware(base.Wrapper):
"""Wraps another controller, setting up the sql database before
delegation. The database is housed in the .db.sqlalechmy property of
the request.
"""
@classmethod
def import_dependencies(cls):
global sql, orm
import sqlalchemy as sql
import sqlalchemy.orm as orm
def __init__(self, controller, connection_string="sqlite:///database.db"):
super(SQLAlchemyMiddleware, self).__init__(controller)
self.connection_string = connection_string
def __call__(self, request):
engine = sql.create_engine(self.connection_string, echo=True)
with request.set(db__sqlalchemy=orm.sessionmaker(bind=engine)()):
return self.controller(request)
|
supercheetah/diceroller | pyinstaller/PyInstaller/hooks/hook-paste.exceptions.reporter.py | Python | artistic-2.0 | 169 | 0.005917 | # some modules use the old-style import: explicitly include
# the new module when the old one is referenced
hiddenimports = ["email.mime.text", "email.mime.multipart"]
|
kubernetes-client/python | kubernetes/client/models/v1_deployment.py | Python | apache-2.0 | 7,196 | 0 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Deployment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1DeploymentSpec',
'status': 'V1DeploymentStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Deployment - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Deployment. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Deployment. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Deployment.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Deployment. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Deployment. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Deployment. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Deployment.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Deployment. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Deployment. # noqa: E501
:return: The metadata of this V1Deployment. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Deployment.
:param metadata: The metadata of this V1Deployment. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Deployment. # noqa: E501
:return: The spec of this V1Deployment. # noqa: E501
:rtype: V1DeploymentSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Deployment.
:param spec: The spec of this V1Deployment. # noqa: E501
:type: V1DeploymentSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Deployment. # noqa: E501
:return: The status of this V1Deployment. # noqa: E501
:rtype: V1DeploymentStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Deployment.
:param status: The status of this V1Deployment. # noqa: E501
:type: V1DeploymentStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"" | "For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Deployment):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Deployment):
return True
        return self.to_dict() != other.to_dict()
|
jromang/retina-old | imagepreview.py | Python | gpl-3.0 | 9,712 | 0.018946 | #
# This file is part of Retina.
#
# Retina is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Retina is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Retina. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2012 Jean-Francois Romang <jeanfrancois.romang AT gmail.com>
#
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 14:10:45 2012
@author: Jeff
"""
import sys
import pyfits
import matplotlib.cm
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ui_imagepreview import Ui_ImagePreviewWidget
from flickcharm import FlickCharm
import qimage2ndarray
import numpy
#import stylesheet
class ImagePreviewWidget(QtGui.QWidget, Ui_ImagePreviewWidget):
def __init__(self, image):
QtGui.QWidget.__init__(self)
self.setupUi(self)
#self.scrollArea.setWidgetResizable(True)
self.charm = FlickCharm()
self.charm.activateOn(self.scrollArea)
QObject.connect(self.fitToolButton,QtCore.SIGNAL('clicked()'), self.fitImage)
QObject.connect(self.fullSizeToolButton,QtCore.SIGNAL('clicked()'), self.fullSizeImage)
QObject.connect(self.cornersToolButton,QtCore.SIGNAL('clicked()'), self.cornersImage)
QObject.connect(self.horizontalSlider,QtCore.SIGNAL('valueChanged(int)'), self.zoomImage)
QObject.connect(self.colormapComboBox,QtCore.SIGNAL('currentIndexChanged(QString)'), self.colorMapChanged)
#QObject.connect(self.minDoubleSpinBox,QtCore.SIGNAL('valueChanged(double)'), self.clampChanged)
#QObject.connect(self.maxDoubleSpinBox,QtCore.SIGNAL('valueChanged(double)'), self.clampChanged)
QObject.connect(self.histogram,QtCore.SIGNAL('clampChanged(double,double)'), self.clampChanged)
self.splitter.setStretchFactor(0, 5)
self.splitter.setStretchFactor(1, 2)
self.colormap=[]
currentMap=matplotlib.cm.get_cmap('gray')
for n in range(256) :
self.colormap.append( tuple([int(255*k) for k in currentMap(n)]) )
self.setImage(image)
self.scrollArea.installEventFilter(self)
self.painter=QPainter()
def eventFilter(self, obj, event):
if event.type() == QEvent.Resize:
if(self.fitToolButton.isChecked()):
self.fitImage()
elif(self.cornersToolButton.isChecked()):
self.cornersImage()
return False
def setImage(self,image):
#print 'opening:',image
self.titleLabel.setText("<b>"+image+"</b>")
self.image=image
hdulist = pyfits.open(image)
prihdr = hdulist[0].header
keys=prihdr.ascardlist().keys()
keys.sort()
self.tableWidget.setRowCount(len(keys));
self.tableWidget.setColumnCount(2);
i=0
for item in keys:
self.tableWidget.setItem(i, 0, QTableWidgetItem(item));
self.tableWidget.setItem(i, 1, QTableWidgetItem(str(prihdr[item])));
i=i+1
self.tableWidget.resizeColumnsToContents()
self.tableWidget.resizeRowsToContents()
#load the image
self.data = hdulist[0].data #.astype('float32')
self.histogram.setData(self.data)
min = self.data.min()
max = self.data.max()
#self.minDoubleSpinBox.setMinimum(min)
#self.minDoubleSpinBox.setMaximum(max)
#self.minDoubleSpinBox.setValue(min)
#self.maxDoubleSpinBox.setMinimum(min)
#self.maxDoubleSpinBox.setMaximum(max)
#self.maxDoubleSpinBox.setValue(max)
self.clampChanged(min,max)
#create histogram
#http://matplotlib.sourceforge.net/users/image_tutorial.html
#print numpy.histogram(self.data,200)
#im.show()
def fitImage(self):
#print 'fitting image'
self.fitToolButton.setChecked(True)
self.fullSizeToolButton.setChecked(False)
self.cornersToolButton.setChecked(False)
fittedimage=self.qimage.scaled(self.scrollArea.size().width()-4,self.scrollArea.height()-4,Qt.KeepAspectRatio,Qt.SmoothTransformation)
self.imageLabel.setPixmap(QPixmap.fromImage(fittedimage))
def fullSizeImage(self):
#print 'fitting image'
self.fitToolButton.setChecked(False)
self.fullSizeToolButton.setChecked(True)
self.cornersToolButton.setChecked(False)
self.imageLabel.setPixmap(QPixmap.fromImage(self.qimage))
def cornersImage(self):
#print 'fitting image'
self.fitToolButton.setChecked(False)
self.fullSizeToolButton.setChecked(False)
self.cornersToolButton.setChecked(True)
width=self.scrollArea.size().width()-4
height=self.scrollArea.height()-4
cornersImage=QImage(width,height,QImage.Format_RGB32)
#for n in range(256):
# cornersImage.setColor (n, qRgb(*self.colormap[n][:3]))
self.painter.begin(cornersImage)
#upperLeft=self.qimage.copy(0,0,width/2,height/2)
self.painter.drawImage(0,0,self.qimage.copy(0,0,width/2,height/2)) #upper left
self.painter.drawImage(width/2,0,self.qimage.copy(self.qimage.width()-width/2,0,width/2,height/2)) #upper right
        self.painter.drawImage(0,height/2,self.qimage.copy(0,self.qimage.height()-height/2,width/2,height/2)) #lower left
self.painter.drawImage(width/2,height/2,self.qimage.copy(self.qimage.width()-width/2,self.qimage.height()-height/2,width/2,height/2))#lower right
self.painter.setPen(QColor(255,160,47))
self.painter.drawLine(width/2,0,width/2,height)
self.painter.drawLine(0,height/2,width,height/2)
xcenter=self.qimage.width()/2
        ycenter=self.qimage.height()/2
self.painter.drawImage(width/4,height/4,self.qimage.copy(xcenter-width/4,ycenter-height/4,width/2,height/2))
self.painter.drawRect(width/4,height/4,width/2,height/2)
self.painter.end()
self.imageLabel.setPixmap(QPixmap.fromImage(cornersImage))
def zoomImage(self, value):
#print 'zoom', value
self.fitToolButton.setChecked(False)
self.fullSizeToolButton.setChecked(False)
self.cornersToolButton.setChecked(False)
zoomimage=self.qimage.scaledToWidth(self.qimage.width()*value/100)
self.imageLabel.setPixmap(QPixmap.fromImage(zoomimage))
def colorMapChanged(self, string) :
self.colormap=[]
currentMap=matplotlib.cm.get_cmap(str(string))
for n in range(256) :
self.colormap.append( tuple([int(255*k) for k in currentMap(n)]) )
for n in range(256):
self.qimage.setColor (n, qRgb(*self.colormap[n][:3]))
self.redrawImage()
|
dpnova/cyclone | cyclone/__init__.py | Python | apache-2.0 | 714 | 0 | # coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Alexandre Fiori"
__version__ = version = "git-2013042301"
|
pantsbuild/pants | src/python/pants/backend/python/subsystems/ipython_test.py | Python | apache-2.0 | 3,096 | 0.002907 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.python import target_types_rules
from pants.backend.python.goals.lockfile import GeneratePythonLockfile
from pants.backend.python.subsystems.ipython import IPythonLockfileSentinel
from pants.backend.python.subsystems.ipython import rules as subsystem_rules
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.target_types import GenericTarget
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_setup_lockfile_interpreter_constraints() -> None:
rule_runner = RuleRunner(
rules=[
*subsystem_rules(),
*target_types_rules.rules(),
QueryRule(GeneratePythonLockfile, [IPythonLockfileSentinel]),
],
target_types=[PythonSourcesGeneratorTarget, GenericTarget],
)
global_constraint = "==3.9.*"
rule_runner.set_options(
["--ipython-lockfile=lockfile.txt"],
env={"PANTS_PYTHON_INTERPRETER_CONSTRAINTS": f"['{global_constraint}']"},
)
def assert_ics(build_file: str, expected: list[str]) -> None:
rule_runner.write_files({"project/BUILD": build_file})
lockfile_request = rule_runner.request(GeneratePythonLockfile, [IPythonLockfileSentinel()])
assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected)
assert_ics("python_sources()", [global_constraint])
assert_ics("python_sources(interpreter_constraints=['==2.7.*'])", ["==2.7.*"])
assert_ics(
"python_sources(interpreter_constraints=['==2.7.*', '==3.5.*'])", ["==2.7.*", "==3.5.*"]
)
# If no Python targets in repo, fall back to global [python] constraints.
assert_ics("target()", [global_constraint])
# If there are multiple distinct ICs in the repo, we OR them. Even though the user might AND
# them by running `./pants repl ::`, they could also run on more precise subsets like
# `./pants repl py2::` and then `./pants repl py3::`
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*'])
python_sources(name='b', interpreter_constraints=['==3.5.*'])
"""
        ),
        ["==2.7.*", "==3.5.*"],
)
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*', '==3.5.*'])
python_sources(name='b', interpreter_constraints=['>=3.5'])
"""
),
["==2.7.*", "==3.5.*", ">=3.5"],
)
assert_ics(
dedent(
"""\
python_sources(name='a')
python_sources(name='b', interpreter_constraints=['==2.7.*'])
python_sources(name='c', interpreter_constraints=['>=3.6'])
"""
),
["==2.7.*", global_constraint, ">=3.6"],
)
|
hlabathems/iccf | iccf/__init__.py | Python | bsd-3-clause | 517 | 0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is an Astropy affiliated package.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
    from .iccf import *
|
slobberchops/rop | opc/text.py | Python | gpl-3.0 | 4,706 | 0 | typeface_bbc = {
"description": "Typeface from the Acorn BBC Computer",
"geometry": {"width": 8, "height": 8},
"bitmaps": [
0x00000000, 0x00000000, 0x18181818, 0x18001800, # (spc) !
0x6c6c6c00, 0x00000000, 0x36367f36, 0x7f363600, # " #
0x0c3f683e, 0x0b7e1800, 0x60660c18, 0x30660600, # $ %
0x386c6c38, 0x6d663b00, 0x0c183000, 0x00000000, # & '
0x0c183030, 0x30180c00, 0x30180c0c, 0x0c183000, # ( )
0x00187e3c, 0x7e180000, 0x0018187e, 0x18180000, # * +
0x00000000, 0x00181830, 0x0000007e, 0x00000000, # , -
0x00000000, 0x00181800, 0x00060c18, 0x30600000, # . /
0x3c666e7e, 0x76663c00, 0x18381818, 0x18187e00, # 0 1
0x3c66060c, 0x18307e00, 0x3c66061c, 0x06663c00, # 2 3
0x0c1c3c6c, 0x7e0c0c00, 0x7e607c06, 0x06663c00, # 4 5
0x1c30607c, 0x66663c00, 0x7e060c18, 0x30303000, # 6 7
0x3c66663c, 0x66663c00, 0x3c66663e, 0x060c3800, # 8 9
0x00001818, 0x00181800, 0x00001818, 0x00181830, # : ;
0x0c183060, 0x30180c00, 0x00007e00, 0x7e000000, # < =
0x30180c06, 0x0c183000, 0x3c660c18, 0x18001800, # > ?
0x3c666e6a, 0x6e603c00, 0x3c66667e, 0x66666600, # @ A
0x7c66667c, 0x66667c00, 0x3c666060, 0x60663c00, # B C
0x786c6666, 0x666c7800, 0x7e60607c, 0x60607e00, # D E
0x7e60607c, 0x60606000, 0x3c66606e, 0x66663c00, # F G
0x6666667e, 0x66666600, 0x7e181818, 0x18187e00, # H I
0x3e0c0c0c, 0x0c6c3800, 0x666c7870, 0x786c6600, # J K
0x60606060, 0x60607e00, 0x63777f6b, 0x6b636300, # L M
0x6666767e, 0x6e666600, 0x3c666666, 0x66663c00, # N O
0x7c66667c, 0x60606000, 0x3c666666, 0x6a6c3600, # P Q
0x7c66667c, 0x6c666600, 0x3c66603c, 0x06663c00, # R S
0x7e181818, 0x18181800, 0x66666666, 0x66663c00, # T U
0x66666666, 0x663c1800, 0x63636b6b, 0x7f776300, # V W
0x66663c18, 0x3c666600, 0x6666663c, 0x18181800, # X Y
0x7e060c18, 0x30607e00, 0x7c606060, 0x60607c00, # Z [
0x00603018, 0x0c060000, 0x3e060606, 0x06063e00, # \ ]
0x183c6642, 0x00000000, 0x00000000, 0x000000ff, # ^ _
0x1c36307c, 0x30307e00, 0x00003c06, 0x3e663e00, # ` a
0x60607c66, 0x66667c00, 0x00003c66, 0x60663c00, # b c
0x06063e66, 0x66663e00, 0x00003c66, 0x7e603c00, # d e
0x1c30307c, 0x30303000, 0x00003e66, 0x663e063c, # f g
0x60607c66, 0x66666600, 0x18003818, 0x18183c00, # h i
0x18003818, 0x18181870, 0x6060666c, 0x786c6600, # j k
0x38181818, 0x18183c00, 0x0000367f, 0x6b6b6300, # l m
0x00007c66, 0x66666600, 0x00003c66, 0x66663c00, # n o
0x00007c66, 0x667c6060, 0x00003e66, 0x663e0607, # p q
0x00006c76, 0x60606000, 0x00003e60, 0x3c067c00, # r s
0x30307c30, 0x30301c00, 0x00006666, 0x66663e00, # t u
0x00006666, 0x663c1800, 0x0000636b, 0x6b7f3600, # v w
0x0000663c, 0x183c6600, 0x00006666, 0x663e063c, # x y
0x00007e0c, 0x18307e00, 0x0c181870, 0x18180c00, # z {
0x18181800, 0x18181800, 0x3018180e, 0x18183000, # | }
0x00000018, 0x18000000, 0x00000018, 0x18000000, # n/a
],
}
class OPCText(object):
"""
This implementation assumes an 8x8 pixel grid per character, with one
byte per row.
"""
def __init__(self, typeface):
self.typeface = typeface
def drawHalfChar(self, matrix, x, y, char, offset, fg, bg):
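        # Format as inferred from the code below: each glyph occupies two
        # 32-bit words (bitmaps[2*char] and bitmaps[2*char+1]); a word packs
        # four rows, one byte per row, peeled off low-byte-first, and bit 0
        # of a row byte lands in the right-most column (x+7).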
word = self.typeface["bitmaps"][2*char+offset]
ybase = y + 4*(1-offset)
for window in range(4):
byte = word & 0xff
word = word >> 8
for bit in reversed(range(8)):
if byte & 1 == 1:
matrix.drawPixel(x+bit, ybase + window, fg)
else:
                    matrix.drawPixel(x+bit, ybase + window, bg)
byte = byte >> 1
def drawChar(self, matrix, x, y, char, fg, bg):
char = ord(char) - 32 # printable ASCII starts at index 32
self.drawHalfChar(matrix, x, y, char, 0, fg, bg)
self.drawHalfChar(matrix, x, y, char, 1, fg, bg)
def drawText(self, matrix, x, y, string, fg, bg):
offset = 0
for char in list(string):
xpos = x+offset
if xpos >= -7:
if xpos-7 < matrix.width:
try:
self.drawChar(matrix, xpos, y, char, fg, bg)
except IndexError: # when char has no bitmap
self.drawChar(matrix, xpos, y, chr(127), fg, bg)
else:
return None
offset += 8
return xpos + 8
|
Sriee/epi | data_structures/heaps/frequency_stack.py | Python | gpl-3.0 | 2,003 | 0.001498 | import heapq
from collections import defaultdict, Counter
class FreqStackHeap(object):
"""
    Leetcode solution using a heap. The results are correct, but it times out
    for exceptionally large numbers of inputs.
"""
def __init__(self):
        self._mem = {}
self.heap = []
self._idx = 0
def push(self, x):
self._idx -= 1
if x in self._mem:
_temp = []
while self.heap[0][2] != x:
_temp.append(heapq.heappop(self.heap))
            found = heapq.heappop(self.heap)
self._mem[x][0] += 1
self._mem[x][1].append(found[1])
_temp.append((found[0] - 1, self._idx, found[2]))
while _temp:
heapq.heappush(self.heap, _temp.pop())
else:
self._mem[x] = [1, [self._idx]]
heapq.heappush(self.heap, (-1, self._idx, x))
def pop(self):
top = heapq.heappop(self.heap)
if top[0] + 1 == 0:
del self._mem[top[2]]
else:
self._mem[top[2]][0] -= 1
heapq.heappush(self.heap, (top[0] + 1, self._mem[top[2]][1].pop(), top[2]))
return top[2]
class FreqStack(object):
"""
Leet code solution. Problem -> Accepted.
"""
def __init__(self):
self.freq = Counter()
self.mem = defaultdict(list)
self.max_f = 0
def push(self, x):
self.freq[x] += 1
self.max_f = max(self.max_f, self.freq[x])
self.mem[self.freq[x]].append(x)
def pop(self):
x = self.mem[self.max_f].pop()
if not self.mem[self.max_f]:
self.max_f -= 1
self.freq[x] -= 1
return x
obj = FreqStack()
obj.push(5)
obj.push(1)
obj.push(2)
obj.push(5)
obj.push(5)
obj.push(5)
obj.push(1)
obj.push(6)
obj.push(1)
obj.push(5)
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop()) |
tobias47n9e/social-core | social_core/backends/stackoverflow.py | Python | bsd-3-clause | 1,459 | 0 | """
Stackoverflow OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/stackoverflow.html
"""
from .oauth import BaseOAuth2
class StackoverflowOAuth2(BaseOAuth2):
"""Stackoverflow OAuth2 authentication backend"""
name = 'stackoverflow'
ID_KEY = 'user_id'
AUTHORIZATION_URL = 'https://stackexchange.com/oauth'
ACCESS_TOKEN_URL = 'https://stackexchange.com/oauth/access_token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Sta | ckoverflow account"""
fullname, first_name, last_name = self.get_user_names(
response.get('display_name')
)
return {'username': response.get('link').rsplit('/', 1)[-1],
'full_name': fullname,
'first_name': first_name,
'last_name': last_name}
    def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(
'https://api.stackexchange.com/2.1/me',
params={
'site': 'stackoverflow',
'access_token': access_token,
'key': self.setting('API_KEY')
}
)['items'][0]
def request_access_token(self, *args, **kwargs):
return self.get_querystring(*args, **kwargs)
|
ianj-als/pypeline | src/pypeline/core/types/tests/state_tests.py | Python | gpl-3.0 | 2,775 | 0.002883 | #
# Copyright Applied Language Solutions 2012
#
# This file is part of Pypeline.
#
# Pypeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pypeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pypeline. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
import inspect
from pypeline.core.types.state import State, return_
class StateMonadUnitTest(unittest.TestCase):
def test_single_immutable_state(self):
# Value/state pair
value = 7
state = list()
# Build stuff
m = return_(value)
target = (value, state)
result = State.runState(m, state)
self.assertEquals(target, result)
def test_single_mutable_state(self):
# Value/state pair
value = 7
state = list()
# Build stuff
        m = State(lambda s: (value, s.append(value)))
target = (value, state.append(value))
result = State.runState(m, state)
self.assertEquals(target, result)
def test_many_mutable_state(self):
# Build this:
# state (\s -> (1, s ++ ["Initial value 1"]))
# >>= (\a -> state (\s -> (a * 2, s ++ ["Mult by 2"])))
# >>= (\a -> state (\s -> (a - 9, s ++ ["Minus 9"])))
s_one = "Initial value 1"
s_two = "Multiply by 2"
s_three = "Minus 9"
m_one = State(lambda s: (1, s.append(s_one) or s))
m_two = m_one >= (lambda a: State(lambda s: (a * 2, s.append(s_two) or s)))
m_three = m_two >= (lambda a: State(lambda s: (a - 9, s.append(s_three) or s)))
result = State.runState(m_three, list())
self.assertEquals((-7, [s_one, s_two, s_three]), result)
def test_with_return(self):
value = 7
state = list()
m = return_(value)
result = State.runState(m, state)
self.assertEquals((value, state), result)
def test_eval_state(self):
value = 7
state = list()
m = return_(value)
self.assertEquals(value, State.evalState(m, state))
def test_exec_state(self):
value = 7
state = list()
msg = "*2"
m_one = return_(value)
m_two = m_one >= (lambda a: State(lambda s: (a * 2, s.append(msg) or s)))
self.assertEquals([msg], State.execState(m_two, state))
|
oblique-labs/pyVM | rpython/jit/metainterp/optimizeopt/test/test_intbound.py | Python | mit | 8,982 | 0.003674 | from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \
IntLowerBound, IntUnbounded
from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1
from copy import copy
import sys
from rpython.rlib.rarithmetic import LONG_BIT
def bound(a,b):
if a is None and b is None:
return IntUnbounded()
elif a is None:
return IntUpperBound(b)
elif b is None:
return IntLowerBound(a)
else:
return IntBound(a,b)
def const(a):
return bound(a,a)
def some_bounds():
brd = [None] + range(-2, 3)
for lower in brd:
for upper in brd:
if lower is not None and upper is not None and lower > upper:
continue
yield (lower, upper, bound(lower, upper))
nbr = range(-5, 6)
def test_known():
for lower, upper, b in some_bounds():
inside = []
border = []
for n in nbr:
            if (lower is None or n >= lower) and \
(upper is None or n <= upper):
                if n == lower or n == upper:
                    border.append(n)
else:
inside.append(n)
for n in nbr:
c = const(n)
if n in inside:
assert b.contains(n)
assert not b.known_lt(c)
assert not b.known_gt(c)
assert not b.known_le(c)
assert not b.known_ge(c)
elif n in border:
assert b.contains(n)
if n == upper:
assert b.known_le(const(upper))
else:
assert b.known_ge(const(lower))
else:
assert not b.contains(n)
some = (border + inside)[0]
if n < some:
assert b.known_gt(c)
else:
assert b.known_lt(c)
def test_make():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
lt = IntUnbounded()
lt.make_lt(b1)
lt.make_lt(b2)
for n in nbr:
c = const(n)
if b1.known_le(c) or b2.known_le(c):
assert lt.known_lt(c)
else:
assert not lt.known_lt(c)
assert not lt.known_gt(c)
assert not lt.known_ge(c)
gt = IntUnbounded()
gt.make_gt(b1)
gt.make_gt(b2)
for n in nbr:
c = const(n)
if b1.known_ge(c) or b2.known_ge(c):
assert gt.known_gt(c)
else:
assert not gt.known_gt(c)
assert not gt.known_lt(c)
assert not gt.known_le(c)
le = IntUnbounded()
le.make_le(b1)
le.make_le(b2)
for n in nbr:
c = const(n)
if b1.known_le(c) or b2.known_le(c):
assert le.known_le(c)
else:
assert not le.known_le(c)
assert not le.known_gt(c)
assert not le.known_ge(c)
ge = IntUnbounded()
ge.make_ge(b1)
ge.make_ge(b2)
for n in nbr:
c = const(n)
if b1.known_ge(c) or b2.known_ge(c):
assert ge.known_ge(c)
else:
assert not ge.known_ge(c)
assert not ge.known_lt(c)
assert not ge.known_le(c)
gl = IntUnbounded()
gl.make_ge(b1)
gl.make_le(b2)
for n in nbr:
c = const(n)
if b1.known_ge(c):
assert gl.known_ge(c)
else:
assert not gl.known_ge(c)
assert not gl.known_gt(c)
if b2.known_le(c):
assert gl.known_le(c)
else:
assert not gl.known_le(c)
assert not gl.known_lt(c)
def test_intersect():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b = copy(b1)
b.intersect(b2)
for n in nbr:
if b1.contains(n) and b2.contains(n):
assert b.contains(n)
else:
assert not b.contains(n)
def test_add():
for _, _, b1 in some_bounds():
for n1 in nbr:
b2 = b1.add(n1)
for n2 in nbr:
c1 = const(n2)
c2 = const(n2 + n1)
if b1.known_le(c1):
assert b2.known_le(c2)
else:
assert not b2.known_le(c2)
if b1.known_ge(c1):
assert b2.known_ge(c2)
else:
assert not b2.known_ge(c2)
if b1.known_lt(c1):
assert b2.known_lt(c2)
else:
assert not b2.known_lt(c2)
if b1.known_gt(c1):
assert b2.known_gt(c2)
else:
assert not b2.known_gt(c2)
def test_add_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.add_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
assert b3.contains(n1 + n2)
    a = bound(2, 4).add_bound(bound(1, 2))
assert not a.contains(2)
assert not a.contains(7)
def test_mul_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.mul_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
assert b3.contains(n1 * n2)
    a = bound(2, 4).mul_bound(bound(1, 2))
assert not a.contains(1)
assert not a.contains(9)
    a = bound(-3, 2).mul_bound(bound(1, 2))
assert not a.contains(-7)
assert not a.contains(5)
assert a.contains(-6)
assert a.contains(4)
    a = bound(-3, 2).mul(-1)
    for i in range(-2, 4):
assert a.contains(i)
assert not a.contains(4)
assert not a.contains(-3)
def test_shift_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
bleft = b1.lshift_bound(b2)
bright = b1.rshift_bound(b2)
for n1 in nbr:
for n2 in range(10):
if b1.contains(n1) and b2.contains(n2):
assert bleft.contains(n1 << n2)
assert bright.contains(n1 >> n2)
def test_shift_overflow():
b10 = IntBound(0, 10)
b100 = IntBound(0, 100)
bmax = IntBound(0, sys.maxint/2)
assert not b10.lshift_bound(b100).has_upper
assert not bmax.lshift_bound(b10).has_upper
assert b10.lshift_bound(b10).has_upper
for b in (b10, b100, bmax, IntBound(0, 0)):
for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)):
#assert not b.lshift_bound(shift_count_bound).has_upper
assert not b.rshift_bound(shift_count_bound).has_upper
def test_div_bound():
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.py_div_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
if n2 != 0:
assert b3.contains(n1 / n2) # Python-style div
    a = bound(2, 4).py_div_bound(bound(1, 2))
assert not a.contains(0)
assert not a.contains(5)
    a = bound(-3, 2).py_div_bound(bound(1, 2))
assert not a.contains(-4)
assert not a.contai |
ianadmu/bolton_bot | bot/common.py | Python | mit | 2,461 | 0.000406 | import random
import os.path
import re
DONT_DELETE = (
"i came back to life on|winnipeg is currently|loud messages|erased"
)
TEAM_MATES = "bolton|leon|ian|leontoast"
USER_TAG = re.compile("<@.*")
CHANNEL_TAG = re.compile("<!.*")
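# Illustrative note (added; not in the original source): these patterns match
# Slack-style mention tokens, e.g. "<@U123ABC>" for users or "<!channel>" for
# channel-wide pings; the example tokens are hypothetical.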
TESTING_CHANNEL = 'bolton-testing'
def is_bolton_mention(msg_text):
return re.search(' ?bolton', msg_text.lower())
def is_bot_message(message):
if 'subtype' in message and message['subtype'] == "bot_message":
return True
return False
def should_add_markov(message):
msg_text = message['text']
if is_bot_message(message):
return False
if (
'attachments' not in message
and not re. | search('markov|bolton', msg_text.lower())
and not re.search(TEAM_MATES, msg_text.lower())
and not contains_tag(msg_text)
):
return True
return False
def should_add_loud(message):
msg_text = message['text']
if (
'user' in mess | age and
not contains_tag(msg_text) and
_is_loud(msg_text)
):
return True
return False
def contains_tag(msg_text):
tokens = msg_text.split()
for token in tokens:
if USER_TAG.match(token) or CHANNEL_TAG.match(token):
return True
return False
def get_target(flag, msg_txt):
token = re.split(flag, msg_txt.lower())
target = ""
if len(token) > 1:
target = _format_target(token[1])
return target
class ResourceManager(object):
def __init__(self, file_name):
with open(os.path.join('./resources', file_name), 'r') as f:
self.responses = f.read().splitlines()
def get_response(self):
return random.choice(self.responses)
def get_all(self):
return ' \n'.join(line for line in self.responses)
def get_count(self):
return len(self.responses)
"""Methods that should only be used from this file"""
def _is_loud(msg_text):
emoji_pattern = re.compile(":.*:")
tokens = msg_text.split()
if len(tokens) < 2:
return False
for token in tokens:
if not (token.isupper() or emoji_pattern.match(token)):
return False
return True
def _format_target(target):
if target == 'me':
return 'you'
elif target == 'yourself' or is_bolton_mention(target):
return 'bolton Efron'
elif '<@' in target:
return target.upper()
else:
return target.title()
|
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/kombu/async/aws/ext.py | Python | mit | 863 | 0 | # -*- coding: utf-8 -*-
"""Amazon boto interface."""
from __future__ import absolute_import, un | icode_literals
try:
import boto
except ImportError: # pragma: no cover
boto = get_regions = ResultSet = RegionInfo = XmlHandler = None
class _void(object):
pass
AWSAut | hConnection = AWSQueryConnection = _void # noqa
class BotoError(Exception):
pass
exception = _void()
exception.SQSError = BotoError
exception.SQSDecodeError = BotoError
else:
from boto import exception
from boto.connection import AWSAuthConnection, AWSQueryConnection
from boto.handler import XmlHandler
from boto.resultset import ResultSet
from boto.regioninfo import RegionInfo, get_regions
__all__ = [
'exception', 'AWSAuthConnection', 'AWSQueryConnection',
'XmlHandler', 'ResultSet', 'RegionInfo', 'get_regions',
]
|
nibrahim/PlasTeX | plasTeX/Packages/ifpdf.py | Python | mit | 135 | 0.007407 | #!/usr/bin/env python
"""
| This package is intentionally empty | . The \ifpdf command is implemented
in the TeX/Primitives package.
"""
|
bountyfunding/bountyfunding | plugin/trac/bountyfunding/trac/bountyfunding.py | Python | agpl-3.0 | 27,871 | 0.007355 | # -*- coding: utf-8 -*-
from pprint import pprint
from genshi.filters import Transformer
from genshi.builder import tag
from trac.core import *
from trac.util.html import html
from trac.web import IRequestHandler, HTTPInternalError
from trac.web.chrome import INavigationContributor, ITemplateProvider, add_stylesheet, add_script, add_warning, add_notice
from trac.web.api import IRequestFilter, ITemplateStreamFilter
from trac.ticket.api import ITicketChangeListener, ITicketManipulator
from trac.prefs import IPreferencePanelProvider
from trac.ticket.model import Ticket
from trac.notification import NotifyEmail
from genshi.template.text import NewTextTemplate
import requests, re
from pkg_resources import resource_filename
#from IPython import embed
# Configuration
DEFAULT_BOUNTYFUNDING_URL='http://localhost:8080'
DEFAULT_TOKEN = 'default'
DEFAULT_MAPPING_READY = ['new', 'accepted', 'reopened']
DEFAULT_MAPPING_STARTED = ['assigned']
DEFAULT_MAPPING_COMPLETED = ['closed']
BOUNTYFUNDING_PATTERN = re.compile("(?:/(?P<ticket>ticket)/(?P<ticket_id>[0-9]+)/(?P<ticket_action>sponsor|update_sponsorship|confirm|validate|pay))|(?:/(?P<bountyfunding>bountyfunding)/(?P<bountyfunding_action>status|email|sync))")
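# Hedged examples of request paths the pattern above is meant to match; the
# ticket id and actions shown are illustrative only:
#   /ticket/42/sponsor    -> ticket_id='42', ticket_action='sponsor'
#   /bountyfunding/status -> bountyfunding_action='status'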
class Sponsorship:
def __init__(self, dictionary={}):
self.amount = dictionary.get('amount', 0)
self.status = dictionary.get('status')
class Email:
def __init__(self, dictionary):
self.id = dictionary.get('id')
self.recipient = dictionary.get('recipient')
self.issue_id = dictionary.get('issue_id')
self.body = dictionary.get('body')
class GenericNotifyEmail(NotifyEmail):
template_name = 'email.txt'
def __init__(self, env, recipient, body, link):
NotifyEmail.__init__(self, env)
self.recipient = recipient
self.data = {
'body': body,
'link': link,
'project_name': env.project_name,
'project_url': env.project_url or self.env.abs_href(),
}
def get_recipients(self, resid):
return ([self.recipient], [])
def sum_amounts(sponsorships, statuses=None):
if statuses != None:
sponsorships = [s for s in sponsorships if s.status in statuses]
total_amount = sum(map(lambda s: s.amount, sponsorships))
return total_amount
class BountyFundingPlugin(Component):
implements(ITemplateStreamFilter, IRequestFilter, IRequestHandler, ITemplateProvider, ITicketChangeListener, ITicketManipulator, IPreferencePanelProvider)
def __init__(self):
self.configure()
def configure(self):
self.bountyfunding_url = self.config.get('bountyfunding', 'url', DEFAULT_BOUNTYFUNDING_URL)
self.token = self.config.get('bountyfunding', 'token', DEFAULT_TOKEN)
self.status_mapping = {}
for m in self.get_config_array(
'bountyfunding', 'status_mapping_ready', DEFAULT_MAPPING_READY):
self.status_mapping[m] = 'READY'
for m in self.get_config_array(
'bountyfunding', 'status_mapping_started', DEFAULT_MAPPING_STARTED):
self.status_mapping[m] = 'STARTED'
for m in self.get_config_array(
'bountyfunding', 'status_mapping_completed', DEFAULT_MAPPING_COMPLETED):
self.status_mapping[m] = 'COMPLETED'
def get_config_array(self, section, option, default):
value = self.config.get(section, option, None)
if value != None:
return [v.strip() for v in value.split(",")]
else:
return default
def call_api(self, method, path, **kwargs):
url = self.bountyfunding_url + path
params = kwargs
params['token'] = self.token
try:
response = requests.request(method, url, params=kwargs)
except requests.exceptions.ConnectionError:
self.log.warn("Error connecting to BountyFunding")
response = None
return response
def convert_status(self, status):
return self.status_mapping[status]
def get_sponsorships(self, ticket_id):
sponsorships = {}
request = self.call_api('GET', '/issue/%s/sponsorships' % ticket_id)
if request.status_code == 200:
sponsorships = dict(map(lambda (k,v): (k, Sponsorship(v)), request.json().items()))
return sponsorships
#TODO: not entirely safe from race conditions, fix it
def update_ticket(self, ticket, refresh_amount=True, author=None, comment=None):
update = (comment != None)
if refresh_amount:
sponsorships = self.get_sponsorships(ticket.id)
amount = sum_amounts(sponsorships.values())
if amount == 0:
if ticket["bounty"]:
ticket["bounty"] = None
update = True
else:
amount = u"%d\u20ac" % amount
if ticket["bounty"] != amount:
ticket["bounty"] = amount
update = True
if update:
ticket.save_changes(author, comment)
return update
def update_api_ticket(self, ticket):
r = self.call_api('GET', '/issue/%s' % ticket.id)
if r.status_code != 200:
return False
api_ticket = r.json()
title = ticket['summary']
status = self.convert_status(ticket['status'])
owner = ticket['owner']
changes = {}
if title != api_ticket.get('title'):
changes['title'] = title
if status != api_ticket.get('status'):
changes['status'] = status
if owner != api_ticket.get('owner'):
changes['owner'] = owner
if changes:
self.call_api('PUT', '/issue/%s' % ticket.id, **changes)
return True
return False
def get_link(self, ticket_id):
return '/ticket/%s' % ticket_id
def send_email(self, recipient, ticket_id, body):
ticket = Ticket(self.env | , ticket_id)
subject = self.format_email_subject(t | icket)
link = self.env.abs_href.ticket(ticket_id)
email = GenericNotifyEmail(self.env, recipient, body, link)
email.notify('', subject)
def format_email_subject(self, ticket):
template = self.config.get('notification','ticket_subject_template')
template = NewTextTemplate(template.encode('utf8'))
prefix = self.config.get('notification', 'smtp_subject_prefix')
if prefix == '__default__':
prefix = '[%s]' % self.env.project_name
data = {
'prefix': prefix,
'summary': ticket['summary'],
'ticket': ticket,
'env': self.env,
}
return template.generate(**data).render('text', encoding=None).strip()
# ITemplateStreamFilter methods
def filter_stream(self, req, method, filename, stream, data):
"""
        Quick and dirty solution - modify the page on the fly to inject a special field. It would be
        nicer if we could do this by creating a custom field, as this approach depends on the page structure.
"""
#embed(header='Ticket Stream Filter')
if filename == 'ticket.html':
# Disable any direct bounty input
filter = Transformer('.//input[@id="field-bounty"]')
stream |= filter.attr("disabled", "disabled")
ticket = data.get('ticket')
if ticket and ticket.exists:
identifier = ticket.id
user = req.authname if req.authname != 'anonymous' else None
request = self.call_api('GET', '/issue/%s' % identifier)
fragment = tag()
sponsorships = {}
status = self.convert_status(ticket.values['status'])
owner = ticket.values['owner']
tooltip = None
if request != None and (request.status_code == 200 or request.status_code == 404):
sponsorships = self.get_sponsorships(identifier)
pledged_amount = sum_amounts(sponsorships.values())
|
bxlab/hifive | hifive/fivec_binning.py | Python | mit | 71,263 | 0.003705 | #!/usr/bin/env python
"""
This module contains scripts for generating compact, upper-triangle and full matrices of 5C interaction data.
Concepts
--------
Data can either be arranged in compact, complete, or flattened (row-major) upper-triangle arrays. Compact arrays are N x M, where N is the number of forward probe fragments and M is the number of reverse probe fragments. Data can be raw, fragment-corrected, distance-dependence removed, or enrichment values. Arrays are 3-dimensional with observed values in the first layer of d3, expected values in the second layer of d3. The exception to this is upper-triangle arrays, which are 2d, dividing observed and expected along the second axis.
API documentation
-----------------
"""
import os
import sys
import subprocess
import numpy
import h5py
import libraries._fivec_binning as _fivec_binning
def find_cis_signal(fivec, region, binsize=0, binbounds=None, start=None, stop=None, startfrag=None, stopfrag=None,
datatype='enrichment', arraytype='full', skipfiltered=False, returnmapping=False, **kwargs):
"""
Create an array of format 'arraytype' and fill with data requested in 'datatype'.
:param fivec: A :class:`FiveC <hifive.fivec.FiveC>` class object containing fragment and count data.
:type fivec: :class:`FiveC <hifive.fivec.FiveC>`
:param region: The index of the region to pull data from.
:type region: int.
:param binsize: This is the coordinate width of each bin. A value of zero indicates unbinned. If binbounds is not None, this value is ignored.
:type binsize: int.
:param binbounds: An arr | ay containing start and stop coordinates for a set of user-defined bins. Any fragment not falling in a bin is ignored.
:type binbounds: numpy array
:param start: The smallest coordinate to include in the array, measured from fragment midpoints. If 'binbounds' is given, this value is ignored. If both 'start' and 'startfrag' are given, 'start' will override 'startfrag'. If unspecified, this will be set to the midpoint of the first f | ragment for 'region', adjusted to the first multiple of 'binsize' if not zero. Optional.
:type start: int.
:param stop: The largest coordinate to include in the array, measured from fragment midpoints. If 'binbounds' is given, this value is ignored. If both 'stop' and 'stopfrag' are given, 'stop' will override 'stopfrag'. If unspecified, this will be set to the midpoint of the last fragment plus one for 'region', adjusted to the last multiple of 'start' + 'binsize' if not zero. Optional.
:type stop: int.
:param startfrag: The first fragment to include in the array. If 'binbounds' is given, this value is ignored. If unspecified and 'start' is not given, this is set to the first fragment in 'region'. In cases where 'start' is specified and conflicts with 'startfrag', 'start' is given preference. Optional.
:type startfrag: int.
:param stopfrag: The first fragment not to include in the array. If 'binbounds' is given, this value is ignored. If unspecified and 'stop' is not given, this is set to the last fragment in 'region' plus one. In cases where 'stop' is specified and conflicts with 'stopfrag', 'stop' is given preference. Optional.
:type stopfrag: int.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fragment', 'enrichment', and 'expected'. Observed values are aways in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fragments return value of one. Expected values are returned for 'distance', 'fragment', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fragment' uses only fragment correction values, and both 'enrichment' and 'expected' use both correction and distance mean values. 'enrichment' also scales both observed and expected by the standard deviation, giving a completely normalized set of values.
:type datatype: str.
:param arraytype: This determines what shape of array data are returned in. Acceptable values are 'compact' (though only when 'binned' is zero), 'full', and 'upper'. 'compact' means data are arranged in a N x M x 2 array where N and M are the number of forward and reverse probe fragments, respectively. 'full' returns a square, symmetric array of size N x N x 2 where N is the total number of fragments. 'upper' returns only the flattened upper triangle of a full array, excluding the diagonal of size (N * (N - 1) / 2) x 2, where N is the total number of fragments.
:type arraytype: str.
:param skipfiltered: If 'True', all interaction bins for filtered out fragments are removed and a reduced-size array is returned.
:type skipfiltered: bool.
:param returnmapping: If 'True', a list containing the data array and either one or two 2d arrays containing first coordinate included and excluded from each bin, and the first fragment included and excluded from each bin corresponding to both axes or the first and second axis for an upper or compact array, respectively, is returned. Otherwise only the data array is returned.
:type returnmapping: bool.
:returns: Array in format requested with 'arraytype' containing data requested with 'datatype'.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# check that all values are acceptable
if datatype not in ['raw', 'fragment', 'distance', 'enrichment', 'expected']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
elif datatype in ['fragment', 'enrichment'] and fivec.normalization == 'none':
if not silent:
print >> sys.stderr, ("Normalization has not been performed yet on this project. Select either 'raw' or 'distance' for datatype. No data returned\n"),
return None
elif datatype in ['distance', 'enrichment'] and fivec.gamma is None:
fivec.find_distance_parameters()
if arraytype not in ['full', 'compact', 'upper']:
if not silent:
print >> sys.stderr, ("Unrecognized array type. No data returned.\n"),
return None
if arraytype == 'compact' and (binsize > 0 or not binbounds is None):
if not silent:
print >> sys.stderr, ("'Compact' array can only be used with unbinned data. No data returned.\n"),
return None
# Determine start, stop, startfrag, and stopfrag
chrom = fivec.frags['regions']['chromosome'][region]
chrint = fivec.chr2int[chrom]
if not binbounds is None:
start = binbounds[0, 0]
stop = binbounds[-1, 1]
startfrag = _find_frag_from_coord(fivec, chrint, start)
stopfrag = _find_frag_from_coord(fivec, chrint, stop)
else:
if start is None and startfrag is None:
startfrag = fivec.frags['regions']['start_frag'][region]
start = fivec.frags['fragments']['mid'][startfrag]
if binsize > 0:
start = (start / binsize) * binsize
elif start is None:
start = fivec.frags['fragments']['mid'][startfrag]
if binsize > 0:
start = (start / binsize) * binsize
else:
startfrag = _find_frag_from_coord(fivec, chrint, start)
if (stop is None or stop == 0) and stopfrag is None:
stopfrag = fivec.frags['regions']['stop_frag'][region]
stop = fivec.frags['fragments']['mid'][stopfrag - 1] + 1
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
elif stop is None or stop == 0:
stop = fivec.frags['fragments']['mid'][stopfrag - 1] + 1
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
else:
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
stopfrag = _find_frag_from_coord(fivec, chrint, stop)
|
OCA/l10n-italy | l10n_it_fiscalcode/model/res_company.py | Python | agpl-3.0 | 266 | 0 | # L | icense AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ResCompany(models.Model):
_inherit = "res.company"
fiscalcode = fields.Char(
related="partner_id.fiscalcode", store=True, readon | ly=False
)
|
sdpython/pyquickhelper | src/pyquickhelper/pycode/utils_tests_helper.py | Python | mit | 20,810 | 0.001778 | """
@file
@brief This extension contains various functionalities to help unittesting.
"""
import os
import stat
import sys
import re
import warnings
import time
import importlib
from contextlib import redirect_stdout, redirec | t_stderr
from io import StringIO
| def _get_PyLinterRunV():
# Separate function to speed up import.
from pylint.lint import Run as PyLinterRun
from pylint import __version__ as pylint_version
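    # Newer pylint releases accept the 'do_exit' keyword on Run(), while older
    # ones used 'exit'; the version check below hides that difference.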
if pylint_version >= '2.0.0':
PyLinterRunV = PyLinterRun
else:
PyLinterRunV = lambda *args, do_exit=False: PyLinterRun( # pylint: disable=E1120, E1123
*args, exit=do_exit) # pylint: disable=E1120, E1123
return PyLinterRunV
def get_temp_folder(thisfile, name=None, clean=True, create=True,
persistent=False, path_name="tpath"):
"""
Creates and returns a local temporary folder to store files
when unit testing.
@param thisfile use ``__file__`` or the function which runs the test
@param name name of the temporary folder
@param clean if True, clean the folder first, it can also a function
called to determine whether or not the folder should be
cleaned
@param create if True, creates it (empty if clean is True)
@param persistent if True, create a folder at root level to reduce path length,
the function checks the ``MAX_PATH`` variable and
                        shortens the test folder if *max_path* is True on :epkg:`Windows`;
                        on :epkg:`Linux`, it creates the folder several levels up the tree
@param path_name test path used when *max_path* is True
@return temporary folder
    The function extracts the file which runs this test and names
    the temporary folder based on the name of the method when *name* is None.
Parameter *clean* can be a function.
Signature is ``def clean(folder)``.
"""
if name is None:
name = thisfile.__name__
if name.startswith("test_"):
name = "temp_" + name[5:]
elif not name.startswith("temp_"):
name = "temp_" + name
thisfile = os.path.abspath(thisfile.__func__.__code__.co_filename)
final = os.path.split(name)[-1]
if not final.startswith("temp_") and not final.startswith("temp2_"):
raise NameError( # pragma: no cover
"the folder '{0}' must begin with temp_".format(name))
local = os.path.join(
os.path.normpath(os.path.abspath(os.path.dirname(thisfile))), name)
if persistent:
if sys.platform.startswith("win"): # pragma: no cover
from ctypes.wintypes import MAX_PATH
if MAX_PATH <= 300:
local = os.path.join(os.path.abspath("\\" + path_name), name)
else:
local = os.path.join(
local, "..", "..", "..", "..", path_name, name)
else:
local = os.path.join(local, "..", "..", "..",
"..", path_name, name)
local = os.path.normpath(local)
if name == local:
raise NameError( # pragma: no cover
"The folder '{0}' must be relative, not absolute".format(name))
if not os.path.exists(local):
if create:
os.makedirs(local)
mode = os.stat(local).st_mode
nmode = mode | stat.S_IWRITE
if nmode != mode:
os.chmod(local, nmode) # pragma: no cover
else:
if (callable(clean) and clean(local)) or (not callable(clean) and clean):
# delayed import to speed up import time of pycode
from ..filehelper.synchelper import remove_folder
remove_folder(local)
time.sleep(0.1)
if create and not os.path.exists(local):
os.makedirs(local)
mode = os.stat(local).st_mode
nmode = mode | stat.S_IWRITE
if nmode != mode:
os.chmod(local, nmode) # pragma: no cover
return local
def _extended_refactoring(filename, line): # pragma: no cover
"""
Private function which does extra checkings
when refactoring :epkg:`pyquickhelper`.
@param filename filename
@param line line
@return None or error message
"""
if "from pyquickhelper import fLOG" in line:
if "test_code_style" not in filename:
return "issue with fLOG"
if "from pyquickhelper import noLOG" in line:
if "test_code_style" not in filename:
return "issue with noLOG"
if "from pyquickhelper import run_cmd" in line:
if "test_code_style" not in filename:
return "issue with run_cmd"
if "from pyquickhelper import get_temp_folder" in line:
if "test_code_style" not in filename:
return "issue with get_temp_folder"
return None
class PEP8Exception(Exception):
"""
Code or style issues.
"""
pass
def check_pep8(folder, ignore=('E265', 'W504'), skip=None,
complexity=-1, stop_after=100, fLOG=None,
pylint_ignore=('C0103', 'C1801',
'R0201', 'R1705',
'W0108', 'W0613',
'W0107', 'C0415',
'C0209'),
recursive=True, neg_pattern=None, extended=None,
max_line_length=143, pattern=".*[.]py$",
run_lint=True, verbose=False, run_cmd_filter=None):
"""
Checks if :epkg:`PEP8`,
the function calls command :epkg:`pycodestyle`
on a specific folder.
@param folder folder to look into
@param ignore list of warnings to skip when raising an exception if
:epkg:`PEP8` is not verified, see also
`Error Codes <http://pep8.readthedocs.org/en/latest/intro.html#error-codes>`_
@param pylint_ignore ignore :epkg:`pylint` issues, see
:epkg:`pylint error codes`
@param complexity see `check_file <https://pycodestyle.pycqa.org/en/latest/api.html>`_
@param stop_after stop after *stop_after* issues
@param skip skip a warning if a substring in this list is found
@param neg_pattern skip files verifying this regular expressions
@param extended list of tuple (name, function), see below
@param max_line_length maximum allowed length of a line of code
@param recursive look into subfolder
@param pattern only file matching this pattern will be checked
@param run_lint run :epkg:`pylint`
@param verbose :epkg:`pylint` is slow, tells which file is
investigated (but it is even slower)
@param run_cmd_filter some files makes :epkg:`pylint` crashes (``import yaml``),
the test for this is run in a separate process
if the function *run_cmd_filter* returns True of the filename,
*verbose* is set to True in that case
@param fLOG logging function
@return output
    Functions mentioned in *extended* take two parameters (file name and line)
    and return None, an error message, or a tuple (position in the line, error message).
When the return is not empty, a warning will be added to the ones
printed by :epkg:`pycodestyle`.
A few codes to ignore:
* *E501*: line too long (?? characters)
* *E265*: block comments should have a space after #
* *W504*: line break after binary operator, this one is raised
after the code is modified by @see fn remove_extra_spaces_and_pep8.
The full list is available at :epkg:`PEP8 codes`. In addition,
the function |
safchain/contrail-sandesh | library/python/pysandesh/test/test_utils.py | Python | apache-2.0 | 258 | 0.011628 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import socket
def ge | t_free_port():
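    # Bind to port 0 so the OS assigns a free ephemeral port; note the port may
    # be grabbed by another process between close() and the caller's own bind.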
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
po | rt = sock.getsockname()[1]
sock.close()
return port
|
TheAlgorithms/Python | digital_image_processing/histogram_equalization/histogram_stretch.py | Python | mit | 1,894 | 0.000528 | """
Created on Fri Sep 28 15:22:29 2018
@author: Binish125
"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class contrastStretch:
def __init__(self):
self.img = ""
self.original_image = ""
self.last_list = []
self.rem = 0
self.L = 256
self.sk = 0
self.k = 0
self.number_of_rows = 0
self.number_of_cols = 0
def stretch(self, input_image):
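        # Histogram equalization: accumulate the normalized histogram into a
        # cumulative distribution (self.sk) and map each grey level through
        # (L - 1) * CDF, rounding to the nearest level.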
self.img = cv2.imread(input_image, 0)
self.original_image = copy.deepcopy(self.img)
x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
self.k = np.sum(x)
for i in range(len(x)):
prk = x[i] / self.k
self.sk += prk
last = (self.L - 1) * self.sk
            self.rem = last - int(last)  # fractional part of last, for rounding
            last = int(last) + 1 if self.rem >= 0.5 else int(last)
self.last_list.append(last)
self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols):
for j in range(self.number_of_rows):
num = self.img[j][i]
if num != self.last_list[num]:
self.img[j][i] = self.last_list[num]
cv2.imwrite("output_data/output.jpg", self.img)
def p | lotHistogram(self):
plt.hist(self.img.ravel(), 256, [0, 256])
def showImage(self):
cv2.imshow("Output-Image", self.img)
cv2.imshow("Input-Image", self.original_image)
cv2.waitKey(5000)
c | v2.destroyAllWindows()
if __name__ == "__main__":
file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
stretcher = contrastStretch()
stretcher.stretch(file_path)
stretcher.plotHistogram()
stretcher.showImage()
|
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/tornadoredis/backports.py | Python | gpl-2.0 | 6,201 | 0.000968 | from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
    # To strip negative and zero counts, add in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
| for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
|
codyparker/channels-obstruction | game/views/api_views.py | Python | mit | 2,229 | 0.000897 | from rest_framework.views import APIView
from rest_framework import viewsets
from game.serializers import *
from rest_framework.response import Response
from game.models import *
from rest_framework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404
from django.http import Http404
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class PlayerGameViewSet(viewsets.ViewSet):
"""
API endpoint for player games
"""
def list(self, request):
queryset = Game.get_games_for_player(self.request.user)
serializer = GameSerializer(
queryset, many=True, context={'request': request})
return Response(serializer.data)
class AvailableGameViewSet(viewsets.ViewSet):
"""
| API endpoint for available/open games
"""
def list(self, request):
queryset = Game.get_available_games()
serializer = GameSerializer(queryset, many=True)
return Response(serializer.data)
class CurrentUserView(APIView):
def get(self, request):
serializer = UserSerializer(request.user)
| return Response(serializer.data)
class SingleGameViewSet(APIView):
"""
Get all data for a game: Game Details, Squares, & Log
"""
def get(self, request, **kwargs):
game = Game.get_by_id(kwargs['game_id'])
log = game.get_game_log()
squares = game.get_all_game_squares()
game_serializer = GameSerializer(game)
log_serializer = GameLogSerializer(log, many=True)
square_serializer = GameSquareSerializer(squares, many=True)
return_data = {'game': game_serializer.data,
'log': log_serializer.data,
'squares': square_serializer.data}
return Response(return_data)
class GameSquaresViewSet(viewsets.ViewSet):
def retrieve(self, request, pk=None):
game = get_object_or_404(Game, pk=pk)
squares = game.get_all_game_squares()
serializer = GameSquareSerializer(squares, many=True)
return Response(serializer.data)
|
Otend/backlog | work.py | Python | bsd-2-clause | 254 | 0.003937 | # -*- coding: utf-8 -*-
#import random
import time
class Work():
def __init__(self, name):
self.n | ame = name
self.addedTime = time.time()
self.wasViewed = False
def changeName(self, newName):
self.name = newNam | e
|
dand-oss/transit-python | tests/seattle_benchmark.py | Python | apache-2.0 | 1,590 | 0 | # Copyright 2014 Cognitect. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permission | s and
# limitations under the License.
from transit.reader import JsonUnmarshaler
import json
import time
from StringIO import StringIO
def run_tests(data):
datas = StringIO(data)
t = time.time()
JsonUnmarshaler().load(datas)
et = time.time()
datas = StringIO(data)
tt = time.time()
json.load(datas)
| ett = time.time()
read_delta = (et - t) * 1000.0
print('Done: {} -- raw JSON in: {}'.format(
read_delta, (ett - tt) * 1000.0))
return read_delta
seattle_dir = '../transit-format/examples/0.8/'
means = {}
for jsonfile in ['{}example.json'.format(seattle_dir),
'{}example.verbose.json'.format(seattle_dir)]:
data = ''
with open(jsonfile, 'r') as fd:
data = fd.read()
print('-'*50)
print('Running {}'.format(jsonfile))
print('-'*50)
runs = 100
deltas = [run_tests(data) for x in range(runs)]
means[jsonfile] = sum(deltas)/runs
for jsonfile, mean in means.items():
    print('\nMean for {}: {}'.format(jsonfile, mean))
|
jdereus/labman | labcontrol/gui/handlers/process_handlers/test/test_gdna_extraction_process.py | Python | bsd-3-clause | 2,304 | 0 | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from tornado.escape import json_encode, json_decode
from labcontrol.gui.testing import TestHandlerBase
class TestGDNAExtractionProcessHandlers(TestHandlerBase):
def test_get_gdna_extraction_process_handler(self):
response = self.get('/process/gdna_extraction')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/process/gdna_extraction?plate_id=21')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/process/gdna_extraction?plate_id=21&plate_id=22')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/process/gdna_extraction?process_id=1')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/process/gdna_extraction?process_id=1000')
self.assertEqual(response.code, 404)
def test_post_gdna_extraction_process_handler(self):
data = {'extraction_date': '01/20/2018', 'volume': 10,
'plates_info': json_encode(
| [['21', False, 11, 6, 15, '157022406',
'new gdna plate', None]])}
response = self.post('/process/gdna_extraction', data)
self.assertEqual(response.code, 200)
self.assertCountEqual(json_decode(response.body), ['proc | esses'])
data = {'extraction_date': '01/20/2018', 'volume': 10,
'plates_info': json_encode(
[['21', True, None, None, None, None,
'externally extracted gdna plate',
'Extracted externally']])}
response = self.post('/process/gdna_extraction', data)
self.assertEqual(response.code, 200)
self.assertCountEqual(json_decode(response.body), ['processes'])
if __name__ == '__main__':
main()
|
jptomo/rpython-lang-scheme | rpython/jit/codewriter/test/test_call.py | Python | mit | 11,707 | 0.002306 | import py
from rpython.flowspace.model import SpaceOperation, Constant, Variable
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.translator.unsimplify import varoftype
from rpython.rlib import jit
from rpython.jit.codewriter import support, call
from rpython.jit.codewriter.call import CallControl
from rpython.jit.codewriter.effectinfo import EffectInfo
class FakePolicy:
def look_inside_graph(self, graph):
return True
def test_graphs_from_direct_call():
cc = CallControl()
F = lltype.FuncType([], lltype.Signed)
f = lltype.functionptr(F, 'f', graph='fgraph')
v = varoftype(lltype.Signed)
op = SpaceOperation('direct_call', [Constant(f, lltype.Ptr(F))], v)
#
lst = cc.graphs_from(op, {}.__contains__)
assert lst is None # residual call
#
lst = cc.graphs_from(op, {'fgraph': True}.__contains__)
assert lst == ['fgraph'] # normal call
def test_graphs_from_indirect_call():
cc = CallControl()
F = lltype.FuncType([], lltype.Signed)
v = varoftype(lltype.Signed)
graphlst = ['f1graph', 'f2graph']
op = SpaceOperation('indirect_call', [varoftype(lltype.Ptr(F)),
Constant(graphlst, lltype.Void)], v)
#
lst = cc.graphs_from(op, {'f1graph': True, 'f2graph': True}.__contains__)
assert lst == ['f1graph', 'f2graph'] # normal indirect call
#
lst = cc.graphs_from(op, {'f1graph': True}.__contains__)
assert lst == ['f1graph'] # indirect call, look only inside some graphs
#
lst = cc.graphs_from(op, {}.__contains__)
assert lst is None # indirect call, don't look inside any graph
def test_graphs_from_no_target():
cc = CallControl()
F = lltype.FuncType([], lltype.Signed)
v = varoftype(lltype.Signed)
op = SpaceOperation('indirect_call', [varoftype(lltype.Ptr(F)),
Constant(None, lltype.Void)], v)
lst = cc.graphs_from(op, {}.__contains__)
assert lst is None
# ____________________________________________________________
class FakeJitDriverSD:
def __init__(self, portal_graph):
self.portal_graph = portal_graph
self.portal_runner_ptr = "???"
def test_find_all_graphs():
def g(x):
return x + 2
def f(x):
return g(x) + 1
rtyper = support.annotate(f, [7])
jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
cc = CallControl(jitdrivers_sd=[jitdriver_sd])
res = cc.find_all_graphs(FakePolicy())
funcs = set([graph.func for graph in res])
assert funcs == set([f, g])
def test_find_all_graphs_without_g():
def g(x):
return x + 2
def f(x):
return g(x) + 1
rtyper = support.annotate(f, [7])
jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
cc = CallControl(jitdrivers_sd=[jitdriver_sd])
class CustomFakePolicy:
def look_inside_graph(self, graph):
assert graph.name == 'g'
return False
res = cc.find_all_graphs(CustomFakePolicy())
funcs = [graph.func for graph in res]
assert funcs == [f]
# ____________________________________________________________
def test_guess_call_kind_and_calls_from_graphs():
class portal_runner_obj:
graph = object()
class FakeJitDriverSD:
portal_runner_ptr = portal_runner_obj
g = object()
g1 = object()
cc = CallControl(jitdrivers_sd=[FakeJitDriverSD()])
cc.candidate_graphs = [g, g1]
op = SpaceOperation('direct_call', [Constant(portal_runner_obj)],
Variable())
assert cc.guess_call_kind(op) == 'recursive'
class fakeresidual:
_obj = object()
op = SpaceOperation('direct_call', [Constant(fakeresidual)],
Variable())
assert cc.guess_call_kind(op) == 'residual'
class funcptr:
class _obj:
class graph:
class func:
oopspec = "spec"
op = SpaceOperation('direct_call', [Constant(funcptr)],
Variable())
assert cc.guess_call_kind(op) == 'builtin'
class funcptr:
class _obj:
graph = g
op = SpaceOperation('direct_call', [Constant(funcptr)],
Variable())
res = cc.graphs_from(op)
assert res == [g]
assert cc.guess_call_kind(op) == 'regular'
class funcptr:
class _obj:
graph = object()
op = SpaceOperation('direct_call', [Constant(funcptr)],
Variable())
res = cc.graphs_from(op)
assert res is None
assert cc.guess_call_kind(op) == 'residual'
h = object()
op = SpaceOperation('indirect_call', [Variable(),
Constant([g, g1, h])],
Variable())
res = cc.graphs_from(op)
assert res == [g, g1]
assert cc.guess_call_kind(op) == 'regular'
op = SpaceOperation('indirect_call', [Variable(),
Constant([h])],
Variable())
res = cc.graphs_from(op)
assert res is None
assert cc.guess_call_kind(op) == 'residual'
# ____________________________________________________________
def test_get_jitcode(monkeypatch):
from rpython.jit.codewriter.test.test_flatten import FakeCPU
class FakeRTyper:
class annotator:
translator = None
def getfunctionptr(graph):
F = lltype.FuncType([], lltype.Signed)
return lltype.functionptr(F, 'bar')
monkeypatch.setattr(call, 'getfunctionptr', getfunctionptr)
cc = CallControl(FakeCPU(FakeRTyper()))
class somegraph:
name = "foo"
jitcode = cc.get_jitcode(somegraph)
assert jitcode is cc.get_jitcode(somegraph) # caching
assert jitcode.name == "foo"
pending = list(cc.enum_pending_graphs())
assert pending == [(somegraph, jitcode)]
# ____________________________________________________________
def test_jit_force_virtualizable_effectinfo():
py.test.skip("XXX add a test for CallControl.getcalldescr() -> EF_xxx")
def test_releases_gil_analyzer():
from rpython.jit.backend.llgraph.runner import LLGraphCPU
T = rffi.CArrayPtr(rffi.TIME_T)
external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True)
@jit.dont_look_in | side
def f():
return external(lltype.nullptr(T.TO))
rtyper = support.annotate(f, [])
jitdriver_sd = FakeJitDriverSD(rtyper.annotator.t | ranslator.graphs[0])
cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
res = cc.find_all_graphs(FakePolicy())
[f_graph] = [x for x in res if x.func is f]
[block, _] = list(f_graph.iterblocks())
[op] = block.operations
call_descr = cc.getcalldescr(op)
assert call_descr.extrainfo.has_random_effects()
assert call_descr.extrainfo.is_call_release_gil() is False
def test_call_release_gil():
from rpython.jit.backend.llgraph.runner import LLGraphCPU
T = rffi.CArrayPtr(rffi.TIME_T)
external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True,
save_err=rffi.RFFI_SAVE_ERRNO)
# no jit.dont_look_inside in this test
def f():
return external(lltype.nullptr(T.TO))
rtyper = support.annotate(f, [])
jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0])
cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd])
res = cc.find_all_graphs(FakePolicy())
[llext_graph] = [x for x in res if x.func is external]
[block, _] = list(llext_graph.iterblocks())
[op] = block.operations
tgt_tuple = op.args[0].value._obj.graph.func._call_aroundstate_target_
assert type(tgt_tuple) is tuple and len(tgt_tuple) == 2
call_target, saveerr = tgt_tuple
assert saveerr == rffi.RFFI_SAVE_ERRNO
call_target = llmemory.cast_ptr_to_adr(call_target)
call_descr = cc.getcalldescr(op)
assert call_descr.extrainfo.has_random_effects()
assert call_descr.extrainfo.is_call_release_gil() is True
assert call_descr.extrainfo.call_release_gil_target == (
call_target, rffi.RFFI_SAVE_ERRNO)
def test_rand |
PeterHo/mysite | scoreboard/migrations/0002_auto_20160204_0121.py | Python | apache-2.0 | 892 | 0.002242 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('scoreboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='score',
name='loser',
| field=models.CharField(default=b'H', max_length=50, verbose_name=b'\xe8\xb4\x9f\xe8\x80\x85', choices=[(b'\xe4\xbd\x95', b'\xe4\xb | d\x95'), (b'\xe4\xbd\x99', b'\xe4\xbd\x99'), (b'\xe5\xbc\xa0', b'\xe5\xbc\xa0')]),
),
migrations.AlterField(
model_name='score',
name='winner',
field=models.CharField(default=b'H', max_length=50, verbose_name=b'\xe8\x83\x9c\xe8\x80\x85', choices=[(b'\xe4\xbd\x95', b'\xe4\xbd\x95'), (b'\xe4\xbd\x99', b'\xe4\xbd\x99'), (b'\xe5\xbc\xa0', b'\xe5\xbc\xa0')]),
),
]
|
jtoppins/beaker | Server/bkr/server/tests/data_setup.py | Python | gpl-2.0 | 38,889 | 0.005863 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import logging
import re
import os
import time
import datetime
import uuid
import itertools
import netaddr
import lxml.etree
import inspect
import unittest
import mock
from sqlalchemy.orm.exc import NoResultFound
import turbogears.config, turbogears.database
from turbogears.database import session, metadata
from bkr.server.bexceptions import DatabaseLookupError
from bkr.server.model import LabController, User, Group, UserGroup, \
Distro, DistroTree, Arch, OSMajor, OSVersion, \
SystemActivity, Task, MachineRecipe, System, \
SystemType, SystemStatus, Recipe, RecipeTask, RecipeTaskResult, \
Device, TaskResult, TaskStatus, Job, RecipeSet, TaskPriority, \
LabControllerDistroTree, Power, PowerType, \
Permission, RetentionTag, Product, Watchdog, Reservation, LogRecipe, \
LogRecipeTask, ExcludeOSMajor, ExcludeOSVersion, Hypervisor, DistroTag, \
DeviceClass, DistroTreeRepo, TaskPackage, KernelType, \
LogRecipeTaskResult, TaskType, SystemResource, GuestRecipe, \
GuestResource, VirtResource, SystemStatusDuration, SystemAccessPolicy, \
SystemPermission, DistroTreeImage, ImageType, KernelType, \
RecipeReservationRequest, OSMajorInstallOptions, SystemPool, \
GroupMembershipType, Installation, CommandStatus, OpenStackRegion
from bkr.server.model.types import mac_unix_padded_dialect
from bkr.server import dynamic_virt
log = logging.getLogger(__name__)
ADMIN_USER = u'admin'
ADMIN_PASSWORD = u'testing'
ADMIN_EMAIL_ADDRESS = u'admin@example.com'
def setup_model(override=True):
from bkr.server.tools.init import init_db, populate_db
engine = turbogears.database.get_engine()
db_name = engine.url.database
if db_name: # it will be None for in-memory sqlite
connection = engine.connect()
if override:
log.info('Dropping database %s', db_name)
connection.execute('DROP DATABASE IF EXISTS %s' % db_name)
log.info('Creating database %s', db_name)
connection.execute('CREATE DATABASE %s' % db_name)
connection.invalidate() # can't reuse this one
del connection
log.info('Initialising model')
init_db(metadata)
populate_db(user_name=ADMIN_USER, password=ADMIN_PASSWORD,
user_email_address=ADMIN_EMAIL_ADDRESS)
_counter = itertools.count()
def unique_name(pattern):
"""
Pass a %-format pattern, such as 'user%s', to generate a name that is
unique within this test run.
"""
# time.time() * 1000 is no good, KVM guest wall clock is too dodgy
# so we just use a global counter instead
# http://29a.ch/2009/2/20/atomic-get-and-increment-in-python
return pattern % _counter.next()
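# Illustrative (added): unique_name(u'user%s') might return u'user7'; the global
# counter only guarantees uniqueness within a single test run.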
def create_product(product_name=None):
if product_name is None:
product_name = unique_name(u'product%s')
return Product.lazy_create(name=product_name)
def create_distro_tag(tag=None):
if tag is None:
tag = unique_name('tag%s')
return DistroTag.lazy_create(tag=tag)
def create_labcontroller(fqdn=None, user=None):
if fqdn is None:
fqdn = unique_name(u'lab%s.testdata.invalid')
try:
lc = LabController.by_name(fqdn)
except NoResultFound:
if user is None:
user = User(user_name=u'host/%s' % fqdn,
email_address=u'root@%s' % fqdn)
lc = LabController(fqdn=fqdn)
lc.user = user
session.add(lc)
group = Group.by_name(u'lab_controller')
group.add_member(user, service=u'testdata')
# Need to ensure it is inserted now, since we aren't using lazy_create
# here so a subsequent call to create_labcontroller could try and
# create the same LC again.
session.flush()
return lc
log.debug('labcontroller %s already exists' % fqdn)
return lc
def create_user(user_name=None, password=None, display_name=None,
email_address=None, notify_job_completion=True, notify_broken_system=True,
notify_group_membership=True, notify_reservesys=True):
if user_name is None:
user_name = unique_name(u'user%s')
if display_name is None:
display_name = user_name
if email_address is None:
email_address = u'%s@example.com' % user_name
user = User.lazy_create(user_name=user_name)
user.display_name = display_name
user.email_address = email_address
if password:
user.password = password
user.notify_job_completion = notify_job_completion
user.notify_broken_system = notify_broken_system
user.notify_group_membership = notify_group_membership
user.notify_reservesys = notify_reservesys
log.debug('Created user %r', user)
return user
def create_admin(user_name=None, **kwargs):
if user_name is None:
user_name = unique_name(u'admin%s')
user = create_user(user_name=user_name, **kwargs)
group = Group.by_name(u'admin')
group.add_member(user, service=u'testdata')
return user
def add_system_lab_controller(system,lc):
system.lab_controller = lc
def create_group(permissions=None, group_name=None, display_name=None,
owner=None, membership_type=GroupMembershipType.normal, root_password=None):
# tg_group.group_name column is VARCHAR(16)
if group_name is None:
group_name = unique_name(u'group%s')
assert len(group_name) <= 16
group = Group.lazy_create(group_name=group_name)
group.root_password = root_password
if display_name is None:
group.display_name = u'Group %s' % group_name
else:
group.display_name = display_name
group.membership_type = membership_type
if group.membership_type == GroupMembershipType.ldap:
assert owner is None, 'LDAP groups cannot have owners'
if not owner:
owner = create_user(user_name=unique_name(u'group_owner_%s'))
group.add_member(owner, is_owner=True, service=u'testdata')
if permissions:
group.permissions.extend(Permission.by_name(name) for name in permissions)
return group
def create_permission(name=None):
if not name:
name = unique_name('permission%s')
permission = Permission(name)
session.add(permission)
return permission
def add_pool_to_system(system, pool):
system.pools.append(pool)
def create_distro(name=None, osmajor=u'DansAwesomeLinux6', osminor=u'9',
arches=None, tags=None, harness_dir=True,
osmajor_installopts=None, date_created=None):
osmajor = OSMajor.la | zy_create | (osmajor=osmajor)
osversion = OSVersion.lazy_create(osmajor=osmajor, osminor=osminor)
if arches:
osversion.arches = [Arch.by_name(arch) for arch in arches]
if not name:
name = unique_name(u'%s.%s-%%s' % (osmajor, osminor))
distro = Distro.lazy_create(name=name, osversion=osversion)
if date_created is not None:
distro.date_created = date_created
for tag in (tags or []):
distro.add_tag(tag)
# add distro wide install options, if any
if osmajor_installopts:
for arch in arches:
io = OSMajorInstallOptions.lazy_create(osmajor_id=osmajor.id,
arch_id=Arch.by_name(arch).id)
io.ks_meta = osmajor_installopts.get('ks_meta', '')
io.kernel_options = osmajor_installopts.get('kernel_options', '')
io.kernel_options_post = osmajor_installopts.get('kernel_options_post', '')
log.debug('Created distro %r', distro)
if harness_dir:
harness_dir = os.path.join(turbogears.config.get('basepath.harness'), distro.osversion.osmajor.osmajor)
if not os.path.exists(harness_dir):
os.makedirs(harness_dir)
return distro
def create_distro_tree(distro=None, distro_name=None, osmajor=u'DansAwesomeLinux6',
osminor=u'9', distro_tags=None, arch=u'i386', variant=u'Server',
lab_controllers=None, urls=None, |
ioiogoo/SmartUrl | manage.py | Python | mit | 806 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smarturl.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Dja | ngo. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
    execute_from_command_line(sys.argv)
free-free/pyblog | pyblog/locale/english/message.py | Python | mit | 313 | 0 | {
"register": {
"email": "email address already exists",
"password": "password's length must longer than six characters",
"username": "{ username } already exists"
},
"login": {
"email": "email address not exists",
| "password": "password is not correct"
}
}
basvandenberg/puzzlepy | puzzlepy/cell.py | Python | mit | 1,875 | 0.001067 |
class Cell:
def __init__(self, coord):
self._coord = coord
self._neighbors = [None, None, None, None]
self._value = None
self._initial_value = False
self._valid_values = None
self._marks = set()
self._active = False
self._valid = True
self._partition_subsets = {}
@property
def coord(self):
return self._coord
@property
    def neighbors(self):
        return self._neighbors
@property
def initial_value(self):
        return self._initial_value
@initial_value.setter
def initial_value(self, value):
self._initial_value = True
self.value = value
@property
def value(self):
        return self._value
@value.setter
def value(self, value):
self._value = value
self.valid_values = None
def clear_value(self):
self.value = None
def is_empty(self):
return self.value is None
@property
def valid_values(self):
return self._valid_values
@valid_values.setter
def valid_values(self, values):
self._valid_values = values
@property
def marks(self):
return self._marks
@marks.setter
def marks(self, marks):
self._marks = marks
def add_mark(self, mark):
self.marks.add(mark)
def clear_marks(self):
self.marks = set()
@property
def active(self):
return self._active
@property
def valid(self):
return self._valid
@property
def partition_subsets(self):
return self._partition_subsets
def add_to_partition_subset(self, partition_name, subset_index):
self.partition_subsets[partition_name] = subset_index
def __str__(self):
if(self.value is None):
return '.'
else:
return str(self.value)
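# Minimal usage sketch (the coordinate tuple and value are illustrative):
#
#     cell = Cell((0, 0))
#     cell.initial_value = 5
#     assert cell.value == 5 and cell.initial_value and str(cell) == '5'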
Juniper/ceilometer | ceilometer/api/rbac.py | Python | apache-2.0 | 2,916 | 0 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2014 Hewlett-Packard Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Access Control Lists (ACL's) control access the API server."""
import pecan
from ceilometer.openstack.common import policy
_ENFORCER = None
def enforce(policy_name, request):
"""Return the user and project the request should be limited to.
:param request: HTTP request
:param policy_name: the policy name to validate authz against.
"""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer()
_ENFORCER.load_rules()
rule_method = "telemetry:" + policy_name
headers = request.headers
policy_dict = dict()
policy_dict['roles'] = headers.get('X-Roles', "").split(",")
policy_dict['target.user_id'] = (headers.get('X-User-Id'))
policy_dict['target.project_id'] = (headers.get('X-Project-Id'))
for rule_name in _ENFORCER.rules.keys():
if rule_method == rule_name:
if not _ENFORCER.enforce(
rule_name,
{},
policy_dict):
pecan.core.abort(status_code=403,
detail='RBAC Authorization Failed')
# TODO(fabiog): these methods are still used because the scoping part is really
# convoluted and difficult to separate out.
def get_limited_to(headers):
"""Return the user and proj | ect the request should be limited to.
:param headers: HTTP headers dictionary
:return: A tuple of (user, project), set to None if there's no limit on
one of these.
"""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer()
_ENFORCER.load_rules()
policy_dict = dict()
policy_dict['roles'] = headers.get('X-Roles', "").split(",")
    policy_dict['target.user_id'] = (headers.get('X-User-Id'))
policy_dict['target.project_id'] = (headers.get('X-Project-Id'))
if not _ENFORCER.enforce('segregation',
{},
policy_dict):
return headers.get('X-User-Id'), headers.get('X-Project-Id')
return None, None
def get_limited_to_project(headers):
"""Return the project the request should be limited to.
:param headers: HTTP headers dictionary
:return: A project, or None if there's no limit on it.
"""
return get_limited_to(headers)[1]
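# Usage sketch (header values are illustrative): for a caller that does not
# satisfy the "segregation" rule, queries get scoped to its own project:
#
#     headers = {'X-Roles': 'member', 'X-User-Id': 'u1', 'X-Project-Id': 'p1'}
#     project_id = get_limited_to_project(headers)  # 'p1' for non-admin roles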
idaholab/raven | tests/framework/hybridModel/logicalCode/control.py | Python | apache-2.0 | 959 | 0.005214 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def evaluate(self):
"""
    Method required by RAVEN to run this as a ControlFunction in LogicalModel.
@ In, self, object, object to store members on
@ Out, model, str, the name of external model that
will be executed by hybrid model
"""
model = None
if self.x > 0.5 and self.y > 1.5:
model = 'poly'
else:
model = 'exp'
return model
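# For instance, with self.x = 0.7 and self.y = 2.0 the check above selects
# the 'poly' model; any other combination falls through to 'exp'.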
jameshensman/pythonGPLVM | GPPOD.py | Python | gpl-3.0 | 171 | 0.005848 | # -*- coding: utf-8 -*-
# Copyright 2009 James Hensman
# Licensed under the GNU General Public License, see COPYING
#
# Gaussian Process Proper Orthogonal Decomposition.
rays/ipodderx-core | khashmir/unet.py | Python | mit | 2,756 | 0.006894 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# knet.py
# create a network of khashmir nodes
# usage: knet.py <num_nodes> <start_port> <ip_address>
from utkhashmir import UTKhashmir
from BitTorrent.RawServer_magic import RawServer
from BitTorrent.defaultargs import common_options, rare_options
from random import randrange
from threading import Event
import sys, os
from krpc import KRPC
KRPC.noisy = 1
class Network:
def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
self.num = size
self.startport = startport
self.localip = localip
def _done(self, val):
self.done = 1
def simpleSetUp(self):
#self.kfiles()
d = dict([(x[0],x[1]) for x in common_options + rare_options])
self.r = RawServer(Event(), d)
self.l = []
for i in range(self.num):
self.l.append(UTKhashmir('', self.startport + i, 'kh%s.db' % (self.startport + i), self.r))
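        # Seed each node's routing table with three randomly chosen peers,
        # then pump the reactor below so the contact messages get processed.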
for i in self.l:
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
i.addContact(self.localip, self.l[randrange(0,self.num)].port)
self.r.listen_once(1)
self.r.listen_once(1)
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
for i in self.l:
self.done = 0
i.findCloseNodes(self._done)
while not self.done:
self.r.listen_once(1)
def tearDown(self):
for i in self.l:
i.rawserver.stop_listening_udp(i.socket)
i.socket.close()
#self.kfiles()
def kfiles(self):
for i in range(self.startport, self.startport+self.num):
try:
os.unlink('kh%s.db' % i)
except:
                pass
self.r.listen_once(1)
if __name__ == "__main__":
n = Network(int(sys.argv[1]), int(sys.argv[2]))
n.simpleSetUp()
print ">>> network ready"
try:
n.r.listen_forever()
    finally:
n.tearDown()
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/bsddb/db.py | Python | mit | 2,176 | 0.001838 | #----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
#    notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
# This module is just a placeholder for possible future expansion, in
# case we ever want to augment the stuff in _db in any way. For now
# it just simply imports everything from _db.
from _bsddb import *
from _bsddb import __version__
if version() < (3, 1, 0):
raise ImportError, "BerkeleyDB 3.x symbols not found. Perhaps python was statically linked with an older version?"
DashkevichBy/Advisors.stratiFi.frontend | tests/test_base_page.py | Python | apache-2.0 | 622 | 0 | from unittest import TestCase
from nose.tools import assert_raises, eq_
import tests
from webium.base_page import BasePage
from webium.driver import get_driver
from webium.errors import WebiumException
class PageWithoutUrl(BasePage):
pass
class TestWithStaticUrl(BasePage):
    url = tests.get_url('simple_page.html')
class TestNoUrlValidation(TestCase):
    def test_no_url_validation(self):
page = PageWithoutUrl()
assert_raises(WebiumException, page.open)
def test_static_url(self):
page = TestWithStaticUrl()
page.open()
eq_(get_driver().title, 'Hello Webium')
Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Interfacers/Folderer/__init__.py | Python | mit | 6,649 | 0.055347 | # -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Folderer is a small helper object for getting the FolderedDirKeyStrsList
at a specified directory, or in the current one by default.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Itemizers.Structurer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
import importlib
import json
import os
import sys
#</ImportSpecificModules>
#<DefineLocals>
class ModuleDict(dict):
def __init__(self,_DictVariable=None,_ModuleVariable=None,**_KwargVariablesDict):
#Call the parent init method
if _DictVariable!=None:
dict.__init__(self,_DictVariable,**_KwargVariablesDict)
else:
dict.__init__(self,**_KwargVariablesDict)
#Debug
'''
print('PackageDict l 39')
print('_ModuleVariable is ')
print(_ModuleVariable)
print('')
'''
#import
self._import(_ModuleVariable)
def _import(self,_ModuleVariable):
#Check
if type(_ModuleVariable) in SYS.StrTypesList:
self['ModuleVariable']=None
self['ModuleStr']=_ModuleVariable
else:
self['ModuleVariable']=_ModuleVariable
        self['ModuleStr']=_ModuleVariable.__name__
#Check for a module
if self['ModuleVariable']==None or self['ModuleStr']!=self['ModuleVariable'].__name__:
#Check
if self['ModuleStr']!="":
#Import the module if not already
if self['ModuleStr'] not in sys.modules:
importlib.import_module(self['ModuleStr'])
#set with sys
self['ModuleVariable']=sys.modules[
self['ModuleStr']
                ]
#set
if self['ModuleVariable']!=None:
#set
self['InstallFolderPathStr']='/'.join(
self['ModuleVariable'].__file__.split('/')[:-1]
)+'/'
#set
self['LocalFolderPathStr']=SYS.PythonlogyLocalFolderPathStr+self['ModuleVariable'].__name__.replace(
'.','/')+'/'
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class FoldererClass(BaseClass):
"""
FoldererClass ...
"""
def default_init(self,
_FolderingPathVariable=None,
_FolderingMkdirBool=False,
_FolderingImportBool=True,
_FolderedPathStr="",
_FolderedModuleDict=None,
_FolderedDirKeyStrsList=None,
_FolderedModuleStr="",
_FolderedParentModuleStr="",
_FolderedNameStr="",
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_folder(self,**_KwargVariablesDict):
#/################/#
# Adapt the current path str
#
#debug
'''
self.debug(
[
'We folder here',
('self.',self,[
'FolderingPathVariable'
])
]
)
'''
#set
if self.FolderingPathVariable==None:
#/################/#
# Get the current
#
#Get the current
FolderedCurrentPathStr=os.getcwd()
#add
self.FolderedPathStr=FolderedCurrentPathStr+'/'
elif type(self.FolderingPathVariable)==str:
#/################/#
# This is a path str query
#
#set
self.FolderedPathStr=self.FolderingPathVariable
else:
#/################/#
# Get info on the already imported module
#
#module
self.FolderedModuleDict=ModuleDict(
_ModuleVariable=self.FolderingPathVariable
)
#set
self.FolderedPathStr=self.FolderedModuleDict['LocalFolderPathStr']
#/################/#
# Now do things with that
#
#debug
'''
self.debug(
[
('self.',self,[
'FolderedPathStr'
])
]
)
'''
#Check
if self.FolderedPathStr!="":
#Add the '/' if not in the end
if self.FolderedPathStr[-1]!="/":
self.FolderedPathStr+="/"
#/################/#
# Maybe build the dir
#
#Build intermediar pathes
if os.path.isdir(self.FolderedPathStr)==False:
#Check
if self.FolderingMkdirBool:
#debug
'''
print('We are going to build the intermediar folder')
print('self.FolderingPathVariable is ',self.FolderingPathVariable)
print('')
'''
#Definition
FolderedPathStrsList=self.FolderedPathStr.split('/')
FolderedRootPathStr=FolderedPathStrsList[0]
for _PathStr in FolderedPathStrsList[1:]:
#debug
'''
print('FolderedRootPathStr is ',FolderedRootPathStr)
print('')
'''
#Mkdir if it doesn't exist
if FolderedRootPathStr!="" and os.path.isdir(FolderedRootPathStr)==False:
os.popen('mkdir '+FolderedRootPathStr)
#Add the following
FolderedRootPathStr+='/'+_PathStr
#Mkdir if it doesn't exist
if os.path.isdir(FolderedRootPathStr)==False:
os.popen('mkdir '+FolderedRootPathStr)
#/################/#
# Find the Module str maybe that is associated
#
#Recheck
if os.path.isdir(self.FolderedPathStr):
#set
self.FolderedDirKeyStrsList=os.listdir(
self.FolderedPathStr
)
#Check
if '__init__.py' in self.FolderedDirKeyStrsList:
#set maybe FolderedModuleStr and FolderedParentModuleStr if we are located in the SYS path
if 'ShareYourSystem' in self.FolderedPathStr:
#Check
if self.FolderedModuleDict==None:
#module
self.FolderedModuleDict=ModuleDict(
_ModuleVariable=self.FolderingPathVariable
)
#set
self.FolderedModuleStr='ShareYourSystem'+self.FolderedPathStr.split(
'ShareYourSystem')[-1].replace('/','.')
            #Remove the possible trailing dot
if self.FolderedModuleStr[-1]=='.':
self.FolderedModuleStr=self.FolderedModuleStr[:-1]
#set
if '.' in self.FolderedModuleStr:
#set
self.FolderedNameStr=self.FolderedModuleStr.split('.')[-1]
#debug
'''
self.debug(('self.',self,['FolderingPathVariable','FolderedNameStr']))
'''
#set the parent
self.FolderedParentModuleStr=self.FolderedNameStr.join(
self.FolderedModuleStr.split(self.FolderedNameStr)[:-1]
)
if len(self.FolderedParentModuleStr
)>0 and self.FolderedParentModuleStr[-1]=='.':
self.FolderedParentModuleStr=self.FolderedParentModuleStr[:-1]
else:
self.FolderedModuleStr=self.FolderedModuleStr
else:
#set
self.FolderedModuleStr=""
self.FolderedParentModuleStr=""
#</DefineClass>
#<DefinePrint>
FoldererClass.PrintingClassSkipKeyStrsList.extend(
[
'FolderingPathVariable',
'FolderingMkdirBool',
'FolderingImportBool',
'FolderedPathStr',
'FolderedDirKeyStrsList',
'FolderedModuleDict',
'FolderedModuleStr',
'FolderedParentModuleStr',
'FolderedNameStr',
#'FolderedModuleDict'
]
)
#</DefinePrint>
ninjaotoko/dynaform | dynaform/forms/base.py | Python | bsd-3-clause | 24,221 | 0.007226 | # *-* coding=utf-8 *-*
import os, uuid, StringIO
import re
import json
import mimetypes
from django import forms
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import force_escape, escape, safe
from django.contrib.contenttypes.models import ContentType
from django.template import Context, Template, RequestContext
from django.forms import extras
from django.utils.encoding import force_unicode
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.urlresolvers import reverse
from dynaform.models import DynaFormTracking, DynaFormRecipientList
from dynaform.forms import widgets as dynaform_widgets
from dynaform.forms import fields as dynaform_fields
import requests
import logging
log = logging.getLogger(__name__)
TOKEN_FORMAT = re.compile('%\((?P<field>[a-z0-9\.\_\-]+)\)s', re.U|re.I)
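# TOKEN_FORMAT matches python-style interpolation tokens such as
# "%(job_name)s", capturing the field name in the "field" group.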
# always CC the managers on the emails
def get_recipient_list(r):
r.extend([mail_tuple[1] for mail_tuple in settings.MANAGERS])
return r
################################################################################
# Base Model DynaForm
################################################################################
class DynaFormClassForm(forms.Form):
def _resolve_variable(self, variable):
var_tpl = Template(variable)
return var_tpl.render(self.get_context())
def __init__(self, *args, **kwargs):
        object_form = kwargs.pop('object_form', 0) # DynaFormForm object
        success_url = kwargs.pop('success_url', '') # success redirect URL
request = kwargs.pop('request', 0) # request
context = kwargs.pop('context', 0) # context
super(DynaFormClassForm, self).__init__(*args, **kwargs)
self.error_css_class = object_form.error_class
self.required_css_class = object_form.required_css_class
for field in object_form.get_fields():
# configura el field y widget
try:
_field = getattr(dynaform_fields, field.field_type)
except AttributeError:
_field = getattr(forms, field.field_type)
field_args = { 'label': field.field_label, 'required': field.is_required }
# set default decimal and float position
if field.field_type == "DecimalField":
field_args.update({'decimal_places': 2, 'localize': True})
if field.default_value:
# Add context to render fields with context values
default_value = Template(field.default_value)
field_args.update({'initial': default_value.render(context)})
if field.field_help:
field_args.update({'help_text': field.field_help})
if field.choices:
"""
Split key, values
"""
choices_delimiter = (field.choices_delimiter.strip('\n\r')).strip(' ')
choices = StringIO.StringIO(field.choices)
choice_list = []
for choice in choices.readlines():
value, label = choice.split(choices_delimiter)
choice_list.append(
(force_unicode((value.strip('\n\r')).strip(' ')), force_unicode((label.strip('\n\r')).strip(' ')), )
)
field_args.update({'choices': choice_list})
################################################################
            # If the queryset comes from a model
################################################################
if field.choices_queryset and field.field_type in \
("ModelChoiceField","ModelMultipleChoiceField"):
qs = field.choices_queryset_queryset()
            # If it has a custom label, generate the subclass and the method
if field.choices_queryset_label:
class CustomModelChoiceField(_field):
def label_from_instance(self, obj):
key_fields = TOKEN_FORMAT.findall(field.choices_queryset_label)
ret = {}
for key in key_fields:
parts = key.split('__')
val = obj
for part in parts:
val = getattr(val, part, '')
ret.update({key: val})
return field.choices_queryset_label % ret
_field = CustomModelChoiceField
            # If it has a related field, generate the attrs for the
            # JavaScript handler
if field.choices_related_field:
class CustomModelChoiceField(_field):
_choices = (
('', '------'),
)
def widget_attrs(self, widget):
attrs = super(CustomModelChoiceField, self).widget_attrs(widget)
                        action = reverse('dynaform_choices_related_queryset', kwargs = {
                            'field_pk': field.choices_related_field.pk,
'related_field_pk':field.pk,
'pk': 0
})
attrs.update({
'data-related-field': '',
'data-related-field-options': 'action:%(action)s;field:%(field)s;related_field:%(related_field)s;ids:id_%(ids)s;names:%(names)s' % {
'action': action,
'field': field.choices_related_field.pk,
'related_field':field.pk,
'ids': field.choices_related_field.field_name,
'names': field.choices_related_field.field_name
},
})
return attrs
                    # FIXME: if this is a related field, leave the queryset
                    # empty by default so it is filled when the parent changes
#qs = qs.none()
_field = CustomModelChoiceField
field_args.update({
'queryset': qs,
'empty_label': field.choices_queryset_empty_label
})
if field.is_hidden:
field_args.update({'widget': forms.HiddenInput})
if field.field_widget:
attrs = {'placeholder': field.field_help or field.field_label, 'required': field.is_required}
try:
widget = getattr(forms, field.field_widget)(attrs=attrs)
except AttributeError:
try :
widget = getattr(extras, field.field_widget)(attrs=attrs)
except AttributeError:
widget = getattr(dynaform_widgets, field.field_widget)(attrs=attrs)
field_args.update({'widget': widget})
self.fields[field.field_name] = _field(**field_args)
if 'instance' not in kwargs:
            # resolve the content type and object pk
if context.get('object'):
_ct = Template("{% load dynaform_tags %}{{ object|get_ct_pk }}")
_ct_val = _ct.render(context)
_pk = Template("{{ object.pk }}")
_pk_val = _pk.render(context)
else:
_ct_val = request.POST.get('_obj_ct')
_pk_val = request.POST.get('_obj_pk')
if not '_object_form_pk' in self.fields:
self.fields['_object_form_pk'] = forms.IntegerField(initial=object_form.pk, required=False, widget=forms.HiddenInput)
if not '_object_form_success_url' in self.fields:
|
stripe/stripe-python | tests/api_resources/reporting/test_report_type.py | Python | mit | 760 | 0 | from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "activity.summary.1"
class TestReportType(object):
def test_is_listable(self, request_mock):
resources = stripe.reporting.ReportType.list()
request_mock.assert_requested("get", "/v1/reporting/report_types")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.reporting.ReportType)
    def test_is_retrievable(self, request_mock):
resource = stripe.reporting.ReportType.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/reporting/report_types/%s" % TEST_RESOURCE_ID
)
        assert isinstance(resource, stripe.reporting.ReportType)
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/eventhubs/custom.py | Python | mit | 20,353 | 0.005257 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=inconsistent-return-statements
from azure.cli.core.profiles import ResourceType
# , resource_type = ResourceType.MGMT_EVENTHUB
# Namespace Region
def cli_namespace_create(cmd, client, resource_group_name, namespace_name, location=None, tags=None, sku='Standard', capacity=None,
is_auto_inflate_enabled=None, maximum_throughput_units=None, is_kafka_enabled=None,
default_action=None, identity=None, zone_redundant=None, cluster_arm_id=None, trusted_service_access_enabled=None,
disable_local_auth=None):
EHNamespace = cmd.get_models('EHNamespace', resource_type=ResourceType.MGMT_EVENTHUB)
Sku = cmd.get_models('Sku', resource_type=ResourceType.MGMT_EVENTHUB)
Identity = cmd.get_models('Identity', resource_type=ResourceType.MGMT_EVENTHUB)
IdentityType = cmd.get_models('ManagedServiceIdentityType', resource_type=ResourceType.MGMT_EVENTHUB)
if cmd.supported_api_version(resource_type=ResourceType.MGMT_EVENTHUB, min_api='2018-01-01-preview'):
ehparam = EHNamespace()
ehparam.location = location
ehparam.tags = tags
ehparam.sku = Sku(name=sku, tier=sku, capacity=capacity)
ehparam.is_auto_inflate_enabled = is_auto_inflate_enabled
ehparam.maximum_throughput_units = maximum_throughput_units
ehparam.kafka_enabled = is_kafka_enabled
ehparam.zone_redundant = zone_redundant
ehparam.disable_local_auth = disable_local_auth
ehparam.cluster_arm_id = cluster_arm_id
if identity:
ehparam.identity = Identity(type=IdentityType.SYSTEM_ASSIGNED)
client.begin_create_or_update(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
parameters=ehparam).result()
if default_action or trusted_service_access_enabled:
            networkruleset = client.get_network_rule_set(resource_group_name, namespace_name)
            networkruleset.default_action = default_action
            networkruleset.trusted_service_access_enabled = trusted_service_access_enabled
            client.create_or_update_network_rule_set(resource_group_name, namespace_name, networkruleset)
        return client.get(resource_group_name, namespace_name)
def cli_namespace_update(cmd, client, instance, tags=None, sku=None, capacity=None, is_auto_inflate_enabled=None,
maximum_throughput_units=None, is_kafka_enabled=None, default_action=None,
identity=None, key_source=None, key_name=None, key_vault_uri=None, key_version=None, trusted_service_access_enabled=None,
disable_local_auth=None, require_infrastructure_encryption=None):
Encryption = cmd.get_models('Encryption', resource_type=ResourceType.MGMT_EVENTHUB)
KeyVaultProperties = cmd.get_models('KeyVaultProperties', resource_type=ResourceType.MGMT_EVENTHUB)
Identity = cmd.get_models('Identity', resource_type=ResourceType.MGMT_EVENTHUB)
IdentityType = cmd.get_models('ManagedServiceIdentityType', resource_type=ResourceType.MGMT_EVENTHUB)
if cmd.supported_api_version(resource_type=ResourceType.MGMT_EVENTHUB, min_api='2021-06-01-preview'):
if tags:
instance.tags = tags
if sku:
instance.sku.name = sku
instance.sku.tier = sku
if capacity:
instance.sku.capacity = capacity
if is_auto_inflate_enabled:
instance.is_auto_inflate_enabled = is_auto_inflate_enabled
if maximum_throughput_units:
instance.maximum_throughput_units = maximum_throughput_units
if is_kafka_enabled:
instance.kafka_enabled = is_kafka_enabled
if identity is True and instance.identity is None:
instance.identity = Identity(type=IdentityType.SYSTEM_ASSIGNED)
elif instance.identity and instance.encryption is None:
instance.encryption = Encryption()
if key_source:
instance.encryption.key_source = key_source
if key_name and key_vault_uri:
keyprop = []
keyprop.append(KeyVaultProperties(key_name=key_name, key_vault_uri=key_vault_uri, key_version=key_version))
instance.encryption.key_vault_properties = keyprop
if require_infrastructure_encryption:
instance.encryption.require_infrastructure_encryption = require_infrastructure_encryption
if default_action:
resourcegroup = instance.id.split('/')[4]
        networkruleset = client.get_network_rule_set(resourcegroup, instance.name)
        networkruleset.default_action = default_action
        networkruleset.trusted_service_access_enabled = trusted_service_access_enabled
        client.create_or_update_network_rule_set(resourcegroup, instance.name, networkruleset)
if disable_local_auth:
instance.disable_local_auth = disable_local_auth
return instance
def cli_namespace_list(cmd, client, resource_group_name=None):
if cmd.supported_api_version(min_api='2021-06-01-preview'):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def cli_namespace_exists(client, name):
return client.check_name_availability(parameters={'name': name})
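# Usage sketch (the namespace name is illustrative): name availability is a
# plain check-name call on the management client:
#
#     cli_namespace_exists(client, 'my-eventhubs-ns')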
# Cluster region
def cli_cluster_create(cmd, client, resource_group_name, cluster_name, location=None, tags=None, capacity=None):
Cluster = cmd.get_models('Cluster', resource_type=ResourceType.MGMT_EVENTHUB)
ClusterSku = cmd.get_models('ClusterSku', resource_type=ResourceType.MGMT_EVENTHUB)
if cmd.supported_api_version(resource_type=ResourceType.MGMT_EVENTHUB, min_api='2016-06-01-preview'):
ehparam = Cluster()
ehparam.sku = ClusterSku(name='Dedicated')
ehparam.location = location
if not capacity:
ehparam.sku.capacity = 1
ehparam.tags = tags
cluster_result = client.begin_create_or_update(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=ehparam).result()
return cluster_result
def cli_cluster_update(cmd, instance, tags=None):
if cmd.supported_api_version(resource_type=ResourceType.MGMT_EVENTHUB, min_api='2016-06-01-preview'):
if tags:
instance.tags = tags
return instance
# Namespace Authorization rule:
def cli_namespaceautho_create(client, resource_group_name, namespace_name, name, rights=None):
from azure.cli.command_modules.eventhubs._utils import accessrights_converter
return client.create_or_update_authorization_rule(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
authorization_rule_name=name,
parameters={'rights': accessrights_converter(rights)}
)
def cli_autho_update(cmd, instance, rights):
if cmd.supported_api_version(resource_type=ResourceType.MGMT_EVENTHUB, min_api='2021-06-01-preview'):
from azure.cli.command_modules.eventhubs._utils import accessrights_converter
instance.rights = accessrights_converter(rights)
return instance
def cli_keys_renew(client, resource_group_name, namespace_name, name, key_type, key=None):
return client.regenerate_keys(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
authorization_rule_name=name,
parameters={'key_type': key_type, 'key': key}
)
# Eventhub Region
def cli_eheventhub_create(cmd, client, resource_group_name, namespace_name, event_hub_name, message_retention_in_days=None, partition_count=None, status=None,
enabled=No |
crocodilered/insideout | webapp/libs/models/schedule.py | Python | mit | 712 | 0.001404 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy.types import String, Integer, Boolean, Text, Date
from webapp.libs.mediahelper import MediaHelper
Base = declarative_base()
class Schedule(Base):
__tablename__ = "schedule"
schedule_id = Column(Integer, primary_key=True)
date = Column("dt", Date, nullable=False)
content = Column(Text, nullable=False)
enabled = Column(Boolean)
def __init__(self, schedule_id=None, date=None, content=None, enabled=None):
Base.__init__(self)
self.schedule_id = schedule_id
self.date = date
self.content = content
self.enabled = enabled
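# Minimal usage sketch (assumes an engine and session configured elsewhere;
# the in-memory SQLite URL is illustrative only):
#
#     from datetime import date
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite:///:memory:')
#     Base.metadata.create_all(engine)
#     session = sessionmaker(bind=engine)()
#     session.add(Schedule(date=date.today(), content=u'open house', enabled=True))
#     session.commit()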
kinshuk4/MoocX | misc/deep_learning_notes/Proj_Centroid_Loss_LeNet/LeNet_plus/checkpoints/90_pc_trial_001/MNIST_train.py | Python | mit | 4,056 | 0.002959 | import os, sys, numpy as np, tensorflow as tf
from pathlib import Path
from termcolor import colored as c, cprint
sys.path.append(str(Path(__file__).resolve().parents[1]))
import LeNet_plus
__package__ = 'LeNet_plus'
from . import network
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
BATCH_SIZE = 200
FILENAME = os.path.basename(__file__)
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SUMMARIES_DIR = SCRIPT_DIR
SAVE_PATH = SCRIPT_DIR + "/network.ckpt"
### configure devices for this eval script.
USE_DEVICE = '/gpu:0'
session_config = tf.ConfigProto(log_device_placement=True)
session_config.gpu_options.allow_growth = True
# this is required if want to use GPU as device.
# see: https://github.com/tensorflow/tensorflow/issues/2292
session_config.allow_soft_placement = True
if __name__ == "__main__":
with tf.Graph().as_default() as g, tf.device(USE_DEVICE):
# inference()
input, deep_feature = network.inference()
labels, logits, loss_op = network.loss(deep_feature)
train, global_step = network.training(loss_op, 0.1)
eval = network.evaluation(logits, labels)
init = tf.initialize_all_variables()
with tf.Session(config=session_config) as sess:
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
# to see the tensor graph, fire up the tensorboard with --logdir="./train"
all_summary = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/train', sess.graph)
test_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/test')
saver = tf.train.Saver()
try:
saver.restore(sess, SAVE_PATH)
cprint(c('successfully loaded checkpoint file.', 'green'))
except ValueError:
cprint(c('checkpoint file not found. Moving on to initializing automatically.', 'red'))
sess.run(init)
# sess.run(init)
for i in range(500000):
batch_xs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
accuracy = 0
if i % 100 == 0:
summaries, step, logits_output, loss_value, accuracy = \
sess.run(
[all_summary, global_step, logits, loss_op, eval],
feed_dict={
input: mnist.test.images[:5000],
labels: mnist.test.labels[:5000]
})
test_writer.add_summary(summaries, global_step=step)
cprint(
c("#" + str(i), 'grey') +
c(" training accuracy", 'green') + " is " +
c(accuracy, 'red') + ", " +
c("loss", 'green') + " is " +
c(loss_value, 'red')
)
print('logits => ', logits_output[0])
if i % 500 == 0 and (accuracy > 0.6):
saver.save(sess, SAVE_PATH)
print('=> saved network in checkfile.')
summaries, step, _ = sess.run([all_summary, global_step, train], feed_dict={
input: batch_xs,
labels: batch_labels
})
train_writer.add_summary(summaries, global_step=step)
# now let's test!
TEST_BATCH_SIZE = np.shape(mnist.test.labels)[0]
summaries, step, logits_output, loss_value, accuracy = \
sess.run(
[all_summary, global_step, logits, loss_op, eval], feed_dict={
input: mnist.test.images[:5000],
labels: mnist.test.labels[:5000]
})
test_writer.add_summary(summaries, global_step=step)
print("MNIST Test accuracy is ", accuracy)
vakhet/rathena-utils | dev/ragna.py | Python | mit | 1,183 | 0.002536 | import re
def read_lua():
PATTERN = r'\s*\[(?P<id>\d+)\] = {\s*unidentifiedDisplayName = ' \
r'"(?P<unidentifiedDisplayName>[^"]+)",\s*unidentifie' \
              r'dResourceName = "(?P<unidentifiedResourceName>[^"]+' \
r')",\s*unidentifiedDescriptionName = {\s*"(?P<uniden' \
r'tifiedDescriptionName>[^=]+)"\s*},\s*identifiedDisp' \
              r'layName = "(?P<identifiedDisplayName>[\S ]+)",\s*id' \
r'entifiedResourceName = "(?P<identifiedResourceName>' \
r'[\S ]+)",\s*identifiedDescriptionName = {\s*"(?P<id' \
r'entifiedDescriptionName>[^=]+)"\s*},\s*slotCount = ' \
r'(?P<slotCount>\d{1}),\s*ClassNum = (?P<ClassNum>\d{' \
r'1})\s*}'
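    # The pattern expects Lua-style item entries shaped like the following
    # (field values are illustrative, reconstructed from the named groups):
    #
    #   [502] = {
    #       unidentifiedDisplayName = "Apple",
    #       unidentifiedResourceName = "...",
    #       unidentifiedDescriptionName = { "..." },
    #       identifiedDisplayName = "Apple",
    #       identifiedResourceName = "...",
    #       identifiedDescriptionName = { "..." },
    #       slotCount = 0,
    #       ClassNum = 0
    #   }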
PATTERN = re.compile(PATTERN)
with open('testcase.txt', encoding='utf8', errors='ignore') as file:
test = PATTERN.findall(file.read())
for item in test:
if item[0] == '502':
print(item)
print(len(test))
return 0
"""
for group in test.groupdict():
for k, v in group.items():
print(k + ' : ' + v)
print()
"""
read_lua()
emsrc/daeso-framework | lib/daeso/stats/corpus.py | Python | gpl-3.0 | 6,068 | 0.00824 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 by Erwin Marsi and TST-Centrale
#
# This file is part of the DAESO Framework.
#
# The DAESO Framework is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# The DAESO Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
report statistics over (part of) the files from a corpus (segment)
"""
# TODO:
# - not tested on windows
# - design flaw: it's funny that we have to write the tables,
# then parse them and add them to another table, while we have
# both tables in memory
__authors__ = "Erwin Marsi <e.marsi@gmail.com>"
from sys import stderr
from glob import glob
from os import getcwd, chdir, path
from daeso.pgc.pgcstats import pgc_stats, PgcStatsTable
from daeso.gb.gbstats import GbStatsTable
def corpus_seg_stats(targets,
seg_dir,
annot_dirs=("ma", "aa", "na"),
subseg_dirs=None,
pgc_files_pattern="*.pgc",
verbose=True):
"""
report statistics over (part of) the files from a corpus (segment)
"""
assert subseg_dirs
targets = dict.fromkeys(targets or ["all"])
seg_dir = seg_dir.rstrip(path.sep)
# assume final dir of seg_dir path represents
seg = seg_dir.split(path.sep)[-1]
pgc_tables = dict(all=PgcStatsTable())
gb_tables = dict(all=GbStatsTable())
for annot in annot_dirs:
pgc_tables[annot] = PgcStatsTable()
gb_tables[annot] = GbStatsTable()
# **** HACK *** !
if annot == "na":
# We typically want to know how many potential tokens an unanotated
# file contains, that is, the CT columns in the graph bank stats.
# However, the token t will be zero because by default graphs with
# unaligned roots are discounted. hence we need to set the
# with_unaligned_roots option.
# This also means that the token count over both ma/aa and na is
# a overestimation, because some aligned graph pairs may be wrong.
with_unaligned_roots = True
else:
with_unaligned_roots = False
for subseg in subseg_dirs:
# ****** HACK *****
            # Replacing "/" by "." is a quick hack to facilitate deeper subdirs
# Should be fixed in a proper manner!
gb_tab_fn = ".".join(("stats", seg, annot, subseg, "gb")).replace("/",".")
pgc_tab_fn = ".".join(("stats", seg, annot, subseg, "pgc")).replace("/",".")
if ( "all" in targets or
annot in targets or
subseg in targets or
(annot + "." + subseg) in targets ):
pgc_dir = path.join(seg_dir, "pgc", annot, subseg)
print "Processing corpora files in ", pgc_dir
# this assumes there are no duplicates of part/whole pgc files in
# this directory
pgc_tab, gb_tab = stats(pgc_dir,
pgc_files_pattern,
with_unaligned_roots)
write_table(pgc_tab, pgc_tab_fn, verbose=verbose)
write_table(gb_tab, gb_tab_fn, verbose=verbose)
read_subtable(pgc_tables[annot], pgc_tab_fn)
read_subtable(gb_tables[annot], gb_tab_fn)
pgc_tables[annot].summarize()
pgc_tab_fn = ".".join(("stats", seg, annot, "pgc"))
    write_table(pgc_tables[annot], pgc_tab_fn, verbose=verbose)
read_subtable(pgc_tables["all"], pgc_tab_fn, verbose=verbose)
gb_tables[annot].summarize()
gb_tab_fn = ".".join(("stats", seg, annot, "gb"))
write_table(gb_tables[annot], gb_tab_fn, verbose=verbose)
read_subtable(gb_tables["all"], gb_tab_fn, verbose=verbose)
pgc_tab_fn = "stats.%s.pgc" % seg
pgc_tables["all"].summarize()
write_table(pgc_tables["all"], pgc_tab_fn, verbose=verbose)
gb_tab_fn = "stats.%s.gb" % seg
gb_tables["all"].summarize()
write_table(gb_tables["all"], gb_tab_fn, verbose=verbose)
def stats(pgc_dir, pgc_files_pattern, with_unaligned_roots=False):
"""
return statistics tables for all matching parallel graph corpora and
associated graph banks
"""
cwd = getcwd()
# must change to dir of pgc files,
# because paths to gb files are interpreted relative to location of pgc file
try:
chdir(pgc_dir)
except OSError:
# dir does not exist
# assume there are no files, which results in a empty table
pgc_files = []
else:
pgc_files = glob(pgc_files_pattern)
pgc_table, gb_table = pgc_stats(pgc_files,
with_unaligned_roots=with_unaligned_roots)
chdir(cwd)
return pgc_table, gb_table
def read_subtable(table, fn, verbose=True):
"""
try to read a stats table for parallel graph corpora or graph banks
"""
try:
if verbose: print "Reading table", fn
table.read(fn)
except IOError:
print >>stderr, "Warning: no file", fn
def write_table(table, fn, verbose=True):
"""
write a stats table for parallel graph corpora or graph banks
unless it is empty
"""
if not table.is_empty():
if verbose: print "Writing table", fn
        table.write(out=file(fn, "w"))
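# Usage sketch (the segment directory and subsegment names are illustrative):
#
#     corpus_seg_stats(targets=["all"],
#                      seg_dir="/corpora/seg1",
#                      subseg_dirs=("part1", "part2"))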
almarklein/bokeh | tests/glyphs/Patch.py | Python | bsd-3-clause | 987 | 0.001013 | import numpy as np
from bokeh.document import Document
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.glyphs import Patch
from bokeh.plotting import show
N = 30
x1 = np.linspace(-2, 2, N)
x2 = x1[::-1]
y1 = x1**2
y2 = x2**2 + (x2+2.2)
x = np.hstack((x1, x2))
y = np.hstack((y1, y2))
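# Concatenating the forward x1/y1 arm with the reversed x2/y2 arm makes the
# vertex list trace a closed outline, which is what the Patch glyph expects.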
source = ColumnDataSource(dict(x=x, y=y))
xdr = DataRange1d(sources=[source.columns("x")])
ydr = DataRange1d(sources=[source.columns("y")])
plot = Plot(
title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,
h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)
glyph = Patch(x="x", y="y", fill_color="#a6cee3")
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
doc = Document()
doc.add(plot)
show(plot)
Patola/Cura | plugins/UM3NetworkPrinting/src/Cloud/Models/CloudClusterPrinterStatus.py | Python | lgpl-3.0 | 4,467 | 0.00582 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import List, Union, Dict, Optional, Any
from cura.PrinterOutput.PrinterOutputController import PrinterOutputController
from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel
from .CloudClusterBuildPlate import CloudClusterBuildPlate
from .CloudClusterPrintCoreConfiguration import CloudClusterPrintCoreConfiguration
from .BaseCloudModel import BaseCloudModel
## Class representing a cluster printer
# Spec: https://api-staging.ultimaker.com/connect/v1/spec
class CloudClusterPrinterStatus(BaseCloudModel):
## Creates a new cluster printer status
# \param enabled: A printer can be disabled if it should not receive new jobs. By default every printer is enabled.
# \param firmware_version: Firmware version installed on the printer. Can differ for each printer in a cluster.
    # \param friendly_name: Human readable name of the printer. Can be used for identification purposes.
# \param ip_address: The IP address of the printer in the local network.
# \param machine_variant: The type of printer. Can be 'Ultimaker 3' or 'Ultimaker 3ext'.
    # \param status: The status of the printer.
# \param unique_name: The unique name of the printer in the network.
# \param uuid: The unique ID of the printer, also known as GUID.
# \param configuration: The active print core configurations of this printer.
# \param reserved_by: A printer can be claimed by a specific print job.
# \param maintenance_required: Indicates if maintenance is necessary
# \param firmware_update_status: Whether the printer's firmware is up-to-date, value is one of: "up_to_date",
# "pending_update", "update_available", "update_in_progress", "update_failed", "update_impossible"
# \param latest_available_firmware: The version of the latest firmware that is available
# \param build_plate: The build plate that is on the printer
def __init__(self, enabled: bool, firmware_version: str, friendly_name: str, ip_address: str, machine_variant: str,
status: str, unique_name: str, uuid: str,
configuration: List[Union[Dict[str, Any], CloudClusterPrintCoreConfiguration]],
reserved_by: Optional[str] = None, maintenance_required: Optional[bool] = None,
firmware_update_status: Optional[str] = None, latest_available_firmware: Optional[str] = None,
build_plate: Union[Dict[str, Any], CloudClusterBuildPlate] = None, **kwargs) -> None:
self.configuration = self.parseModels(CloudClusterPrintCoreConfiguration, configuration)
self.enabled = enabled
self.firmware_version = firmware_version
self.friendly_name = friendly_name
self.ip_address = ip_address
self.machine_variant = machine_variant
self.status = status
self.unique_name = unique_name
self.uuid = uuid
self.reserved_by = reserved_by
self.maintenance_required = maintenance_required
self.firmware_update_status = firmware_update_status
self.latest_available_firmware = latest_available_firmware
self.build_plate = self.parseModel(CloudClusterBuildPlate, build_plate) if build_plate else None
super().__init__(**kwargs)
## Creates a new output model.
# \param controller - The controller of the model.
def createOutputModel(self, controller: PrinterOutputController) -> PrinterOutputModel:
model = PrinterOutputModel(controller, len(self.configuration), firmware_version = self.firmware_version)
self.updateOutputModel(model)
return model
## Updates the given output model.
# \param model - The output model to update.
def updateOutputModel(self, model: PrinterOutputModel) -> None:
model.updateKey(self.uuid)
model.updateName(self.friendly_name)
model.updateType(self.machine_variant)
model.updateState(self.status if self.enabled else "disabled")
model.updateBuildplate(self.build_plate.type if self.build_plate else "glass")
for configuration, extruder_output, extruder_config in \
zip(self.configuration, model.extruders, model.printerConfiguration.extruderConfigurations):
configuration.updateOutputModel(extruder_output)
configuration.updateConfigurationModel(extruder_config)
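    # Note: the zip() above pairs each cloud-side print core configuration
    # with the matching extruder output/configuration model, index by index.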
openstack/sahara-dashboard | sahara_dashboard/test/integration_tests/pages/project/data_processing/jobs/jobtemplatespage.py | Python | apache-2.0 | 5,101 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
from selenium.webdriver.common import by
from sahara_dashboard.test.integration_tests.pages import basepage
from sahara_dashboard.test.integration_tests.pages import mixins
class CreateMixin(object):
CREATE_FIELD_MAPPING = (
('job_name', 'job_type', 'main_binary', 'job_description'),
('lib_binaries',),
)
LAUNCH_ON_EXIST_CLUSTER_FIELD_MAPPING = (
('job_input', 'job_output', 'cluster'),
('adapt_spark_swift', 'datasource_substitute', 'streaming_mapper',
'streaming_reducer'),
(),
)
@tables.bind_table_action('create job')
def get_create_form(self, button):
button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
field_mappings=self.CREATE_FIELD_MAPPING)
@tables.bind_row_action('launch-job-existing')
def get_launch_on_exists_form(self, button, row):
button.click()
return forms.TabbedFormRegion(
self.driver, self.conf,
field_mappings=self.LAUNCH_ON_EXIST_CLUSTER_FIELD_MAPPING)
class JobtemplatesPage(mixins.DeleteMixin, basepage.BaseDataProcessingPage):
TABLE_NAME = "job_templates"
@classmethod
def get_table_mixins(cls):
return super(JobtemplatesPage, cls).get_table_mixins() + (CreateMixin,)
def create(self, name, job_type, binary_name, libs=()):
form = self.table.get_create_form()
form.job_name.text = name
form.job_type.text = job_type
if binary_name is not None:
form.main_binary.text = binary_name
form.switch_to(1)
for lib in libs:
form.lib_binaries.text = lib
form.src_elem.find_element_by_id("add_lib_button").click()
form.submit()
def launch_on_exists(self, job_name, input_name, output_name,
cluster_name, adapt_swift=True,
datasource_substitution=True, configuration=None,
parameters=None, arguments=(),
mapper=None, reducer=None):
configuration = configuration or {}
parameters = parameters or {}
row = self._get_row_with_name(job_name)
form = self.table.get_launch_on_exists_form(row)
if input_name is not None:
form.job_input.text = input_name
if output_name is not None:
form.job_output.text = output_name
form.cluster.text = cluster_name
form.switch_to(1)
if adapt_swift:
form.adapt_spark_swift.mark()
else:
form.adapt_spark_swift.unmark()
if datasource_substitution:
form.datasource_substitute.mark()
else:
form.datasource_substitute.unmark()
if mapper is not None:
form.streaming_mapper.text = mapper
if reducer is not None:
form.streaming_reducer.text = reducer
locator = (by.By.ID, 'configs')
if form._is_element_visible(*locator):
config_block = form.src_elem.find_element(*locator)
add_btn = config_block.find_element_by_link_text('Add')
for key, value in configuration.items():
add_btn.click()
                inputs = config_block.find_elements_by_css_selector(
'input[type=text]')[-2:]
                inputs[0].send_keys(key)
inputs[1].send_keys(value)
locator = (by.By.ID, 'params')
if form._is_element_visible(*locator):
params_block = form.src_elem.find_element(*locator)
add_btn = params_block.find_element_by_link_text('Add')
for key, value in parameters.items():
add_btn.click()
inputs = params_block.find_elements_by_css_selector(
'input[type=text]')[-2:]
inputs[0].send_keys(key)
inputs[1].send_keys(value)
locator = (by.By.ID, 'args_array')
if form._is_element_visible(*locator):
args_block = form.src_elem.find_element(*locator)
add_btn = args_block.find_element_by_link_text('Add')
for value in arguments:
add_btn.click()
input_el = args_block.find_elements_by_css_selector(
'input[type=text]')[-1]
input_el.send_keys(value)
form.submit()
microy/PyMeshToolkit | MeshToolkit/File/Obj.py | Python | mit | 1,285 | 0.070039 | # -*- coding:utf-8 -*-
#
# Import OBJ files
#
# External dependencies
import os
import numpy as np
import MeshToolkit as mtk
# Import a mesh from an OBJ / SMF file
def ReadObj( filename ) :
# Initialisation
vertices = []
faces = []
normals = []
colors = []
texcoords = []
material = ""
# Read each line in the file
for line in open( filename, "r" ) :
# Empty line / Comment
if line.isspace() or line.startswith( '#' ) : continue
# Split values in the line
values = line.split()
# Vertex
if values[0] == 'v' :
vertices.append( list( map( float, values[1:4] ) ) )
# Face (index starts at 1)
elif values[0] == 'f' :
            faces.append( list( map( int, [ (v.split('/'))[0] for v in values[1:4] ] ) ) )
# Normal
elif values[0] == 'vn' :
normals.append( list( map( float, values[1:4] ) ) )
# Color
elif values[0] == 'c' :
colors.append( list( map( float, values[1:4] ) ) )
# Texture
elif values[0] == 'vt' :
            texcoords.append( list( map( float, values[1:3] ) ) )
# Texture filename
elif values[0] == 'mtllib' :
material = values[1]
# Remap face indices
faces = np.array( faces ) - 1
# Return the final mesh
return mtk.Mesh( os.path.splitext(os.path.basename(filename))[0], vertices, faces, colors, material, texcoords, [], normals )
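# Minimal usage sketch (the file name is illustrative); note that the face
# indices in the returned mesh are rebased to start at 0:
#
#     mesh = ReadObj('model.obj')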
qsnake/numpy | numpy/testing/tests/test_utils.py | Python | bsd-3-clause | 15,034 | 0.00306 | import warnings
import sys
import numpy as np
from numpy.testing import *
import unittest
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
    def _test_not_equal(self, a, b):
        try:
            self._assert_func(a, b)
        except AssertionError:
            pass
        else:
            raise AssertionError("a and b are found equal but are not")
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
"""Test two different array of rank 1 are found not equal."""
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
"""Test two equal array of rank 2 are found equal."""
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
"""Test object arrays."""
a = np.array([1, 1], dtype=np.object)
self._test_equal(a, 1)
class TestArrayEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_array_equal
def test_generic_rank1(self):
"""Test rank 1 array for all dtypes."""
def foo(t):
a = np.empty(2, t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
a = np.empty((4, 2, 3), t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_nan_array(self):
"""Test arrays with nan values in them."""
a = np.array([1, 2, np.nan])
b = np.array([1, 2, np.nan])
self._test_equal(a, b)
c = np.array([1, 2, 3])
self._test_not_equal(c, b)
def test_string_arrays(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array(['floupi', 'floupa'])
b = np.array(['floupi', 'floupa'])
self._test_equal(a, b)
c = np.array(['floupipi', 'floupa'])
self._test_not_equal(c, b)
def test_recarrays(self):
"""Test record arrays."""
a = np.empty(2, [('floupi', np.float), ('floupa', np.float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
self._test_not_equal(c, b)
class TestEqual(TestArrayEqual):
def setUp(self):
self._assert_func = assert_equal
def test_nan_items(self):
self._assert_func(np.nan, np.nan)
self._assert_func([np.nan], [np.nan])
self._test_not_equal(np.nan, [np.nan])
self._test_not_equal(np.nan, 1)
def test_inf_items(self):
self._assert_func(np.inf, np.inf)
self._assert_func([np.inf], [np.inf])
self._test_not_equal(np.inf, [np.inf])
def test_non_numeric(self):
self._assert_func('ab', 'ab')
self._test_not_equal('ab', 'abb')
def test_complex_item(self):
self._assert_func(complex(1, 2), complex(1, 2))
self._assert_func(complex(1, np.nan), complex(1, np.nan))
self._test_not_equal(complex(1, np.nan), complex(1, 2))
self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
def test_negative_zero(self):
self._test_not_equal(np.PZERO, np.NZERO)
def test_complex(self):
x = np.array([complex(1, 2), complex(1, np.nan)])
y = np.array([complex(1, 2), complex(1, 2)])
self._assert_func(x, x)
self._test_not_equal(x, y)
class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_array_almost_equal
def test_simple(self):
x = np.array([1234.2222])
y = np.array([1234.2223])
self._assert_func(x, y, decimal=3)
self._assert_func(x, y, decimal=4)
self.assertRaises(AssertionError,
lambda: self._assert_func(x, y, decimal=5))
def test_nan(self):
anan = np.array([np.nan])
aone = np.array([1])
ainf = np.array([np.inf])
        self._assert_func(anan, anan)
self.assertRaises(AssertionError,
lambda : self._assert_func(anan, aone))
self.assertRaises(AssertionError,
lambda : self._assert_func(anan, ainf))
self.assertRaises(AssertionError,
lambda : self._assert_func(ainf, anan))
class TestAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
        self._assert_func = assert_almost_equal
def test_nan_item(self):
self._assert_func(np.nan, np.nan)
self.assertRaises(AssertionError,
lambda : self._assert_func(np.nan, 1))
self.assertRaises(AssertionError,
lambda : self._assert_func(np.nan, np.inf))
self.assertRaises(AssertionError,
lambda : self._assert_func(np.inf, np.nan))
def test_inf_item(self):
self._assert_func(np.inf, np.inf)
self._assert_func(-np.inf, -np.inf)
def test_simple_item(self):
self._test_not_equal(1, 2)
def test_complex_item(self):
self._assert_func(complex(1, 2), complex(1, 2))
self._assert_func(complex(1, np.nan), complex(1, np.nan))
self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
self._test_not_equal(complex(1, np.nan), complex(1, 2))
self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
def test_complex(self):
x = np.array([complex(1, 2), complex(1, np.nan)])
z = np.array([complex(1, 2), complex(np.nan, 1)])
y = np.array([complex(1, 2), complex(1, 2)])
self._assert_func(x, x)
self._test_not_equal(x, y)
self._test_not_equal(x, z)
class TestApproxEqual(unittest.TestCase):
def setUp(self):
self._assert_func = assert_approx_equal
def test_simple_arrays(self):
x = np.array([1234.22])
y = np.array([1234.23])
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
self.assertRaises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_simple_items(self):
x = 1234.22
y = 1234.23
self._assert_func(x, y, significant=4)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
self.assertRaises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_nan_array(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
self.assertRaises(AssertionError,
lambda : self._assert_func(anan, aone))
self.assertRaises(AssertionError,
lambda : self._assert_func(anan, ainf))
self.assertRaises(AssertionError,
                          lambda : self._assert_func(ainf, anan))
teeple/pns_server | work/install/Python-2.7.4/Tools/pybench/Arithmetic.py | Python | gpl-2.0 | 13,590 | 0.000294 | from pybench import Test
class SimpleIntegerArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
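    # Each of the 5 unrolled blocks below performs 3 assignments, 5 additions,
    # 5 subtractions, 3 divisions, 3 multiplications and 3 more divisions.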
rounds = 120000
def test(self):
for i in xrange(self.rounds):
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleFloatArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in xrange(self.rounds):
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleIntFloatArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in xrange(self.rounds):
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
            c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleLongArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3) |
pombredanne/geopy | geopy/point.py | Python | mit | 10,317 | 0.000194 | import re
from itertools import islice
from geopy import util, units, format
class Point(object):
"""
A geodetic point with latitude, longitude, and altitude.
Latitude and longitude are floating point values in degrees.
Altitude is a floating point value in kilometers. The reference level
is never considered and is thus application dependent, so be consistent!
The default for all values is 0.
Points can be created in a number of ways...
With longitude, latitude, and altitude:
>>> p1 = Point(41.5, -81, 0)
>>> p2 = Point(latitude=41.5, longitude=-81)
With a sequence of 0 to 3 values (longitude, latitude, altitude):
>>> p1 = Point([41.5, -81, 0])
>>> p2 = Point((41.5, -81))
Copy another `Point` instance:
>>> p2 = Point(p1)
>>> p2 == p1
True
>>> p2 is p1
False
Give an object with a 'point' attribute, such as a `Location` instance:
>>> p = Point(location)
Give a string containing at least latitude and longitude:
>>> p1 = Point('41.5,-81.0')
>>> p2 = Point('41.5 N -81.0 W')
>>> p3 = Point('-41.5 S, 81.0 E, 2.5km')
>>> p4 = Point('23 26m 22s N 23 27m 30s E 21.0mi')
>>> p5 = Point('''3 26' 22" N 23 27' 30" E''')
Point values can be accessed by name or by index:
>>> p = Point(41.5, -81.0, 0)
>>> p.latitude == p[0]
True
>>> p.longitude == p[1]
True
>>> p.altitude == p[2]
True
When unpacking (or iterating), only latitude and longitude are included:
>>> latitude, longitude = p
"""
UTIL_PATTERNS = dict(
FLOAT=r'\d+(?:\.\d+)?',
DEGREE=format.DEGREE,
PRIME=format.PRIME,
DOUBLE_PRIME=format.DOUBLE_PRIME,
SEP=r'\s*[,;\s]\s*'
)
POINT_PATTERN = re.compile(r"""
\s*
(?P<latitude>
(?P<latitude_degrees>-?%(FLOAT)s)(?:[%(DEGREE)s ][ ]*
(?:(?P<latitude_arcminutes>%(FLOAT)s)[%(PRIME)s'm][ ]*)?
(?:(?P<latitude_arcseconds>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)?
)?(?P<latitude_direction>[NS])?)
%(SEP)s
(?P<longitude>
(?P<longitude_degrees>-?%(FLOAT)s)(?:[%(DEGREE)s\s][ ]*
(?:(?P<longitude_arcminutes>%(FLOAT)s)[%(PRIME)s'm][ ]*)?
(?:(?P<longitude_arcseconds>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)?
)?(?P<longitude_direction>[EW])?)(?:
%(SEP)s
(?P<altitude>
(?P<altitude_distance>-?%(FLOAT)s)[ ]*
(?P<altitude_units>km|m|mi|ft|nm|nmi)))?
\s*$
""" % UTIL_PATTERNS, re.X)
def __new__(cls, latitude=None, longitude=None, altitude=None):
single_arg = longitude is None and altitude is None
if single_arg and not isinstance(latitude, util.NUMBER_TYPES):
arg = latitude
if arg is None:
pass
elif isinstance(arg, Point):
return cls.from_point(arg)
elif isinstance(arg, basestring):
return cls.from_string(arg)
else:
try:
seq = iter(arg)
except TypeError:
raise TypeError(
"Failed to create Point instance from %r." % (arg,)
)
else:
return cls.from_sequence(seq)
latitude = float(latitude or 0)
if abs(latitude) > 90:
raise ValueError("Latitude out of range [-90, 90]: %r" % latitude)
longitude = float(longitude or 0)
if abs(longitude) > 180:
raise ValueError("Longitude out of range [-180, 180]: %r" % longitude)
altitude = float(altitude or 0)
self = super(Point, cls).__new__(cls)
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
return self
def __getitem__(self, index):
return (self.latitude, self.longitude, self.altitude)[index]
def __setitem__(self, index, value):
point = [self.latitude, self.longitude, self.altitude]
point[index] = value
self.latitude, self.longitude, self.altitude = point
def __iter__(self):
return iter((self.latitude, self.longitude, self.altitude))
def __repr__(self):
return "Point(%r, %r, %r)" % (
self.latitude, self.longitude, self.altitude
)
def format(self, altitude=None, deg_char='', min_char='m', sec_char='s'):
latitude = "%s %s" % (
format.angle(abs(self.latitude), deg_char, min_char, sec_char),
self.latitude >= 0 and 'N' or 'S'
)
longitude = "%s %s" % (
format.angle(abs(self.longitude), deg_char, min_char, sec_char),
self.longitude >= 0 and 'E' or 'W'
)
coordinates = [latitude, longitude]
if altitude is None:
altitude = bool(self.altitude)
if altitude:
if not isinstance(altitude, basestring):
altitude = 'km'
coordinates.append(self.format_altitude(altitude))
return ", ".join(coordinates)
    def format_decimal(self, altitude=None):
latitude = "%s" % self.latitude
longitude = "%s" % self.longitude
coordinates = [latitude, longitude]
if altitude is None:
altitude = bool(self.altitude)
if altitude:
            if not isinstance(altitude, basestring):
altitude = 'km'
coordinates.append(self.format_altitude(altitude))
return ", ".join(coordinates)
def format_altitude(self, unit='km'):
return format.distance(self.altitude, unit)
def __str__(self):
return self.format()
def __unicode__(self):
return self.format(
None, format.DEGREE, format.PRIME, format.DOUBLE_PRIME
)
def __eq__(self, other):
return tuple(self) == tuple(other)
def __ne__(self, other):
return tuple(self) != tuple(other)
@classmethod
def parse_degrees(cls, degrees, arcminutes, arcseconds, direction=None):
negative = degrees < 0 or degrees.startswith('-')
degrees = float(degrees or 0)
arcminutes = float(arcminutes or 0)
arcseconds = float(arcseconds or 0)
if arcminutes or arcseconds:
more = units.degrees(arcminutes=arcminutes, arcseconds=arcseconds)
if negative:
degrees -= more
else:
degrees += more
if direction in [None, 'N', 'E']:
return degrees
elif direction in ['S', 'W']:
return -degrees
else:
raise ValueError("Invalid direction! Should be one of [NSEW].")
@classmethod
def parse_altitude(cls, distance, unit):
if distance is not None:
distance = float(distance)
CONVERTERS = {
'km': lambda d: d,
'm': lambda d: units.kilometers(meters=d),
'mi': lambda d: units.kilometers(miles=d),
'ft': lambda d: units.kilometers(feet=d),
'nm': lambda d: units.kilometers(nautical=d),
'nmi': lambda d: units.kilometers(nautical=d)
}
return CONVERTERS[unit](distance)
else:
return distance
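    # Example (assuming geopy's unit helpers): parse_altitude('2.5', 'mi')
    # converts 2.5 miles to roughly 4.02 kilometers, while
    # parse_altitude(None, 'km') simply returns None.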
@classmethod
def from_string(cls, string):
"""
Create and return a Point instance from a string containing latitude
and longitude, and optionally, altitude.
Latitude and longitude must be in degrees and may be in decimal form
or indicate arcminutes and arcseconds (labeled with Unicode prime and
double prime, ASCII quote and double quote or 'm' and 's'). The degree
symbol is optional and may be included after the decimal places (in
decimal form) and before the arcminutes and arcseconds otherwise.
Coordinates given from south and west (indicated by S and W suffixes)
will be converted to north and east by switching their signs. If no
(or partial) cardinal directions are given, north and east are the
        assumed directions. Latitude and longitude must be separated by a
        comma, a semicolon, or whitespace.
e-lin/LeetCode | 401-binary-watch/401-binary-watch.py | Python | apache-2.0 | 563 | 0.001776 | # solution reference:
# http://bookshadow.com/weblog/2016/09/18/leetcode-binary-watch/
class Solution(object):
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
ans = []
for h in xrange(12):
for m in xrange(60):
if bin(h).count('1') + bin(m).count('1') == num:
ans.append("%d:%02d" % (h, m))
return ans
def main():
n = 5
solution = Solution()
    print solution.readBinaryWatch(n)
if __name__ == '__main__':
main() |
ketancmaheshwari/hello-goog | src/python/trie.py | Python | apache-2.0 | 179 | 0 | #!/bin/env python
# mainly for sys.argv[], sys.argv[0] is the name of the program
import sys
# mainly for arrays
import numpy as np
if __name__ == '__main__':
print 'hello'
|
openstack/openstackdocstheme | openstackdocstheme/ext.py | Python | apache-2.0 | 18,703 | 0 | # Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import os
import subprocess
import textwrap
import dulwich.repo
from pbr import packaging
import sphinx
from sphinx.ext import extlinks
from sphinx.util import logging
from . import version
from openstackdocstheme import paths
_series = None
_project = None
_giturl = 'https://opendev.org/{}/src/{}'
_html_context_data = None
logger = logging.getLogger(__name__)
def _has_stable_branches():
try:
repo = dulwich.repo.Repo.discover()
except dulwich.repo.NotGitRepository:
return False
refs = repo.get_refs()
for ref in refs.keys():
ref = ref.decode('utf-8')
if ref.startswith('refs/remotes/origin/stable'):
return True
return False
def _get_other_versions(app):
if not app.config.html_theme_options.get('show_other_versions', False):
return []
all_series = []
try:
repo = dulwich.repo.Repo.discover()
except dulwich.repo.NotGitRepository:
return []
refs = repo.get_refs()
for ref in refs.keys():
ref = ref.decode('utf-8')
if ref.startswith('refs/remotes/origin/stable'):
series = ref.rpartition('/')[-1]
all_series.append(series)
elif ref.startswith('refs/tags/') and ref.endswith('-eol'):
series = ref.rpartition('/')[-1][:-4]
all_series.append(series)
all_series.sort()
# NOTE(dhellmann): Given when this feature was implemented, we
# assume that the earliest version we can link to is for
# mitaka. Projects that have older docs online can set the option
# to indicate another start point. Projects that come later should
# automatically include everything they actually have available
# because the start point is not present in the list.
earliest_desired = app.config.html_theme_options.get(
'earliest_published_series', 'mitaka')
if earliest_desired and earliest_desired in all_series:
interesting_series = all_series[all_series.index(earliest_desired):]
else:
interesting_series = all_series
# Reverse the list because we want the most recent to appear at
# the top of the dropdown. Add the "latest" release to the
# front of the list.
interesting_series.append("latest")
interesting_series.reverse()
return interesting_series
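# Example (assumed refs): with remote branches stable/pike..stable/rocky and
# earliest_published_series left at its 'mitaka' default, the returned list is
# newest-first with 'latest' prepended: ['latest', 'rocky', 'queens', 'pike'].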
def _get_doc_path(app):
# Handle 'doc/{docType}/source' paths
doc_parts = os.path.abspath(app.srcdir).split(os.sep)[-3:]
if doc_parts[0] == 'doc' and doc_parts[2] == 'source':
return '/'.join(doc_parts)
# Handle '{docType}/source' paths
doc_parts = os.path.abspath(app.srcdir).split(os.sep)[-2:]
if doc_parts[1] == 'source':
return '/'.join(doc_parts)
logger.info(
"[openstackdocstheme] cannot identify project's root directory."
)
return
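# Example (a sketch): a srcdir of /repo/doc/admin/source yields
# 'doc/admin/source', while /repo/releasenotes/source falls through to the
# two-part case and yields 'releasenotes/source'.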
def _html_page_context(app, pagename, templatename, context, doctree):
global _html_context_data
if _html_context_data is None:
logger.debug('[openstackdocstheme] building html context')
if app.config.repository_name is not None:
logger.info(
"[openstackdocstheme] "
"the 'repository_name' config option has been deprecated and "
"replaced by the 'openstackdocs_repo_name' option; support "
"for the former will be dropped in a future release")
app.config.openstackdocs_repo_name = app.config.repository_name
if app.config.use_storyboard is not None:
logger.info(
"[openstackdocstheme] "
"the 'use_storyboard' config option has been deprecated and "
"replaced by the 'openstackdocs_use_storyboard' option; "
"support for the former will be dropped in a future release")
            app.config.openstackdocs_use_storyboard = app.config.use_storyboard
if app.config.bug_project is not None:
logger.info(
"[openstackdocsth | eme] "
"the 'bug_project' config option has been deprecated and "
"replaced by the 'openstackdocs_bug_project' option; support "
"for the former will be dropped in a future release")
app.config.openstackdocs_bug_project = app.config.bug_project
if app.config.bug_tag is not None:
logger.info(
"[openstackdocstheme] "
"the 'bug_tag' config option has been deprecated and "
"replaced by the 'openstackdocs_bug_tag' option; support "
"for the former will be dropped in a future release")
            app.config.openstackdocs_bug_tag = app.config.bug_tag
_html_context_data = {}
try:
_html_context_data['gitsha'] = subprocess.check_output(
['git', 'rev-parse', 'HEAD'],
).decode('utf-8').strip()
except Exception:
logger.warning(
'[openstackdocstheme] cannot get gitsha from git repository'
)
_html_context_data['gitsha'] = 'unknown'
doc_path = _get_doc_path(app)
repo_name = app.config.openstackdocs_repo_name
_html_context_data['repository_name'] = repo_name
logger.debug('[openstackdocstheme] repository_name %r', repo_name)
if repo_name and doc_path:
_html_context_data['giturl'] = _giturl.format(repo_name, doc_path)
logger.debug(
'[openstackdocstheme] giturl %r', _html_context_data['giturl'],
)
use_storyboard = app.config.openstackdocs_use_storyboard
_html_context_data['use_storyboard'] = use_storyboard
bug_project = app.config.openstackdocs_bug_project
if bug_project:
logger.debug(
'[openstackdocstheme] bug_project (from user) %r', bug_project,
)
elif use_storyboard:
bug_project = repo_name
logger.debug(
'[openstackdocstheme] bug_project (use_storyboard set) %r',
bug_project,
)
if bug_project:
_html_context_data['bug_project'] = bug_project
# Previously storyboard showed numbers that were used, keep
# for old conf.py files:
if bug_project and bug_project.isdigit():
logger.debug(
'[openstackdocstheme] bug_project looks like a number, '
'setting use_storyboard'
)
_html_context_data['use_storyboard'] = True
bug_tag = app.config.openstackdocs_bug_tag
if bug_tag:
_html_context_data['bug_tag'] = bug_tag
logger.debug('[openstackdocstheme] bug_tag %r', bug_tag)
_html_context_data['pdf_link'] = app.config.openstackdocs_pdf_link
logger.debug(
'[openstackdocstheme] pdf_link %r', _html_context_data['pdf_link'],
)
if app.config.openstackdocs_pdf_filename:
_html_context_data['pdf_filename'] = (
app.config.openstackdocs_pdf_filename)
else:
short_repo_name = repo_name.split('/')[-1]
_html_context_data['pdf_filename'] = f'doc-{short_repo_name}.pdf'
if _html_context_data['pdf_link']:
logger.debug(
'[openstackdocstheme] pdf_filename %r',
_html_context_data['pdf_filename'],
)
_html_context_data['series'] = _get_series_name()
logger.debug(
            '[openstackdocstheme] series %r', _html_context_data['series'])
uniphier/buildroot-unph | utils/checkpackagelib/lib_mk.py | Python | gpl-2.0 | 11,938 | 0.005193 | # See utils/checkpackagelib/readme.txt before editing this file.
# There are already dependency checks during the build, so below check
# functions don't need to check for things already checked by exploring the
# menu options using "make menuconfig" and by running "make" with appropriate
# packages enabled.
import re
from checkpackagelib.base import _CheckFunction
from checkpackagelib.lib import ConsecutiveEmptyLines # noqa: F401
from checkpackagelib.lib import EmptyLastLine # noqa: F401
from checkpackagelib.lib import NewlineAtEof # noqa: F401
from checkpackagelib.lib import TrailingSpace # noqa: F401
# used in more than one check
start_conditional = ["ifdef", "ifeq", "ifndef", "ifneq"]
end_conditional = ["endif"]
class Indent(_CheckFunction):
COMMENT = re.compile("^\s*#")
CONDITIONAL = re.compile("^\s*({})\s".format("|".join(start_conditional + end_conditional)))
ENDS_WITH_BACKSLASH = re.compile(r"^[^#].*\\$")
END_DEFINE = re.compile("^\s*endef\s")
MAKEFILE_TARGET = re.compile("^[^# \t]+:\s")
START_DEFINE = re.compile("^\s*define\s")
def before(self):
self.define = False
self.backslash = False
self.makefile_target = False
def check_line(self, lineno, text):
if self.START_DEFINE.search(text):
self.define = True
return
if self.END_DEFINE.search(text):
self.define = False
return
expect_tabs = False
if self.define or self.backslash or self.makefile_target:
expect_tabs = True
if self.CONDITIONAL.search(text):
expect_tabs = False
# calculate for next line
if self.ENDS_WITH_BACKSLASH.search(text):
self.backslash = True
else:
self.backslash = False
if self.MAKEFILE_TARGET.search(text):
self.makefile_target = True
return
if text.strip() == "":
self.makefile_target = False
return
# comment can be indented or not inside define ... endef, so ignore it
if self.define and self.COMMENT.search(text):
return
if expect_tabs:
if not text.startswith("\t"):
return ["{}:{}: expected indent with tabs"
.format(self.filename, lineno),
text]
else:
if text.startswith("\t"):
return ["{}:{}: unexpected indent with tabs"
.format(self.filename, lineno),
text]
class OverriddenVariable(_CheckFunction):
CONCATENATING = re.compile("^([A-Z0-9_]+)\s*(\+|:|)=\s*\$\(\\1\)")
END_CONDITIONAL = re.compile("^\s*({})".format("|".join(end_conditional)))
OVERRIDING_ASSIGNMENTS = [':=', "="]
START_CONDITIONAL = re.compile("^\s*({})".format("|".join(start_conditional)))
VARIABLE = re.compile("^([A-Z0-9_]+)\s*((\+|:|)=)")
USUALLY_OVERRIDDEN = re.compile("^[A-Z0-9_]+({})".format("|".join([
"_ARCH\s*=\s*",
"_CPU\s*=\s*",
"_SITE\s*=\s*",
"_SOURCE\s*=\s*",
"_VERSION\s*=\s*"])))
def before(self):
self.conditional = 0
self.unconditionally_set = []
self.conditionally_set = []
def check_line(self, lineno, text):
if self.START_CONDITIONAL.search(text):
            self.conditional += 1
return
if self.END_CONDITIONAL.search(text):
self.conditional -= 1
            return
m = self.VARIABLE.search(text)
if m is None:
return
variable, assignment = m.group(1, 2)
if self.conditional == 0:
if variable in self.conditionally_set:
self.unconditionally_set.append(variable)
if assignment in self.OVERRIDING_ASSIGNMENTS:
return ["{}:{}: unconditional override of variable {} previously conditionally set"
.format(self.filename, lineno, variable),
text]
if variable not in self.unconditionally_set:
self.unconditionally_set.append(variable)
return
if assignment in self.OVERRIDING_ASSIGNMENTS:
return ["{}:{}: unconditional override of variable {}"
.format(self.filename, lineno, variable),
text]
else:
if variable not in self.unconditionally_set:
self.conditionally_set.append(variable)
return
if self.CONCATENATING.search(text):
return
if self.USUALLY_OVERRIDDEN.search(text):
return
if assignment in self.OVERRIDING_ASSIGNMENTS:
return ["{}:{}: conditional override of variable {}"
.format(self.filename, lineno, variable),
text]
class PackageHeader(_CheckFunction):
def before(self):
self.skip = False
def check_line(self, lineno, text):
if self.skip or lineno > 6:
return
if lineno in [1, 5]:
if lineno == 1 and text.startswith("include "):
self.skip = True
return
if text.rstrip() != "#" * 80:
return ["{}:{}: should be 80 hashes ({}#writing-rules-mk)"
.format(self.filename, lineno, self.url_to_manual),
text,
"#" * 80]
elif lineno in [2, 4]:
if text.rstrip() != "#":
return ["{}:{}: should be 1 hash ({}#writing-rules-mk)"
.format(self.filename, lineno, self.url_to_manual),
text]
elif lineno == 6:
if text.rstrip() != "":
return ["{}:{}: should be a blank line ({}#writing-rules-mk)"
.format(self.filename, lineno, self.url_to_manual),
text]
class RemoveDefaultPackageSourceVariable(_CheckFunction):
packages_that_may_contain_default_source = ["binutils", "gcc", "gdb"]
PACKAGE_NAME = re.compile("/([^/]+)\.mk")
def before(self):
package = self.PACKAGE_NAME.search(self.filename).group(1)
package_upper = package.replace("-", "_").upper()
self.package = package
self.FIND_SOURCE = re.compile(
"^{}_SOURCE\s*=\s*{}-\$\({}_VERSION\)\.tar\.gz"
.format(package_upper, package, package_upper))
def check_line(self, lineno, text):
if self.FIND_SOURCE.search(text):
if self.package in self.packages_that_may_contain_default_source:
return
return ["{}:{}: remove default value of _SOURCE variable "
"({}#generic-package-reference)"
.format(self.filename, lineno, self.url_to_manual),
text]
class SpaceBeforeBackslash(_CheckFunction):
TAB_OR_MULTIPLE_SPACES_BEFORE_BACKSLASH = re.compile(r"^.*( |\t ?)\\$")
def check_line(self, lineno, text):
if self.TAB_OR_MULTIPLE_SPACES_BEFORE_BACKSLASH.match(text.rstrip()):
return ["{}:{}: use only one space before backslash"
.format(self.filename, lineno),
text]
class TrailingBackslash(_CheckFunction):
ENDS_WITH_BACKSLASH = re.compile(r"^[^#].*\\$")
def before(self):
self.backslash = False
def check_line(self, lineno, text):
last_line_ends_in_backslash = self.backslash
# calculate for next line
if self.ENDS_WITH_BACKSLASH.search(text):
self.backslash = True
self.lastline = text
return
self.backslash = False
if last_line_ends_in_backslash and text.strip() == "":
return ["{}:{}: remove trailing backslash"
.format(self.filename, lineno - 1),
self.lastline]
class TypoInPackageVariable(_CheckFunction):
ALLOWED = re.compile("|".join([
"ACLOCAL_DIR",
"ACLOCAL_HOST_DIR",
"BR_CCACHE_INITIAL_SETUP",
"BR_LIBC", |
WojciechMula/toys | autovectorization-tests/scripts/compile_all.py | Python | bsd-2-clause | 1,263 | 0.003959 | #!/usr/bin/env python3
import sys
from pathlib import Path
from procedures import PROCEDURES
std_options = {
'avx2': '-O3 -mavx2 -S %(cpp_file)s -o %(asm_file)s',
'avx512': '-O3 -mavx512f -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512vl '
'-S %(cpp_file)s -o %(asm_file)s',
}
compiler_options = {
'gcc': std_options,
'clang': std_options,
}
def main():
    if len(sys.argv) < 3:
        print("Usage: script compiler-command target")
        print()
        print("compiler-command tells how to invoke the compiler (like 'gcc' or 'clang')")
print("target is avx2 or avx512")
return 1
arg_compiler = sys.argv[1]
arg_target = sys.argv[2]
options = None
for compiler in compiler_options:
if compiler in arg_compiler:
options = compiler_options[compiler][arg_target]
if options is None:
        print(f"{arg_compiler} not recognized")
return 1
for cpp_file in PROCEDURES:
asm_file = Path(cpp_file).stem + '_' + arg_target + '.s'
opts = options % {'cpp_file': cpp_file,
'asm_file': asm_file}
print(f"{arg_compiler} {opts}")
return 0
if __name__ == '__main__':
sys.exit(main())
|
timorieber/wagtail | wagtail/admin/tests/api/test_images.py | Python | bsd-3-clause | 8,040 | 0.002861 | import json
from django.urls import reverse
from wagtail.api.v2.tests.test_images import TestImageDetail, TestImageListing
from wagtail.images import get_image_model
from wagtail.images.tests.utils import get_test_image_file
from .utils import AdminAPITestCase
class TestAdminImageListing(AdminAPITestCase, TestImageListing):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailadmin_api:images:listing'), params)
def get_image_id_list(self, content):
return [image['id'] for image in content['items']]
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_image_model().objects.count())
# Check that the items section is there
self.assertIn('items', content)
self.assertIsInstance(content['items'], list)
# Check that each image has a meta section with type, detail_url and tags attributes
for image in content['items']:
self.assertIn('meta', image)
self.assertIsInstance(image['meta'], dict)
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'tags', 'download_url'})
# Type should always be wagtailimages.Image
self.assertEqual(image['meta']['type'], 'wagtailimages.Image')
# Check detail url
self.assertEqual(image['meta']['detail_url'], 'http://localhost/admin/api/main/images/%d/' % image['id'])
# FIELDS
    def test_fields_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
            self.assertEqual(set(image.keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'download_url', 'tags'})
def test_fields(self):
response = self.get_response(fields='width,height')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'download_url', 'tags'})
def test_remove_fields(self):
response = self.get_response(fields='-title')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'width', 'height', 'thumbnail'})
def test_remove_meta_fields(self):
response = self.get_response(fields='-tags')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'download_url'})
def test_remove_all_meta_fields(self):
response = self.get_response(fields='-type,-detail_url,-tags')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'title', 'width', 'height', 'thumbnail', 'meta'})
def test_remove_id_field(self):
response = self.get_response(fields='-id')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'meta', 'title', 'width', 'height', 'thumbnail'})
def test_all_fields(self):
response = self.get_response(fields='*')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'tags', 'download_url'})
def test_all_fields_then_remove_something(self):
response = self.get_response(fields='*,-title,-tags')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'width', 'height', 'thumbnail'})
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'download_url'})
def test_fields_tags(self):
response = self.get_response(fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for image in content['items']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url', 'tags', 'download_url'})
self.assertIsInstance(image['meta']['tags'], list)
class TestAdminImageDetail(AdminAPITestCase, TestImageDetail):
fixtures = ['demosite.json']
def get_response(self, image_id, **params):
return self.client.get(reverse('wagtailadmin_api:images:detail', args=(image_id, )), params)
def test_basic(self):
response = self.get_response(5)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 5)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'wagtailimages.Image')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/admin/api/main/images/5/')
# Check the thumbnail
# Note: This is None because the source image doesn't exist
# See test_thumbnail below for working example
self.assertIn('thumbnail', content)
self.assertEqual(content['thumbnail'], {'error': 'SourceImageIOError'})
# Check the title field
self.assertIn('title', content)
self.assertEqual(content['title'], "James Joyce")
# Check the width and height fields
self.assertIn('width', content)
self.assertIn('height', content)
self.assertEqual(content['width'], 500)
self.assertEqual(content['height'], 392)
# Check the tags field
self.assertIn('tags', content['meta'])
self.assertEqual(content['meta']['tags'], [])
def test_thumbnail(self):
# Add a new image with source file
image = get_image_model().objects.create(
title="Test image",
file=get_test_image_file(),
)
response = self.get_response(image.id)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('thumbnail', content)
self.assertEqual(content['thumbnail']['width'], 165)
self.assertEqual(content['thumbnail']['height'], 123)
self.assertTrue(content['thumbnail']['url'].startswith('/media/images/test'))
# Check that source_image_error didn't appear
self.assertNotIn('source_image_error', content['meta'])
|
chevah/brink | brink/pavement_commons.py | Python | bsd-3-clause | 36882 | 0.000054 | # Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""
Shared pavement methods used in Chevah project.
This file is copied into the root of each repo as pavement_lib.py
Brace yourself for watching how wheels are reinvented.
Do not modify this file inside the branch.
A `product` is a repository delivered to customers or a library.
A `project` is a collection of products.
These scripts assume that you have a dedicated folder for the project and,
inside the project folder, one folder for each product.
"""
from __future__ import (
absolute_import,
print_function,
with_statement,
unicode_literals,
)
from six.moves.configparser import RawConfigParser
import getpass
import os
import re
import sys
import subprocess
import time
from base64 import b64encode
from datetime import datetime
from io import BytesIO
from zipfile import ZipFile
from paver.easy import call_task, cmdopts, task, pushd, needs
from paver.tasks import BuildFailure, environment, help, consume_args
from brink.configuration import SETUP, DIST_EXTENSION, DIST_TYPE
from brink.utils import BrinkPaver
from brink.qm import (
github,
merge_init,
merge_commit,
pqm,
rqm,
publish,
)
pave = BrinkPaver(setup=SETUP)
class ChecksumFile(object):
"""
A file storing sha256 checksums for files.
"""
def __init__(self, segments):
"""
Initialize by creating an empty file.
"""
self._segments = segments
pave.fs.createEmptyFile(target=self._segments)
def addFile(self, file_path):
"""
Add file to file listed in checksum file.
"""
content = pave.createSHA256Sum([file_path]) + ' ' + file_path + '\n'
pave.fs.appendContentToFile(
destination=self._segments, content=content)
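# Usage sketch (hypothetical path segments):
#   sums = ChecksumFile(['dist', 'SHA256SUMS'])
#   sums.addFile('dist/product-1.0.tar.gz')
# which appends one '<sha256> <path>' line per file to the checksum file.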
@task
@cmdopts([
('all', 'a', 'Run linter for all changed files.'),
('branch=', 'b', 'Name of the branch for which test is executed.'),
])
def lint(options):
"""
Run static code checks for files that were changed.
"""
from scame.__main__ import check_sources
all = pave.getOption(options, 'lint', 'all', default_value=False)
branch_name = pave.getOption(
options, 'lint', 'branch', default_value=None)
options = SETUP['scame']
# If branch name was not specified from command line, try to get it from
# environment or from the current branch.
if not branch_name:
branch_name = os.environ.get('BRANCH', None)
if not branch_name:
branch_name = pave.git.branch_name
if not all:
options.diff_branch = 'master'
    # Strings are broken up so they do not match these rules themselves.
ticket = branch_name.split('-', 1)[0]
options.regex_line = [
('FIX' + 'ME:%s:' % (ticket), 'FIX' + 'ME for current branch.'),
('(?i)FIX' + 'ME$', 'FIXME:123: is the required format.'),
('(?i)FIX' + 'ME:$', 'FIXME:123: is the required format.'),
('FIX' + 'ME[^:]', 'FIXME:123: is the required format.'),
('(?i)FIX' + 'ME:[^0-9]', 'FIXME:123: is the required format.'),
(
'(?i)FIX' + 'ME:[0-9]+[^:]$',
'FIXME:123: is the required format.'
),
('(?i)TO' + 'DO ', 'No TO' + 'DO markers are allowed.'),
('(?i)TO' + 'DO$', 'No TO' + 'DO markers are allowed.'),
('\[#' + '%s\] ' % (ticket), 'Branch should fix this issue.'),
]
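    # Example (a sketch): on branch '1234-fix-login' the ticket is '1234'; a
    # marker for that ticket (the 'FIX'+'ME'+':1234:' form) is flagged as
    # belonging to the current branch, and malformed markers are flagged for
    # not following the ':123:' ticket format.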
pocket_lint_result = check_sources(options)
if pocket_lint_result > 0:
raise BuildFailure('Lint failed.')
towncrier_options = options.towncrier
if not towncrier_options['enabled']:
return 0
release_notes = towncrier_options['fragments_directory']
is_release_series_branch = (
branch_name in ['master', 'trunk'] or
branch_name.startswith('series-')
)
if release_notes and not is_release_series_branch:
# This repo has managed release notes.
members = pave.fs.listFolder(release_notes)
if '-release-' in branch_name:
# Check that release notes have all fragments published.
ignored_files = towncrier_options['excluded_fragments']
fragments = [m for m in members if m.lower() not in ignored_files]
if fragments:
raise BuildFailure(
                    u'Branch name hints it is a release branch. '
u'It has unpublished release notes. %s' % (fragments,))
else:
# Check that it has release notes fragment.
ticket_id = branch_name.split('-', 1)[0]
ticket_mark = '%s.' % (ticket_id,)
has_fragment = False
for member in members:
if member.startswith(ticket_mark):
has_fragment = True
if not has_fragment:
raise BuildFailure(
u'No release notes fragment for %s' % (ticket_id,))
return 0
@task
def default():
'''
Default task. Shows this help.
'''
environment.args = []
help()
@task
def deps():
"""Copy external dependencies.
Each project should define custom deps_runtime and deps_builtime
targets.
"""
pave.installBuildDependencies()
@task
@needs('build')
@consume_args
def test_normal(args):
"""
Run the test suite as regular user.
"""
exit_code = run_test(
python_command=pave.python_command_normal,
switch_user='-',
arguments=args,
)
if exit_code != 0:
sys.exit(exit_code)
return exit_code
@task
@needs('build')
@consume_args
def test_super(args):
"""
Run the test suite as root user.
"""
exit_code = run_test(
python_command=pave.python_command_super,
switch_user=getpass.getuser(),
arguments=args,
)
if exit_code != 0:
sys.exit(exit_code)
return exit_code
@needs('build')
@consume_args
def test_python(args):
"""
Execute Python tests.
"""
normal_result = 0
super_result = 0
default_arguments = SETUP['test']['nose_options']
call_arguments = []
if len(args) == 1:
        # On buildbot the arguments are passed as a single space-separated
        # string.
args = args[0].split(' ')
# Filter empty arguments that might be generated by buildbot.
args = [arg for arg in args if arg]
empty_args = False
if not len(args):
empty_args = True
call_arguments = default_arguments[:]
call_arguments.append('-s')
call_arguments.extend(args)
if empty_args:
call_arguments.append('--exclude=(elevated|selenium)')
environment.args = call_arguments
normal_result = test_normal(call_arguments)
call_arguments.pop()
if SETUP['test']['elevated']:
environment.args = [SETUP['test']['elevated']]
environment.args.extend(call_arguments)
super_result = test_super(call_arguments)
else:
# We have specific tests or arguments which were requested.
selected_elevated_tests = []
selected_normal_tests = []
other_arguments = []
for arg in args:
if arg.strip().startswith('-'):
other_arguments.append(arg)
elif arg.startswith(SETUP['test']['elevated']):
selected_elevated_tests.append(arg)
else:
selected_normal_tests.append(arg)
run_normal = False
if not selected_elevated_tests and not selected_normal_tests:
run_normal = True
elif selected_normal_tests:
# We have specific normal test requested, so we should exclude
# any elevated tests.
run_normal = True
call_arguments = selected_normal_tests + other_arguments
if run_normal:
environment.args = call_arguments
normal_result = test_normal(call_arguments)
run_elevated = False
if SETUP['test']['elevated']:
if not selected_elevated_tests and not selected_normal_tests:
# No specific test selected.
# Run all elevated tests with the requested options.
run_elevated = True
call_argume |
jonparrott/google-cloud-python | logging/tests/unit/gapic/v2/test_config_service_v2_client_v2.py | Python | apache-2.0 | 18,769 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import logging_v2
from google.cloud.logging_v2.proto import logging_config_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
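            # Responses are popped from the end of the list, so seed the stub
            # in reverse order when a test expects multiple calls.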
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestConfigServiceV2Client(object):
def test_list_sinks(self):
# Setup Expected Response
next_page_token = ''
sinks_element = {}
sinks = [sinks_element]
expected_response = {
'next_page_token': next_page_token,
'sinks': sinks
}
expected_response = logging_config_pb2.ListSinksResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup Request
parent = client.project_path('[PROJECT]')
paged_list_response = client.list_sinks(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.sinks[0] == resources[0]
assert len(channel.requests) == 1
expected_request = logging_config_pb2.ListSinksRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_sinks_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup request
parent = client.project_path('[PROJECT]')
paged_list_response = client.list_sinks(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_sink(self):
# Setup Expected Response
name = 'name3373707'
destination = 'destination-1429847026'
filter_ = 'filter-1274492040'
writer_identity = 'writerIdentity775638794'
include_children = True
expected_response = {
'name': name,
'destination': destination,
'filter': filter_,
'writer_identity': writer_identity,
'include_children': include_children
}
expected_response = logging_config_pb2.LogSink(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup Request
sink_name = client.sink_path('[PROJECT]', '[SINK]')
response = client.get_sink(sink_name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = logging_config_pb2.GetSinkRequest(
sink_name=sink_name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_sink_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup request
sink_name = client.sink_path('[PROJECT]', '[SINK]')
with pytest.raises(CustomException):
client.get_sink(sink_name)
def test_create_sink(self):
# Setup Expected Response
name = 'name3373707'
destination = 'destination-1429847026'
filter_ = 'filter-1274492040'
writer_identity = 'writerIdentity775638794'
include_children = True
expected_response = {
'name': name,
'destination': destination,
'filter': filter_,
'writer_identity': writer_identity,
'include_children': include_children
}
expected_response = logging_config_pb2.LogSink(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup Request
parent = client.project_path('[PROJECT]')
sink = {}
response = client.create_sink(parent, sink)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = logging_config_pb2.CreateSinkRequest(
parent=parent, sink=sink)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_sink_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup request
parent = client.project_path('[PROJECT]')
sink = {}
with pytest.raises(CustomException):
client.create_sink(parent, sink)
def test_update_sink(self):
# Setup Expected Response
name = 'name3373707'
destination = 'destination-1429847026'
filter_ = 'filter-1274492040'
writer_identity = 'writerIdentity775638794'
include_children = True
expected_response = {
'name': name,
'destination': destination,
'filter': filter_,
'writer_identity': writer_identity,
'include_children': include_children
}
expected_response = logging_config_pb2.LogSink(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = logging_v2.ConfigServiceV2Client()
# Setup |
1905410/Misago | misago/users/tests/test_lists_views.py | Python | gpl-2.0 | 2,244 | 0.000446 | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.utils.six.moves import range
from misago.acl.testutils import override_acl
from ..models import Rank
from ..testutils import AuthenticatedUserTestCase
class UsersListTestCase(AuthenticatedUserTestCase):
def setUp(self):
super(UsersListTestCase, self).setUp()
override_acl(self.user, {
'can_browse_users_list': 1,
})
class UsersListLanderTests(UsersListTestCase):
def test_lander_no_permission(self):
"""lander returns 403 if user has no | permission"""
override_acl(self.user, {
'can_browse_users_list': 0,
})
response = self.client.get(reverse('misago:users'))
self.assertEqual(response.status_code, 403)
def test_lander_redirect(self):
"""lander returns redirect to valid page if user has permission"""
response = self.client.get(reverse('misago:users'))
self.assertEqual(response.status_code, 302)
self.assertTrue(response['location'].endswith(
reverse('misago:users-active-posters')))
class ActivePostersTests(UsersListTestCase):
def test_active_posters_list(self):
"""active posters page has no showstoppers"""
view_link = reverse('misago:users-active-posters')
response = self.client.get(view_link)
self.assertEqual(response.status_code, 200)
# Create 200 test users and see if errors appeared
User = get_user_model()
for i in range(200):
User.objects.create_user(
'Bob%s' % i, 'm%s@te.com' % i, 'Pass.123', posts=12345)
response = self.client.get(view_link)
self.assertEqual(response.status_code, 200)
class UsersRankTests(UsersListTestCase):
def test_ranks(self):
"""ranks lists are handled correctly"""
for rank in Rank.objects.iterator():
rank_link = reverse('misago:users-rank', kwargs={'slug': rank.slug})
response = self.client.get(rank_link)
if rank.is_tab:
self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 404)
|
grandcat/robotics_g7 | src/object_recognition/msg/__init__.py | Python | gpl-2.0 | 35 | 0 | from ._Recognized_objects import *
|
dials/dials | algorithms/scaling/scaling_options.py | Python | bsd-3-clause | 8,202 | 0.001585 | """
Phil scope of options for scaling.
"""
from __future__ import annotations
import iotbx.phil
phil_scope = iotbx.phil.parse(
"""
anomalous = False
.type = bool
.help = "Separate anomalous pairs in scaling and error model optimisation."
.expert_level=0
overwrite_existing_models = False
.type = bool
.help = "If True, create new scaling models for all datasets"
.expert_level = 0
reflection_selection {
method = *quasi_random intensity_ranges use_all random
.type = choice
.help = "Method to use when choosing a reflection subset for scaling model"
"minimisation."
"The quasi_random option randomly selects reflections groups"
"within a dataset, and also selects groups which have good"
"connectedness across datasets for multi-dataset cases. The random"
"option selects reflection groups randomly for both single"
"and multi dataset scaling, so for a single dataset"
"quasi_random == random."
"The intensity_ranges option uses the E2_range, Isigma_range and"
"d_range options to the subset of reflections"
"The use_all option uses all suitable reflections, which may be"
"slow for large datasets."
random {
multi_dataset {
Isigma_cutoff = 1.0
.type = float
.help = "Minimum average I/sigma of reflection groups to use when"
"selecting random reflections for minimisation."
}
min_groups = 2000
.type = int
.help = "The minimum number of symmetry groups to use during"
"minimisation."
.expert_level=1
min_reflections = 50000
.type = int
.help = "The minimum number of reflections to use during minimisation."
.expert_level=1
}
best_unit_cell = None
.type = unit_cell
.help = "Best unit cell value, to use when performing resolution cutting"
"and merging statistics. If None, the median cell will be used."
E2_range = 0.8, 5.0
.type = floats(size=2)
.help = "Minimum and maximum normalised E^2 value to used to select a"
"subset of reflections for minimisation."
.expert_level = 1
Isigma_range = -5.0, 0.0
.type = floats(size=2)
.help = "Minimum and maximum I/sigma values used to select a subset of"
"reflections for minimisation. A value of 0.0 for the maximum"
"indicates that no upper limit should be applied."
.expert_level = 1
d_range = None
.type = floats(size=2)
.help = "Minimum and maximum d-values used to select a subset of"
"reflections for minimisation."
.expert_level = 1
min_partiality = 0.95
.type = float
.help = "Minimum partiality to use when selecting | reflections to use"
"to determine the scaling model and error model."
.expert_level = 2
intensity_choice = profile sum *combine
.alias = intensity
.type = choice
.help = "Option to choose from profile fitted or summation intensities, or
an optimised combination of profile/sum."
    .expert_level = 1
combine.Imid = None
.type = floats
.help = "A list of values to try for the midpoint, for profile/sum combination
calculation: the value with the lowest Rmeas will be chosen.
0 and 1 are special values that can be supplied to include profile
and sum respectively in the comparison."
.expert_level = 2
combine.joint_analysis = True
.type = bool
.help = "Option of whether to do intensity combination optimisation
separately (i.e. different Imid per dataset) or joint for
multiple datasets"
.expert_level = 2
}
weighting {
weighting_scheme = *invvar
.type = choice
.help = "Weighting scheme used during Ih calculation. Weighting schemes
other than invvar and unity may trigger iterative reweighting
during minimisation, which may be unstable for certain minimisation
engines (LBFGS)."
.expert_level = 2
error_model {
include scope dials.algorithms.scaling.error_model.error_model.phil_scope
}
}
cut_data {
d_min = None
.type = float
.help = "Option to apply a high resolution cutoff for the dataset (i.e.
the chosen reflections have d > d_min)."
.expert_level = 1
d_max = None
.type = float
.help = "Option to apply a low resolution cutoff for the dataset (i.e.
the chosen reflections have d < d_max)."
.expert_level = 1
partiality_cutoff = 0.4
.type = float
.help = "Value below which reflections are removed from the dataset due
to low partiality."
.expert_level = 1
min_isigi = -5
.type = float
.help = "Value below which reflections are removed from the dataset due"
"to low I/sigI in either profile or summation intensity estimates"
.expert_level = 1
}
scaling_options {
check_consistent_indexing = False
.type = bool
.help = "If True, run dials.cosym on all data in the data preparation"
"step, to ensure consistent indexing."
target_cycle = True
.type = bool
.help = "Option to turn of initial round of targeted scaling
if some datasets are already scaled."
.expert_level = 2
only_target = False
.type = bool
.help = "Option to only do targeted scaling if some datasets
are already scaled."
.expert_level = 2
only_save_targeted = True
.type = bool
.help = "If only_target is true, this option to change whether the dataset
that is being scaled will be saved on its own, or combined with the
already scaled dataset."
.expert_level = 2
target_model = None
.type = path
.help = "Path to cif file to use to calculate target intensities for
scaling."
.expert_level = 2
target_mtz = None
.type = path
.help = "Path to merged mtz file to use as a target for scaling."
.expert_level = 2
nproc = 1
.type = int(value_min=1)
.help = "Number of blocks to divide the data into for minimisation.
This also sets the number of processes to use if the option is
available."
.expert_level = 2
use_free_set = False
.type = bool
.help = "Option to use a free set during scaling to check for overbiasing.
        This free set is used to calculate an RMSD, which is shown alongside
the 'working' RMSD during refinement, but is not currently used
to terminate refinement or make any choices on the model."
.expert_level = 2
free_set_percentage = 10.0
.type = float
.help = "Percentage of symmetry equivalent groups to use for the free set,
if use_free_set is True."
.expert_level = 2
free_set_offset = 0
.type = int
.help = "Offset for choosing unique groups for the free set from the whole
set of unique groups."
.expert_level = 2
full_matrix = True
.type = bool
.help = "Option to turn off GN/LM refinement round used to determine
error estimates on scale factors."
.expert_level = 2
outlier_rejection = *standard simple
.type = choice
.help = "Choice of outlier rejection routine. Standard may take a
significant amount of time to run for large datasets or high
multiplicities, whereas simple should be quick for these datasets."
.expert_level = 1
outlier_zmax = 6.0
.type = float(value_min=3.0)
.help = "Cutoff z-score value for identifying outliers based on their
normalised deviation within the group of equivalent reflections"
.expert_level = 1
emax = 10
.type = float(value_min = 0)
.help = "Reject reflections with normalised intensities E^2 > emax^2"
.expert_ |
lavish205/olympia | src/olympia/addons/urls.py | Python | bsd-3-clause | 3,177 | 0.001574 | from django.conf.urls import include, url
from django.shortcuts import redirect
from olympia.stats.urls import stats_patterns
from . import views
ADDON_ID = r"""(?P<addon_id>[^/<>"']+)"""
# These will all start with /addon/<addon_id>/
detail_patterns = [
url('^$', views.addon_detail, name='addons.detail'),
url('^more$', views.addon_detail, name='addons.detail_more'),
url('^eula/(?P<file_id>\d+)?$', views.eula, name='addons.eula'),
url('^license/(?P<version>[^/]+)?', views.license, name='addons.license'),
url('^privacy/', views.privacy, name='addons.privacy'),
url('^abuse/', views.report_abuse, name='addons.abuse'),
url('^reviews/', include('olympia.ratings.urls')),
url('^statistics/', include(stats_patterns)),
url('^versions/', include('olympia.versions.urls')),
# Old contribution urls
url('^developers$',
lambda r, addon_id: redirect('addons.detail',
addon_id, permanent=True),
name='addons.meet'),
url('^contribute/roadblock/',
lambda r, addon_id: redirect('addons.detail',
addon_id, permanent=True),
name='addons.roadblock'),
url('^contribute/installed/',
lambda r, addon_id: redirect('addons.detail',
addon_id, permanent=True),
name='addons.installed'),
url('^contribute/thanks',
lambda r, addon_id: redirect('addons.detail',
addon_id, permanent=True),
name='addons.thanks'),
url('^contribute/$',
lambda r, addon_id: redirect('addons.detail',
addon_id, permanent=True),
name='addons.contribute'),
url('^contribute/(?P<status>cancel|complete)$',
lambda r, addon_id, status: redirect('addons.detail',
addon_id, permanent=True),
name='addons.contribute_status'),
url('^about$',
lambda r, addon_id: redirect('addons.detail',
addon_id, permanent=True),
name='addons.about'),
]
urlpatterns = [
# Promo modules for the homepage
url('^i/promos$', views.homepage_promos, name='addons.homepage_promos'),
# See https://github.com/mozilla/addons-server/issues/3130
# Hardcode because there is no relation from blocklist items and the
# add-on they block :-(
url('^addon/icloud-bookmarks/$', views.icloud_bookmarks_redirect,
name='addons.icloudbookmarksredirect'),
# URLs for a single add-on | .
url('^addon/%s/' % ADDON_ID, include(detail_patterns)),
# Remora EULA and Privacy policy URLS
url('^addons/policy/0/(?P<addon_id>\d+)/(?P<file_id>\d+)',
lambda r, addon_id, file_id: redirect(
'addons.eula', addon_id, file_id, permanent=True)),
url('^addons/policy/0/(?P<addon_id>\d+)/',
lambda r, addon_id: redirect(
'addons.privacy', addon_id, permanent=True)),
url('^versions/license/(\d+)$', views | .license_redirect),
url('^find-replacement/$', views.find_replacement_addon,
name='addons.find_replacement'),
]
|
Zabanya/warzone2100 | po/parseJson.py | Python | gpl-2.0 | 571 | 0.033275 | import json, sys, re
def printString(s, begin, end):
if not re.match(r'^(\*.*\*|CAM[0-9] .*|Z ?NULL.*)$', s):
sys.stdout.write('{}_({}){}'.format(begin, json.dumps(s, ensure_ascii=False), end))
def parse(obj):
if isinstance(obj, dict):
for k, v in obj.items():
parse(v)
if k == 'name' and isinstance(v, str):
printString(v, '', '\n')
elif | k == 'text' and isinstance(v, list):
for s in v:
if isinstance(s, str):
printString(s, '', '\n')
elif isinstance(obj, list):
| for v in obj:
parse(v)
parse(json.load(open(sys.argv[1], 'r')))
|
apache/incubator-airflow | tests/providers/amazon/aws/sensors/test_dms_task.py | Python | apache-2.0 | 2,939 | 0.00034 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aw | s.hooks.dms import DmsHook
from airflow.providers.amazon.aws.sensors.dms_task import DmsTaskCompletedSensor
class TestDmsTaskCompletedSensor(unittest.TestCase):
def setUp(self):
self.sensor = DmsTaskCompletedSensor(
task_id='test_dms_sensor',
aws_conn_id='aws_default',
replication_task_arn='task_arn',
)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("stopped",))
def test_poke_stopped(self, mock_get_task_ | status):
assert self.sensor.poke(None)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("running",))
def test_poke_running(self, mock_get_task_status):
assert not self.sensor.poke(None)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("starting",))
def test_poke_starting(self, mock_get_task_status):
assert not self.sensor.poke(None)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("ready",))
def test_poke_ready(self, mock_get_task_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert 'Unexpected status: ready' in str(ctx.value)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("creating",))
def test_poke_creating(self, mock_get_task_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert 'Unexpected status: creating' in str(ctx.value)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("failed",))
def test_poke_failed(self, mock_get_task_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert 'Unexpected status: failed' in str(ctx.value)
@mock.patch.object(DmsHook, 'get_task_status', side_effect=("deleting",))
def test_poke_deleting(self, mock_get_task_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert 'Unexpected status: deleting' in str(ctx.value)
|
andycasey/snob | sandbox_mixture_slf.py | Python | mit | 2,941 | 0.00374 | import numpy as np
import matplotlib.pyplot as plt
from snob import mixture_slf as slf
n_samples, n_features, n_clusters, rank = 1000, 50, 6, 1
sigma = 0.5
true_homo_specific_variances = sigma**2 * np.ones((1, n_features))
rng = np.random.RandomSta | te(321)
U, _, _ = np.linalg.svd(rng.randn(n_features, n_features))
true_factor_loads = U[:, :rank].T
true_factor_scores = rng.randn(n_samples, rank)
X = np.dot(true_factor_scores, true_factor_loads)
# Assign objects to different clusters.
indices = rng.randint(0, n_clusters, size=n_samples)
true_weights = np.zeros(n_c | lusters)
true_means = rng.randn(n_clusters, n_features)
for index in range(n_clusters):
X[indices==index] += true_means[index]
true_weights[index] = (indices==index).sum()
true_weights = true_weights/n_samples
# Adding homoscedastic noise
bar = rng.randn(n_samples, n_features)
X_homo = X + sigma * bar
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
true_hetero_specific_variances = sigmas**2
data = X_hetero
model = slf.SLFGMM(n_clusters)
model.fit(data)
def scatter_common(x, y, title=None):
fig, ax = plt.subplots()
ax.scatter(x,y)
ax.set_title(title or "")
limits = np.array([ax.get_xlim(), ax.get_ylim()])
limits = (limits.min(), limits.max())
ax.plot(limits, limits, c="#666666", linestyle=":", linewidth=0.5, zorder=-1)
ax.set_xlim(limits)
ax.set_ylim(limits)
return fig
scatter_common(true_factor_loads, model.factor_loads, "factor loads")
scatter_common(true_factor_scores, model.factor_scores, "factor scores")
scatter_common(true_homo_specific_variances, model.specific_variances, "specific variances")
# means
# This one is tricky because the indices are not necessarily the same.
# So just take whichever is closest.
idx = np.zeros(n_clusters, dtype=int)
for index, true_mean in enumerate(true_means):
distance = np.sum(np.abs(model._means - true_mean), axis=1) \
+ np.abs(model.weights.flatten()[index] - true_weights)
idx[index] = np.argmin(distance)
assert len(idx) == len(set(idx))
true = true_means.flatten()
inferred = model._means[idx].flatten()
scatter_common(true, inferred, "means")
# Plot some data...
fig, ax = plt.subplots()
ax.scatter(data[:, 0], data[:, 1], facecolor="g")
raise a
# factor scores
ax = axes[1]
true = true_factor_scores.flatten()
inferred = model._factor_scores.flatten()
ax.scatter(true, inferred)
# factor loads
ax = axes[2]
true = true_factor_loads.flatten()
inferred = model._factor_loads.flatten()
ax.scatter(true, inferred)
raise a
true = np.hstack([each.flatten() for each in (true_means, true_factor_scores, true_factor_loads, true_specific_variances)])
inferred = np.hstack([each.flatten() for each in (model.means, model.factor_scores, model.factor_loads, model.specific_variances)])
fig, ax = plt.subplots()
ax.scatter(true, inferred, alpha=0.5)
raise a
|
LCAS/teaching | cmp3103m-code-fragments/scripts/odom_reader.py | Python | mit | 833 | 0.0012 | #!/usr/bin/env py | thon
import rospy
from pprint import pformat
from tf_conversions import transformations
from math import pi
from nav_msgs.msg import Odometry
class odom_reader:
def __init__(self):
self.image_sub = rospy.Subscriber("/odom",
Odometry, self.callback)
"""
| convert an orientation given in quaternions to an actual
angle in degrees for a 2D robot
"""
def odom_orientation(self, q):
y, p, r = transformations.euler_from_quaternion([q.w, q.x, q.y, q.z])
return y * 180 / pi
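    # Usage sketch (hypothetical message): given an Odometry message `msg`,
    #   angle = self.odom_orientation(msg.pose.pose.orientation)
    # returns the heading in degrees, roughly in the range (-180, 180].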
def callback(self, data):
print "odom pose: \n" + pformat(data.pose.pose)
angle = self.odom_orientation(data.pose.pose.orientation)
print "angle = %f" % angle
ic = odom_reader()
rospy.init_node('odom_reader')
rospy.spin()
|
fstagni/DIRAC | AccountingSystem/Client/Types/Pilot.py | Python | gpl-3.0 | 801 | 0.002497 | __RCSID__ = "$Id$"
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
class Pilot(BaseAccountingType):
def __init__(self):
BaseAccountingType.__init__(self)
self.definitionKeyFields = [('User', 'VARCHAR(64)'),
('UserGroup', 'VARCHAR(32)'),
| ('Site', 'VARCHAR(64)'),
('GridC | E', "VARCHAR(128)"),
('GridMiddleware', 'VARCHAR(32)'),
('GridResourceBroker', 'VARCHAR(128)'),
('GridStatus', 'VARCHAR(32)'),
]
self.definitionAccountingFields = [('Jobs', "INT UNSIGNED"),
]
self.checkType()
|
herove/dotfiles | sublime/Packages/HTML/html_completions.py | Python | mit | 10,848 | 0.001383 | import sublime, sublime_plugin
import re
def match(rex, str):
m = rex.match(str)
if m:
return m.group(0)
else:
return None
# This responds to on_query_completions, but conceptually it's expanding
# expressions, rather than completing words.
#
# It expands these simple expressions:
# tag.class
# tag#id
class HtmlCompletions(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
# Only trigger within HTML
if not view.match_selector(locations[0],
"text.html - source - meta.tag, punctuation.definition.tag.begin"):
return []
# Get the contents of each line, from the beginning of the line to
# each point
lines = [view.substr(sublime.Region(view.line(l).a, l))
for l in locations]
# Reverse the contents of each line, to simulate having the regex
# match backwards
lines = [l[::-1] for l in lines]
# Check the first location looks like an expression
rex = re.compile("([\w-]+)([.#])(\w+)")
expr = match(rex, lines[0])
if not expr:
return []
# Ensure that all other lines have identical expressions
for i in xrange(1, len(lines)):
ex = match(rex, lines[i])
if ex != expr:
return []
# Return the completions
arg, op, tag = rex.match(expr).groups()
arg = arg[::-1]
tag = tag[::-1]
expr = expr[::-1]
if op == '.':
snippet = "<{0} class=\"{1}\">$1</{0}>$0".format(tag, arg)
else:
snippet = "<{0} id=\"{1}\">$1</{0}>$0".format(tag, arg)
return [(expr, snippet)]
# Provide completions that match just after typing an opening angle bracket
class TagCompletions(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
# Only trigger within HTML
if not view.match_selector(locations[0],
"text.html - source"):
return []
pt = locations[0] - len(prefix) | - 1
ch = view.substr(sublime.Region(pt, pt + 1))
if ch != '<':
return []
return ([
("a\tTag", "a href=\"$1\">$2</a>"),
("abbr\tTag", "abbr>$1</abbr>"),
("acronym\tTag", "acronym>$1</acronym>"),
("address\tTag", "address>$1</address>"),
("applet\tTag", "applet>$1</applet>"),
("area\tTag", "area>$1</area>") | ,
("b\tTag", "b>$1</b>"),
("base\tTag", "base>$1</base>"),
("big\tTag", "big>$1</big>"),
("blockquote\tTag", "blockquote>$1</blockquote>"),
("body\tTag", "body>$1</body>"),
("button\tTag", "button>$1</button>"),
("center\tTag", "center>$1</center>"),
("caption\tTag", "caption>$1</caption>"),
("cdata\tTag", "cdata>$1</cdata>"),
("cite\tTag", "cite>$1</cite>"),
("col\tTag", "col>$1</col>"),
("colgroup\tTag", "colgroup>$1</colgroup>"),
("code\tTag", "code>$1</code>"),
("div\tTag", "div>$1</div>"),
("dd\tTag", "dd>$1</dd>"),
("del\tTag", "del>$1</del>"),
("dfn\tTag", "dfn>$1</dfn>"),
("dl\tTag", "dl>$1</dl>"),
("dt\tTag", "dt>$1</dt>"),
("em\tTag", "em>$1</em>"),
("fieldset\tTag", "fieldset>$1</fieldset>"),
("font\tTag", "font>$1</font>"),
("form\tTag", "form>$1</form>"),
("frame\tTag", "frame>$1</frame>"),
("frameset\tTag", "frameset>$1</frameset>"),
("head\tTag", "head>$1</head>"),
("h1\tTag", "h1>$1</h1>"),
("h2\tTag", "h2>$1</h2>"),
("h3\tTag", "h3>$1</h3>"),
("h4\tTag", "h4>$1</h4>"),
("h5\tTag", "h5>$1</h5>"),
("h6\tTag", "h6>$1</h6>"),
("i\tTag", "i>$1</i>"),
("iframe\tTag", "iframe src=\"$1\"></iframe>"),
("ins\tTag", "ins>$1</ins>"),
("kbd\tTag", "kbd>$1</kbd>"),
("li\tTag", "li>$1</li>"),
("label\tTag", "label>$1</label>"),
("legend\tTag", "legend>$1</legend>"),
("link\tTag", "link rel=\"stylesheet\" type=\"text/css\" href=\"$1\">"),
("map\tTag", "map>$1</map>"),
("noframes\tTag", "noframes>$1</noframes>"),
("object\tTag", "object>$1</object>"),
("ol\tTag", "ol>$1</ol>"),
("optgroup\tTag", "optgroup>$1</optgroup>"),
("option\tTag", "option>$0</option>"),
("p\tTag", "p>$1</p>"),
("pre\tTag", "pre>$1</pre>"),
("span\tTag", "span>$1</span>"),
("samp\tTag", "samp>$1</samp>"),
("script\tTag", "script type=\"${1:text/javascript}\">$0</script>"),
("style\tTag", "style type=\"${1:text/css}\">$0</style>"),
("select\tTag", "select>$1</select>"),
("small\tTag", "small>$1</small>"),
("strong\tTag", "strong>$1</strong>"),
("sub\tTag", "sub>$1</sub>"),
("sup\tTag", "sup>$1</sup>"),
("table\tTag", "table>$1</table>"),
("tbody\tTag", "tbody>$1</tbody>"),
("td\tTag", "td>$1</td>"),
("textarea\tTag", "textarea>$1</textarea>"),
("tfoot\tTag", "tfoot>$1</tfoot>"),
("th\tTag", "th>$1</th>"),
("thead\tTag", "thead>$1</thead>"),
("title\tTag", "title>$1</title>"),
("tr\tTag", "tr>$1</tr>"),
("tt\tTag", "tt>$1</tt>"),
("u\tTag", "u>$1</u>"),
("ul\tTag", "ul>$1</ul>"),
("var\tTag", "var>$1</var>"),
("br\tTag", "br>"),
("embed\tTag", "embed>"),
("hr\tTag", "hr>"),
("img\tTag", "img src=\"$1\">"),
("input\tTag", "input>"),
("meta\tTag", "meta>"),
("param\tTag", "param name=\"$1\" value=\"$2\">"),
("article\tTag", "article>$1</article>"),
("aside\tTag", "aside>$1</aside>"),
("audio\tTag", "audio>$1</audio>"),
("canvas\tTag", "canvas>$1</canvas>"),
("footer\tTag", "footer>$1</footer>"),
("header\tTag", "header>$1</header>"),
("nav\tTag", "nav>$1</nav>"),
("section\tTag", "section>$1</section>"),
("video\tTag", "video>$1</video>"),
("A\tTag", "A HREF=\"$1\">$2</A>"),
("ABBR\tTag", "ABBR>$1</ABBR>"),
("ACRONYM\tTag", "ACRONYM>$1</ACRONYM>"),
("ADDRESS\tTag", "ADDRESS>$1</ADDRESS>"),
("APPLET\tTag", "APPLET>$1</APPLET>"),
("AREA\tTag", "AREA>$1</AREA>"),
("B\tTag", "B>$1</B>"),
("BASE\tTag", "BASE>$1</BASE>"),
("BIG\tTag", "BIG>$1</BIG>"),
("BLOCKQUOTE\tTag", "BLOCKQUOTE>$1</BLOCKQUOTE>"),
("BODY\tTag", "BODY>$1</BODY>"),
("BUTTON\tTag", "BUTTON>$1</BUTTON>"),
("CENTER\tTag", "CENTER>$1</CENTER>"),
("CAPTION\tTag", "CAPTION>$1</CAPTION>"),
("CDATA\tTag", "CDATA>$1</CDATA>"),
("CITE\tTag", "CITE>$1</CITE>"),
("COL\tTag", "COL>$1</COL>"),
("COLGROUP\tTag", "COLGROUP>$1</COLGROUP>"),
("CODE\tTag", "CODE>$1</CODE>"),
("DIV\tTag", "DIV>$1</DIV>"),
("DD\tTag", "DD>$1</DD>"),
("DEL\tTag", "DEL>$1</DEL>"),
("DFN\tTag", "DFN>$1</DFN>"),
("DL\tTag", "DL>$1</DL>"),
("DT\tTag", "DT>$1</DT>"),
("EM\tTag", "EM>$1</EM>"),
("FIELDSET\tTag", "FIELDSET>$1</FIELDSET>"),
("FONT\tTag", "FONT>$1</FONT>"),
("FORM\tTag", "FORM>$1</FORM>"),
("FRAME\tTag", "FRAME>$1</FRAME>"),
("FRAMESET\tTag", "FRAMESET>$1</FRAMESET>"),
("HEAD\tTag", "HEAD>$1</HEAD>"),
("H1\tTag", "H1>$1</H1>"),
("H2\tTag", "H2>$1</H2>"),
("H3\tTag", "H3>$1</H3>"),
("H4\tTag", "H4>$1</H4>"),
|
jayfk/cookiecutter-django-docker | {{cookiecutter.repo_name}}/config/settings/production.py | Python | bsd-3-clause | 4,238 | 0.003303 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use mailgun to send emails
- Use redis
'''
from __future__ import absolute_import, unicode_literals
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 se | conds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
| SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[{{cookiecutter.project_name}}] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
# CACHE CONFIGURATION
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': [
'redis:6379',
],
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser',
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 50,
'timeout': 20,
},
'MAX_CONNECTIONS': 1000,
'PICKLE_VERSION': -1,
},
},
}
# ASSET CONFIGURATION
# ------------------------------------------------------------------------------
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = '/static'
MEDIA_ROOT = '/media'
STATICFILES_DIRS = (
unicode(APPS_DIR.path("static")),
)
{% if cookiecutter.use_celery %}
# CELERY BROKER CONFIGURATION
# ------------------------------------------------------------------------------
BROKER_URL = "amqp://guest:guest@rabbitmq:5672//"
{% endif %}
{% if cookiecutter.use_sentry %}
# SENTRY CONFIGURATION
# ------------------------------------------------------------------------------
RAVEN_CONFIG = {
'dsn': env("SENTRY_URL"),
}
INSTALLED_APPS = INSTALLED_APPS + (
'raven.contrib.django.raven_compat',
)
{% endif %}
# Your production stuff: Below this line define 3rd party library settings
|
bigchaindb/bigchaindb-examples | server/lib/models/assets.py | Python | apache-2.0 | 10,725 | 0.00317 | from time import sleep
from datetime import datetime
import rethinkdb as r
import cryptoconditions as cc
from decorator import contextmanager
import bigchaindb
import bigchaindb.util
import bigchaindb.crypto
@contextmanager
def take_at_least_seconds(amount_in_seconds):
t_issu | ed = datetime.now()
yield
t_expired = datetime.now() - t_issued
while t_expired.total_seconds() < amount_in_seconds:
sleep(1)
t_expired = datetime.now() - t_issued
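# Usage sketch (hypothetical helper): pad a fast operation so the whole block
# takes at least five seconds, e.g. to throttle a polling loop:
#   with take_at_least_seconds(5):
#       do_quick_write()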
def query_reql_response(response, query):
result = list(response)
if result and len(result):
content = re | sult[0]["transaction"]["data"]["payload"]["content"]
if query and content is not None:
if query in content:
return result
else:
return result
return None
def get_owned_assets(bigchain, vk, query=None, table='bigchain'):
assets = []
asset_ids = bigchain.get_owned_ids(vk)
if table == 'backlog':
reql_query = \
r.table(table) \
.filter(lambda tx: tx['transaction']['conditions']
.contains(lambda c: c['new_owners']
.contains(vk)))
response = query_reql_response(reql_query.run(bigchain.conn), query)
if response:
assets += response
elif table == 'bigchain':
for asset_id in asset_ids:
txid = asset_id['txid'] if isinstance(asset_id, dict) else asset_id
reql_query = r.table(table) \
.concat_map(lambda doc: doc['block']['transactions']) \
.filter(lambda transaction: transaction['id'] == txid)
response = query_reql_response(reql_query.run(bigchain.conn), query)
if response:
assets += response
return assets
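# Usage sketch (hypothetical verifying key): list assets owned by `alice_vk`
# whose payload content mentions 'bicycle':
#   owned = get_owned_assets(bigchain, vk=alice_vk, query='bicycle')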
def get_assets(bigchain, search):
if search:
cursor = \
r.table('bigchain') \
.concat_map(lambda doc: doc["block"]["transactions"]
.filter(lambda transaction: transaction["transaction"]["data"]["payload"]["content"]
.match(search))) \
.run(bigchain.conn)
else:
cursor = \
r.table('bigchain') \
.concat_map(lambda doc: doc["block"]["transactions"]).run(bigchain.conn)
return list(cursor)
def create_asset(bigchain, to, payload):
# a create transaction uses the operation `CREATE` and has no inputs
tx = bigchain.create_transaction(bigchain.me, to, None, 'CREATE', payload=payload)
# all transactions need to be signed by the user creating the transaction
tx_signed = bigchain.sign_transaction(tx, bigchain.me_private)
bigchain.validate_transaction(tx_signed)
# write the transaction to the bigchain
bigchain.write_transaction(tx_signed)
return tx_signed
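# Usage sketch (hypothetical key and payload): issue a new asset to `user_vk`:
#   tx = create_asset(bigchain, to=user_vk, payload={'content': 'hello'})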
def create_asset_hashlock(bigchain, payload, secret):
# Create a hash-locked asset without any new_owners
hashlock_tx = bigchain.create_transaction(bigchain.me, None, None, 'CREATE', payload=payload)
hashlock_tx_condition = cc.PreimageSha256Fulfillment(preimage=secret.encode())
# The conditions list is empty, so we need to append a new condition
hashlock_tx['transaction']['conditions'].append({
'condition': {
'details': hashlock_tx_condition.to_dict(),
'uri': hashlock_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
})
# Conditions have been updated, so hash needs updating
hashlock_tx['id'] = bigchaindb.util.get_hash_data(hashlock_tx)
# The asset needs to be signed by the current_owner
hashlock_tx_signed = bigchain.sign_transaction(hashlock_tx, bigchain.me_private)
bigchain.validate_transaction(hashlock_tx_signed)
# write the transaction to the bigchain
bigchain.write_transaction(hashlock_tx_signed)
return hashlock_tx_signed
def transfer_asset(bigchain, source, to, asset_id, sk):
asset = bigchain.get_transaction(asset_id['txid'])
asset_transfer = bigchain.create_transaction(source, to, asset_id, 'TRANSFER',
payload=asset['transaction']['data']['payload'])
asset_transfer_signed = bigchain.sign_transaction(asset_transfer, sk)
bigchain.validate_transaction(asset_transfer_signed)
bigchain.write_transaction(asset_transfer_signed)
return asset_transfer_signed
def escrow_asset(bigchain, source, to, asset_id, sk,
expires_at=None, ilp_header=None, execution_condition=None):
asset = bigchain.get_transaction(asset_id['txid'])
payload = asset['transaction']['data']['payload'].copy()
if ilp_header:
payload.update({'ilp_header': ilp_header})
# Create escrow template with the execute and abort address
asset_escrow = bigchain.create_transaction(source, [source, to], asset_id, 'TRANSFER',
payload=payload)
if not expires_at:
# Set expiry time (100 secs from now)
time_sleep = 100
expires_at = float(bigchaindb.util.timestamp()) + time_sleep
# Create escrow and timeout condition
condition_escrow = cc.ThresholdSha256Fulfillment(threshold=1) # OR Gate
condition_timeout = cc.TimeoutFulfillment(expire_time=str(expires_at)) # only valid if now() <= time_expire
condition_timeout_inverted = cc.InvertedThresholdSha256Fulfillment(threshold=1)
condition_timeout_inverted.add_subfulfillment(condition_timeout)
# Create execute branch
execution_threshold = 3 if execution_condition else 2
condition_execute = cc.ThresholdSha256Fulfillment(threshold=execution_threshold) # AND gate
condition_execute.add_subfulfillment(cc.Ed25519Fulfillment(public_key=to)) # execute address
condition_execute.add_subfulfillment(condition_timeout) # federation checks on expiry
if execution_condition:
condition_execute.add_subcondition_uri(execution_condition)
condition_escrow.add_subfulfillment(condition_execute)
# Create abort branch
condition_abort = cc.ThresholdSha256Fulfillment(threshold=2) # AND gate
condition_abort.add_subfulfillment(cc.Ed25519Fulfillment(public_key=source)) # abort address
condition_abort.add_subfulfillment(condition_timeout_inverted)
condition_escrow.add_subfulfillment(condition_abort)
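    # The assembled condition tree is, in effect:
    #   escrow  = OR(execute, abort)          # 1-of-2 threshold
    #   execute = AND(sig(to), timeout[, execution_condition])
    #   abort   = AND(sig(source), NOT(timeout))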
# Update the condition in the newly created transaction
asset_escrow['transaction']['conditions'][0]['condition'] = {
'details': condition_escrow.to_dict(),
'uri': condition_escrow.condition.serialize_uri()
}
# conditions have been updated, so hash needs updating
asset_escrow['id'] = bigchaindb.util.get_hash_data(asset_escrow)
# sign transaction
asset_escrow_signed = bigchaindb.util.sign_tx(asset_escrow, sk, bigchain=bigchain)
bigchain.write_transaction(asset_escrow_signed)
return asset_escrow_signed
def fulfill_escrow_asset(bigchain, source, to, asset_id, sk, execution_fulfillment=None):
asset = bigchain.get_transaction(asset_id['txid'])
asset_owners = asset['transaction']['conditions'][asset_id['cid']]['new_owners']
other_owner = [owner for owner in asset_owners if not owner == source][0]
# Create a base template for fulfill transaction
asset_escrow_fulfill = bigchain.create_transaction(asset_owners, to, asset_id, 'TRANSFER',
payload=asset['transaction']['data']['payload'])
# Parse the threshold cryptocondition
escrow_fulfillment = cc.Fulfillment.from_dict(
asset['transaction']['conditions'][0]['condition']['details'])
# Get the fulfillment message to sign
tx_escrow_execute_fulfillment_message = \
bigchaindb.util.get_fulfillment_message(asset_escrow_fulfill,
asset_escrow_fulfill['transaction']['fulfillments'][0],
serialized=True)
# get the indices path for the source that wants to fulfill
_, indices = get_subcondition_indices_from_vk(escrow_fulfillment, source)
subf |
plotly/python-api | packages/python/plotly/plotly/validators/volume/colorbar/tickformatstop/_value.py | Python | mit | 497 | 0 | import _plotly_uti | ls.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="volume.colorbar.tickformatstop",
**kwargs
):
| super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
parlar/calls2xls | external/CrossMap/usr/lib64/python2.7/site-packages/bx/align/tools/chop.py | Python | mit | 1,066 | 0.021576 | """
Support for chopping a list of alignment blocks to only the portion that
intersects a particular interval.
"""
def chop_list( blocks, src, start, end ):
"""
For each alignment block in the sequence `blocks`, chop out the portion
of the block that overlaps the interval [`start`,`end`) in the
component/species named `src`.
"""
new_blocks = []
for block in blocks:
ref = block.get_component_by_src( src )
# If the reference component is on the '-' strand we should complement the interval
if ref.strand == '-':
slice_start = max( ref.src_size - end, ref.start )
slice_end = max( ref.src | _size - start, ref.end )
else:
slice_start = max( start, ref.start )
slice_end = min( end, ref.end )
sliced = block.slice_by_component( ref, slice_start, slice_end )
good = True
for c in sliced.components:
if c.size < 1:
good = False
if good:
new_blocks.append( slice | d )
return new_blocks |
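# Usage sketch (hypothetical data, assuming bx.align blocks whose reference
# component is named "hg18.chr1"):
#   clipped = chop_list(blocks, src="hg18.chr1", start=1000, end=2000)
# Blocks whose sliced components would be empty are dropped entirely.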
tobiasraabe/otree_virtual_machine_manager | ovmm/commands/list_user.py | Python | mit | 484 | 0 | """This module contains the ``list_user`` command."""
import click
from ovmm.handlers.postgres import PostgreSQLDatabaseHandler
@click.command()
def list_user():
"""List users managed by ovmm."""
click.echo('\n{:-^60}'.format(' Process: List Users '))
postgres = PostgreSQLDatabaseHandler()
user_list = postgres.list_user()
| click.echo('List of user names:')
for i in user_list:
click.echo(i)
click.echo('{:-^60}\n'.format(' Process: End '))
| |
google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/bgen/bgen/bgenHeapBuffer.py | Python | apache-2.0 | 4,346 | 0.002761 | # Buffers allocated on the heap
from bgenOutput import *
from bgenType import OutputOnlyMixIn
from bgenBuffer import FixedInputOutputBufferType
class HeapInputOutputBufferType(FixedInputOutputBufferType):
"""Input-output buffer allocated on the heap -- passed as (inbuffer, outbuffer, size).
Instantiate without parameters.
Call from Python with input buffer.
"""
def __init__(self, datatype = 'char', sizetype = 'int', sizeformat = None):
FixedInputOutputBufferType.__init__(self, "0", datatype, sizetype, sizeformat)
def getOutputBufferDeclarations(self, name, constmode=False, outmode=False):
if constmode:
raise RuntimeError, "Cannot use const output buffer"
if outmode:
out = "*"
else:
out = ""
return ["%s%s *%s__out__" % (self.datatype, out, name)]
def getargsCheck(self, name):
Output("if ((%s__out__ = malloc(%s__in_len__)) == NULL)", name, name)
OutLbrace()
Output('PyErr_NoMemory();')
Output("goto %s__error__;", name)
self.label_needed = 1
OutRbrace()
Output("%s__len__ = %s__in_len__;", name, name)
def passOutput(self, name):
return "%s__in__, %s__out__, (%s)%s__len__" % \
(name, name, self.sizetype, name)
def mkvalueArgs(self, name):
return "%s__out__, (int)%s__len__" % (name, name)
def cleanup(self, name):
Output("free(%s__out__);", name)
FixedInputOutputBufferType.cleanup(self, name)
class VarHeapInputOutputBufferType(HeapInputO | utputBufferType):
"""same as base class, but passed as (i | nbuffer, outbuffer, &size)"""
def passOutput(self, name):
return "%s__in__, %s__out__, &%s__len__" % (name, name, name)
class HeapCombinedInputOutputBufferType(HeapInputOutputBufferType):
"""same as base class, but passed as (inoutbuffer, size)"""
def passOutput(self, name):
return "(%s *)memcpy(%s__out__, %s__in__, %s__len__)" % \
(self.datatype, name, name, name)
class VarHeapCombinedInputOutputBufferType(HeapInputOutputBufferType):
"""same as base class, but passed as (inoutbuffer, &size)"""
def passOutput(self, name):
return "(%s *)memcpy(%s__out__, %s__in__, &%s__len__)" % \
(self.datatype, name, name, name)
class HeapOutputBufferType(OutputOnlyMixIn, HeapInputOutputBufferType):
"""Output buffer allocated on the heap -- passed as (buffer, size).
Instantiate without parameters.
Call from Python with buffer size.
"""
def getInputBufferDeclarations(self, name, constmode=False):
return []
def getargsFormat(self):
return "i"
def getargsArgs(self, name):
return "&%s__in_len__" % name
def passOutput(self, name):
return "%s__out__, %s__len__" % (name, name)
class VarHeapOutputBufferType(HeapOutputBufferType):
"""Output buffer allocated on the heap -- passed as (buffer, &size).
Instantiate without parameters.
Call from Python with buffer size.
"""
def passOutput(self, name):
return "%s__out__, &%s__len__" % (name, name)
class VarVarHeapOutputBufferType(VarHeapOutputBufferType):
"""Output buffer allocated on the heap -- passed as (buffer, size, &size).
Instantiate without parameters.
Call from Python with buffer size.
"""
def passOutput(self, name):
return "%s__out__, %s__len__, &%s__len__" % (name, name, name)
class MallocHeapOutputBufferType(HeapOutputBufferType):
"""Output buffer allocated by the called function -- passed as (&buffer, &size).
Instantiate without parameters.
Call from Python without parameters.
"""
def getargsCheck(self, name):
Output("%s__out__ = NULL;", name)
def getAuxDeclarations(self, name):
return []
def passOutput(self, name):
return "&%s__out__, &%s__len__" % (name, name)
def getargsFormat(self):
return ""
def getargsArgs(self, name):
return None
def mkvalueFormat(self):
return "z#"
def cleanup(self, name):
Output("if( %s__out__ ) free(%s__out__);", name, name)
|
pimiento/captures | captures/wsgi.py | Python | gpl-2.0 | 1,132 | 0.000883 | """
WSGI config for ahaha project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django ap | plication with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ahaha.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
appl | ication = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
Chasego/codirit | leetcode/133-Clone-Graph/bfs_001_iterative_jiuzhang.py | Python | mit | 1,381 | 0.005069 | """
Good Points:
1. getNodes
1.1 while q (instead of while len(q) > 0)
1.2 collections.deque (instead of q = [])
Bad Points:
1. another loop to fill mapping instead of doing it in the bfs
"""
import collections
# Definition for a Node.
class Node:
def __init__(self, val = 0, neighbors = []):
self.val = val
self.neighbors = neighbors
class Solution:
def cloneGraph(self, node: 'Node') -> 'Node':
root = node
if node is None:
return node
# use bfs algorithm to traverse the graph and get all nodes.
nodes = self.getNodes(node)
# copy nodes, store the old->new mapping information in a hash map
| mapping = {}
for node in nodes:
mapping[node] = Node(node.val)
# copy neighbors(edges)
for node in nodes:
new_node = mapping[node]
for neighbor in node.neighbors:
new_neighb | or = mapping[neighbor]
new_node.neighbors.append(new_neighbor)
return mapping[root]
def getNodes(self, node):
q = collections.deque([node])
result = set([node])
while q:
head = q.popleft()
for neighbor in head.neighbors:
if neighbor not in result:
result.add(neighbor)
q.append(neighbor)
return result |
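# Usage sketch (hypothetical two-node graph):
#   a, b = Node(1), Node(2)
#   a.neighbors, b.neighbors = [b], [a]
#   copy = Solution().cloneGraph(a)  # returns a deep copy, so copy is not a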
Vauxoo/account-payment | account_vat_on_payment/account_journal.py | Python | agpl-3.0 | 1,470 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2014 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you c | an redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# | GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AccountJournal(orm.Model):
_inherit = "account.journal"
_columns = {
'vat_on_payment_related_journal_id': fields.many2one(
'account.journal', 'Shadow Journal for VAT on payment',
help="Related journal used for shadow registrations on a "
"VAT on payment basis. Set the shadow journal here"),
}
|
makkus/pigshare | setup.py | Python | gpl-3.0 | 1,523 | 0.003283 | 3#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import io
import re
init_py = io.open('pigshare/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_py))
metadata['doc'] | = re.findall('"""(.+)"""', init_py)[0]
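# e.g. a line "__version__ = '0.1'" in pigshare/__init__.py populates
# metadata['version'] with '0.1'; the first one-line triple-quoted string is
# captured as metadata['doc'].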
requirements = [
"argparse",
"setuptools",
"restkit",
"bo | oby",
"simplejson",
"parinx",
"pyclist",
"argcomplete"
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pigshare',
version=metadata['version'],
description=metadata['doc'],
author=metadata['author'],
author_email=metadata['email'],
url=metadata['url'],
packages=[
'pigshare',
],
package_dir={'pigshare':
'pigshare'},
include_package_data=True,
install_requires=requirements,
license="GPLv3",
zip_safe=False,
keywords='pigshare figshare client rest api',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements,
entry_points={
'console_scripts': [
'pigshare = pigshare.pigshare:run'
],
}
)
|
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-512-transformer/transformer/data_generators/all_problems.py | Python | apache-2.0 | 1,542 | 0.01297 | """Imports for problem modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import re
MODULES = [
"problem_hparams", # pylint: disable=line-too-long
"translate_ende", # pylint: disable=line-too-long
"translate_enfr", # pylint: disable=line-too-long
]
ALL_MODULES = list(MODULES)
def _is_import_err_msg(err_str, module):
module_pattern = "(.)?".join(["(%s) | ?" % | m for m in module.split(".")])
return re.match("^No module named (')?%s(')?$" % module_pattern, err_str)
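# e.g. for module "a.b" the pattern becomes "(a)?(.)?(b)?", so the regex
# matches "No module named a.b", "No module named 'a.b'" and the bare
# "No module named b" raised for a missing submodule.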
def _handle_errors(errors):
"""Log out and possibly reraise errors during import."""
if not errors:
return
log_all = True # pylint: disable=unused-variable
err_msg = "Skipped importing {num_missing} data_generators modules."
# BEGIN GOOGLE-INTERNAL
err_msg += (" OK if no other errors. Depend on _heavy or problem-specific "
"py_binary targets if trying to use a module that was skipped.")
log_all = False
# END GOOGLE-INTERNAL
print(err_msg.format(num_missing=len(errors)))
for module, err in errors:
err_str = str(err)
if not _is_import_err_msg(err_str, module):
print("From module %s" % module)
raise err
if log_all:
print("Did not import module: %s; Cause: %s" % (module, err_str))
def import_modules(modules):
errors = []
for module in modules:
try:
importlib.import_module(module)
except ImportError as error:
errors.append((module, error))
_handle_errors(errors)
|
maldevel/PenTestKit | web/compare-post-data.py | Python | gpl-3.0 | 2,917 | 0.01337 | #!/usr/bin/python
# encoding: UTF-8
"""
This file is part of PenTestKit
Copyright (C) 2017-2018 @maldevel
https://github.com/maldevel/PenTestKit
PenTestKit - Useful tools for Penetration Testing.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For more see the file 'LICENSE' for copying permission.
"""
__author__ = "maldevel"
__copyright__ = "Copyright (c) 2017-2018 @maldevel"
__credits__ = ["maldevel"]
__license__ = "GPLv3"
__version__ = "0.1"
__maintainer__ = "maldevel"
################################
import argparse
import sys
import urlparse
from argparse import RawTextHelpFormatter
################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_cla | ss=RawTextHelpFormatter)
parser.add_argument("-i1", "--input1",
action="store",
metavar='POST_data',
dest='input1',
type=str,
default=None,
required=True,
| help='POST data to compare')
parser.add_argument("-i2", "--input2",
action="store",
metavar='POST_data',
dest='input2',
type=str,
default=None,
required=True,
help='POST data to compare')
    if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
input1_params = urlparse.parse_qs(args.input1, True)
input1_params = set().union(input1_params.keys())
input2_params = urlparse.parse_qs(args.input2, True)
input2_params = set().union(input2_params.keys())
unique_params = input1_params.union(input2_params)
params1_not_params2 = list(input1_params - input2_params)
params2_not_params1 = list(input2_params - input1_params)
print
print "[+] Unique parameters"
print
print ', '.join(unique_params)
print
print
print "[+] Parameters in input1 and not in input2"
print
print ', '.join(params1_not_params2)
print
print "[+] Parameters in input2 and not in input1"
print
print ', '.join(params2_not_params1)
|
brentbaum/cs3240-labdemo | hello.py | Python | mit | 55 | 0 | from helper import greet | ing
gree | ting("hello world...")
|
sibson/dynoup | settings.py | Python | mit | 470 | 0 | import os
from datetime import timedelta
ROLLBAR_ACCESS_TOKEN = os.environ.get('ROLLBAR_ACCESS_TOKEN')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'postgres:///dynoup | ')
FERNET_SECRET = os.environ.get('FERNET_SECRET')
CELERY_BROKER_URL = os.environ.get('REDIS_URL', 'redis://')
CELERY_TASK_SERIALIZER = 'json'
CELERYBEAT_S | CHEDULE = {
'http-checks': {
'task': 'scaler.tasks.run_http_checks',
'schedule': timedelta(minutes=1),
}
}
|
hlieberman/ansible-modules-core | cloud/azure/azure_rm_publicipaddress.py | Python | gpl-3.0 | 10,272 | 0.002921 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: azure_rm_publicipaddress
version_added: "2.1"
short_description: Manage Azure Public IP Addresses.
description:
- Create, update and delete a Public IP address. Allows setting and updating the address allocation method and
domain name label. Use the azure_rm_networkinterface module to associat | e a Public IP with a network interface.
options:
resource_group:
description:
- Name of resource group with which the Public IP is associated.
required: true
allocation_method:
description:
- Control whether the assigned Public IP remains permanently assigned to the object. If not
              set to 'Static', the IP address may change any time an associated virtual machine is power cycled.
choices:
- Dynamic
- Static
default: Dynamic
required: false
    domain_name:
description:
- The customizable portion of the FQDN assigned to public IP address. This is an explicit setting. If
no value is provided, any existing value will be removed on an existing public IP.
aliases:
- domain_name_label
required: false
default: null
name:
description:
- Name of the Public IP.
required: true
state:
description:
            - Assert the state of the Public IP. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a public ip address
azure_rm_publicipaddress:
resource_group: testing
name: my_public_ip
allocation_method: Static
domain_name: foobar
- name: Delete public ip
azure_rm_publicipaddress:
resource_group: testing
name: my_public_ip
state: absent
'''
RETURN = '''
state:
description: Facts about the current state of the object.
returned: always
type: dict
    sample: {
"dns_settings": {},
"etag": "W/\"a5e56955-12df-445a-bda4-dc129d22c12f\"",
"idle_timeout_in_minutes": 4,
"ip_address": "52.160.103.93",
"location": "westus",
"name": "publicip002",
"provisioning_state": "Succeeded",
"public_ip_allocation_method": "Static",
"tags": {},
"type": "Microsoft.Network/publicIPAddresses"
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import PublicIPAddress, PublicIPAddressDnsSettings
except ImportError:
# This is handled in azure_rm_common
pass
NAME_PATTERN = re.compile(r"^[a-z][a-z0-9-]{1,61}[a-z0-9]$")
def pip_to_dict(pip):
result = dict(
name=pip.name,
type=pip.type,
location=pip.location,
tags=pip.tags,
public_ip_allocation_method=pip.public_ip_allocation_method.value,
dns_settings=dict(),
ip_address=pip.ip_address,
idle_timeout_in_minutes=pip.idle_timeout_in_minutes,
provisioning_state=pip.provisioning_state,
etag=pip.etag
)
if pip.dns_settings:
result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label
result['dns_settings']['fqdn'] = pip.dns_settings.fqdn
result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn
return result
class AzureRMPublicIPAddress(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
allocation_method=dict(type='str', default='Dynamic', choices=['Dynamic', 'Static']),
domain_name=dict(type='str', aliases=['domain_name_label']),
)
self.resource_group = None
self.name = None
self.location = None
self.state = None
self.tags = None
self.allocation_method = None
self.domain_name = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMPublicIPAddress, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
results = dict()
changed = False
pip = None
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if not NAME_PATTERN.match(self.name):
self.fail("Parameter error: name must begin with a letter or number, end with a letter or number "
"and contain at least one number.")
try:
self.log("Fetch public ip {0}".format(self.name))
pip = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
self.check_provisioning_state(pip, self.state)
self.log("PIP {0} exists".format(self.name))
if self.state == 'present':
results = pip_to_dict(pip)
if self.domain_name != results['dns_settings'].get('domain_name_label'):
self.log('CHANGED: domain_name_label')
changed = True
                results['dns_settings']['domain_name_label'] = self.domain_name
if self.allocation_method != results['public_ip_allocation_method']:
self.log("CHANGED: allocation_method")
changed = True
results['public_ip_allocation_method'] = self.allocation_method
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
elif self.state == 'absent':
self.log("CHANGED: public ip {0} exists but requested state is 'absent'".format(self.name))
changed = True
except CloudError:
self.log('Public ip {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: pip {0} does not exist but requested state is 'present'".format(self.name))
changed = True
self.results['state'] = results
self.results['changed'] = changed
if self.check_mode:
return results
if changed:
if self.state == 'present':
if not pip:
self.log("Create new Public IP {0}".format(self.name))
pip = PublicIPAddress(
|
opencivicdata/pupa | pupa/tests/importers/test_membership_importer.py | Python | bsd-3-clause | 8,414 | 0.00309 | import pytest
from pupa.scrape import Membership as ScrapeMembership
from pupa.scrape import Person as ScrapePerson
from pupa.importers import MembershipImporter, PersonImporter, OrganizationImporter
from pupa.exceptions import NoMembershipsError
from opencivicdata.core.models import Organization, Post, Person, Division, Jurisdiction
class DumbMockImporter(object):
""" this is a mock importer that implements a resolve_json_id that is just a pass-through """
json_to_db_id = {}
def resolve_json_id(self, json_id, allow_no_match=False):
return json_id
def create_jurisdiction():
Division.objects.create(id='ocd-division/country:us', name='USA')
Jurisdiction.objects.create(id='fnd-jid', division_id='ocd-division/country:us')
@pytest.mark.django_db
def test_full_membership():
create_jurisdiction()
org = Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
jurisdiction_id="fnd-jid")
hari = Person.objects.create(id="hs", name="Hari Seldon")
robot = Person.objects.create(id="robot", name="R. Daneel Olivaw")
post = Post.objects.create(id='f', label="founder", role="Founder", organization=org)
# add a membership through a post, with a start date
m1 = ScrapeMembership(person_id=hari.id, organization_id=org.id,
post_id=post.id, start_date='2020-03-10', end_date='2021-06-30')
m1.add_contact_detail(type='phone', value='555-555-1234', note='this is fake')
m1.add_link('http://example.com/link')
# add a membership direct to an organization, with an end date
m2 = ScrapeMembership(person_id=robot.id, organization_id=org.id, label='member',
role='member', end_date='2019-11-09')
dumb_imp = DumbMockImporter()
memimp = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
memimp.import_data([m1.as_dict(), m2.as_dict()])
    # ensure that the memberships are attached in the right places
assert org.memberships.count() == 2
assert hari.memberships.count() == 1
assert robot.memberships.count() == 1
assert post.memberships.count() == 1
# ensure that the first membership has contact details and links
m = hari.memberships.get()
cd = m.contact_details.get()
assert cd.type == 'phone'
assert cd.value == '555-555-1234'
assert cd.note == 'this is fake'
assert m.links.all()[0].url == 'http://example.com/link'
# update the imported memberships (i.e., change attributes that are not
# in the spec) and confirm they resolve correctly
memimp2 = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
m1.end_date = '2022-03-10'
m2.extras = {'note': 'bleep blorp'}
import_log = memimp2.import_data([m1.as_dict(), m2.as_dict()])
assert import_log['m | embership']['insert'] == 0
assert import_log['membership']['update'] == 2
# confirm the membership res | olved based on start date and its end date was updated
assert hari.memberships.count() == 1
assert hari.memberships.get().end_date == '2022-03-10'
# confirm the membership resolved based on end date and its extras were updated
assert robot.memberships.count() == 1
assert robot.memberships.get().extras == {'note': 'bleep blorp'}
@pytest.mark.django_db
def test_no_membership_for_person():
create_jurisdiction()
Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
jurisdiction_id="fnd-jid")
# import a person with no memberships
p = ScrapePerson('a man without a country')
person_imp = PersonImporter('fnd-jid')
person_imp.import_data([p.as_dict()])
# try to import a membership
dumb_imp = DumbMockImporter()
memimp = MembershipImporter('fnd-jid', person_imp, dumb_imp, dumb_imp)
with pytest.raises(NoMembershipsError):
memimp.import_data([])
@pytest.mark.django_db
def test_no_membership_for_person_including_party():
"""
even though party is specified we should still get a no memberships error because it doesn't
bind the person to a jurisdiction, thus causing duplication
"""
create_jurisdiction()
Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
jurisdiction_id="fnd-jid")
Organization.objects.create(id="dem", name="Democratic", classification="party")
# import a person with no memberships
p = ScrapePerson('a man without a country', party='Democratic')
person_imp = PersonImporter('fnd-jid')
org_imp = OrganizationImporter('fnd-jid')
person_imp.import_data([p.as_dict()])
# try to import a membership
dumb_imp = DumbMockImporter()
memimp = MembershipImporter('fnd-jid', person_imp, org_imp, dumb_imp)
with pytest.raises(NoMembershipsError):
memimp.import_data([p._related[0].as_dict()])
@pytest.mark.django_db
def test_multiple_orgs_of_same_class():
"""
We should be able to set memberships on organizations with the
same classification within the same jurisdictions
"""
create_jurisdiction()
Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
jurisdiction_id="fnd-jid")
Organization.objects.create(id="fdr", name="Federation", classification="foundation",
jurisdiction_id="fnd-jid")
hari = ScrapePerson('Hari Seldon',
primary_org='foundation',
role='founder',
primary_org_name='Foundation')
picard = ScrapePerson('Jean Luc Picard',
primary_org='foundation',
role='founder',
primary_org_name='Federation')
person_imp = PersonImporter('fnd-jid')
person_imp.import_data([hari.as_dict()])
person_imp.import_data([picard.as_dict()])
# try to import a membership
org_imp = OrganizationImporter('fnd-jid')
dumb_imp = DumbMockImporter()
memimp = MembershipImporter('fnd-jid', person_imp, org_imp, dumb_imp)
memimp.import_data([hari._related[0].as_dict(), picard._related[0].as_dict()])
assert Person.objects.get(name='Hari Seldon'
).memberships.get().organization.name == 'Foundation'
assert Person.objects.get(name='Jean Luc Picard'
).memberships.get().organization.name == 'Federation'
@pytest.mark.django_db
def test_multiple_posts_class():
create_jurisdiction()
org = Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
jurisdiction_id="fnd-jid")
hari = Person.objects.create(id="hs", name="Hari Seldon")
founder = Post.objects.create(id='f', label="founder", role="Founder", organization=org)
chair = Post.objects.create(id='c', label="chair", role="Chair", organization=org)
m1 = ScrapeMembership(person_id=hari.id, organization_id=org.id, post_id=founder.id)
m2 = ScrapeMembership(person_id=hari.id, organization_id=org.id, post_id=chair.id)
dumb_imp = DumbMockImporter()
memimp = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
memimp.import_data([m1.as_dict(), m2.as_dict()])
    # ensure that the memberships are attached in the right places
assert org.memberships.count() == 2
assert hari.memberships.count() == 2
assert founder.memberships.count() == 1
assert chair.memberships.count() == 1
@pytest.mark.django_db
def test_unmatched_person():
create_jurisdiction()
org = Organization.objects.create(id="fnd", name="Foundation", classification="foundation",
jurisdiction_id="fnd-jid")
# not a real person, won't have a person_id after import
m1 = ScrapeMembership(person_name='Harry Seldom', organization_id=org.id,
person_id=None
)
dumb_imp = DumbMockImporter()
memimp = MembershipImporter('fnd-jid', dumb_imp, dumb_imp, dumb_imp)
memimp.import_data([m1.as_dict()])
|
kennethreitz/pipenv | pipenv/vendor/dparse/updater.py | Python | mit | 4,566 | 0.001533 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import re
import json
import tempfile
import toml
import os
class RequirementsTXTUpdater(object):
SUB_REGEX = r"^{}(?=\s*\r?\n?$)"
@classmethod
def update(cls, content, dependency, version, spec="==", hashes=()):
"""
Updates the requirement to the latest version for the given content and adds hashes
if neccessary.
:param content: str, content
:return: str, updated content
"""
new_line = "{name}{spec}{version}".format(name=dependency.full_name, spec=spec, version=version)
appendix = ''
# leave environment markers intact
if ";" in dependency.line:
# condense multiline, split out the env marker, strip comments and --hashes
new_line += ";" + dependency.line.splitlines()[0].split(";", 1)[1] \
.split("#")[0].split("--hash")[0].rstrip()
# add the comment
if "#" in dependency.line:
# split the line into parts: requirement and comment
parts = dependency.line.split("#")
requirement, comment = parts[0], "#".join(parts[1:])
# find all whitespaces between the requirement and the comment
whitespaces = (hex(ord('\t')), hex(ord(' ')))
trailing_whitespace = ''
for c in requirement[::-1]:
if hex(ord(c)) in whitespaces:
trailing_whitespace += c
else:
break
appendix += trailing_whitespace + "#" + comment
# if this is a hashed requirement, add a multiline break before the comment
if dependency.hashes and not new_line.endswith("\\"):
new_line += " \\"
# if this is a hashed requirement, add the hashes
if hashes:
for n, new_hash in enumerate(hashes):
new_line += "\n --hash={method}:{hash}".format(
method=new_hash['method'],
hash=new_hash['hash']
)
# append a new multiline break if this is not the last line
if len(hashes) > n + 1:
new_line += " \\"
new_line += appendix
regex = cls.SUB_REGEX.format(re.escape(dependency.line))
return re.sub(regex, new_line, content, flags=re.MULTILINE)
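# A minimal usage sketch (hedged: `dep` stands for a dparse Dependency object,
# which is assumed to expose the `full_name`, `line` and `hashes` attributes
# used above):
#
#   content = "requests==2.19.0\n"
#   updated = RequirementsTXTUpdater.update(content, dep, "2.20.0")
#   # -> "requests==2.20.0\n"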
class CondaYMLUpdater(RequirementsTXTUpdater):
SUB_REGEX = r"{}(?=\s*\r?\n?$)"
class ToxINIUpdater(CondaYMLUpdater):
pass
class SetupCFGUpdater(CondaYMLUpdater):
pass
class PipfileUpdater(object):
@classmethod
def update(cls, content, dependency, version, spec="==", hashes=()):
data = toml.loads(content)
if data:
for package_type in ['packages', 'dev-packages']:
if package_type in data:
if dependency.full_name in data[package_type]:
data[package_type][dependency.full_name] = "{spec}{version}".format(
spec=spec, version=version
)
try:
from pipenv.project import Project
except ImportError:
raise ImportError("Updating a Pipfile requires the pipenv extra to be installed. Install it with "
"pip install dparse[pipenv]")
pipfile = tempfile.NamedTemporaryFile(delete=False)
p = Project(chdir=False)
p.write_toml(data=data, path=pipfile.name)
data = open(pipfile.name).read()
os.remove(pipfile.name)
return data
class PipfileLockUpdater(object):
@classmethod
def update(cls, content, dependency, version, spec="==", hashes=()):
data = json.loads(content)
if data: |
for package_type in ['default', 'develop']:
if package_type in data:
if dependency.full_name in data[package_type]:
data[package_type][dependency.full_name] = {
'hashes': [
"{method}:{hash}".format(
hash=h['hash'],
method=h['method']
| ) for h in hashes
],
'version': "{spec}{version}".format(
spec=spec, version=version
)
}
return json.dumps(data, indent=4, separators=(',', ': ')) + "\n"
|
ResolveWang/algrithm_qa | arrandmatrix/q17.py | Python | mit | 1,207 | 0.00095 | """
问题描述:给定一个矩阵matrix,其中的值有正、负和0,返回子矩阵的最大累加和.
例如,矩阵matrix为
-90 48 78
64 -40 64
-81 -7 66
其中,最大累加和的子矩阵为:
48 78
-40 64
-7 66
所以返回累加和209.
例如,matrix为:
-1 -1 -1
-1 2 2
-1 -1 -1
其中,最大累加和的子矩阵为:
2 2
所以返回累加和为4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMat | rixSum:
@classmethod
def get_max_sum(cls, matrix):
if not matrix:
return 0
max_value = -sys.maxsize
for i in range(len(matrix)):
j = i
pre_arr = [0 for _ in range(len(matrix[0]))]
while j < len(matrix):
arr = cls.arr_add(matrix[j], pre_arr)
max_value = max([MaxSum.get_max_sum(arr), max_v | alue])
j += 1
pre_arr = arr
return max_value
@classmethod
def arr_add(cls, arr1, arr2):
return [arr1[i]+arr2[i] for i in range(len(arr1))]
if __name__ == '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_sum(my_matrix)) |
alephobjects/Cura | Cura/util/resources.py | Python | agpl-3.0 | 9,176 | 0.029316 | #coding:utf8
"""
Helper module to get easy access to the path where resources are stored.
This is needed because the resource location depends on the packaging method and the OS
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import os
import sys
import glob
import platform
import locale
import gettext
import profile
import ConfigParser as configparser
if sys.platform.startswith('darwin'):
try:
#Foundation import can crash on some MacOS installs
from Foundation import *
except:
pass
if sys.platform.startswith('darwin'):
if hasattr(sys, 'frozen'):
try:
resourceBasePath = NSBundle.mainBundle().resourcePath()
except:
resourceBasePath = os.path.join(os.path.dirname(__file__), "../../../../../")
else:
resourceBasePath = os.path.join(os.path.dirname(__file__), "../../resources")
else:
resourceBasePath = os.path.join(os.path.dirname(__file__), "../../resources")
def getPathForResource(dir, subdir, resource_name):
assert os.path.isdir(dir), "{p} is not a directory".format(p=dir)
path = os.path.normpath(os.path.join(dir, subdir, resource_name))
if not os.path.isfile(path):
return None
return path
def getPathForImage(name):
return getPathForResource(resourceBasePath, 'images', name)
def getPathForMesh(name):
return getPathForResource(resourceBasePath, 'meshes', name)
def getPathForFirmware(name):
return getPathForResource(resourceBasePath, 'firmware', name)
def getDefaultMachineProfiles():
path = os.path.normpath(os.path.join(resourceBasePath, 'machine_profiles', '*.ini'))
return glob.glob(path)
def setupLocalization(selectedLanguage = None):
#Default to english
languages = ['en']
if selectedLanguage is not None:
for item in getLanguageOptions():
if item[1] == selectedLanguage and item[0] is not None:
languages = [item[0]]
break
if languages[0] == 'AUTO':
languages = ['en']
defaultLocale = getDefaultLocale()
if defaultLocale is not None:
for item in getLanguageOptions():
if item[0] == 'AUTO':
continue
if item[0] is not None and defaultLocale.startswith(item[0]):
languages = [item[0]]
locale_path = os.path.normpath(os.path.join(resourceBasePath, 'locale'))
translation = gettext.translation('Cura', locale_path, languages, fallback=True)
#translation.ugettext = lambda message: u'#' + message
translation.install(unicode=True)
def getLanguageOptions():
return [
['AUTO', 'Autodetect'],
['en', 'English'],
['de', 'Deutsch'],
['fr', 'French'],
['tr', 'Turkish'],
['ru', 'Russian'],
# ['ko', 'Korean'],
# ['zh', 'Chinese'],
# ['nl', 'Nederlands'],
# ['es', 'Spanish'],
# ['po', 'Polish']
]
def getDefaultLocale():
defaultLocale = None
    # On Windows, we look for the actual UI language, as someone could have
    # an English Windows installation but use a non-English locale.
if platform.system() == "Windows":
try:
import ctypes
windll = ctypes.windll.kernel32
defaultLocale = locale.windows_locale[windll.GetUserDefaultUILanguage()]
except:
pass
if defaultLocale is None:
try:
defaultLocale = locale.getdefaultlocale()[0]
except:
pass
return defaultLocale
class ProfileIni(object):
@staticmethod
def str2bool(str):
return False if str is None else str.lower() in ['true', 'yes', '1', 'y', 't']
def __init__(self, ini_file):
self.ini = ini_file
self.path = os.path.split(self.ini)[0]
self.base_name = os.path.splitext(os.path.basename(self.ini))[0]
if self.base_name == 'profile' or self.base_name == 'material':
self.base_name = os.path.basename(self.path)
# Name to show in the UI
self.name = self._getProfileInfo(ini_file, 'name')
if self.name is None:
self.name = self.base_name
self.full_name = self._getProfileInfo(ini_file, 'full_name')
if self.full_name is None:
self.full_name = self.name
# URL for the profile
self.url = self._getProfileInfo(ini_file, 'url')
# Finds the full path to the real profile_file
self.profile_file = self._findProfileFile()
# default = The default profile to select
self.default = self.str2bool(self._getProfileInfo(self.ini, 'default'))
# disabled = do not make available in the UI
self.disabled = self.str2bool(self._getProfileInfo(self.ini, 'disabled'))
# always_visible = Always display in the UI even if it's the only available option
if self._getProfileInfo(self.ini, 'always_visible') is None:
self.always_visible = True
else:
self.always_visible = self.str2bool(self._getProfileInfo(self.ini, 'always_visible'))
try:
self.order = int(self._getProfileInfo(self.ini, 'order'))
except:
self.order = 999
def _findProfileFile(self):
profile_file = self._getProfileInfo(self.ini, 'profile_file')
if profile_file is None:
return self.ini
else:
if os.path.exists(profile_file):
return profile_file
elif os.path.exists(os.path.join(self.path, profile_file)):
return os.path.join(self.path, profile_file)
else:
return self.ini
def _getProfileInfo(self, ini_file, key):
cp = configparser.ConfigParser()
cp.read(ini_file)
if cp.has_option('info', key):
return cp.get('info', key)
return None
def _isInList(self, list):
""" Check if an option with the same base name already exists in the list """
for ini in list:
if ini.base_name == self.base_name:
return True
return False
def getProfileDict(self):
profile_dict = {}
cp = configparser.ConfigParser()
cp.read(self.profile_file)
for setting in profile.settingsList:
section = 'profile' if setting.isProfile() else 'alterations'
if setting.isProfile() or setting.isAlteration():
if cp.has_option(section, setting.getName()):
profile_dict[setting.getName()] = cp.get(section, setting.getName())
return profile_dict
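    # Py2-style comparison hook: profiles sort primarily by their `order`
    # field and fall back to `name`, so sorted() on a profile list yields
    # the order shown in the UI.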
def __cmp__(self, cmp):
if self.order < cmp.order:
return -1
elif self.order > cmp.order:
return 1
else:
if self.name < cmp.name:
return -1
elif self.name == cmp.name:
return 0
else:
return 1
def __str__ (self):
return "%s%s: %d" % (self.name, "(disabled)" if self.disabled else "", self.order)
def __repr__ (self):
return str(self)
class PrintMaterial(ProfileIni):
def __init__(self, ini_file):
super(PrintMaterial, self).__init__(ini_file)
self.profiles = []
self.options = []
self.types = []
types = self._getProfileInfo(self.ini, 'material_types')
if types != None:
for type in types.split('|'):
self.types.append(type.strip())
# Comment for the profile
self.description = self._getProfileInfo(ini_file, 'description')
self.parseDirectory(self.path)
def parseDirectory(self, path):
profile_files = sorted(glob.glob(os.path.join(path, '*/profile.ini')))
if len(profile_files) > 0:
for profile_file in profile_files:
profile_ini = ProfileIni(profile_file)
if not profile_ini._isInList(self.profiles):
self.profiles.append(profile_ini)
option_files = sorted(glob.glob(os.path.join(path, 'option_*.ini')))
for option_file in option_files:
option = ProfileIni(option_file)
if not option._isInList(self.options):
self.options.append(option)
self.profiles.sort()
self.options.sort()
def addGlobalOptions(self, global_options):
for option in global_options:
if not option._isInList(self.options):
self.options.append(option)
self.options.sort()
def __str__ (self):
return "%s%s: %d - Profiles : %s - Options - %s\n" % (self.name, "(disabled)" if self.disabled else "",
self.order, self.profiles, self.options)
def al | phaAndExperimen | tal(item):
has_special = False
experimental_indicator = '*'
key = item.name
if key.startswith(experimental_indicator):
has_special = True
return has_special, key.lstrip(experimental_indicator).lower()
def getSimpleModeMaterials():
machine_type = profile.getMachineSetting('machine_type')
paths = []
paths.append(os.path.normpath(os.path.expanduser(os.path.join('~', '.Cura', 'quickprint', machine_type))))
paths.append(os.path.normpath(os.path.expanduser(os.path.join('~', '.Cura', 'quickprint'))))
paths.append(os.path.normpath(os.path.join(resourceBasePath, 'quickprint', machine_type)))
paths.append(os.path. |
andrewfu0325/gem5-aladdin | src/python/m5/main.py | Python | bsd-3-clause | 14,348 | 0.003694 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import code
import datetime
import os
import socket
import sys
__all__ = [ 'options', 'arguments', 'main' ]
usage="%prog [gem5 options] script.py [script options]"
version="%prog 2.0"
brief_copyright=\
"gem5 is copyrighted software; use the --copyright option for details."
def parse_options():
import config
from options import OptionParser
options = OptionParser(usage=usage, version=version,
description=brief_copyright)
option = options.add_option
group = options.set_group
# Help options
option('-B', "--build-info", action="store_true", default=False,
help="Show build information")
option('-C', "--copyright", action="store_true", default=False,
help="Show full copyright information")
option('-R', "--readme", action="store_true", default=False,
help="Show the readme")
# Options for configuring the base simulator
option('-d', "--outdir", metavar="DIR", default="m5out",
help="Set the output directory to DIR [Default: %default]")
option('-r', "--redirect-stdout", action="store_true", default=False,
help="Redirect stdout (& stderr, without -e) to file")
option('-e', "--redirect-stderr", action="store_true", default=False,
help="Redirect stderr to file")
option("--stdout-file", metavar="FILE", default="simout",
help="Filename for -r redirection [Default: %default]")
option("--stderr-file", metavar="FILE", default="simerr",
help="Filename for -e redirection [Default: %default]")
option('-i', "--interactive", action="store_true", default=False,
help="Invoke the interactive interpreter after running the script")
option("--pdb", action="store_true", default=False,
help="Invoke the python debugger before running the script")
option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
help="Prepend PATH to the system path when invoking the script")
option('-q', "--quiet", action="count", default=0,
help="Reduce verbosity")
option('-v', "--verbose", action="count", default=0,
help="Increase verbosity")
# Statistics options
group("Statistics Options")
option("--stats-file", metavar="FILE", default="stats.txt",
help="Sets the output file for statistics [Default: %default]")
option("--stats-db-file", metavar="FILE", default="",
help = "Sets the output database file for statistics [Default: \
%default]")
# Configuration Options
group("Configuration Options")
option("--dump-config", metavar="FILE", default="config.ini",
help="Dump configuration output file [Default: %default]")
option("--json-config", metavar="FILE", default="config.json",
help="Create JSON output of the configuration [Default: %default]")
option("--dot-config", metavar="FILE", default="config.dot",
help="Create DOT & pdf outputs of the configuration [Default: %def | ault]")
# Debugging options
group("Debugging Options")
option("--debug-break", metavar="TIME[,TIME]", action='append', split=',',
help="Tick to create a brea | kpoint")
option("--debug-help", action='store_true',
help="Print help on debug flags")
option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
help="Sets the flags for debug output (-FLAG disables a flag)")
option("--debug-start", metavar="TIME", type='int',
help="Start debug output at TIME (must be in ticks)")
option("--debug-file", metavar="FILE", default="cout",
help="Sets the output file for debug [Default: %default]")
option("--debug-ignore", metavar="EXPR", action='append', split=':',
help="Ignore EXPR sim objects")
option("--remote-gdb-port", type='int', default=7000,
help="Remote gdb base port (set to 0 to disable listening)")
# Help options
group("Help Options")
option("--list-sim-objects", action='store_true', default=False,
help="List all built-in SimObjects, their params and default values")
# load the options.py config file to allow people to set their own
# default options
options_file = config.get('options.py')
if options_file:
scope = { 'options' : options }
execfile(options_file, scope)
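        # A hypothetical options.py runs with `options` in scope, so it can
        # use the same helpers registered above, e.g.:
        #   options.add_option("--my-flag", action="store_true", default=False,
        #                      help="site-local option (illustrative only)")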
arguments = options.parse_args()
return options,arguments
def interact(scope):
banner = "gem5 Interactive Console"
ipshell = None
prompt_in1 = "gem5 \\#> "
prompt_out = "gem5 \\#: "
# Is IPython version 0.10 or earlier available?
try:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(argv=["-prompt_in1", prompt_in1,
"-prompt_out", prompt_out],
banner=banner, user_ns=scope)
except ImportError:
pass
# Is IPython version 0.11 or later available?
if not ipshell:
try:
import IPython
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
cfg.PromptManager.in_template = prompt_in1
cfg.PromptManager.out_template = prompt_out
ipshell = InteractiveShellEmbed(config=cfg, user_ns=scope,
banner1=banner)
except ImportError:
pass
if ipshell:
ipshell()
else:
# Use the Python shell in the standard library if IPython
# isn't available.
code.InteractiveConsole(scope).interact(banner)
def main(*args):
import m5
import core
import debug
import defines
import event
import info
import stats
import trace
from util import fatal, warn
if len(args) == 0:
options, arguments = parse_options()
elif len(args) == 2:
options, arguments = args
else:
raise TypeError, "main() takes 0 or 2 arguments (%d given)" % len(args)
m5.options = options
def check_tracing():
if defines.TRACING_ON:
return
fatal("Tracing is not enabled. Compile with TRACING_ON")
# Set the main event queue for the main thread.
event.mainq = event.getEventQueue(0)
event.setEventQueue(event.mainq)
if not os.path.isdir(options.outdir):
os.makedirs(options.outdir)
# These filenames are used only if the redirect_std* options are set
stdout_file = os.path.join(options.outdir, options.stdout_file)
stderr_file = os.path.join(options.outdir, options. |
jhogsett/linkit | python/mc_send.py | Python | mit | 1,096 | 0.000912 | #!/usr/bin/python
import socket
import struct
import sys
message = 'very important data'
multicast_group = ('224.3.29.71', 10000)
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set a timeout so the socket does not block indefinitely when trying
# to receive data.
sock.settimeout(0.2)
# Set the time-to-live for messages to 1 so they do not go past the
# local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
try:
# Send data to the multicast group
print >>sys.stderr, 'sending "%s"' % message
sent = sock.sendto(message, multicast_group)
# Look for responses from all recipients
while True:
print >>sys.st | derr, 'waiting to receive'
try:
data, server = sock.recvfrom(16)
except socket.timeout:
print >>sys.stderr, 'timed out, no more responses'
break
else:
print >>sys.stderr, 'received "%s" | from %s' % (data, server)
finally:
print >>sys.stderr, 'closing socket'
sock.close()
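# For completeness, a matching receiver (a minimal sketch, not part of this
# script) would bind the port and join the group before calling recvfrom():
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.bind(('', 10000))
#   mreq = struct.pack('4sL', socket.inet_aton('224.3.29.71'), socket.INADDR_ANY)
#   sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)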
|