repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
jhen0409/electron | script/upload.py | Python | mit | 7,881 | 0.011166 | #!/usr/bin/env python
import argparse
import errno
import os
import subprocess
import sys
import tempfile
from lib.config import PLATFORM, get_target_arch, get_chromedriver_version, \
get_platform_key, get_env_var
from lib.util import electron_gyp, execute, get_electron_version, \
parse_version, scoped_cwd
from lib.github import GitHub
# GitHub repository the release artifacts are uploaded to.
ELECTRON_REPO = 'electron/electron'
# Version of the current checkout (from script/lib helpers).
ELECTRON_VERSION = get_electron_version()

# Project/product names come from the gyp configuration.
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']

# Repository root, release build output and dist staging directories.
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')

# Archive names for the main distribution, the debug symbols, and the
# dSYM bundle (the latter is only uploaded on darwin, see main()).
DIST_NAME = '{0}-{1}-{2}-{3}.zip'.format(PROJECT_NAME,
                                         ELECTRON_VERSION,
                                         get_platform_key(),
                                         get_target_arch())
SYMBOLS_NAME = '{0}-{1}-{2}-{3}-symbols.zip'.format(PROJECT_NAME,
                                                    ELECTRON_VERSION,
                                                    get_platform_key(),
                                                    get_target_arch())
DSYM_NAME = '{0}-{1}-{2}-{3}-dsym.zip'.format(PROJECT_NAME,
                                              ELECTRON_VERSION,
                                              get_platform_key(),
                                              get_target_arch())
def main():
    """Build (if needed) and upload release artifacts to GitHub.

    Returns 1 when the tag being released does not match the version the
    built binary reports; returns None otherwise.
    """
    args = parse_args()

    if not args.publish_release:
        # Regenerate the dist zip when it is older than the latest commit.
        if not dist_newer_than_head():
            create_dist = os.path.join(SOURCE_ROOT, 'script', 'create-dist.py')
            execute([sys.executable, create_dist])

        build_version = get_electron_build_version()
        if not ELECTRON_VERSION.startswith(build_version):
            error = 'Tag name ({0}) should match build version ({1})\n'.format(
                ELECTRON_VERSION, build_version)
            sys.stderr.write(error)
            sys.stderr.flush()
            return 1

    github = GitHub(auth_token())
    releases = github.repos(ELECTRON_REPO).releases.get()
    # A non-draft release already using this tag means we must not draft
    # under the same tag again (see create_or_get_release_draft).
    tag_exists = False
    for release in releases:
        if not release['draft'] and release['tag_name'] == args.version:
            tag_exists = True
            break

    release = create_or_get_release_draft(github, releases, args.version,
                                          tag_exists)

    if args.publish_release:
        # Upload the SHASUMS.txt.
        execute([sys.executable,
                 os.path.join(SOURCE_ROOT, 'script', 'upload-checksums.py'),
                 '-v', ELECTRON_VERSION])
        # Upload the index.json.
        execute([sys.executable,
                 os.path.join(SOURCE_ROOT, 'script', 'upload-index-json.py')])
        # Press the publish button.
        publish_release(github, release['id'])
        # Do not upload other files when passed "-p".
        return

    # Upload Electron with GitHub Releases API.
    upload_electron(github, release, os.path.join(DIST_DIR, DIST_NAME))
    upload_electron(github, release, os.path.join(DIST_DIR, SYMBOLS_NAME))
    if PLATFORM == 'darwin':
        upload_electron(github, release, os.path.join(DIST_DIR, DSYM_NAME))

    # Upload free version of ffmpeg.
    ffmpeg = 'ffmpeg-{0}-{1}-{2}.zip'.format(
        ELECTRON_VERSION, get_platform_key(), get_target_arch())
    upload_electron(github, release, os.path.join(DIST_DIR, ffmpeg))

    # Upload chromedriver and mksnapshot for minor version update.
    if parse_version(args.version)[2] == '0':
        chromedriver = 'chromedriver-{0}-{1}-{2}.zip'.format(
            get_chromedriver_version(), get_platform_key(), get_target_arch())
        upload_electron(github, release, os.path.join(DIST_DIR, chromedriver))
        mksnapshot = 'mksnapshot-{0}-{1}-{2}.zip'.format(
            ELECTRON_VERSION, get_platform_key(), get_target_arch())
        upload_electron(github, release, os.path.join(DIST_DIR, mksnapshot))

    if PLATFORM == 'win32' and not tag_exists:
        # Upload PDBs to Windows symbol server.
        execute([sys.executable,
                 os.path.join(SOURCE_ROOT, 'script', 'upload-windows-pdb.py')])

    # Upload node headers.
    execute([sys.executable,
             os.path.join(SOURCE_ROOT, 'script', 'upload-node-headers.py'),
             '-v', args.version])
def parse_args():
    """Parse the -v/--version and -p/--publish-release command-line flags."""
    parser = argparse.ArgumentParser(description='upload distribution file')
    parser.add_argument('-v', '--version',
                        default=ELECTRON_VERSION,
                        help='Specify the version')
    parser.add_argument('-p', '--publish-release',
                        action='store_true',
                        help='Publish the release')
    return parser.parse_args()
def get_electron_build_version():
    """Return the version the built Electron binary reports via --version.

    On CI, or for the 'arm' target (where the binary cannot be executed on
    the host), the checked-out ELECTRON_VERSION is returned instead.
    """
    # 'in os.environ' replaces the Python-2-only os.environ.has_key().
    if get_target_arch() == 'arm' or 'CI' in os.environ:
        # In CI we just build as told.
        return ELECTRON_VERSION
    if PLATFORM == 'darwin':
        electron = os.path.join(SOURCE_ROOT, 'out', 'R',
                                '{0}.app'.format(PRODUCT_NAME), 'Contents',
                                'MacOS', PRODUCT_NAME)
    elif PLATFORM == 'win32':
        electron = os.path.join(SOURCE_ROOT, 'out', 'R',
                                '{0}.exe'.format(PROJECT_NAME))
    else:
        electron = os.path.join(SOURCE_ROOT, 'out', 'R', PROJECT_NAME)
    return subprocess.check_output([electron, '--version']).strip()
def dist_newer_than_head():
    """Return True when the dist zip is newer than the most recent commit.

    A missing dist zip counts as "not newer" (returns False).
    """
    with scoped_cwd(SOURCE_ROOT):
        try:
            head_time = subprocess.check_output(
                ['git', 'log', '--pretty=format:%at', '-n', '1']).strip()
            dist_time = os.path.getmtime(os.path.join(DIST_DIR, DIST_NAME))
        except OSError as err:
            # ENOENT just means the dist zip has not been built yet.
            if err.errno != errno.ENOENT:
                raise
            return False
        return dist_time > int(head_time)
def get_text_with_editor(name):
    """Open $EDITOR (default: nano) on a seeded temp file and return what
    the user wrote, with '#'-prefixed comment lines stripped out.
    """
    editor = os.environ.get('EDITOR', 'nano')
    initial_message = '\n# Please enter the body of your release note for %s.' \
        % name
    # mode='w' so writing str works on Python 3 too (the default 'w+b'
    # binary mode made t.write(initial_message) fail there).
    t = tempfile.NamedTemporaryFile(mode='w', suffix='.tmp', delete=False)
    t.write(initial_message)
    t.close()
    subprocess.call([editor, t.name])

    text = ''
    # 'with' closes the read handle deterministically (it used to leak).
    with open(t.name, 'r') as edited:
        for line in edited:
            if len(line) == 0 or line[0] != '#':
                text += line
    os.unlink(t.name)
    return text
def create_or_get_release_draft(github, releases, tag, tag_exists):
    """Return the first existing draft release, or create a fresh draft."""
    drafts = [release for release in releases if release['draft']]
    if drafts:
        return drafts[0]
    # A published release already owns this tag; draft under a throwaway
    # tag so we never collide with it.
    if tag_exists:
        tag = 'do-not-publish-me'
    return create_release_draft(github, tag)
def create_release_draft(github, tag):
    """Create a draft GitHub release named '<project> <tag>' and return it.

    On CI a placeholder body is used; otherwise the user's editor is opened
    and an empty note aborts the script with exit code 0.
    """
    name = '{0} {1}'.format(PROJECT_NAME, tag)
    # 'in os.environ' replaces the Python-2-only os.environ.has_key().
    if 'CI' in os.environ:
        body = '(placeholder)'
    else:
        body = get_text_with_editor(name)
    if body == '':
        sys.stderr.write('Quit due to empty release note.\n')
        sys.exit(0)
    data = dict(tag_name=tag, name=name, body=body, draft=True)
    r = github.repos(ELECTRON_REPO).releases.post(data=data)
    return r
def upload_electron(github, release, file_path):
    """Upload file_path as an asset of the given release.

    On CI any existing asset with the same name is deleted first so the
    upload can overwrite a previous run's artifact.
    """
    # Delete the original file before uploading in CI.
    # 'in os.environ' replaces the Python-2-only os.environ.has_key().
    if 'CI' in os.environ:
        try:
            for asset in release['assets']:
                if asset['name'] == os.path.basename(file_path):
                    github.repos(ELECTRON_REPO).releases.assets(
                        asset['id']).delete()
                    break
        except Exception:
            # Best effort: ignore failures deleting the stale asset.
            pass

    # Upload the file.
    params = {'name': os.path.basename(file_path)}
    headers = {'Content-Type': 'application/zip'}
    with open(file_path, 'rb') as f:
        # NOTE(review): verify=False disables TLS certificate verification
        # on the upload request -- confirm this is intentional.
        github.repos(ELECTRON_REPO).releases(release['id']).assets.post(
            params=params, headers=headers, data=f, verify=False)
def publish_release(github, release_id):
    """Clear the draft flag, making the given release public."""
    github.repos(ELECTRON_REPO).releases(release_id).patch(
        data={'draft': False})
def auth_token():
    """Return the GitHub API token from the environment, or abort."""
    token = get_env_var('GITHUB_TOKEN')
    message = ('Error: Please set the $ELECTRON_GITHUB_TOKEN '
               'environment variable, which is your personal token')
    # NOTE(review): the message names $ELECTRON_GITHUB_TOKEN while the
    # lookup uses 'GITHUB_TOKEN' -- presumably get_env_var prefixes
    # 'ELECTRON_'; confirm in lib/config.
    # NOTE(review): assert statements are stripped under `python -O`; an
    # explicit raise would be safer for this validation.
    assert token, message
    return token
if __name__ == '__main__':
    # sys is already imported at module scope; the redundant re-import that
    # used to live here has been removed.
    sys.exit(main())
|
rebost/django | django/contrib/gis/tests/layermap/tests.py | Python | bsd-3-clause | 12,784 | 0.003598 | from __future__ import absolute_import
import os
from copy import copy
from decimal import Decimal
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.tests.utils import mysql
from django.contrib.gis.utils.layermapping import (LayerMapping, LayerMapError,
InvalidDecimal, MissingForeignKey)
from django.test import TestCase
from .models import (
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State,
city_mapping, co_mapping, cofeat_mapping, inter_mapping)
# Directory holding the test shapefiles: ../data relative to this module.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')

# Expected contents of the county shapefile (parallel lists).
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(TestCase):
    def test_init(self):
        """Testing LayerMapping initialization: bad mappings raise
        LayerMapError, a bogus encoding raises LookupError."""
        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'

        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'

        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'

        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            with self.assertRaises(LayerMapError):
                lm = LayerMapping(City, city_shp, bad_map)

        # A LookupError should be thrown for bogus encodings.
        with self.assertRaises(LookupError):
            lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
    def test_simple_layermap(self):
        "Test LayerMapping import of a simple point shapefile."
        # Setting up for the LayerMapping.
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save()

        # There should be three cities in the shape file.
        self.assertEqual(3, City.objects.count())

        # Opening up the shapefile, and verifying the values in each
        # of the features made it to the model.
        ds = DataSource(city_shp)
        layer = ds[0]
        for feat in layer:
            city = City.objects.get(name=feat['Name'].value)
            self.assertEqual(feat['Population'].value, city.population)
            # Compare via str to avoid float representation artifacts.
            self.assertEqual(Decimal(str(feat['Density'])), city.density)
            self.assertEqual(feat['Created'].value, city.dt)

            # Comparing the geometries (to 5 decimal places).
            pnt1, pnt2 = feat.geom, city.point
            self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
            self.assertAlmostEqual(pnt1.y, pnt2.y, 5)
    def test_layermap_strict(self):
        "Testing the `strict` keyword, and import of a LineString shapefile."
        # When the `strict` keyword is set an error encountered will force
        # the importation to stop.
        with self.assertRaises(InvalidDecimal):
            lm = LayerMapping(Interstate, inter_shp, inter_mapping)
            lm.save(silent=True, strict=True)
        Interstate.objects.all().delete()

        # This LayerMapping should work b/c `strict` is not set.
        lm = LayerMapping(Interstate, inter_shp, inter_mapping)
        lm.save(silent=True)

        # Two interstate should have imported correctly.
        self.assertEqual(2, Interstate.objects.count())

        # Verifying the values in the layer w/the model.
        ds = DataSource(inter_shp)

        # Only the first two features of this shapefile are valid.
        valid_feats = ds[0][:2]
        for feat in valid_feats:
            istate = Interstate.objects.get(name=feat['Name'].value)

            if feat.fid == 0:
                self.assertEqual(Decimal(str(feat['Length'])), istate.length)
            elif feat.fid == 1:
                # Everything but the first two decimal digits were truncated,
                # because the Interstate model's `length` field has decimal_places=2.
                self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)

            for p1, p2 in zip(feat.geom, istate.path):
                self.assertAlmostEqual(p1[0], p2[0], 6)
                self.assertAlmostEqual(p1[1], p2[1], 6)
    def county_helper(self, county_feat=True):
        "Helper function for ensuring the integrity of the mapped County models."
        # county_feat: when True, also verify the non-unique CountyFeat rows.
        for name, n, st in zip(NAMES, NUMS, STATES):
            # Should only be one record b/c of `unique` keyword.
            c = County.objects.get(name=name)
            self.assertEqual(n, len(c.mpoly))
            self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.

            # Multiple records because `unique` was not set.
            if county_feat:
                qs = CountyFeat.objects.filter(name=name)
                self.assertEqual(n, qs.count())
def test_layermap_unique_multigeometry_fk(self):
"Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
# All the following should work.
try:
# Telling LayerMapping that we want no transformations performed on the data.
lm = LayerMapping(County, co_shp, co_mapping, transform=False)
# Specifying the source spatial reference system via the `source_srs` keyword.
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
# Unique may take tuple or string parameters.
for arg in ('name', ('name', 'mpoly')):
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
except:
self.fail('No exception should be raised for proper use of keywords.')
# Testing invalid params for the `unique` keyword.
for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)
# No source reference system defined in the shapefile, should raise an error.
if not mysql:
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)
# Passing in invalid ForeignKey mapping parameters -- must be a dictionary
# mapping for the model the ForeignKey points to.
bad_fk_map1 = copy(co_mapping); bad_fk_map1['state'] = 'name'
bad_fk_map2 = copy(co_mapping); bad_fk_map2['state'] = {'nombre' : 'State'}
self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)
# There exist no State models for the ForeignKey mapping to work -- should raise
# a MissingForeignKey exception (this error would be ignored if the `strict`
# keyword is not set).
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)
# Now creating the state models so the ForeignKey mapping may work.
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# If a mapping is specified as a collection, all OGR fields that
# are not collections will be converted into them. For example,
# a Point column would be converted to MultiPoint. Other things being done
# w/the keyword args:
# `transform=False`: Specifies that no transform is to be done; this
# has the effect of ignoring the spatial reference check (because the
# county shapefile does not have implicit spatial reference info).
#
# `unique='name'`: Creates models on the condition th |
lbracken/news_data | pipeline/__init__.py | Python | mit | 175 | 0.022857 | # -*- coding: utf-8 -*-
"""
news_data.pipeline
~~~~~~~~~~~~~~~~~~
news_data processing pipeline package
:license: MIT, see LICENSE for more details.
"""
def transform_scalars(dataset, firstSlice=None, lastSlice=None, axis=2):
    """Delete Slices in Dataset.

    firstSlice/lastSlice: inclusive index range to remove along `axis`.
    NOTE(review): the defaults of None would crash np.linspace below;
    presumably the tomviz GUI always fills these in -- confirm.
    """
    from tomviz import utils
    import numpy as np

    # Get the current dataset.
    array = utils.get_array(dataset)

    # Get indices of the slices to be deleted.
    indices = np.linspace(firstSlice, lastSlice,
                          lastSlice - firstSlice + 1).astype(int)

    # Delete the specified slices.
    array = np.delete(array, indices, axis)

    # Set the result as the new scalars.
    utils.set_array(dataset, array)

    # Delete corresponding tilt angles if dataset is a tilt series.
    if axis == 2:
        try:
            tilt_angles = utils.get_tilt_angles(dataset)
            tilt_angles = np.delete(tilt_angles, indices)
            utils.set_tilt_angles(dataset, tilt_angles)
        # except Exception instead of a bare except, so KeyboardInterrupt
        # and SystemExit are no longer swallowed.
        except Exception:
            # TODO what exception are we ignoring here?
            pass
|
deo1/deo1 | NaiveNet/NaiveNet.py | Python | mit | 5,324 | 0.019534 | # with reference to: https://www.amazon.com/Make-Your-Own-Neural-Network-ebook/dp/B01EER4Z4G
import numpy as np
from numpy import random as rand
import scipy.special
from time import sleep
def main():
    """Demo: train NaiveNet to classify 3-bit binary codes into one-hot digits."""
    print('\nTesting 3 bit binary encoding classification')

    # Training pairs: (input bits, one-hot target for the encoded integer).
    encoding_pairs = [
        ([0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]),
        ([0, 0, 1], [0, 1, 0, 0, 0, 0, 0, 0]),
        ([0, 1, 0], [0, 0, 1, 0, 0, 0, 0, 0]),
        ([0, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0]),
        ([1, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0]),
        ([1, 0, 1], [0, 0, 0, 0, 0, 1, 0, 0]),
        ([1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0]),
        ([1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1])]

    # 3 input bits, 50 hidden nodes, 8-way classification output.
    net = NaiveNet(3, 50, 8)

    epochs = 50
    eps = 0.01  # keep net inputs strictly inside (0, 1)
    max_value = 1 + 2 * eps
    sleep_time = 10 / epochs
    evaluations = []
    for epoch in range(epochs):
        for iteration, (bits, onehot) in enumerate(encoding_pairs, start=1):
            inputs = (np.array(bits) + eps) / max_value
            target_classification = (np.array(onehot) + eps) / max_value
            target_value = np.argmax(target_classification)

            # Forward pass, then one gradient-descent update.
            layers = net.query(inputs)
            loss, outputs = net.train(target_classification, layers)

            print("Epoch: {}\nIteration: {}".format(epoch, iteration))
            print("Input:{}\nEstimate: {}\nTarget:\n{}\nOutput:\n{}\nLoss:\n{}\n".format(
                target_value, np.argmax(outputs),
                np.array(target_classification, ndmin=2).T, outputs, loss))
            sleep(sleep_time)

            # Score accuracy on the final epoch only.
            if epoch == epochs - 1:
                evaluations.append(int(np.argmax(outputs) == target_value))

    print("Final Train Accuracy: {} out of {} ({}%)".format(
        sum(evaluations), len(evaluations),
        100 * (sum(evaluations) / len(evaluations))))
class NaiveNet():
    """A naive 3 layer (input/hidden/output) neural network without biases."""

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learn_rate=0.3,
                 initial_weights='gaussian', loss_function='squared'):
        """Build the network.

        input_nodes/hidden_nodes/output_nodes: layer sizes.
        learn_rate: gradient-descent step size.
        initial_weights: 'gaussian' or 'uniform' weight initialization.
        loss_function: only 'squared' is supported.
        """
        self.inodes = input_nodes
        self.hnodes = hidden_nodes
        self.onodes = output_nodes
        self.activation_function = lambda x: scipy.special.expit(x)  # sigmoid / logistic
        self.lr = learn_rate
        self.__set_weights(initial_weights)
        self.__set_loss_function(loss_function)

    def __repr__(self):
        members = [(k, str(v)) for k, v in vars(self).items() if not k.startswith('_')]
        printable = [' {}: {}'.format(m[0], m[1]) for m in members]
        return '{}{}{}'.format('NaiveNet(\n', '\n'.join(printable), '\n)')

    def __set_weights(self, initial_weights):
        """Initialize wih as (hidden x input) and who as (output x hidden)."""
        if initial_weights == 'uniform':
            # BUGFIX: this branch used to build (input x hidden) and
            # (hidden x output) matrices -- transposed w.r.t. the gaussian
            # branch -- which broke query() for non-square layer sizes.
            self.wih = rand.rand(self.hnodes, self.inodes) - 0.5
            self.who = rand.rand(self.onodes, self.hnodes) - 0.5
        elif initial_weights == 'gaussian':
            self.wih = rand.normal(0.0, pow(self.inodes, -0.5), [self.hnodes, self.inodes])
            self.who = rand.normal(0.0, pow(self.hnodes, -0.5), [self.onodes, self.hnodes])
        else:
            raise RuntimeError('initial_weights: "{}" not supported.'.format(initial_weights))

    def __set_loss_function(self, loss_function):
        if loss_function == 'squared':
            # written as a simple difference here, because the squared term is
            # already accounted for in the differentiated weights update eq.
            self.loss_function = lambda x, y: x - y
        else:
            raise RuntimeError('loss_function_type: "{}" not supported.'.format(loss_function))

    def query(self, inputs):
        """Takes an input to the net and returns an output via forward computation"""
        # convert inputs list to 2d array
        inputs = np.array(inputs, ndmin=2).T
        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return {'i': inputs, 'hi': hidden_inputs, 'ho': hidden_outputs,
                'fi': final_inputs, 'fo': final_outputs}

    def train(self, targets, input_layers):
        """Run one gradient-descent update from a prior query() result.

        Returns (output_errors, final_outputs) as column vectors.
        """
        inputs = input_layers['i']
        targets = np.array(targets, ndmin=2).T  # convert targets list to 2d array
        hidden_outputs = input_layers['ho']
        final_outputs = input_layers['fo']
        # apply the loss function to the output to get the final errors
        output_errors = self.loss_function(targets, final_outputs)
        # backpropagate the errors - split by weights per node then recombine
        hidden_errors = np.dot(self.who.T, output_errors)
        # update the output layer weights via gradient descent
        self.who += self.lr * np.dot(output_errors * final_outputs * (1.0 - final_outputs), hidden_outputs.T)
        # update the hidden layer weights via gradient descent
        self.wih += self.lr * np.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs), inputs.T)
        return output_errors, final_outputs
# Run the demo only when this file is executed directly (not on import).
if __name__ == '__main__':
    main()
|
hzlf/openbroadcast | website/apps/profiles/migrations/0002_auto__del_field_profile_mobile_provider__add_field_profile_description.py | Python | gpl-3.0 | 8,152 | 0.007851 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop Profile.mobile_provider, add description fields."""
        # Deleting field 'Profile.mobile_provider'
        db.delete_column('user_profiles', 'mobile_provider_id')

        # Adding field 'Profile.description'
        db.add_column('user_profiles', 'description',
                      self.gf('lib.fields.extra.MarkdownTextField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Profile.description_html'
        # (rendered counterpart of the Markdown 'description' field)
        db.add_column('user_profiles', 'description_html',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: restore Profile.mobile_provider, drop description fields."""
        # Adding field 'Profile.mobile_provider'
        db.add_column('user_profiles', 'mobile_provider',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.MobileProvider'], null=True, blank=True),
                      keep_default=False)

        # Deleting field 'Profile.description'
        db.delete_column('user_profiles', 'description')

        # Deleting field 'Profile.description_html'
        db.delete_column('user_profiles', 'description_html')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.link': {
'Meta': {'object_name': 'Link', 'db_table': "'user_links'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'profiles.mobileprovider': {
'Meta': {'object_name': 'MobileProvider', 'db_table': "'user_mobile_providers'"},
'domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile', 'db_table': "'user_profiles'"},
'address1': ( | 'django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'birth_date': ('django.db | .models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'mugshot': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'profiles.service': {
'Meta': {'object_name': 'Service', 'db_table': "'user_services'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.ServiceType']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.servicetype': {
'Meta': {'object_name': 'ServiceType', 'db_table': "'user_service_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['profiles'] |
kwminnick/rackspace-dns-cli | dnsclient/openstack/common/setup.py | Python | apache-2.0 | 12,670 | 0.000316 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
    """Parse a git .mailmap file into {alias_email: canonical_email}.

    A missing file yields an empty mapping; '#' comment lines and lines
    without an alias are ignored.
    """
    mapping = {}
    if os.path.exists(mailmap):
        # 'with' guarantees the handle is closed (it used to leak).
        with open(mailmap, 'r') as fp:
            for l in fp:
                l = l.strip()
                if not l.startswith('#') and ' ' in l:
                    canonical_email, alias = ['%s>' % x.strip() for x in
                                              l.split('>')][:2]
                    mapping[alias] = canonical_email
    return mapping
def canonicalize_emails(changelog, mapping):
    """Takes in a string and an email alias mapping and replaces all
    instances of the aliases in the string with their real email.
    """
    # .items() works on Python 2 and 3; .iteritems() is Python-2-only.
    for alias, email in mapping.items():
        changelog = changelog.replace(alias, email)
    return changelog
def get_reqs_from_files(requirements_files):
    """Return the lines of the first requirements file that exists, else []."""
    for requirements_file in requirements_files:
        if os.path.exists(requirements_file):
            # 'with' closes the handle deterministically (it used to leak).
            with open(requirements_file, 'r') as reqs:
                return reqs.read().split('\n')
    return []
def parse_requirements(requirements_files=['requirements.txt',
                                           'tools/pip-requires']):
    """Return distutils-usable requirement names parsed from the first
    requirements file found."""
    requirements = []
    for line in get_reqs_from_files(requirements_files):
        # For the requirements list, we need to inject only the portion
        # after egg= so that distutils knows the package it's looking for
        # such as:
        # -e git://github.com/openstack/nova/master#egg=nova
        if re.match(r'\s*-e\s+', line):
            requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
                                line))
        # such as:
        # http://github.com/openstack/nova/zipball/master#egg=nova
        elif re.match(r'\s*https?:', line):
            requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
                                line))
        # -f lines are for index locations, and don't get used here
        elif re.match(r'\s*-f\s+', line):
            pass
        # argparse is part of the standard library starting with 2.7
        # adding it to the requirements list screws distro installs
        elif line == 'argparse' and sys.version_info >= (2, 7):
            pass
        else:
            requirements.append(line)
    return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
                                               'tools/pip-requires']):
    """Return pip dependency-link URLs (-e/-f/http(s) lines) parsed from
    the first requirements file found."""
    dependency_links = []
    # dependency_links inject alternate locations to find packages listed
    # in requirements
    for line in get_reqs_from_files(requirements_files):
        # skip comments and blank lines
        if re.match(r'(\s*#)|(\s*$)', line):
            continue
        # lines with -e or -f need the whole line, minus the flag
        if re.match(r'\s*-[ef]\s+', line):
            dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
        # lines that are only urls can go in unmolested
        elif re.match(r'\s*https?:', line):
            dependency_links.append(line)
    return dependency_links
def write_requirements():
    """Freeze the active virtualenv's packages into requirements.txt.

    No-op when not running inside a virtualenv ($VIRTUAL_ENV unset).
    """
    venv = os.environ.get('VIRTUAL_ENV', None)
    if venv is not None:
        with open("requirements.txt", "w") as req_file:
            # NOTE(review): 'pip -E <venv>' is a long-removed pip option;
            # this presumably targets a very old pip -- confirm before reuse.
            output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
                                      stdout=subprocess.PIPE)
            requirements = output.communicate()[0].strip()
            req_file.write(requirements)
def _run_shell_command(cmd):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_next_version_suffix(branch_name):
    """Build a '<milestone>~<datestamp>.<revno>' suffix for the next
    development version.

    On 'milestone-proposed' the revision number is prefixed with 'r'.
    """
    datestamp = datetime.datetime.now().strftime('%Y%m%d')
    if branch_name == 'milestone-proposed':
        revno_prefix = "r"
    else:
        revno_prefix = ""
    # Fetch the release metadata refs, then read this branch's milestone
    # version from them (may not exist -> empty string).
    _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
    milestone_cmd = "git show meta/openstack/release:%s" % branch_name
    milestonever = _run_shell_command(milestone_cmd)
    if not milestonever:
        milestonever = ""
    post_version = _get_git_post_version()
    # revno is the trailing dot-separated component of the post-version.
    revno = post_version.split(".")[-1]
    return "%s~%s.%s%s" % (milestonever, datestamp, revno_prefix, revno)
def _get_git_current_tag():
    """Return the tag(s) containing HEAD, or None when git prints nothing."""
    return _run_shell_command("git tag --contains HEAD")


def _get_git_tag_info():
    """Return `git describe --tags` output, or None when git prints nothing."""
    return _run_shell_command("git describe --tags")
def _get_git_post_version():
    """Return a '<base>.<revno>' version derived from git tags.

    If HEAD is tagged, the tag itself is returned. Otherwise the most
    recent tag plus a revision count is used, falling back to base "0.0"
    with the total commit count when the repository has no tags at all.
    """
    current_tag = _get_git_current_tag()
    if current_tag is not None:
        return current_tag
    else:
        tag_info = _get_git_tag_info()
        if tag_info is None:
            base_version = "0.0"
            cmd = "git --no-pager log --oneline"
            out = _run_shell_command(cmd)
            # One line per commit -> total commit count.
            revno = len(out.split("\n"))
        else:
            # `git describe --tags` output: <tag>-<commits-since>-g<sha>.
            tag_infos = tag_info.split("-")
            base_version = "-".join(tag_infos[:-2])
            revno = tag_infos[-2]
        return "%s.%s" % (base_version, revno)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
| if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def genera | te_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.openstack.org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
"""Read the versioninfo file. If it doesn't exist, we're in a github
zipball, and there's really know way to know what version we really
are, but that should be ok, because the utility of that should be
just about nil if this code path is in use in the first place."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version)
def get_cmdclass():
"""Return dict of command |
dtudares/hello-world | yardstick/yardstick/ssh.py | Python | apache-2.0 | 9,436 | 0 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# yardstick comment: this is a modified copy of rally/rally/common/sshutils.py
"""High level ssh library.
Usage examples:
Execute command and get output:
ssh = sshclient.SSH("root", "example.com", port=33)
status, stdout, stderr = ssh.execute("ps ax")
if status:
raise Exception("Command failed with non-zero status.")
print stdout.splitlines()
Execute command with huge output:
class PseudoFile(object):
def write(chunk):
if "error" in chunk:
email_admin(chunk)
ssh = sshclient.SSH("root", "example.com")
ssh.run("tail -f /var/log/syslog", stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH("user", "example.com")
status, out, err = ssh.execute("/bin/sh -s arg1 arg2",
stdin=open("~/myscript.sh", "r"))
Upload file:
ssh = sshclient.SSH("user", "example.com")
ssh.run("cat > ~/upload/file.gz", stdin=open("/store/file.gz", "rb"))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
sshclient = eventlet.import_patched("opentstack.common.sshclient")
"""
import select
import socket
import time
import paramiko
from scp import SCPClient
import six
import logging
LOG = logging.getLogger(__name__)
class SSHError(Exception):
pass
class SSHTimeout(SSHError):
pass
class SSH(object):
"""Represent ssh connection."""
def __init__(self, user, host, port=22, pkey=None,
key_filename=None, password=None):
"""Initialize SSH client.
:param user: ssh username
:param host: hostname or ip address of remote ssh server
:param port: remote ssh port
:param pkey: RSA or DSS private key string or file object
:param key_filename: private key filename
:param password: password
"""
self.user = user
self.host = host
self.port = port
self.pkey = self._get_pkey(pkey) if pkey else None
self.password = password
self.key_filename = key_filename
self._client = False
def _get_pkey(self, key):
if isinstance(key, six.string_types):
key = six.moves.StringIO(key)
errors = []
for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
try:
return key_class.from_private_key(key)
except paramiko.SSHException as e:
errors.append(e)
raise SSHError("Invalid pkey: %s" % (errors))
def _get_client(self):
if self._client:
return self._client
try:
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._client.connect(self.host, username=self.user,
port=self.port, pkey=self.pkey,
key_filename=self.key_filename,
password=self.password,
allow_agent=False, look_for_keys=False,
timeout=1)
return self._client
except Exception as e:
message = ("Exception %(exception_type)s was raised "
"during connect. Exception value is: %(exception)r")
self._client = False
raise SSHError(message % {"exception": e,
"exception_type": type(e)})
def close(self):
self._client.close()
self._client = False
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
"""Execute specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file or string to pass to stdin.
:param stdout: Open file to connect to stdout.
:param stderr: Open file to connect to stderr.
:param raise_on_error: If False then exit code will be return. If True
then exception will be raized if non-zero code.
:param timeout: Timeout in seconds for command execution.
Default 1 hour. No timeout if set to 0.
"""
client = self._get_client()
if isinstance(stdin, six.string_types):
stdin = six.moves.StringIO(stdin)
return self._run(client, cmd, stdin=stdin, stdout=stdout,
stderr=stderr, raise_on_error=raise_on_error,
timeout=timeout)
def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
transport = client.get_transport()
session = transport.open_session()
session.exec_command(cmd)
start_time = time.time()
data_to_send = ""
stderr_data = None
# If we have data to be sent to stdin then `select' should also
# check for stdin availability.
if stdin and not stdin.closed:
writes = [session]
else:
writes = []
while True:
# Block until data can be read/write.
r, w, e = select.select([session], writes, [session], 1)
if session.recv_ready():
data = session.recv(4096)
LOG.debug("stdout: %r" % data)
if stdout is not None:
stdout.write(data)
continue
| if session.recv_stderr_ready():
stderr_data = session.recv_stderr(4096)
LOG.debug("stderr: %r" % stderr_data)
| if stderr is not None:
stderr.write(stderr_data)
continue
if session.send_ready():
if stdin is not None and not stdin.closed:
if not data_to_send:
data_to_send = stdin.read(4096)
if not data_to_send:
stdin.close()
session.shutdown_write()
writes = []
continue
sent_bytes = session.send(data_to_send)
# LOG.debug("sent: %s" % data_to_send[:sent_bytes])
data_to_send = data_to_send[sent_bytes:]
if session.exit_status_ready():
break
if timeout and (time.time() - timeout) > start_time:
args = {"cmd": cmd, "host": self.host}
raise SSHTimeout("Timeout executing command "
"'%(cmd)s' on host %(host)s" % args)
if e:
raise SSHError("Socket error.")
exit_status = session.recv_exit_status()
if 0 != exit_status and raise_on_error:
fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
details = fmt % {"cmd": cmd, "status": exit_status}
if stderr_data:
details += " Last stderr data: '%s'." % stderr_data
raise SSHError(details)
return exit_status
def execute(self, cmd, stdin=None, timeout=3600):
"""Execute the specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file to be sent on process stdin.
:param timeout: Timeout for execution of the command.
:returns: tuple (exit_status, stdout, stderr) |
botswana-harvard/edc-pharma | edc_pharmacy/old/dispense/labels/dispense_label_context.py | Python | gpl-2.0 | 1,755 | 0 | from django.apps import apps as django_apps
edc_pharma_app_config = django_apps.get_app_config('edc_pharma')
edc_protocol_app_config = django_apps.get_app_config('edc_protocol')
class DispenseLabelContext:
"""Format dispense record into printable ZPL label context."""
def __init__(self, prescriptions=None, user=None):
self.prescriptions = prescriptions
self.user = user
def context(self, prescription=None):
# FIXME, request for print label from the ambition team.
subject_identifier = prescription.subject_identifier
category = prescription.category
result = prescription.recommanded_result if (
prescription.recommanded_result) else prescription.result
return {
'barcode_value': subject_identifier,
'site': edc_pharma_app_config.site_code,
'telephone_number': None,
'medication': prescription.description,
'clinician_initials': prescription.clinician_initials,
'number_of_tablets': None,
'times_per_day': None,
'total_number_of_tablets': f'{result} {category}',
'storage_instructions': None,
'prepared_datetime': p | rescription.modified.str | ftime(
'%Y-%m-%d'),
'subject_identifier': subject_identifier,
'prepared_by': prescription.user_modified,
'protocol': edc_protocol_app_config.protocol,
'initials': prescription.initials,
}
@property
def context_list(self):
context_list = []
for prescription in self.prescriptions:
context = self.context(prescription=prescription)
context_list.append(context)
return context_list
|
piotrgiedziun/university | secure_system_networks/lab2/scripts/scan_all.py | Python | mit | 835 | 0 | import nmap
from prettytable import PrettyTable
# scan network - dispaly all opened ports in given range
nm = nmap.PortScanner()
nm.scan('156.17.40.1-255', '22-443')
tab = PrettyTable(["IP address", "Protocol", "Port", "Product name",
"V | ersion", "Extra info"])
for host in nm.all_hosts():
for proto in nm[host].all_protocols():
lport = nm[host][proto].keys()
lport.sort()
for port in lport:
# incompatible with installed nmap versi | on
if not isinstance(port, int):
continue
item = nm[host][proto][port]
# skip closed
if not item['state'] == "open":
continue
tab.add_row([host, proto, port, item['product'], item['version'],
item['extrainfo']])
print tab
|
int-0/aftris | beatbox.py | Python | gpl-3.0 | 1,953 | 0.00256 | #!/usr/bin/env python
import pygame
from tools import singleton
@singleton
class Audio(object):
def __init__(self, initial_musics={}, initial_sounds={}):
if pygame.mixer.get_init() is None:
pygame.mixer.init()
self.__mute = False
self.__sounds = initial_sounds
self.__musics = initial_musics
def register_sound(self, sound_id, sound_object):
self.__sounds[sound_id] = sound_object
def register_music(self, music_id, music_object):
self.__musics[music_id] = music_object
def unregister_sound(self, sound_id):
if sound_id not in self.__sounds.keys():
return False
del(self.__sounds[sound_id])
def unregister_music(self, music_id):
if music_id not in self.__musics.keys():
return False
del(self.__musics[music_id])
@property
def sounds(self):
return self.__sounds.keys()
@property
def musics(self):
return self.__musics.keys()
@property
def is_muted(self):
return self.__mute
def mute(self):
if self.is_muted:
return
pygame.mixer.music.stop()
self.__mute = True
def unmute(self):
if not self.is_muted:
return
pygame.mixer.music.play(-1)
def set_mute(self, new_state=True):
if new_state:
self.mute()
else:
self.unmute()
def set_bgm_music(self, music_id):
if music_id not in self.musics:
return False
pygame.mixer.music.load(self.__musics[music_id])
if not self.is_muted:
pygame.mixer.music.play(-1)
return False
def play_sound(self, sound_id):
if self.is_mu | ted:
| return True
if sound_id not in self.sounds:
return False
self.__sounds[sound_id].play()
return True
# Create default instance
AUDIO = Audio()
|
arvindn05/osc-core | osc-server-bom/root/opt/vmidc/bin/vmidcShell.py | Python | apache-2.0 | 21,993 | 0.014959 | #!/usr/bin/python
# Copyright (c) Intel Corporation
# Copyright (c) 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
vmidcCliShell.py
Basic command line interface for the vmiDC server. Offers basic command to configure the system.
Invoke the program then type
list
to see the list of commands implemented by the shell.
Uses the python class Cmd (which is part of the default distro) to implement the command line.
Cmd provides command line completition as well as help functionality. Read the online documentation
for specifics.
The ExtendedCmd class provides for chaining together Cmd objects so that multi-level commands
can be written. For example the following user command
set network ntp
is invoked by using two different ExtendedCmd objects, one for "set", one for "network".
"ntp" is handled by the "do_ntp" method of the SetNetworkPrompt(ExtendedCmd) class.
ExtendedCmd wires the completition and list functionality, see the documentation
for that class for specifics.
"""
import atexit
import getpass
import re
import os
import readline
import signal
import socket
import subprocess
import sys
import tempfile
from osc_pbkdf2_key import pbkdf2_operations,PBKDF2_KEY_INFO_VMIDC_FILE_PATH
from cmd import Cmd
VMIDCSERVICE="securityBroker"
VMIDCLOG="/opt/vmidc/bin/log/securityBroker.log"
IPREGEX="^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
IPCIDRREGEX="^(((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(?:2[0-4]|1[0-9]|[0-9]))|dhcp)$"
#Modified Domain regex to accept null value(no arguments)
DOMAINREGEX="^$|^[^\s]+$"
HOSTNAMEREGEX="^[^\s]+$"
#MMDDhhmm[[CC]YY][.ss]]
TIMEREGEX="^(1[0-2]|0[1-9])(3[0-1]|[12][0-9]|0[1-9])(2[0-4]|1[0-9]|0[1-9])(60|[1-5][0-9]|0[1-9])(20[0-9][0-9])(\.60|[1-5][0-9]|0[1-9])?$"
def handler(signum, frame):
"""Ignore"""
def restart_service(servicename):
"""
Restart a service
servicename - the name of the service to restart
"""
subprocess.call(["/usr/bin/sudo", "/sbin/service", servicename, "restart"])
def stop_service(servicename):
"""
Stop a service
servicename - the name of the service to stop
"""
subprocess.call(["/usr/bin/sudo", "/sbin/service", servicename, "stop"])
def start_service(servicename):
"""
start a service
servicename - the name of the service to start
"""
subprocess.call(["/usr/bin/sudo", "/sbin/service", servicename, "start"])
def status_service(servicename):
"""
emit the status of a server
servicename - the name of the service to emit the status for
"""
subprocess.call(["/usr/bin/sudo", "/sbin/service", servicename, "status"])
def enable_service(servicename):
"""
enable a service to start on startup
servicename - the name of the service to start
"""
subprocess.call(["/usr/bin/sudo", "/sbin/chkconfig", "--add", servicename])
subprocess.call(["/usr/bin/sudo", "/sbin/chkconfig", "--level", "2345", servicename, "on"])
def disable_service(servicename):
"""
disable a service from starting on startup
servicename - the name of the service to stop
"""
subprocess.call(["/usr/bin/sudo", "/sbin/chkconfig", "--level", "2345", servicename, "off"])
subprocess.call(["/usr/bin/sudo", "/sbin/chkconfig", "--del", servicename])
def validate(lines, regex, errmsg):
""" Validate a series of lines vs a regex
lines - array of strings
regex - a regular expression to match against
errmsg - an error msg to emit if an error is found
should contain %s to print error string
returns None on failure, 1 on success
"""
for line in lines:
if not re.match(regex, line):
if errmsg:
sys.stdout.write(errmsg%(line) + "\n")
return None
return 1
def validate2(lines, regexs, errmsg):
""" Validate a series of lines vs multiple regex
lines - array of strings
regexs - an array of regular expression to match against
errmsg - an error msg to emit if an error is found
should contain %s to print error string
returns None on failure, 1 on success
"""
for line in lines:
match = None
for regex in regexs:
if re.match(regex, line):
match = 1
if not match:
if errmsg:
sys.stdout.write(errmsg%(line) + "\n")
return None
return 1
def | emit(lines):
""" Emit a series of lines to stdout """
for line in lines:
sys.stdout.write(line)
def collect(filename, regex = None, negregex = None, start = [], | end = []):
""" Collect the lines from a file into an array, filtering the results
filename - the filename to collect lines from
regex - collect lines that only match the given expresssion
negregex - exclude lines that match the given expression
start - additional elements at the start
end - additional elements at the end
"""
out = start
with open(filename) as infile:
for line in infile:
if not regex or re.search(regex, line):
if not negregex or not re.search(negregex, line):
out.append(line[:-1])
for line in end:
out.append(line)
return out
def replace(filename, lines):
"""Replaces a given file with an array of strings, while
retaining the permissions of the replaced file
filename - the file to replace
lines - the lines to replace with, newlines will be appended
"""
tmp = tempfile.NamedTemporaryFile(delete=False)
for line in lines:
tmp.write(line + "\n")
tmp.close()
#
# Preserve the premissions from the old file
#
subprocess.call(["/usr/bin/sudo", "/bin/chmod", "--reference=" + filename, tmp.name])
subprocess.call(["/usr/bin/sudo", "/bin/chown", "--reference=" + filename, tmp.name])
getfacl = subprocess.Popen(["/usr/bin/sudo", "/usr/bin/getfacl", "-p", filename], stdout=subprocess.PIPE)
setfacl = subprocess.Popen(["/usr/bin/sudo", "/usr/bin/setfacl", "--set-file=-", tmp.name], stdin=getfacl.stdout)
getfacl.wait()
setfacl.wait()
#
# Move the file
#
subprocess.call(["/usr/bin/sudo", "/bin/mv", tmp.name, filename])
def cat(filename):
"""Emits a given file to stdout
filename - the name of the file to emit
"""
with open(filename) as infile:
for line in infile:
sys.stdout.write(line)
def grep(regex, filename):
"""Emits the lines given file to stdout that match a regex
regex - regular express to match
filename - the name of the file to emit
"""
with open(filename) as infile:
for line in infile:
if re.search(regex, line):
sys.stdout.write(line)
def filter(regex, filename):
"""Emits parts of the lines given file to stdout that match a regex
Only emits the groups (e.g. "([0-9]{7])") of the regex
regex - regular express to match, should contain groups
filename - the name of the file to emit
"""
with open(filename) as infile:
for line in infile:
match = re.search(regex, line)
if match:
for m in match.groups():
sys.stdout.write(m + " ")
sys.stdout.write("\n")
class ExtendedCmd(Cmd):
"""The ExtendCmd class provides a sub-command extension to the python Cmd class.
For any method named sub_* a sub-command is enabled that allows for multiple
word commands. sub_* is must return a boject that has the onecmd and do_list
methods. Basically a do_* is added that will call the onecmd method
of the Cmd object and list_* is added that will call the do_list method
"""
def __init__(self):
"""Initialize a MyCmd object.
Searches |
hpcloud-mon/python-monasca-events | monasca_events/v2_0/__init__.py | Python | apache-2.0 | 679 | 0 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# | Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# i | mplied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['Client']
from monasca_events.v2_0.client import Client
|
ShassAro/ShassAro | Bl_project/blVirtualEnv/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/wizardtests/tests.py | Python | gpl-2.0 | 18,372 | 0.000544 | from __future__ import unicode_literals
import copy
import os
from django import forms
from django.test import TestCase
from django.test.client import RequestFactory
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.views import CookieWizardView
from django.utils._os import upath
from django.contrib.formtools.tests.models import Poet, Poem
from .forms import temp_storage
# On Python 2, __file__ may end with .pyc
THIS_FILE = upath(__file__.rstrip("c"))
UPLOADED_FILE_NAME = 'test_tag_class.py'
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = '__all__'
UserFormSet = forms.models.modelformset_factory(User, form=UserForm, extra=2)
PoemFormSet = forms.models.inlineformset_factory(Poet, Poem, fields="__all__")
class WizardTests(object):
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
# Get new step data, since we modify it during the tests.
self.wizard_step_data = copy.deepcopy(self.wizard_step_data)
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def tearDown(self):
# Ensure that there are no files in the storage which could lead to false
# results in the next tests. Deleting the whole storage dir is not really
# an option since the storage is defined on the module level and can't be
# easily reinitialized. (FIXME: The tests here should use the view classes
# directly instead of the test client, then the storage issues would go
# away too.)
for file in temp_storage.listdir('')[1]:
temp_storage.delete(file)
def test_initial_call(self):
response = self.client.get(self.wizard_url)
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
def test_form_post_error(self):
response = self.client.post(self.wizard_url, self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': ['This field is required.'],
'user': ['This field is required.']})
def test_form_post_success(self):
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, {
'wizard_goto_step': response.context['wizard']['steps'].prev})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_template_context(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context.get('another_var', None), None)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
self.assertEqual(response.context.get('another_var', None), True)
# ticket #19025: `form` should be included in context
form = response.context_data['wizard']['form']
self.assertEqual(response.context_data['form'], form)
def test_form_finish(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
with open(upath(THIS_FILE), 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
# Check that the file got uploaded properly.
with open(THIS_FILE, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2:
self.assertEqual(f.read(), f2.read())
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
# After the wizard is done no files should exist anymore.
self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME))
all_data = response.context['form_list']
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': 'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': '123 Main St', 'address2': 'Djangoland'},
{'random_crap': 'blah blah'},
[{'random_crap': 'blah blah'},
| {'random_crap': 'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
wit | h open(THIS_FILE, 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertTrue(temp_storage.exists(UPLOADED_FILE_NAME))
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
self.assertEqual(all_data['file1'].name, UPLOADED_FILE_NAME)
self.assertTrue(all_data['file1'].closed)
self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME))
del all_data['file1']
self.assertEqual(all_data, {
'name': 'Pony', 'thirsty': True, 'user': self.testuser,
'address1': '123 Main St', 'address2': 'Djangoland',
'random_crap': 'blah blah', 'formset-form4': [
{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}]})
def test_manipulated_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.po |
ActiveState/code | recipes/Python/138889_extract_email_addresses/recipe-138889.py | Python | mit | 743 | 0.012113 | def grab_email(files = []):
# if passed a list of text files, will return a list of
# email addresses found in the files, matched according to
# | basic address conventions. Note: supports most possible
# names, but not all valid ones.
found = []
if files != None:
mailsrch = re.compile(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}')
for file in files:
for line in open(file,'r'):
found.extend(mailsrch.findall(line))
# remove duplicate elements
# borrowed from Tim Peters' algorithm on ASPN Cookbook
| u = {}
for item in found:
u[item] = 1
# return list of unique email addresses
return u.keys()
|
bram85/topydo | test/test_add_command.py | Python | gpl-3.0 | 16,738 | 0.000597 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from datetime import date
from io import StringIO
from topydo.commands import AddCommand, ListCommand
from topydo.lib import TodoList
from topydo.lib.Config import config
from .command_testcase import CommandTest
# We're searching for 'mock'
# pylint: disable=no-name-in-module
try:
from unittest import mock
except ImportError:
import mock
class AddCommandTest(CommandTest):
def setUp(self):
super().setUp()
self.todolist = TodoList.TodoList([])
self.today = date.today().isoformat()
def test_add_task(self):
args = ["New todo"]
command = AddCommand.AddCommand(args, self.todolist, self.out,
self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " New todo")
self.assertEqual(self.errors, "")
def test_add_multiple_args(self):
args = ["New", "todo"]
command = AddCommand.AddCommand(args, self.todolist, self.out,
self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " New todo")
self.assertEqual(self.errors, "")
def test_add_priority1(self):
command = AddCommand.AddCommand(["Foo (C)"], self.todolist, self.out,
self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).priority(), 'C')
self.assertEqual(self.todolist.todo(1).source(),
"(C) " + self.today + " Foo")
self.assertEqual(self.errors, "")
def test_add_priority2(self):
command = AddCommand.AddCommand(["Foo (CC)"], self.todolist, self.out,
self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).priority(), None)
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Foo (CC)")
self.assertEqual(self.errors, "")
def test_add_priority3(self):
command = AddCommand.AddCommand(["Fo(C)o"], self.todolist, self.out,
self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).priority(), None)
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Fo(C)o")
self.assertEqual(self.errors, "")
def test_add_priority4(self):
command = AddCommand.AddCommand(["(C) Foo"], self.todolist, self.out,
self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).priority(), 'C')
self.assertEqual(self.todolist.todo(1).source(),
"(C) " + self.today + " Foo")
self.assertEqual(self.errors, "")
def test_add_dep01(self):
command = AddCommand.AddCommand(["Foo"], self.todolist, self.out,
self.error)
command.execute()
command = AddCommand.AddCommand(["Bar before:1"], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Foo id:1")
self.assertEqual(self.todolist.todo(2).source(),
self.today + " Bar p:1")
self.assertEqual(self.errors, "")
def test_add_dep02(self):
command = AddCommand.AddCommand(["Foo"], self.todolist | , self.out,
self.error)
command.execute()
command = AddCommand.AddCommand(["Bar partof:1"], self.todolist)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Foo id:1")
self.assertEqual(self.todolist.todo(2).sourc | e(),
self.today + " Bar p:1")
self.assertEqual(self.errors, "")
def test_add_dep03(self):
command = AddCommand.AddCommand(["Foo"], self.todolist)
command.execute()
command = AddCommand.AddCommand(["Bar after:1"], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Foo p:1")
self.assertEqual(self.todolist.todo(2).source(),
self.today + " Bar id:1")
self.assertEqual(self.errors, "")
def test_add_dep04(self):
""" Test for using an after: tag with non-existing value. """
command = AddCommand.AddCommand(["Foo after:1"], self.todolist,
self.out, self.error)
command.execute()
self.assertFalse(self.todolist.todo(1).has_tag("after"))
self.assertEqual(self.todolist.todo(1).source(), self.today + " Foo")
self.assertEqual(self.output,
"| 1| " + self.todolist.todo(1).source() + "\n")
self.assertEqual(self.errors, "")
def test_add_dep05(self):
""" Test for using an after: tag with non-existing value. """
command = AddCommand.AddCommand(["Foo after:2"], self.todolist,
self.out, self.error)
command.execute()
self.assertFalse(self.todolist.todo(1).has_tag("after"))
self.assertEqual(self.todolist.todo(1).source(), self.today + " Foo")
self.assertEqual(self.output,
"| 1| " + self.todolist.todo(1).source() + "\n")
self.assertEqual(self.errors, "")
def test_add_dep06(self):
command = AddCommand.AddCommand(["Foo"], self.todolist, self.out,
self.error)
command.execute()
command = AddCommand.AddCommand(["Bar"], self.todolist, self.out,
self.error)
command.execute()
command = AddCommand.AddCommand(["Baz before:1 before:2"],
self.todolist, self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Foo id:1")
self.assertEqual(self.todolist.todo(2).source(),
self.today + " Bar id:2")
self.assertEqual(self.todolist.todo(3).source(),
self.today + " Baz p:1 p:2")
self.assertEqual(self.errors, "")
def test_add_dep07(self):
command = AddCommand.AddCommand(["Foo"], self.todolist, self.out,
self.error)
command.execute()
command = AddCommand.AddCommand(["Bar"], self.todolist, self.out,
self.error)
command.execute()
command = AddCommand.AddCommand(["Baz after:1 after:2"], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(),
self.today + " Foo p:1")
self.assertEqual(self.todolist.todo(2).source(),
self.today + " Bar p:1")
self.assertEqual(self.todolist.todo(3).source(),
self.today + " Baz id:1")
self.assertE |
Berulacks/ethosgame | ethos/levels/level0.py | Python | gpl-2.0 | 2,904 | 0.020317 | import sys,os
#sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
#from ethosgame.ethos.level import Level
from ..level import Level
#from ethosgame.ethos.gameobject import GameObject
from ..gameobject import GameObject
#from ethosgame.ethos.drawnobject import DrawnObject
from ..drawnobject import DrawnObject
import pygame
from pygame.locals import *
from pygame import Color, image, font, sprite
class Level0(Level):
def __init__(self):
| super(Level0, self).__init__()
self.activeSprites = sprite.RenderClear()
self.drawnSprites = []
self.npc = GameObject(image.load('User.png'), 100,50)
self.activeSprites.add(self.npc)
self.block1 = GameObject(image.load('platform.png'), 100, 400)
self.activeSprites.add(self.block1);
self.mousex = 0
self.mousey = 0
#The highest height our npc
#can climb. If a the dY with a
#point is higher than this, the
#npc will just fall to his death
self.MAX_HILL_HEIGHT = 3
self.toDrawRectTopLeft = (0,0)
self.toDrawRectBottomRight = (0,0)
self.drawing = False
self.pts = []
print "Level 0 initialized."
def update(self, dT):
#print "Running level0"
#Character info
for gobject in self.activeSprites:
if gobject is not self.npc:
if not gobject.rect.colliderect(self.npc.rect):
#if self.npc.vy < 0.3 and (gobject.rect.y >= self.npc.rect.y + self.npc.rect.height):
if self.npc.vy < 0.3:
self.npc.vy += 0.1
else:
self.npc.vy = 0
gobject.update(dT)
collidingPoints = []
for drawnstuff in self.drawnSprites:
for point in drawnstuff.pts:
x = self.npc.rect.collidepoint(point)
if x:
collidingPoints.append(point)
if(len(collidingPoints) > 0):
self.npc.processPointCollision(collidingPoints)
def processKeyDown(self,key):
print "You hit the key " + str(key) + "!"
if key == pygame.K_RIGHT:
self.npc.vx = 0.1
def processMouseMotion(self,pos):
#print "Your mouse is at " + str(pos[0]) + " " + str(pos[1])
self.mousex = pos[0]
self.mousey = pos[1]
if self.drawing and len(self.pts) < 100:
self.pts.append( pos )
def processMouseButtonDown(self, pos):
print "Ya clicked at " + str(pos[0]) + " " + str(pos[1]) + " ya goof!"
self.drawing = True
self.toDrawRectTopLeft = (pos[0],pos[1])
if len(self.pts) > 0:
self.pts = []
def processMouseButtonUp(self, pos):
print "Ya let go"
if self.drawing is True:
self.drawing = False
self.drawnSprites.append ( DrawnObject(self.pts) )
self.toDrawRectBottomRight = (pos[0], pos[1])
|
RPGOne/Skynet | version_requirements.py | Python | bsd-3-clause | 4,443 | 0.000225 | from distutils.version import LooseVersion
import functools
import re
import sys
def _check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
try:
if cmp_op == '>':
return LooseVersion(actver) > LooseVersion(version)
elif cmp_op == '>=':
return LooseVersion(actver) >= LooseVersion(version)
elif cmp_op == '=':
return LooseVersion(actver) == LooseVersion(version)
elif cmp_op == '<':
return LooseVersion(actver) < LooseVersion(version)
else:
return False
except TypeError:
return True
def get_module_version(module_name):
    """Return module version or None if version can't be retrieved."""
    tail = module_name.rpartition('.')[-1]
    module = __import__(module_name, fromlist=[tail])
    version = getattr(module, '__version__', None)
    if version is None:
        version = getattr(module, 'VERSION', None)
    return version
def is_installed(name, version=None):
    """Test if *name* is installed.

    Parameters
    ----------
    name : str
        Name of module or "python"
    version : str, optional
        Version string to test against.
        If version is not None, checking version
        (must have an attribute named '__version__' or 'VERSION')
        Version may start with =, >=, > or < to specify the exact requirement

    Returns
    -------
    out : bool
        True if `name` is installed matching the optional version.

    Notes
    -----
    Original Copyright (C) 2009-2011 Pierre Raybaut
    Licensed under the terms of the MIT License.
    """
    if name.lower() == 'python':
        actver = sys.version[:6]
    else:
        try:
            actver = get_module_version(name)
        except ImportError:
            return False

    if version is None:
        # Presence alone is enough.
        return True

    match = re.search('[0-9]', version)
    assert match is not None, "Invalid version number"
    # Everything before the first digit is the comparison operator.
    symb = version[:match.start()] or '='
    assert symb in ('>=', '>', '=', '<'), \
        "Invalid version condition '%s'" % symb
    return _check_version(actver, version[match.start():], symb)
def require(name, version=None):
    """Return decorator that forces a requirement for a function or class.

    Parameters
    ----------
    name : str
        Name of module or "python".
    version : str, optional
        Version string to test against.
        If version is not None, checking version
        (must have an attribute named '__version__' or 'VERSION')
        Version may start with =, >=, > or < to specify the exact requirement

    Returns
    -------
    func : function
        A decorator that raises an ImportError if a function is run
        in the absence of the input dependency.
    """
    def decorator(obj):
        @functools.wraps(obj)
        def func_wrapped(*args, **kwargs):
            # The check runs at call time, so merely importing/decorating
            # never fails -- only using the decorated object does.
            if is_installed(name, version):
                return obj(*args, **kwargs)
            msg = '"%s" in "%s" requires "%s' % (obj, obj.__module__, name)
            if version is not None:
                msg += " %s" % version
            raise ImportError(msg + '"')
        return func_wrapped
    return decorator
def get_module(module_name, version=None):
    """Return a module object of name *module_name* if installed.

    Parameters
    ----------
    module_name : str
        Name of module.
    version : str, optional
        Version string to test against.
        If version is not None, checking version
        (must have an attribute named '__version__' or 'VERSION')
        Version may start with =, >=, > or < to specify the exact requirement

    Returns
    -------
    mod : module or None
        Module if *module_name* is installed matching the optional version
        or None otherwise.
    """
    tail = module_name.rpartition('.')[-1]
    if is_installed(module_name, version):
        return __import__(module_name, fromlist=[tail])
    return None
|
CENDARI/dblookup | fabfile.py | Python | mit | 2,134 | 0.00328 | from fabric.api import env, local, lcd
from fabric.colors import red, green
from fabric.decorators import task, runs_once
from fabric.operations import prompt
from fabric.utils import abort
from zipfile import ZipFile
import datetime
import fileinput
import importlib
import os
import random
import re
import subprocess
import sys
import time
PROJ_ROOT = os.pa | th.dirname(env.real_fabfile)
env.project_name = 'dblookup'
env.python = 'python' if 'VIRTUAL_ENV' in os.environ else './bin/python'
@task
def setup():
    """
    Set up a local development environment

    This command must be run with Fabric installed globally (not inside a
    virtual environment)
    """
    inside_venv = os.getenv('VIRTUAL_ENV') or hasattr(sys, 'real_prefix')
    if inside_venv:
        abort(red('Deactivate any virtual environments before continuing.'))
    make_virtual_env()
    print ('\nDevelopment environment successfully created.')
@task
def download_dbpedia():
    "Download files from dbpedia"
    with lcd(PROJ_ROOT):
        # Create the target directory only if it does not already exist.
        local('if [ ! -d dbpedia-2015 ]; then mkdir dbpedia-2015; fi')
        server = 'http://downloads.dbpedia.org/2015-04'
        #server = 'http://data.dws.informatik.uni-mannheim.de/dbpedia/2014/en/'
        files = [
            'dbpedia_2015-04.nt.bz2',
            'core/instance-types_en.nt.bz2',
            'core/labels_en.nt.bz2',
            'core/transitive-redirects_en.nt.bz2',
            'core/short-abstracts_en.nt.bz2',
            'core/geo-coordinates_en.nt.bz2',
            'core/infobox-properties_en.nt.bz2'
        ]
        with lcd('./dbpedia-2015'):
            for filename in files:
                # BUG FIX: the command previously ended with a stray '; fi'
                # (left over from the mkdir guard above), which is a shell
                # syntax error. Also renamed loop var: 'file' shadows the
                # builtin.
                local('wget -N "%s/%s"' % (server, filename))
            local('wget -N http://wikistats.ins.cwi.nl/data/wikistats-2015-enf.csv.bz2')
def make_virtual_env():
    "Make a virtual environment for local dev use"
    with lcd(PROJ_ROOT):
        # Create the venv in the project root itself (./bin, ./lib, ...).
        local('virtualenv .')
        local('./bin/pip install -r requirements.txt')
@task
def create_index():
    "Compute a large index containing the dbpedia entries ready to send to ElasticSearch"
    with lcd(PROJ_ROOT):
        # env.python is './bin/python' inside the project venv, or the
        # system 'python' when a virtualenv is already active.
        local('{python} populate.py'.format(**env))
|
onfinternational/QGISProcessingScripts | Scripts/S1ProcessByRelativeOrbit.py | Python | mpl-2.0 | 5,838 | 0.007879 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 18:14:51 2017
@author: cedric
"""
'''
This script apply calibration and orthorectification process of S1 GRD data
'''
'''
IMPORT
'''
import os
from S1Lib.S1OwnLib import (ReturnRealCalibrationOTBValue,
GetFileByExtensionFromDirectory,
GetNewDatesFromListFilesInputManifest,
ReprojVector,
CheckAllDifferentRelativeOrbit,
getS1ByTile,
CreateShapeFromPath,
ProcessS1Dataset)
#from S1Lib.S1OTBLib import
# TO DELETE
'''
NOTES
Things to do
- Optimize the ortorectification by using extent of the polygon to replace clip
'''
##Sentinel-1 Deforestation Process=group
##1 - Calibration and Orthorectification over tiles=name
##Input_Data_Folder=folder
##DEM_Folder=folder
##Input_Polygon_File=vector
##Relative_Orbit_Field_Name=field Input_Polygon_File
##Relative_Orbit_To_Process=string 120-47
##Calibration_Type=string Sigma0
##Output_EPSG=crs
##Output_Resolution=number 10
##Output_Data_Folder=folder
##Ram=number 256
# END TO DELETE
'''
Input OF THE PROGRAM
'''
'''
This string have to contain the folder path that contain all the unziped S1
data to process
'''
Input_Data_Folder = '/media/cedric/CL/ONFGuyane/Data/Sentinel1/TestScript/Input'
'''
This string have to contain the folder path that contain all dem covering the
study area. Tiff format is required
'''
DEM_Folder = '/media/cedric/CL/ONFGuyane/Data/Sentinel1/TestScript/DEM'
'''
Path of the vector file that contain one or more polygon to process
Each polygone will be process independantly like independant tile
This vector have to contain one field with integer type containing
the Relative orbit number to process for the current polygon
'''
Input_Polygon_File = '/media/cedric/CL/ONFGuyane/Data/Sentinel1/TestScript/cliparea.shp'
'''
Name of the field containing the relative orbit
'''
Relative_Orbit_Field_Name = 'id'
'''
Value of all the relative orbit to process. Have to be separated by '-' like
120-47
'''
Relative_Orbit_To_Process = '120'
'''
String containing the calibration type:
Sigma0
Gamma0
Beta0
'''
Calibration_Type = 'Sigma0'
'''
Output EPSG (only in meter)
'''
Output_EPSG = 'EPSG:3857'
'''
Output resolution in meter
'''
Output_Resolution = '10'
'''
Output folder that will contain one subfolder for each processed polygon and
each relative orbit
'''
Output_Data_Folder = '/media/cedric/CL/ONFGuyane/Data/Sentinel1/TestScript/Ortho'
'''
Amount of allocate ram
In case of use half of the available memory (not the physical memory)
'''
Ram = 2000
'''
THE PROGRAM ITSELF
'''
# Internal variable: noise removal is disabled in this processing chain.
Noise = 'false'

'''
Main steps
1 - Transform Calibration user input into OTB command and get its acronym
2 - List ALL Sentinel-1 manifest files
3 - Filter out the ones already processed in the output folder
4 - Scan paths for all polygons and check that all polygons have different
    relative orbits
5 - Loop through the relative orbits given by the user; for each orbit:
        - create one subfolder per orbit
        - keep only the data intersecting the study area
        - process the data
'''
# 1 - Transform Calibration user input into OTB command and get its acronym.
OTBCalibrationType, CalibrationName = ReturnRealCalibrationOTBValue(Calibration_Type)

# 2 - List all SENTINEL-1 manifest.safe files.
ManifestFiles = GetFileByExtensionFromDirectory(Input_Data_Folder, 'manifest.safe')

# 3 - Keep only acquisitions not already present in the output folder.
ManifestFiles = GetNewDatesFromListFilesInputManifest(ManifestFiles, Input_Data_Folder, Output_Data_Folder)

# 4 - Parse the user-supplied orbit list ('120-47' -> [120, 47]) and check
#     each polygon carries a distinct relative orbit.
PathUserList = [int(path) for path in Relative_Orbit_To_Process.split('-')]
CheckAllDifferentRelativeOrbit(Input_Polygon_File, Relative_Orbit_Field_Name, PathUserList)

# Reproject the polygons to EPSG:4326 to be compliant with S1 raw data.
# (Restored '4326': a dataset-split artifact had corrupted this literal.)
Input_Polygon_FileEPSG4326 = Input_Polygon_File.replace('.shp', 'EPSG4326.shp')
ReprojVector(Input_Polygon_File, Input_Polygon_FileEPSG4326, 4326)

# 5 - Process each requested relative orbit independently.
for userPath in PathUserList:
    # Keep only the rasters that intersect polygons of this orbit.
    intersectRaster = getS1ByTile(Input_Polygon_FileEPSG4326,
                                  ManifestFiles,
                                  Relative_Orbit_Field_Name,
                                  userPath)

    # If there is data for this orbit, create its output directory;
    # otherwise go to the next orbit.
    if len(intersectRaster) > 0:
        PathDir = os.path.join(Output_Data_Folder, 'p' + str(userPath))
        if not os.path.exists(PathDir):
            os.makedirs(PathDir)
    else:
        continue

    # Vector file restricted to the polygons of the current orbit.
    PathShape = os.path.join(PathDir, 'p' + str(userPath) + '.shp')
    CreateShapeFromPath(Input_Polygon_FileEPSG4326,
                        Relative_Orbit_Field_Name,
                        str(userPath),
                        PathShape)

    # Calibrate and orthorectify every product of this orbit.
    ProcessS1Dataset(intersectRaster,
                     PathDir,
                     PathShape,
                     DEM_Folder,
                     Output_Resolution,
                     Calibration_Type,
                     Noise,
                     CalibrationName,
                     OTBCalibrationType,
                     Output_EPSG,
                     Ram)
Razican/Exploding-Stars | web/spaceappsbilbao/NextGenThreat/views.py | Python | mit | 2,281 | 0.017552 | #-*-*- encoding: utf-8 -*-*-
from django.shortcuts import render
from django.template import RequestContext, loader, Context
from django.http import JsonResponse
from .models import Airburst
def index(request):
    """Render the landing page; the template needs no context."""
    context = {}
    return render(request, 'NextGenThreat/index.html', context)
def radar(request):
    """Render the radar page with every recorded airburst."""
    airbursts = Airburst.objects.all()
    return render(request, 'NextGenThreat/radar.html',
                  {'latest_airburst_list': airbursts})
def credits(request):
    # Static contributor list rendered verbatim by the credits template.
    context = {'contributors': [
        {'name': 'Aitor', 'lastname': 'Brazaola', 'description': 'Aitor is a student of Computer Engineering. Been a geek and software developer, he has a podcast with Iban Eguia named El Gato de Turing.'},
        {'name': 'Eneko', 'lastname': 'Cruz', 'description': 'Eneko is studying Computer Science at the University of Deusto and Mathematics at the National University of Distance Education (UNED). His main interests are information security, mathematics and computer vision.'},
        {'name': 'Elena', 'lastname': 'López de Dicastillo', 'description': 'Elena is a student at University of Deusto. She is studying Telecom Engineering and is very interested in fields such as Internet security, biomedicine and aeronautics. She is currently working on OpenStratos to send a Raspberry Pi to the stratosphere.'},
        {'name': 'Iban', 'lastname': 'Eguia', 'description': 'Iban is a future IT engineer and a total space geek. Translator and contributor at CodeIgniter and core developer at OpenStratos and XG Project. He has a podcast with Aitor Brazaola called El Gato de Turing.'},
        {'name': 'Alejandro', 'lastname': 'Pérez', 'description': 'Alejandro is a last year software engineering student, researcher in bioinformatics and net security and cofounder of aprenditeka.'},
    ],
    }
    return render(request, 'NextGenThreat/credits.html', context)
def api(request):
    """Return every airburst as JSON, keyed by its primary key."""
    # Restored field names: dataset-split artifacts had corrupted
    # 'radiated_energy' and 'airburst.longitude'.
    response = {}
    for airburst in Airburst.objects.all():
        response[airburst.id] = {
            'radiated_energy': airburst.radiated_energy,
            'impact_energy': airburst.impact_energy,
            'latitude': airburst.latitude,
            'longitude': airburst.longitude,
            'date': airburst.date.isoformat(),
            'altitude': airburst.altitude,
        }
    return JsonResponse(response)
|
google-research/google-research | demogen/model_config.py | Python | apache-2.0 | 9,065 | 0.003309 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for loading the model dataset.
This file contains all necessary utilities for loading a model
from the margin distribution generalization dataset.
Typical usage example:
root_dir = # directory where the model dataset is at
model_config = ModelConfig()
model_path = model_config.get_model_dir_name(root_dir)
model_fn = model_config.get_model_fn()
sess = tf.Session()
image = # input tensor
logits = model_fn(image, False)
model_config.load_parameters(model_path, sess)
"""
import json
import os
import tensorflow.compat.v1 as tf
from demogen.models.get_model import get_model
from tensorflow.contrib import training as contrib_training
CKPT_NAME = 'model.ckpt-150000'
# all available settings in the dataset
ALL_MODEL_SPEC = {
'NIN_CIFAR10': {
'wide_multiplier': [1.0, 1.5, 2.0],
'batchnorm': [True, False],
'dropout_prob': [0.0, 0.2, 0.5],
'augmentation': [True, False],
'decay_fac': [0.0, 0.001, 0.005],
'copy': [1, 2]
},
'RESNET_CIFAR10': {
'wide_multiplier': [1.0, 2.0, 4.0],
'normalization': ['batch', 'group'],
'augmentation': [True, False],
'decay_fac': [0.0, 0.02, 0.002],
'learning_rate': [0.01, 0.001],
'copy': [1, 2, 3]
},
'RESNET_CIFAR100': {
'wide_multiplier': [1.0, 2.0, 4.0],
'normalization': ['batch', 'group'],
'augmentation': [True, False],
'decay_fac': [0.0, 0.02, 0.002],
'learning_rate': [0.1, 0.01, 0.001],
'copy': [1, 2, 3]
}
}
class ModelConfig(object):
"""A class for easy use of the margin distribution model dataset.
A model config contains all the relevant information for building a model
in the margin distribution model dataset. Some attributes only apply
for specific architecture and changing for the other architecture does
not have any effects.
Attributes:
model_type: The overall topology of the model.
dataset: The name of the dataset the model uses.
wide_multiplier: How much wider is the model compared to the base model.
dropout_prob: Probability of dropping random unit (only for nin).
augmentation: If data augmentation is used.
decay_fac: Coefficient for l2 weight decay.
batchnorm: If batchnorm is used (only for nin).
normalization: Type of normalization (only for resnet).
learning_rate: Initial learning rate (only for resnet).
copy: Index of the copy.
num_class: Number of classes in the dataset.
data_format: What kind of data the model expects (e.g. channel first/last).
root_dir: Optional root directory.
"""
  def __init__(
      self,
      model_type='nin',
      dataset='cifar10',
      wide_multiplier=1.0,
      batchnorm=False,
      dropout_prob=0.0,
      data_augmentation=False,
      l2_decay_factor=0.0,
      normalization='batch',
      learning_rate=0.01,
      root_dir=None,
      copy=1):
    """Validates the hyperparameter combination against ALL_MODEL_SPEC
    and stores it on the instance.

    Only wide_multiplier, l2_decay_factor and copy are checked against the
    dataset grid; the remaining arguments are stored as given.
    """
    assert model_type == 'nin' or model_type == 'resnet'
    assert dataset == 'cifar10' or dataset == 'cifar100'
    # e.g. 'NIN_CIFAR10' -- the key into ALL_MODEL_SPEC.
    experiment_type = (model_type + '_' + dataset).upper()
    candidate_params = ALL_MODEL_SPEC[experiment_type]
    assert wide_multiplier in candidate_params['wide_multiplier']
    assert l2_decay_factor in candidate_params['decay_fac']
    assert copy in candidate_params['copy']
    self.model_type = model_type
    self.dataset = dataset
    self.wide_multiplier = wide_multiplier
    self.dropout_prob = dropout_prob
    self.augmentation = data_augmentation
    self.decay_fac = l2_decay_factor
    self.batchnorm = batchnorm
    self.normalization = normalization
    self.learning_rate = learning_rate
    self.copy = copy
    # Derived attributes: class count follows the dataset, data layout
    # follows the architecture (nin expects HWC, resnet expects CHW).
    self.num_class = 10 if dataset == 'cifar10' else 100
    self.data_format = 'HWC' if model_type == 'nin' else 'CHW'
    self.root_dir = root_dir
def _get_stats(self, stats_file_name, stats_name):
if not self.root_dir:
raise ValueError('This model config does not have a root directory.')
stats_path = os.path.join(self.get_model_dir_name(), stats_file_name)
with tf.io.gfile.GFile(stats_path, 'r') as f:
return json.load(f)[stats_name]
  def training_stats(self, stats_name='Accuracy'):
    """Get the specified training statistics of the model.

    Reads the value from 'train.json' in the model's directory.

    Args:
      stats_name: Name of the stats to look up ('Accuracy' or 'CrossEntropy').

    Returns:
      The train stats specified by stats_name

    Raises:
      ValueError: the model config's root_dir is None
    """
    return self._get_stats('train.json', stats_name)
  def test_stats(self, stats_name='Accuracy'):
    """Get the specified test statistics of the model.

    Reads the value from 'eval.json' in the model's directory.

    Args:
      stats_name: Name of the stats to look up ('Accuracy' or 'CrossEntropy').

    Returns:
      The test stats specified by stats_name

    Raises:
      ValueError: the model config's root_dir is None
    """
    return self._get_stats('eval.json', stats_name)
def get_model_dir_name(self, root_dir=None):
"""Get the name of the trained model's directory.
Generates the name of the directory that contain a trained model
specified by the ModelConfig object. The name of the directory is
generaly indicative of | the hyperparameter settings of the model.
Args:
root_dir: Optional root directory where experiment directory is located.
Returns:
A string | that contains the checkpoint containing weights and
training/test accuracy of a model.
Raises:
ValueError: The model type is not in the dataset
"""
if not root_dir:
assert self.root_dir
root_dir = self.root_dir
if self.model_type == 'nin':
data_dir = 'NIN_'
data_dir += self.dataset
model_parent_dir = os.path.join(root_dir, data_dir.upper())
model_path = [
self.model_type,
'wide_{}x'.format(self.wide_multiplier),
'bn' if self.batchnorm else '',
'dropout_{}'.format(self.dropout_prob),
'aug' if self.augmentation else '',
'decay_{}'.format(self.decay_fac),
str(self.copy)
]
model_dir = os.path.join(model_parent_dir, '_'.join(model_path))
elif self.model_type == 'resnet':
data_dir = 'RESNET_'
data_dir += self.dataset
model_parent_dir = os.path.join(root_dir, data_dir.upper())
model_path = [
self.model_type,
'wide_{}x'.format(self.wide_multiplier),
'{}norm'.format(self.normalization),
'aug' if self.augmentation else '',
'decay_{}'.format(self.decay_fac)
]
if self.learning_rate != 0.01:
model_path.append('lr_{}'.format(self.learning_rate))
model_path.append(str(self.copy))
model_dir = os.path.join(model_parent_dir, '_'.join(model_path))
else:
raise ValueError('model type {} is not available'.format(self.model_type))
return model_dir
def get_checkpoint_path(self, root_dir=None):
return os.path.join(self.get_model_dir_name(root_dir), CKPT_NAME)
def get_model_fn(self):
"""Get a model function of the model specified by a model configuration.
Generates a callable function that can build a model specified
by self. The function is meant to be called on tensors of
input images.
Returns:
A callable model function built according to the hyper parameters of self.
"""
config = contrib_training.HParams(
wide=self.wide_multiplier,
dropout=self.dropout_prob,
batchnorm=self.batchnorm,
weight_decay=True,
decay_fac=self.decay_fac,
normalization=self. |
wmvanvliet/psychic | psychic/nodes/chain.py | Python | bsd-3-clause | 3,338 | 0.003895 | import nu | mpy as np
from .basenode import BaseNode
from ..dataset import DataSet
from ..helpers import to_one_of_n
def _apply_sklearn(n, d, last_node=False):
    """Apply node ``n`` to DataSet ``d``.

    psychic nodes are applied directly; for scikit-learn nodes the most
    suitable method is chosen: ``transform`` for intermediate nodes, and
    ``predict_proba``/``predict`` (falling back to ``transform``) for the
    last node of a chain.
    """
    if not n.__module__.startswith('sklearn'):
        return n.apply(d)

    # Use the most suitable function. (Restored 'not last_node': a
    # dataset-split artifact had corrupted this condition.)
    if not last_node and hasattr(n, 'transform'):
        X = n.transform(d.X)
    elif hasattr(n, 'predict_proba'):
        X = n.predict_proba(d.X)
    elif hasattr(n, 'predict'):
        p = n.predict(d.X)
        # np.float was a deprecated alias of the builtin float (removed in
        # NumPy 1.24); comparing the dtype against float is equivalent.
        if p.dtype == float:
            X = p
        else:
            # Class labels: expand to one-of-N encoding.
            X = to_one_of_n(p.T, list(range(d.nclasses))).T
    elif last_node and hasattr(n, 'transform'):
        X = n.transform(d.X)
    else:
        raise ValueError(
            'node' + repr(n) + ' doesn\'t have a transform, '+
            'predict_proba or predict function')
    return DataSet(data=X.T, default=d)
class Chain(BaseNode):
    """Runs a sequence of nodes, feeding each node's output to the next.

    Nodes may be psychic BaseNode instances or scikit-learn estimators;
    for the latter the appropriate fit/transform/predict method is chosen
    automatically.
    """

    def __init__(self, nodes):
        BaseNode.__init__(self)
        self.nodes = list(nodes)

    def _pre_process(self, d):
        '''Train and apply all nodes but the last.'''
        for n in self.nodes[:-1]:
            self.log.info('Processing with %s...' % str(n))
            self.log.debug('d = %s' % d)

            # Check whether this node comes from scikit-learn.
            if n.__module__.startswith('sklearn'):
                # Prefer the richest sklearn interface available.
                if hasattr(n, 'fit_transform'):
                    X = n.fit_transform(d.X, d.y)
                elif hasattr(n, 'transform'):
                    n.fit(d.X, d.y)
                    X = n.transform(d.X)
                elif hasattr(n, 'predict_proba'):
                    n.fit(d.X, d.y)
                    X = n.predict_proba(d.X)
                elif hasattr(n, 'predict'):
                    n.fit(d.X, d.y)
                    p = n.predict(d.X)
                    # np.float was a deprecated alias of the builtin float
                    # (removed in NumPy 1.24); comparing against float is
                    # equivalent.
                    if p.dtype == float:
                        X = p
                    else:
                        # Class labels: expand to one-of-N encoding.
                        X = to_one_of_n(p.T, list(range(d.nclasses))).T
                d = DataSet(data=X.T, default=d)
            else:
                d = n.train(d).apply(d)
        return d

    def train_(self, d):
        """Run all but the last node, then fit the last node."""
        d = self._pre_process(d)
        n = self.nodes[-1]
        self.log.info('Training %s...' % str(n))
        self.log.debug('d = %s' % d)
        if n.__module__.startswith('sklearn'):
            n.fit(d.X, d.y)
        else:
            n.train(d)

    def apply_(self, d):
        """Apply every node in order to ``d`` and return the result."""
        last = len(self.nodes) - 1
        for i, n in enumerate(self.nodes):
            self.log.info('Testing with %s...' % str(n))
            self.log.debug('d = %s' % d)
            d = _apply_sklearn(n, d, last_node=(i == last))
        return d

    def train_apply(self, dtrain, dtest=None):
        """Train on ``dtrain`` and apply to ``dtest``.

        When ``dtest`` is omitted, train and test sets are the same and the
        intermediate results of training are reused instead of re-running
        the whole chain.
        """
        if dtest is not None:
            return self.train(dtrain).apply(dtest)
        dtrain = self._pre_process(dtrain)
        n = self.nodes[-1]
        if n.__module__.startswith('sklearn'):
            n.fit(dtrain.X, dtrain.y)
        else:
            n.train(dtrain)
        return _apply_sklearn(n, dtrain, last_node=True)

    def __str__(self):
        return 'Chain (%s)' % ' ->\n'.join([str(n) for n in self.nodes])
|
srcLurker/home-assistant | homeassistant/components/sensor/transmission.py | Python | mit | 4,962 | 0 | """
Support for monitoring the Transmission BitTorrent client API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.transmission/
"""
import logging
from datetime import timedelta
import voluptuous as | vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_NAME, CONF_PORT,
CONF_MONITORED_VARIABLES, STATE_UNKNOWN, STATE_IDLE)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['transmissionrpc==0.11']
_LOGGER = logging.getLogger(__name__)
_THROTTLED_REFRESH = None
DEFAULT_NAME = 'Transmission'
DEFAULT_PORT = 9091
SENSOR_TYPES = {
'current_status': ['Status', None],
'download_speed': ['Down Speed', 'MB/s'],
'upload_speed': ['Up Speed', 'MB/s']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Transmission sensors."""
    import transmissionrpc
    from transmissionrpc.error import TransmissionError
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    port = config.get(CONF_PORT)
    transmission_api = transmissionrpc.Client(
        host, port=port, user=username, password=password)
    try:
        # Probe the daemon once so misconfiguration fails at setup time.
        transmission_api.session_stats()
    except TransmissionError:
        _LOGGER.exception("Connection to Transmission API failed")
        return False
    # pylint: disable=global-statement
    # Share one throttled (max once per second) refresh callable between
    # all sensors of this platform.
    global _THROTTLED_REFRESH
    _THROTTLED_REFRESH = Throttle(timedelta(seconds=1))(
        transmission_api.session_stats)
    dev = []
    for variable in config[CONF_MONITORED_VARIABLES]:
        dev.append(TransmissionSensor(variable, transmission_api, name))
    add_devices(dev)
class TransmissionSensor(Entity):
    """Representation of a Transmission sensor."""

    def __init__(self, sensor_type, transmission_client, client_name):
        """Initialize the sensor."""
        self._name = SENSOR_TYPES[sensor_type][0]
        self.transmission_client = transmission_client
        self.type = sensor_type
        self.client_name = client_name
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self.client_name, self._name)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @staticmethod
    def _to_mb_per_sec(speed):
        """Convert a bytes/s rate to MB/s, rounded for display.

        Extracted helper: the identical conversion was previously
        duplicated for download and upload speed.
        """
        mb_spd = float(speed) / 1024 / 1024
        return round(mb_spd, 2 if mb_spd < 0.1 else 1)

    # pylint: disable=no-self-use
    def refresh_transmission_data(self):
        """Call the throttled Transmission refresh method."""
        from transmissionrpc.error import TransmissionError

        if _THROTTLED_REFRESH is not None:
            try:
                _THROTTLED_REFRESH()
            except TransmissionError:
                _LOGGER.error("Connection to Transmission API failed")

    def update(self):
        """Get the latest data from Transmission and updates the state."""
        self.refresh_transmission_data()

        session = self.transmission_client.session
        if self.type == 'current_status':
            if session:
                upload = session.uploadSpeed
                download = session.downloadSpeed
                if upload > 0 and download > 0:
                    self._state = 'Up/Down'
                elif upload > 0 and download == 0:
                    self._state = 'Seeding'
                elif upload == 0 and download > 0:
                    self._state = 'Downloading'
                else:
                    self._state = STATE_IDLE
            else:
                self._state = STATE_UNKNOWN
        elif session:
            if self.type == 'download_speed':
                self._state = self._to_mb_per_sec(session.downloadSpeed)
            elif self.type == 'upload_speed':
                self._state = self._to_mb_per_sec(session.uploadSpeed)
|
Nebucatnetzer/tamagotchi | pygame/lib/python3.4/site-packages/faker/providers/job/uk_UA/__init__.py | Python | gpl-2.0 | 4,166 | 0.005346 | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as BaseProvider
# Ukrainian job names taken from
# https://uk.wikipedia.org/wiki/%D0%A1%D0%BF%D0%B8%D1%81%D0%BE%D0%BA_%D0%BF%D1%80%D0%BE%D1%84%D0%B5%D1%81%D1%96%D0%B9
# on 22th September 2014
class Provider(BaseProvider):
    """Job-name provider for the uk_UA locale."""

    # NOTE: 'Геолог', 'Геофізик' and 'Гицель' previously lacked separating
    # commas, so Python concatenated them into one bogus job name;
    # the commas are restored below.
    jobs = [
        # А
        'Агроном', 'Адвокат', 'Актор', 'Акушер', 'Антрополог', 'Архітектор', 'Археолог', 'Астронавт', 'Астроном',
        'Астрофізик',
        # Б
        'Бібліограф', 'Біолог', 'Бізнесмен', 'Ботанік', 'Будник', 'Бухгалтер', 'Бібліотекар',
        # В
        'Ветеринар', 'Випробувач', 'Водій', 'Вчитель', 'Візажист',
        # Г
        'Географ', 'Геолог', 'Геофізик', 'Гицель', 'Гінеколог', 'Гірник', 'Гірничий інженер', 'Головний меркшейдер',
        'Графік', 'Громадський діяч',
        # Ґ
        'Ґрунтознавець',
        # Д
        'Державний службовець', 'Дерун', 'Детектив', 'Дизайнер', 'Дипломат', 'Диригент', 'Доцент', 'Драматург',
        'Ді-джей', 'Дантист',
        # Е
        'Економіст', 'Електрик', 'Електромонтер', 'Електромонтажник', 'Електрослюсар', 'Електротехнік', 'Епідеміолог',
        'Етнограф',
        # Є
        'Євнух', 'Єгер',
        # Ж
        'Журналіст', 'Живописець',
        # З
        'Золотар', 'Зоолог',
        # І
        'Інженер', 'Історик',
        # К
        'Каскадер', 'Кінорежисер', 'Клавішник', 'Клоун', 'Композитор', 'Конструктор', 'Краєзнавець', 'Кушнір',
        'Кіноактор', 'Кінокритик', 'Кінорежисер', 'Кур\'єр', 'Кухар', 'Кінолог', 'Круп\'є',
        # Л
        'Льотчик', 'Лікар', 'Літературознавець', 'Локсмайстер',
        # М
        'Математик', 'Машиніст', 'Медик', 'Менеджер', 'Мистецтвознавець', 'Мірошник', 'Мікробіолог', 'Мінералог',
        'Мовознавець', 'Модель', 'Модельєр', 'Музикант', 'Музикознавець', 'Музичний редактор', 'Маркетолог',
        'М\'ясник',
        # Н
        'Намотувальник', 'Науковець', 'Няня', 'Нотаріус',
        # П
        'Палеонтолог', 'Паралегал', 'Парфюмер', 'Патологоанатом', 'Педагог', 'Пекар', 'Перекладач', 'Петрограф',
        'Письменник', 'Піаніст', 'Підприємець', 'Пілот', 'Правник', 'Програміст', 'Провізор', 'Прокурор',
        'Промисловець', 'Професор', 'Психолог', 'Публіцист', 'Продюсер', 'Паблік рилейшнз',
        # Р
        'Режисер', 'Різноробочий', 'Реабілітолог', 'Редактор', 'Реставратор', 'Ріелтор',
        # С
        'Сантехнік', 'Складальник', 'Скульптор', 'Соціолог', 'Співак', 'Сценарист', 'Стропальник', 'Стоматолог',
        'Слідчий', 'Стиліст', 'Секретар',
        # Ф
        'Фармацевт', 'Фермер', 'Фізик', 'Філолог', 'Фольклорист', 'Фотограф', 'Фрілансер', 'Футболіст', 'Флорист',
        # Х
        'Хімік', 'Художник', 'Хореограф',
        # Ш
        'Шериф', 'Швачка', 'Штукатур',
        # Ю
        'Ювелір', 'Юрист'
    ]
|
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/twisted/internet/glib2reactor.py | Python | gpl-3.0 | 62 | 0.016129 | ../ | ../../../../share/pyshared/twisted | /internet/glib2reactor.py |
codito/pomito | pomito/hooks/__init__.py | Python | mit | 312 | 0 | """
Hooks are notification only agents. They are notified of special events in a
Pomodoro lifecycle.
"""
import abc
class Hook(metaclass=abc.ABCMeta):
    """Base class for all hooks.

    Hooks are notification-only agents: subclasses are told of events in a
    Pomodoro lifecycle and must implement both lifecycle methods below.
    """

    @abc.abstractmethod
    def initialize(self):
        """Prepare the hook for use."""
        pass

    @abc.abstractmethod
    def close(self):
        """Release anything the hook holds; called when the hook is retired."""
        pass
|
leppa/home-assistant | homeassistant/components/statsd/__init__.py | Python | apache-2.0 | 2,957 | 0.000676 | """Support for sending data to StatsD."""
import logging
import statsd
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PREFIX, EVENT_STATE_CHANGED
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to this component.
CONF_ATTR = "log_attributes"
CONF_RATE = "rate"
CONF_VALUE_MAP = "value_mapping"

DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8125
DEFAULT_PREFIX = "hass"
DEFAULT_RATE = 1

DOMAIN = "statsd"

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_ATTR, default=False): cv.boolean,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_PREFIX, default=DEFAULT_PREFIX): cv.string,
                vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(
                    vol.Coerce(int), vol.Range(min=1)
                ),
                vol.Optional(CONF_VALUE_MAP): dict,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the StatsD component."""
    conf = config[DOMAIN]
    sample_rate = conf.get(CONF_RATE)
    value_mapping = conf.get(CONF_VALUE_MAP)
    log_attributes = conf.get(CONF_ATTR)
    statsd_client = statsd.StatsClient(
        host=conf.get(CONF_HOST),
        port=conf.get(CONF_PORT),
        prefix=conf.get(CONF_PREFIX),
    )

    def statsd_event_listener(event):
        """Listen for new messages on the bus and sends them to StatsD."""
        state = event.data.get("new_state")
        if state is None:
            return
        try:
            if value_mapping and state.state in value_mapping:
                numeric_state = float(value_mapping[state.state])
            else:
                numeric_state = state_helper.state_as_number(state)
        except ValueError:
            # Non-numeric state: still forward numeric attributes below.
            numeric_state = None
        _LOGGER.debug("Sending %s", state.entity_id)
        if log_attributes is True:
            if isinstance(numeric_state, (float, int)):
                statsd_client.gauge(
                    "%s.state" % state.entity_id, numeric_state, sample_rate
                )
            # Send attribute values
            for key, value in dict(state.attributes).items():
                if isinstance(value, (float, int)):
                    stat = "%s.%s" % (state.entity_id, key.replace(" ", "_"))
                    statsd_client.gauge(stat, value, sample_rate)
        else:
            if isinstance(numeric_state, (float, int)):
                statsd_client.gauge(state.entity_id, numeric_state, sample_rate)
        # Increment the count
        statsd_client.incr(state.entity_id, rate=sample_rate)

    hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
    return True
|
stephanekirsch/e-colle | accueil/models/ramassage.py | Python | agpl-3.0 | 14,879 | 0.018956 | from django.db import models, transaction, connection
from django.http import Http404
from datetime import date, timedelta
from django.db.models.functions import Lower
from django.db.models import Count, Sum, Min, Max
from .note import Note, array2tree
from .classe import Classe
from .semaine import Semaine
from ecolle.settings import BDD
def totalMois(arg):
    """Return a SQL fragment computing year*12 + month - 1 of *arg*,
    in the dialect of the configured database backend (BDD)."""
    if BDD in ('postgresql', 'postgresql_psycopg2', 'mysql', 'oracle'):
        return "EXTRACT(YEAR FROM {0})*12 + EXTRACT(MONTH FROM {0}) -1".format(arg)
    if BDD == 'sqlite3':
        return "strftime('%%Y',{0})*12+strftime('%%m',{0})-1".format(arg)
    # To be completed with the right fragment if you use a DBMS that is
    # neither mysql, postgresql, sqlite nor oracle.
    return ""
def mois():
    """Return the min and max months (as last-day-of-month dates) spanned by
    the colle weeks; max is pushed one month past the last week.
    Returns the current month twice if no week is defined."""
    try:
        moisMin=Semaine.objects.aggregate(Min('lundi'))
        moisMax=Semaine.objects.aggregate(Max('lundi'))
        # First day of the following month minus one day -> last day of month.
        moisMin=date(moisMin['lundi__min'].year+moisMin['lundi__min'].month//12,moisMin['lundi__min'].month%12+1,1)-timedelta(days=1) # last day of the month
        # Push 5 weeks past the last Monday, then snap to last day of month.
        moisMax=moisMax['lundi__max']+timedelta(days=35)
        moisMax=date(moisMax.year+moisMax.month//12,moisMax.month%12+1,1)-timedelta(days=1) # last day of the month
    except Exception:
        # No Semaine rows (or aggregate returned None): fall back to today.
        hui=date.today()
        moisMin=moisMax=date(hui.year+hui.month//12,hui.month%12+1,1)-timedelta(days=1)
    return moisMin,moisMax
class RamassageManager(models.Manager):
    def createOrUpdate(self, moisFin):
        """Create or update the ramassage whose end month is moisFin, then
        (re)build the associated Decompte rows from the notes recorded up to
        that date."""
        if self.filter(moisFin__gt=moisFin).exists(): # if a later ramassage exists, 404 (should not happen, barring tampering)
            raise Http404
        if self.filter(moisFin=moisFin).exists(): # a ramassage already exists for this month
            ramassage = self.get(moisFin = moisFin) # retrieve it
        else:
            ramassage = Ramassage(moisFin = moisFin) # otherwise create it
        # Sum colle time per (colleur, matiere, classe, month) over all notes
        # up to moisFin; totalMois() yields the backend-specific month index.
        requete = "SELECT co.id id_colleur, ma.id id_matiere, cl.id id_classe, {} moisTotal, SUM(ma.temps) \
            FROM accueil_colleur co\
            INNER JOIN accueil_user u\
            ON u.colleur_id = co.id\
            INNER JOIN accueil_colleur_classes cocl\
            ON co.id = cocl.colleur_id\
            INNER JOIN accueil_classe cl\
            ON cocl.classe_id = cl.id\
            LEFT OUTER JOIN accueil_note no\
            ON no.colleur_id = co.id AND no.classe_id = cl.id\
            INNER JOIN accueil_matiere ma\
            ON no.matiere_id = ma.id\
            WHERE u.is_active = {} AND no.date_colle <= %s\
            GROUP BY co.id, ma.id, cl.id, moisTotal".format(totalMois("no.date_colle"),1 if BDD == "sqlite3" else "TRUE")
        with connection.cursor() as cursor:
            cursor.execute(requete,(moisFin,))
            # Atomic: save the ramassage, drop any previous decompte, rebuild.
            with transaction.atomic():
                ramassage.save() # create, or refresh date/time on update
                Decompte.objects.filter(pk=ramassage.pk).delete() # clear the old decompte if this is an update
                for row in cursor.fetchall(): # re-create the decompte rows
                    Decompte.objects.create(colleur_id=row[0],matiere_id=row[1],classe_id=row[2],ramassage_id=ramassage.pk, mois=row[3] ,temps=row[4])
def decompteRamassage(self, ramassage, csv = True, parClasse = True, parMois = False, full = False, colleur = False):
"""Renvoie, pour chaque classe, la liste des colleurs avec leur nombre d'heures de colle ramassées au ramassage 'ramassage', s'ils en ont effectué
Si colleur ne vaut pas False, on ne ramasse que les colles du colleur en question"""
if Ramassage.objects.filter(moisFin__lt=ramassage.moisFin).exists() and not full: # s'il existe un ramassage antérieur et qu'on ne ramasse pas depuis le début
mois = Ramassage.objects.filter(moisFin__lt=ramassage.moisFin).aggregate(Max('moisFin'))['moisFin__max']
ramassage_precedent = Ramassage.objects.get(moisFin = mois) # on récupère le ramassage précédent
ramassage_precedent_pk = ramassage_precedent.pk
else:
ramassage_precedent_pk = 0
# pas de FULL OUTER JOIN avec MySQL, donc on bidouille avec jointure externe à gauche / à droite et un UNION ALL
requete = "SELECT cl.id classe_id, cl.nom classe_nom, cl.annee, ma.nom matiere_nom, COALESCE(et.nom, 'Inconnu') etab, col.grade, u.last_name nom, u.first_name prenom, col.id colleur_id,\
dec2.mois mois, SUM(dec2.temps) - COALESCE(SUM(dec1.temps),0) heures\
FROM accueil_decompte dec2\
LEFT OUTER JOIN accueil_decompte dec1\
ON dec1.colleur_id = dec2.colleur_id AND dec1.classe_id = dec2.classe_id AND dec1.matiere_id = dec2.matiere_id\
AND dec1.mois = dec2.mois AND dec1.ramassage_id = %s\
INNER JOIN accueil_colleur col\
ON dec2.colleur_id = col.id\
INNER JOIN accueil_classe cl\
ON dec2.classe_id = cl.id\
INNER JOIN accueil_matiere ma\
ON dec2.matiere_id = ma.id\
INNER JOIN accueil_user u\
ON u.colleur_id = col.id\
LEFT OUTER JOIN accueil_etablissement et\
ON col.etablisse | ment_id = et.id\
WHERE dec2.ramassage_id=%s AND dec2.temps - COALESCE(dec1.temps,0) != 0{}\
GROUP BY ma.nom, u.last_name, u.first_name, col.id, cl.id, et.nom, dec2.mois\
UNION ALL SELECT cl.id classe_id, cl.nom classe_nom, cl.annee, ma.nom matiere_nom, COALESCE(et.nom, 'Inconnu') etab, col.grade, u.last_name nom, u.first_name prenom, col.id colleur_id,\
dec1.mo | is mois, - SUM(dec1.temps) heures\
FROM accueil_decompte dec1\
LEFT OUTER JOIN accueil_decompte dec2\
ON dec1.colleur_id = dec2.colleur_id AND dec1.classe_id = dec2.classe_id AND dec1.matiere_id = dec2.matiere_id\
AND dec1.mois = dec2.mois AND dec2.ramassage_id=%s\
INNER JOIN accueil_colleur col\
ON dec1.colleur_id = col.id\
INNER JOIN accueil_classe cl\
ON dec1.classe_id = cl.id\
INNER JOIN accueil_matiere ma\
ON dec1.matiere_id = ma.id\
INNER JOIN accueil_user u\
ON u.colleur_id = col.id\
LEFT OUTER JOIN accueil_etablissement et\
ON col.etablissement_id = et.id\
WHERE dec2.id IS NULL AND dec1.ramassage_id = %s{}\
GROUP BY ma.nom, u.last_name, u.first_name, col.id, cl.id, et.nom, dec1.mois".format("" if not colleur else " AND col.id = %s", "" if not colleur else " AND col.id = %s")
if parMois:
requete = "SELECT * FROM ({}) as req\
ORDER BY {} req.matiere_nom, req.etab, req.grade, req.nom, req.prenom, req.mois".format(requete, "req.annee, req.classe_nom, " if parClasse else "")
else:
requete = "SELECT req.classe_id, req.classe_nom, req.annee, req.matiere_nom, req.etab, req.grade, req.nom, req.prenom, req.colleur_id, \
SUM(req.heures) heures FROM ({}) as req\
GROUP BY req.classe_id, req.classe_nom, req.annee, req.matiere_nom, req.etab, req.grade, req.nom, req.prenom, req.colleur_id\
ORDER BY {}req.matiere_nom, req.etab, req.grade, req.nom, req.prenom".format(requete, "req.annee, req.classe_nom, " if parClasse else "")
with connection.cursor() as cursor:
cursor.execute(requete,(ramassage_precedent_pk,ramassage.pk,ramassage.pk,ramassage_precedent_pk) if not colleur else (ramassage_precedent_pk,ramassage.pk,colleur.pk,ramassage.pk,ramassage_precedent_pk,colleur.pk))
decomptes = cursor.fetchall()
LISTE_GRADES=["inconnu","certifié","bi-admissible","agrégé","chaire sup"]
if not parClasse: # si on note par annee/effectif pour un csv ou un pdf
classes = Classe.objects.annotate(eleve_compte=Count('classeeleve'))
effectif_classe = [False]*6
for classe in classes:
effectif_classe[int(20<=classe.eleve_compte<=35)+2*int(35<classe.elev |
mscuthbert/abjad | abjad/tools/pitchtools/test/test_pitchtools_PitchArrayCell_pitches.py | Python | gpl-3.0 | 756 | 0.001323 | # -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_PitchArrayCell_pitches_01():
    """Pitches appended to cells are only visible in those cells."""

    array = pitchtools.PitchArray([[1, 2, 1], [2, 1, 1]])
    array[0].cells[0].pitches.append(NamedPitch(0))
    array[0].cells[1].pitches.append(NamedPitch(2))

    '''
    [c'] [d' ] []
    [ ] [] []
    '''

    assert array[0].cells[0].pitches == [NamedPitch(0)]
    assert array[0].cells[1].pitches == [NamedPitch(2)]
    assert array[0].cells[2].pitches == []
    assert array[1].cells[0].pitches == []
    assert array[1].cells[1].pitches == []
    assert array[1].cells[2].pitches == []
def test_pitchtools_PitchArrayCell_pitches_02():
    """A cell constructed with pitches reports them back."""
    cell = pitchtools.PitchArrayCell([NamedPitch(0)])
    assert cell.pitches == [NamedPitch(0)]
spiceqa/virt-test | qemu/tests/sr_iov_hotplug_negative.py | Python | gpl-2.0 | 4,079 | 0 | import logging
import os
from autotest.client.shared import error
from autotest.client import utils
from virttest import utils_test, utils_misc, utils_net
@error.context_aware
def run_sr_iov_hotplug_negative(test, params, env):
"""
KVM sr-iov hotplug negatvie test:
1) Boot up VM.
2) Try to remove sr-iov device driver module (optional)
3) Hotplug sr-iov device to VM with negative parameters
4) Verify that qemu could handle the negative parameters
check hotplug error message (optional)
:param test: qemu test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def make_pci_add_cmd(pa_pci_id, pci_addr="auto"):
pci_add_cmd = ("pci_add pci_addr=%s host host=%s,if=%s" %
(pci_addr, pa_pci_id, pci_model))
if params.get("hotplug_params"):
assign_param = params.get("hotplug_params").split()
for param in assign_param:
value = params.get(param)
if value:
pci_add_cmd += ",%s=%s" % (param, value)
return pci_add_cmd
def make_device_add_cmd(pa_pci_id, pci_addr=None):
device_id = "%s" % pci_model + "-" + utils_misc.generate_random_id()
pci_add_cmd = ("device_add id=%s,driver=pci-assign,host=%s" %
(device_id, pa_pci_id))
if pci_addr is not None:
pci_add_cmd += " | ,addr=%s" % pci_addr
if params.get("hotplug_params"):
assign_param = params.get("hotplug_params").split()
for param in assign_param:
value = params.get(param)
if value:
pci_add_cmd += ",%s=%s" % (param, value)
return pci_add_ | cmd
neg_msg = params.get("negative_msg")
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
rp_times = int(params.get("repeat_times", 1))
pci_model = params.get("pci_model", "pci-assign")
pci_addr = params.get("pci_addr")
modprobe_cmd = params.get("modprobe_cmd")
if modprobe_cmd:
# negative test, both guest and host should still work well.
msg = "Try to remove sr-iov module in host."
error.context(msg, logging.info)
utils.system(modprobe_cmd)
device = {}
device["type"] = params.get("hotplug_device_type", "vf")
device['mac'] = utils_net.generate_mac_address_simple()
if params.get("device_name"):
device["name"] = params.get("device_name")
if vm.pci_assignable is not None:
pa_pci_ids = vm.pci_assignable.request_devs(device)
# Probe qemu to verify what is the supported syntax for PCI hotplug
if vm.monitor.protocol == 'qmp':
cmd_output = vm.monitor.info("commands")
else:
cmd_output = vm.monitor.send_args_cmd("help")
if not cmd_output:
raise error.TestError("Unknow version of qemu")
cmd_type = utils_misc.find_substring(str(cmd_output), "pci_add",
"device_add")
for j in range(rp_times):
if cmd_type == "pci_add":
pci_add_cmd = make_pci_add_cmd(pa_pci_ids[0], pci_addr)
elif cmd_type == "device_add":
pci_add_cmd = make_device_add_cmd(pa_pci_ids[0], pci_addr)
try:
msg = "Adding pci device with command '%s'" % pci_add_cmd
error.context(msg, logging.info)
case_fail = False
add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
case_fail = True
except Exception, e:
if neg_msg:
msg = "Check negative hotplug error message"
error.context(msg, logging.info)
if neg_msg not in str(e):
msg = "Could not find '%s' in error msg '%s'" % (
neg_msg, e)
raise error.TestFail(msg)
logging.debug("Could not boot up vm, %s" % e)
if case_fail:
raise error.TestFail("Did not raise exception during hotpluging")
|
medifle/python_6.00.1x | L3_P9_bisection_search.py | Python | mit | 654 | 0.006116 | # Lec 3, Problem 9
# bisection search
print('Please think of a number between 0 and 100!')
low = 0
high = 100
x = '0'
while x != 'c':
guess = (low + high) / 2
print('Is your secret number ' + str(guess) + '?')
x = raw_input("Enter 'h' to indicate the guess is too high. Enter 'l' to ind | icate the guess is too low. Enter 'c' to indicate I guessed correctly. ")
if x == 'c':
print('Game over. Your secret number was: ' + str(guess))
break
elif x == 'h':
high = guess
elif x == 'l':
l | ow = guess
else:
print('Sorry, I did not understand your input.')
|
SoftwearDevelopment/spynl | spynl/main/utils.py | Python | mit | 18,906 | 0.000529 | """Helper functions and view derivers for spynl.main."""
import json
import logging
import traceback
import sys
import os
import contextlib
from functools import wraps
from inspect import isclass, getfullargspec
import yaml
from tld import get_tld
from tld.exceptions import TldBadUrl, TldDomainNotFound
from pyramid.response import Response
from pyramid.renderers import json_renderer_factory
from pyramid.exceptions import Forbidden
from pyramid import threadlocal
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound
from spynl.main import urlson
from spynl.main.exceptions import SpynlException, MissingParameter, BadOrigin
from spynl.main.version import __version__ as spynl_version
from spynl.main.locale import SpynlTranslationString as _
def get_request():
    """
    Retrieve current request.
    Use with care, though:
    http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
    """
    # Thread-local lookup; returns None outside a request/response cycle.
    return threadlocal.get_current_request()
def get_settings(setting=None):
    """
    Get settings (from .ini file [app:main] section)

    If *setting* is given, return just that value; otherwise return the
    whole settings mapping. Also reachable as request.registry.settings.
    For more info on the way we do it here, consult
    http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
    Our policy is to not edit the settings during a request/response cycle.
    """
    registry_settings = threadlocal.get_current_registry().settings or {}
    if setting is None:
        return registry_settings
    return registry_settings.get(setting)
def check_origin(endpoint, info):
    """Check if origin is allowed (view deriver)."""
    def wrapper_view(context, request):
        """raise HTTPForbidden if origin isn't allowed"""
        origin = request.headers.get('Origin', '')
        if not is_origin_allowed(origin):
            # because this is a wrapper, the bad origin will not be properly
            # escalated to forbidden, so it needs to be done like this.
            raise Forbidden(
                detail=BadOrigin(origin).message.translate(request.localizer)
            )
        return endpoint(context, request)
    return wrapper_view
# NOTE this has NOTHING to do with the check options view deriver. But we
# need to register it somewhere.
check_origin.options = ('is_error_view',)
def validate_locale(locale):
    """Validate a locale against our supported languages.

    Return the two-letter language code when supported, otherwise None.
    """
    supported = [
        lang.strip().lower()
        for lang in get_settings().get('spynl.languages', 'en').split(',')
    ]
    if not locale:
        return
    # we're only looking for languages here, not dialects.
    code = str(locale)[:2].lower()
    if code in supported:
        return code
def handle_pre_flight_request(endpoint, info):
    """
    "pre-flight-request": return custom response with some information on
    what we allow. Used by browsers before they send XMLHttpRequests.
    """
    def wrapper(context, request):
        """Call the endpoint if not an OPTION (pre-flight) request,
        otherwise return a custom Response."""
        if request.method != 'OPTIONS':
            return endpoint(context, request)
        else:
            headerlist = []
            origin = request.headers.get('Origin')
            if origin:  # otherwise we are on localhost or are called directly
                if is_origin_allowed(origin):
                    headerlist.append(('Access-Control-Allow-Origin', origin))
                else:
                    # 'null' explicitly denies the disallowed origin.
                    headerlist.append(('Access-Control-Allow-Origin', 'null'))
            headerlist.extend(
                [
                    ('Access-Control-Allow-Methods', 'GET,POST'),
                    ('Access-Control-Max-Age', '86400'),
                    ('Access-Control-Allow-Credentials', 'true'),
                    ('Content-Length', '0'),
                    ('Content-Type', 'text/plain'),
                ]
            )
            # you can send any headers to Spynl, basically
            if 'Access-Control-Request-Headers' in request.headers:
                headerlist.append(
                    (
                        'Access-Control-Allow-Headers',
                        request.headers['Access-Control-Request-Headers'],
                    )
                )
            # returning a generic and resource-agnostic pre-flight response
            return Response(headerlist=headerlist)
    return wrapper
def is_origin_allowed(origin):
    """
    Check request origin for matching our whitelists.

    First tries dev whitelists (that list is expected to hold
    either complete URLs or mere protocols, e.g. "chrome-extension://").
    Then the tld whitelist is tried, which is expected to hold
    only the top-level domains.
    Returns True if origin is allowed, False otherwise.
    """
    if not origin:
        return True
    settings = get_settings()
    dev_whitelist = parse_csv_list(settings.get('spynl.dev_origin_whitelist', ''))
    # Full-URL entries must match exactly.
    dev_list_urls = [url for url in dev_whitelist if not url.endswith('://')]
    origin_allowed = origin in dev_list_urls
    # Protocol-only entries (ending in '://') match any URL they prefix.
    dev_list_protocols = [url for url in dev_whitelist if url.endswith('://')]
    for protocol in dev_list_protocols:
        if origin.startswith(protocol):
            origin_allowed = True
    if not origin_allowed:
        try:
            tld = get_tld(origin)
        except (TldBadUrl, TldDomainNotFound):
            tld = origin  # dev domains like e.g. 0.0.0.0:9000 will fall here
        tld_whitelist = parse_csv_list(settings.get('spynl.tld_origin_whitelist', ''))
        if tld in tld_whitelist:
            origin_allowed = True
    return origin_allowed
def get_header_args(request):
    """Return a dictionary with arguments passed as headers."""
    # Spynl-specific arguments need an 'x-spynl-' prefix to be recognized.
    headers = {}
    for key, value in request.headers.items():
        if key.lower().startswith('x-spynl-'):
            headers[key] = value
    # The session id and client IP address may also arrive in the headers.
    for key in request.headers.keys():
        if key.lower() == 'sid':
            headers['sid'] = request.headers[key]
        if key == 'X-Forwarded-For':
            headers['X-Forwarded-For'] = request.headers[key]
    return headers
def get_parsed_body(request):
    """Return the body of the request parsed if request was POST or PUT."""
    settings = get_settings()
    body_parser = settings.get('spynl.post_parser')
    if request.method not in ('POST', 'PUT'):
        # disregard any body content if not a POST or PUT request
        request.parsed_body = {}
    elif body_parser:
        request.parsed_body = body_parser(request)
    else:
        request.parsed_body = json.loads(request.body) if request.body else {}
    return request.parsed_body
def unify_args(request):
"""
Make one giant args dictonary from GET, POST, headers and cookies and
return it. On the way, create r.parsed_body and r.parsed_get as well.
It is possible to provide a custom parser for the POST body in the
settings. Complex data can be given via GET as a JSON string.
GET would overwrite POST when parameter names collide.
"""
args = {}
# get headers first, they might be useful for parsing the body
args.update(get_header_args(request))
# get POST data
args.update(get_parsed_body(request))
# get GET args, can be written in JSON style
# args.update(urlson.loads_dict(request.GET))
# TODO: needs some refactoring - maybe urlson can actually do this parsing
# for us. We don't know the context yet.
from spynl.main.serial import objects
context = hasattr(request, 'context') and request.context or None
args.update(
json.loads(
json.dumps(urlson.loads_dict(request.GET)),
object_hook=objects.SpynlDecoder(context=context),
)
)
request.endpoint_method = find_view_name(request)
# get cookies, but do not overwrite explicitly given settings
for key in request |
a301-teaching/a301_code | a301lib/thermo.py | Python | mit | 1,394 | 0.025108 | import numpy as np
g = 9.8   # m s^-2; don't worry about g(z) for this exercise
Rd = 287.  # dry-air gas constant, J kg^-1 K^-1


def calcScaleHeight(df):
    """
    Calculate the pressure scale height H_p for a sounding.

    Parameters
    ----------
    df: pandas.DataFrame
        sounding with columns 'z' (height, m) and 'temp' (temperature, K),
        ordered from bottom to top

    Returns
    -------
    Hbar: float
        pressure scale height (m), the thickness-weighted harmonic average
        over the sounding
    """
    z = df['z'].values
    Temp = df['temp'].values
    dz = np.diff(z)
    # layer-mean temperature between adjacent levels
    TLayer = (Temp[1:] + Temp[0:-1]) / 2.
    oneOverH = g / (Rd * TLayer)
    Zthick = z[-1] - z[0]
    # thickness-weighted average of 1/H, then invert
    oneOverHbar = np.sum(oneOverH * dz) / Zthick
    Hbar = 1 / oneOverHbar
    return Hbar
def calcDensHeight(df):
    """
    Calculate the density scale height H_rho for a sounding.

    Parameters
    ----------
    df: pandas.DataFrame
        sounding with columns 'z' (height, m) and 'temp' (temperature, K),
        ordered from bottom to top

    Returns
    -------
    Hbar: float
        density scale height (m), the thickness-weighted harmonic average
        over the sounding
    """
    z = df['z'].values
    Temp = df['temp'].values
    dz = np.diff(z)
    # layer-mean temperature between adjacent levels
    TLayer = (Temp[1:] + Temp[0:-1]) / 2.
    # 1/H_rho = g/(Rd*T) + (1/T) dT/dz
    dTdz = np.diff(Temp) / np.diff(z)
    oneOverH = g / (Rd * TLayer) + (1 / TLayer * dTdz)
    Zthick = z[-1] - z[0]
    oneOverHbar = np.sum(oneOverH * dz) / Zthick
    Hbar = 1 / oneOverHbar
    return Hbar
andresriancho/HTTPretty | setup.py | Python | mit | 2,629 | 0.00038 | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2018> Gabriel Falcao <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to | the following
# con | ditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import io
import os
from setuptools import setup, find_packages
def read_version():
    """Execute httpretty/version.py in a scratch namespace and return its
    ``version`` string (avoids importing the package during setup)."""
    ctx = {}
    exec(local_file('httpretty', 'version.py'), ctx)
    return ctx['version']
def local_file(*f):
    """Return the UTF-8 text of a file located relative to this setup.py.

    Was previously a lambda assignment (PEP 8 E731); converted to a def
    with the same name and signature.
    """
    return io.open(
        os.path.join(os.path.dirname(__file__), *f), encoding='utf-8').read()


install_requires = ['six']
tests_requires = ['nose', 'sure', 'coverage', 'mock', 'rednose']
# Register the package with setuptools; this metadata is what lands on PyPI.
setup(
    name='httpretty',
    version=read_version(),
    description='HTTP client mock for Python',
    long_description=local_file('README.rst'),
    author='Gabriel Falcao',
    author_email='gabriel@nacaolivre.org',
    url='https://httpretty.readthedocs.io',
    zip_safe=False,
    packages=find_packages(exclude=['*tests*']),
    tests_require=tests_requires,
    install_requires=install_requires,
    license='MIT',
    test_suite='nose.collector',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Testing'
    ],
)
|
ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_misc/pyunit_all_confusion_matrix_funcs.py | Python | apache-2.0 | 6,323 | 0.008857 | import sys
sys.path.insert(1, "../../")
import h2o
import random
def all_confusion_matrix_funcs(ip,port):
# Connect to h2o
h2o.init(ip,port)
metrics = ["min_per_class_accuracy", "absolute_MCC", "precision", "accuracy", "f0point5", "f2", "f1"]
train = [True, False]
valid = [True, False]
print "PARSING TRAINING DATA"
air_train = h2o.import_frame(path=h2o.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
print "PARSING TESTING DATA"
air_test = h2o.import_frame(path=h2o.locate("smalldata/airlines/AirlinesTest.csv.zip"))
print
print "RUNNING FIRST GBM: "
print
gbm_bin = h2o.gbm(x=air_train[["Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth","fDayOfWeek"]],
y=air_train["IsDepDelayed"].asfactor(),
validation_x=air_test[["Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth",
"fDayOfWeek"]],
validation_y=air_test["IsDepDelayed"].asfactor(),
distribution="bernoulli")
print
print "RUNNING SECOND GBM: "
print
gbm_mult = h2o.gbm(x=air_train[["Origin", "Dest", "Distance", "UniqueCarrier", "IsDepDelayed", "fDayofMonth",
"fMonth"]],
| y=air_train["fDayOfWeek"].asfactor(),
validation_x=air_test[["Origin", "Dest", "Distance", "UniqueCarrier", "IsDepDelayed", "fDayofMonth",
"fMonth"]],
validation_y=air_test["fDayOfWeek"].asfactor(),
distributio | n="multinomial")
def dim_check(cm, m, t, v):
assert len(cm) == 2 and len(cm[0]) == 2 and len(cm[1]) == 2, "incorrect confusion matrix dimensions " \
"for metric/thresh: {0}, train: {1}, valid: " \
"{2}".format(m, t, v)
def type_check(cm, m, t, v):
assert isinstance(cm[0][0], (int, float)) and isinstance(cm[0][1], (int, float)) and \
isinstance(cm[1][0], (int, float)) and isinstance(cm[0][0], (int, float)), \
"confusion matrix entries should be integers or floats but got {0}, {1}, {2}, {3}. metric/thresh: {4}, " \
"train: {5}, valid: {6}".format(type(cm[0][0]), type(cm[0][1]), type(cm[1][0]), type(cm[1][1]), m,
t, v)
def count_check(cm, m, t, v):
if v:
assert cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1] == air_test.nrow(), \
"incorrect confusion matrix elements: {0}, {1}, {2}, {3}. Should sum " \
"to {4}. metric/thresh: {5}, train: {6}, valid: {7}".format(cm[0][0], cm[0][1], cm[1][0], cm[1][1],
air_test.nrow(), m, t, v)
else:
assert cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1] == air_train.nrow(), \
"incorrect confusion matrix elements: {0}, {1}, {2}, {3}. Should sum " \
"to {4}. metric/thresh: {5}, train: {6}, valid: {7}".format(cm[0][0], cm[0][1], cm[1][0], cm[1][1],
air_train.nrow(), m, t, v)
# H2OBinomialModel.confusion_matrix()
for m in metrics:
for t in train:
for v in valid:
if t and v: continue
cm = gbm_bin.confusion_matrix(metrics=m, train=t, valid=v)
if cm:
cm = cm.to_list()
dim_check(cm, m, t, v)
type_check(cm, m, t, v)
count_check(cm, m, t, v)
# H2OBinomialModel.confusion_matrix()
for x in range(10):
for t in train:
for v in valid:
if t and v: continue
thresholds = [gbm_bin.find_threshold_by_max_metric(m,t,v) for m in
random.sample(metrics,random.randint(1,len(metrics)))]
cms = gbm_bin.confusion_matrix(thresholds=thresholds, train=t, valid=v)
if not isinstance(cms, list): cms = [cms]
for idx, cm in enumerate(cms):
cm = cm.to_list()
dim_check(cm, thresholds[idx], t, v)
type_check(cm, thresholds[idx], t, v)
count_check(cm, thresholds[idx], t, v)
# H2OMultinomialModel.confusion_matrix()
cm = gbm_mult.confusion_matrix(data=air_test)
cm_count = 0
for r in range(7):
for c in range(7):
cm_count += cm.cell_values[r][c]
assert cm_count == air_test.nrow(), "incorrect confusion matrix elements. Should sum to {0}, but got {1}".\
format(air_test.nrow(), cm_count)
# H2OBinomialModelMetrics.confusion_matrix()
bin_perf = gbm_bin.model_performance(valid=True)
for metric in metrics:
cm = bin_perf.confusion_matrix(metrics=metric).to_list()
dim_check(cm, metric, False, True)
type_check(cm, metric, False, True)
count_check(cm, metric, False, True)
# H2OBinomialModelMetrics.confusion_matrix()
bin_perf = gbm_bin.model_performance(train=True)
for x in range(10):
thresholds = [gbm_bin.find_threshold_by_max_metric(m,t,v) for m in
random.sample(metrics,random.randint(1,len(metrics)))]
cms = bin_perf.confusion_matrix(thresholds=thresholds)
if not isinstance(cms, list): cms = [cms]
for idx, cm in enumerate(cms):
cm = cm.to_list()
dim_check(cm, thresholds[idx], True, False)
type_check(cm, thresholds[idx], True, False)
count_check(cm, thresholds[idx], True, False)
# H2OMultinomialModelMetrics.confusion_matrix()
mult_perf = gbm_mult.model_performance(valid=True)
cm = mult_perf.confusion_matrix()
cm_count = 0
for r in range(7):
for c in range(7):
cm_count += cm.cell_values[r][c]
assert cm_count == air_test.nrow(), "incorrect confusion matrix elements. Should sum to {0}, but got {1}". \
format(air_test.nrow(), cm_count)
# Standard h2o-py test entry point: delegates argument parsing to run_test.
if __name__ == "__main__":
    h2o.run_test(sys.argv, all_confusion_matrix_funcs)
airspeed-velocity/asv | asv/commands/rm.py | Python | bsd-3-clause | 3,669 | 0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
from fnmatch import fnmatchcase
from . import Command, util
from .. import console
from ..console import log
from ..results import iter_results
class Rm(Command):
    """``asv rm`` subcommand: remove entries from the results database."""

    @classmethod
    def setup_arguments(cls, subparsers):
        """Register the ``rm`` subcommand on *subparsers*; return its parser."""
        parser = subparsers.add_parser(
            "rm", help="Remove results from the database",
            description="""
            Removes entries from the results database.
            """)

        parser.add_argument(
            'patterns', nargs='+',
            help="""Pattern(s) to match, each of the form X=Y. X may
            be one of "benchmark", "commit_hash", "python" or any of
            the machine or environment params. Y is a case-sensitive
            glob pattern.""")
        parser.add_argument(
            "-y", action="store_true",
            help="""Don't prompt for confirmation.""")

        parser.set_defaults(func=cls.run_from_args)

        return parser

    @classmethod
    def run_from_conf_args(cls, conf, args):
        return cls.run(conf, args.patterns, args.y)

    @classmethod
    def run(cls, conf, patterns, y=True):
        """Remove results matching *patterns* from ``conf.results_dir``.

        Each pattern is ``X=Y``.  ``X`` may be ``benchmark`` (at most once),
        ``commit_hash``, ``python`` or any machine/environment param;
        ``Y`` is a case-sensitive glob.  When a ``benchmark`` pattern is
        given, only the matching benchmarks are pruned from each matching
        file and the file is re-saved; otherwise whole files are removed.
        With ``y`` false, prompts for confirmation before touching disk.
        """
        global_patterns = {}
        single_benchmark = None
        files_to_remove = set()
        count = 0

        # Parse the X=Y patterns, separating the (unique) benchmark
        # pattern from the file-level ones.
        for pattern in patterns:
            parts = pattern.split('=', 1)
            if len(parts) != 2:
                raise util.UserError("Invalid pattern '{0}'".format(pattern))

            if parts[0] == 'benchmark':
                if single_benchmark is not None:
                    raise util.UserError("'benchmark' appears more than once")
                single_benchmark = parts[1]
            else:
                if parts[0] in global_patterns:
                    raise util.UserError(
                        "'{0}' appears more than once".format(parts[0]))
                global_patterns[parts[0]] = parts[1]

        for result in iter_results(conf.results_dir):
            # All file-level patterns must match for the file to be touched.
            found = True
            for key, val in global_patterns.items():
                if key == 'commit_hash':
                    if not util.hash_equal(result.commit_hash, val):
                        found = False
                        break
                elif key == 'python':
                    if not fnmatchcase(result.env.python, val):
                        found = False
                        break
                else:
                    if not fnmatchcase(result.params.get(key), val):
                        found = False
                        break
            if not found:
                continue

            if single_benchmark is not None:
                # Prune matching benchmarks in-memory; the file is only
                # re-saved after the confirmation prompt below.
                # (Removed a dead `found = False` that was never read.)
                for benchmark in list(result.get_all_result_keys()):
                    if fnmatchcase(benchmark, single_benchmark):
                        count += 1
                        files_to_remove.add(result)
                        result.remove_result(benchmark)
            else:
                files_to_remove.add(result)

        if single_benchmark is not None:
            log.info("Removing {0} benchmarks in {1} files".format(
                count, len(files_to_remove)))
        else:
            log.info("Removing {0} files".format(len(files_to_remove)))

        if not y:
            do = console.get_answer_default("Perform operations", "n")
            if len(do) and do.lower()[0] != 'y':
                # User declined: exit without persisting any change.
                sys.exit(0)

        if single_benchmark is not None:
            for result in files_to_remove:
                result.save(conf.results_dir)
        else:
            for result in files_to_remove:
                result.rm(conf.results_dir)
|
transientskp/tkp | tkp/accessors/dataaccessor.py | Python | bsd-2-clause | 6,243 | 0.001121 | import logging
from tkp.quality.rms import rms_with_clipped_subregion
from tkp.accessors.requiredatts import RequiredAttributesMetaclass
from math import degrees, sqrt, sin, pi, cos
logger = logging.getLogger(__name__)
class DataAccessor(object):
    """
    Base class for accessors used with
    :class:`tkp.sourcefinder.image.ImageData`.

    A data accessor gives the generic image representation one uniform
    way to reach images however they are stored (FITS files, in-memory
    arrays, potentially HDF5, ...).  This class cannot be instantiated
    directly: subclass it and provide every attribute listed in
    ``_required_attributes``.  Subclasses may expose extra properties,
    but consumers must degrade gracefully when those are absent.

    Required attributes:

    beam
        (semi-major axis [pixels], semi-minor axis [pixels],
        position angle [radians]) of the restoring beam.
    centre_ra, centre_decl
        Sky position of the central pixel, J2000 decimal degrees.
    data
        Two-dimensional numpy.ndarray of floating point pixel values.
    freq_bw
        Frequency bandwidth of this image, Hz.
    freq_eff
        Effective (mean) frequency of the imaged visibility data, Hz.
    pixelsize
        (x, y) pixel size along each axis, degrees.
    tau_time
        Total time on sky, seconds.
    taustart_ts
        Timestamp of the first integration in this image (MJD seconds).
    url
        Location of the image at time of processing.
    wcs
        :class:`tkp.utility.coordinates.WCS` instance mapping data
        pixels to sky coordinates.
    """
    __metaclass__ = RequiredAttributesMetaclass

    # Enforced by RequiredAttributesMetaclass: every concrete subclass
    # must supply all of these.
    _required_attributes = [
        'beam',
        'centre_ra',
        'centre_decl',
        'data',
        'freq_bw',
        'freq_eff',
        'pixelsize',
        'tau_time',
        'taustart_ts',
        'url',
        'wcs',
    ]

    def __init__(self):
        """Abstract base; see the class docstring for the contract."""

    def extract_metadata(self):
        """
        Massage the class attributes into a flat dictionary with
        database-friendly values.

        While rather tedious, this is easy to serialize and store
        separately to the actual image data.  May be extended by
        subclasses to return additional data.
        """
        beam_smaj = self.beam[0]
        beam_smin = self.beam[1]
        beam_pa = self.beam[2]
        deltax = self.pixelsize[0]
        deltay = self.pixelsize[1]
        return {
            'tau_time': self.tau_time,
            'freq_eff': self.freq_eff,
            'freq_bw': self.freq_bw,
            'taustart_ts': self.taustart_ts,
            'url': self.url,
            'beam_smaj_pix': beam_smaj,
            'beam_smin_pix': beam_smin,
            'beam_pa_rad': beam_pa,
            'centre_ra': self.centre_ra,
            'centre_decl': self.centre_decl,
            'deltax': deltax,
            'deltay': deltay,
        }

    def parse_pixelsize(self):
        """
        Read the pixel scale out of ``self.wcs``.

        Returns:
            (deltax, deltay): pixel size along the x and y axis, degrees.

        Raises:
            ValueError: unknown WCS unit, or non-square pixels.
        """
        wcs = self.wcs
        # Pixels are assumed square; mixed units across axes would be
        # pretty strange data.
        assert wcs.cunit[0] == wcs.cunit[1]
        unit = wcs.cunit[0]
        if unit == "deg":
            deltax = wcs.cdelt[0]
            deltay = wcs.cdelt[1]
        elif unit == "rad":
            deltax = degrees(wcs.cdelt[0])
            deltay = degrees(wcs.cdelt[1])
        else:
            raise ValueError("Unrecognised WCS co-ordinate system")

        # NB. What's a reasonable epsilon here?
        eps = 1e-7
        if abs(abs(deltax) - abs(deltay)) > eps:
            raise ValueError("Image WCS header suggests non-square pixels."
                             "This is an untested use case, and may break things -"
                             "specifically the skyregion tracking but possibly other stuff too.")
        return deltax, deltay

    @staticmethod
    def degrees2pixels(bmaj, bmin, bpa, deltax, deltay):
        """
        Convert a beam given in degrees (e.g. FITS beam parameters) into
        pixels and radians.

        Arguments:
            bmaj: beam major axis, degrees.
            bmin: beam minor axis, degrees.
            bpa: beam position angle, degrees.
            deltax: pixel size along the x axis, degrees.
            deltay: pixel size along the y axis, degrees.

        Returns:
            (semimaj, semimin, theta): semi-major and semi-minor axes in
            pixels, and the position angle in radians.
        """
        theta = pi * bpa / 180
        sin_pa_sq = sin(theta) ** 2
        cos_pa_sq = cos(theta) ** 2
        semimaj = (bmaj / 2.) * sqrt(sin_pa_sq / deltax**2 + cos_pa_sq / deltay**2)
        semimin = (bmin / 2.) * sqrt(cos_pa_sq / deltax**2 + sin_pa_sq / deltay**2)
        return (semimaj, semimin, theta)
|
jessefeinman/FintechHackathon | python-getting-started/sample stuff/gettingstarted/urls.py | Python | bsd-2-clause | 385 | 0.002597 | from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()

import hello.views

# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),

# Route table: index page, the DB demo view, and the Django admin.
urlpatterns = [
    url(r'^$', hello.views.index, name='index'),
    # NOTE(review): '^db' has no trailing '$', so it also matches longer
    # paths (e.g. /database) — presumably fine for this sample app, but
    # confirm before tightening to r'^db$'.
    url(r'^db', hello.views.db, name='db'),
    url(r'^admin/', include(admin.site.urls)),
]
|
jdemel/gnuradio | gr-digital/examples/example_fll.py | Python | gpl-3.0 | 5,057 | 0.006723 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fll(gr.top_block):
    """Flowgraph exercising the band-edge FLL: random BPSK symbols are
    RRC pulse-shaped, passed through a channel model, then tracked by
    ``fll_band_edge_cc``; sinks capture every stream for inspection."""

    def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
        gr.top_block.__init__(self)

        # Root-raised-cosine taps for the interpolating pulse shaper.
        taps = filter.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        # Random +/-1 BPSK symbols, rotated by the requested phase offset.
        symbols = 2.0 * numpy.random.randint(0, 2, N) - 1.0
        symbols = numpy.exp(1j * poffset) * symbols

        self.src = blocks.vector_source_c(symbols.tolist(), False)
        self.rrc = filter.interp_fir_filter_ccf(sps, taps)
        self.chn = channels.channel_model(noise, foffset, toffset)
        self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)

        # Sinks: shaped source, FLL output, and the FLL's frequency,
        # phase and error diagnostic streams (ports 1-3).
        self.vsnk_src = blocks.vector_sink_c()
        self.vsnk_fll = blocks.vector_sink_c()
        self.vsnk_frq = blocks.vector_sink_f()
        self.vsnk_phs = blocks.vector_sink_f()
        self.vsnk_err = blocks.vector_sink_f()

        self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
        self.connect(self.rrc, self.vsnk_src)
        self.connect((self.fll, 1), self.vsnk_frq)
        self.connect((self.fll, 2), self.vsnk_phs)
        self.connect((self.fll, 3), self.vsnk_err)
def main():
    """Parse options, run the FLL simulation, then plot the LO
    frequency, loop error, IQ constellation and time-domain symbols."""
    parser = ArgumentParser(conflict_handler="resolve")
    parser.add_argument("-N", "--nsamples", type=int, default=2000,
                        help="Set the number of samples to process [default=%(default)r]")
    parser.add_argument("-S", "--sps", type=int, default=4,
                        help="Set the samples per symbol [default=%(default)r]")
    parser.add_argument("-r", "--rolloff", type=eng_float, default=0.35,
                        help="Set the rolloff factor [default=%(default)r]")
    parser.add_argument("-W", "--bandwidth", type=eng_float, default=2*numpy.pi/100.0,
                        help="Set the loop bandwidth [default=%(default)r]")
    parser.add_argument("-n", "--ntaps", type=int, default=45,
                        help="Set the number of taps in the filters [default=%(default)r]")
    parser.add_argument("--noise", type=eng_float, default=0.0,
                        help="Set the simulation noise voltage [default=%(default)r]")
    parser.add_argument("-f", "--foffset", type=eng_float, default=0.2,
                        help="Set the simulation's normalized frequency offset (in Hz) [default=%(default)r]")
    parser.add_argument("-t", "--toffset", type=eng_float, default=1.0,
                        help="Set the simulation's timing offset [default=%(default)r]")
    parser.add_argument("-p", "--poffset", type=eng_float, default=0.0,
                        help="Set the simulation's phase offset [default=%(default)r]")
    args = parser.parse_args()

    # Adjust N for the interpolation by sps
    args.nsamples = args.nsamples // args.sps

    # Set up the program-under-test
    put = example_fll(args.nsamples, args.sps, args.rolloff,
                      args.ntaps, args.bandwidth, args.noise,
                      args.foffset, args.toffset, args.poffset)
    put.run()

    data_src = numpy.array(put.vsnk_src.data())
    data_err = numpy.array(put.vsnk_err.data())

    # Convert the FLL's LO frequency from rads/sec to Hz
    data_frq = numpy.array(put.vsnk_frq.data()) / (2.0*numpy.pi)

    # adjust this to align with the data. There are 2 filters of
    # ntaps long and the channel introduces another 4 sample delay.
    data_fll = numpy.array(put.vsnk_fll.data()[2*args.ntaps-4:])

    # Plot the FLL's LO frequency
    f1 = pyplot.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(2,2,1)
    s1.plot(data_frq)
    s1.set_title("FLL LO")
    s1.set_xlabel("Samples")
    s1.set_ylabel("Frequency (normalized Hz)")

    # Plot the FLL's error
    s2 = f1.add_subplot(2,2,2)
    s2.plot(data_err)
    s2.set_title("FLL Error")
    s2.set_xlabel("Samples")
    s2.set_ylabel("FLL Loop error")

    # Plot the IQ symbols (source in blue, FLL output in red)
    s3 = f1.add_subplot(2,2,3)
    s3.plot(data_src.real, data_src.imag, "o")
    s3.plot(data_fll.real, data_fll.imag, "rx")
    s3.set_title("IQ")
    s3.set_xlabel("Real part")
    s3.set_ylabel("Imag part")

    # Plot the symbols in time
    s4 = f1.add_subplot(2,2,4)
    s4.plot(data_src.real, "o-")
    s4.plot(data_fll.real, "rx-")
    s4.set_title("Symbols")
    s4.set_xlabel("Samples")
    s4.set_ylabel("Real Part of Signals")

    pyplot.show()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
|
NTesla/wordpress-sploit-framework | web_server_builder.py | Python | gpl-3.0 | 2,235 | 0.026846 | from BaseHTT | PServer i | mport HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import sys
import random
global_url = global_parameters = global_method = global_payload = ''
class HTTPHandler (SimpleHTTPRequestHandler):
    """Serves a single exploit page built from the module-level
    global_url / global_parameters / global_method / global_payload
    values (populated by WebServer).  Python 2 code."""

    server_version = "LibHttpWSF/1.0"

    def do_GET(self):
        # Log every client hit, then render the exploit page.
        print "[+] New connection: %s:%d" % (self.client_address[0], self.client_address[1])
        self.index()

    def prepare_request(self, csrf_name):
        """Build the attack markup.

        GET: appends the parameters to global_url as a query string and
        substitutes the result for the "[EXPLOIT]" placeholder in the
        payload.  POST: returns a hidden form with id *csrf_name*
        targeting global_url (auto-submitted by index()).
        """
        global global_url
        global global_parameters
        global global_method
        global global_payload
        if global_method.lower() == "get":
            global_url += "?"
            for key, value in global_parameters.items():
                global_url += key + "=" + value + "&"
            # NOTE(review): this leaves a trailing '&' and mutates
            # global_url/global_payload on every request, so repeated
            # GETs keep appending — looks unintended; confirm.
            global_payload = global_payload.replace("[EXPLOIT]", global_url)
            return global_payload
        elif global_method.lower() == "post":
            result = "<form id='"+csrf_name+"' action='"+global_url+"' method='post'>"
            for key, value in global_parameters.items():
                result += "<input type='hidden' name='"+key+"' value='"+value+"'>"
            result += "</form>"
            return result

    def index(self):
        """Render and send the generated HTML page to the client."""
        global global_method
        if global_method.lower() == "get":
            html_response = '<html><head></head><body>'+self.prepare_request('')+'</body></html>'
        else:
            # Random 36-char form id; body onload auto-submits the form.
            csrf_name = ''.join(random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(36))
            html_response = '<html><head></head><body onload="document.getElementById(\''+csrf_name+'\').submit()">'+self.prepare_request(csrf_name)+'</body></html>'
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", len(html_response))
        self.end_headers()
        self.wfile.write(html_response)
        # NOTE(review): delegating to the parent's do_GET after the
        # response has already been written emits a second (file/directory)
        # response on the same connection — confirm this is intended.
        return SimpleHTTPRequestHandler.do_GET(self)
class WebServer:
    """Blocking HTTP server that publishes the exploit page built by
    HTTPHandler from the given target url/parameters/method/payload.

    Handler configuration travels through module-level globals, so only
    one WebServer per process makes sense.
    """

    def __init__(self, port, url, parameters, method, payload):
        global global_url, global_parameters, global_method, global_payload
        global_url = url
        global_parameters = parameters
        global_method = method
        global_payload = payload
        self.port = port
        print("The web server is started on port %i" % self.port)
        self.initialize()

    def initialize(self):
        # Serve forever on all interfaces; HTTPHandler renders the page.
        server = HTTPServer(("", self.port), HTTPHandler)
        server.serve_forever()
openstack/tosca-parser | toscaparser/__init__.py | Python | apache-2.0 | 643 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy o | f the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
i | mport pbr.version
# Package version string computed by pbr from the package metadata.
__version__ = pbr.version.VersionInfo(
    'tosca-parser').version_string()
|
HarrisonAlpine/google-classroom-tools | list_students.py | Python | mit | 1,063 | 0.000941 | #!/usr/bin/env python
import googlehelper as gh
import json
import os
# Historical course ids, kept for quick manual switching:
# DEFAULT_COURSE_ID = '7155852796'  # Computer Programming A1
# DEFAULT_COURSE_ID = '7621825175'  # Robotics
DEFAULT_COURSE_ID = '7557587733'  # Computer Programming A4


if __name__ == '__main__':
    # Ask the user which course to export, then pull its roster.
    course = gh.get_course_from_user()
    course_id = course['id']
    students = gh.list_students(course_id)

    # One output directory per course, created on demand.
    course_dir = gh.get_course_dir(course)
    os.makedirs(course_dir, exist_ok=True)
    txt_path = os.path.join(course_dir, 'students.txt')
    json_path = os.path.join(course_dir, 'students.json')

    # Tab-separated "full name<TAB>id" listing.
    with open(txt_path, 'w') as txt_out:
        for student in students:
            profile = student['profile']
            line = '{}\t{}'.format(profile['name']['fullName'],
                                   profile['id'])
            print(line, file=txt_out)

    # Raw roster, pretty-printed as JSON.
    with open(json_path, 'w') as json_out:
        print(json.dumps(students, indent=2), file=json_out)

    # Echo the text listing to stdout.
    with open(txt_path) as txt_in:
        print(txt_in.read())
crmccreary/openerp_server | openerp/addons/survey/wizard/__init__.py | Python | agpl-3.0 | 1,237 | 0.000808 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP | S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without ev | en the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_send_invitation
import survey_print_statistics
import survey_print_answer
import survey_browse_answer
import survey_selection
import survey_answer
import survey_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
chrisxue815/leetcode_python | problems/test_0316_greedy.py | Python | unlicense | 1,258 | 0 | import unittest
import utils
def _find_max_possible_index(s, count, counts):
    """Return the largest index ``i`` such that ``s[i:]`` still contains
    all ``count`` distinct letters flagged in ``counts``; implicitly
    returns None when it never does.

    ``counts`` itself is not mutated — a local copy is consumed while
    scanning from the right.
    """
    remaining = list(counts)
    for i in reversed(range(len(s))):
        code = ord(s[i])
        if remaining[code]:
            remaining[code] = 0
            count -= 1
            if count == 0:
                return i
# O(n^2) time. O(1) space. Greedy.
class Solution:
def removeDuplicateLetters(self, s: str) -> str:
result = []
counts = [0] * (ord('z') + 1)
for c in s:
counts[ord(c)] = 1
count = sum(counts)
hi = _find_max_possible_index(s, count, counts)
lo = 0
| for _ in range(count):
min_c = 256
min_i = 0
for i in range(lo, hi + 1):
c = ord(s[i])
if counts[c] and c < min_c:
min_c = c
min_i = i
result.append(chr(min_c))
counts[min_c] = 0
count -= 1
lo = min_i + 1
if min_c == ord(s[hi]):
hi = _find_max | _possible_index(s, count, counts)
return ''.join(result)
class Test(unittest.TestCase):
    # unittest bridge: delegates to the project's utils.test helper,
    # which presumably runs Solution against this file's fixtures —
    # confirm in utils.
    def test(self):
        utils.test(self, __file__, Solution)


if __name__ == '__main__':
    unittest.main()
|
0xa/pyopenvpn | ovpn_proxy.py | Python | mit | 8,065 | 0.001984 | #!/bin/python3
import random
import logging
import socket
import io
import ipaddress
import threading
import select
import time
import queue
from argparse import ArgumentParser
from datetime import datetime, timedelta
from scapy.all import *
from pyopenvpn import Client, Settings
class SOCKS5Connection(threading.Thread):
    """One proxied TCP connection.

    Bridges a local client socket (``sock``) and a hand-rolled TCP
    session spoken over the VPN tunnel: outbound scapy TCP segments are
    queued on ``outgoing_packets`` (drained, IP-encapsulated and sent by
    ``SOCKS5Server.__call__``); inbound tunnel segments arrive through
    ``receive()`` on ``tunnel_in_queue``.
    """

    def __init__(self, server, sock, src_port, dest_host, dest_port):
        # server: owning SOCKS5Server (used here for its logger).
        # sock: accepted client socket on the SOCKS side.
        # src_port: local port used for the tunnel-side TCP session.
        # dest_host/dest_port: remote endpoint requested by the client.
        super().__init__()
        self.daemon = True
        self.server = server
        self.sock = sock
        self.src_port = src_port
        self.dest_host = dest_host
        self.dest_port = dest_port
        self.running = True
        self.log = server.log
        self.outgoing_packets = []
        self.tunnel_in_queue = queue.Queue()

    def run(self):
        """Handshake, then shuttle bytes both ways until either side
        stops; finally queue a trailing ACK and a FIN/ACK."""
        # FIXME: I HAVE NO IDEA HOW TCP WORKS, the last ACK is fucked up, the
        # whole thing is slow, breaks down on large pages and may explode at
        # any time.

        # SYN
        # NOTE(review): 0xfffffff has seven f's (2**28-1); an ISN may use
        # the full 32-bit space (0xffffffff) — harmless but confirm.
        syn = TCP(sport=self.src_port, dport=self.dest_port, seq=random.randint(0, 0xfffffff))
        self.outgoing_packets.append(syn)

        # Wait for SYN ACK
        synack = self.tunnel_in_queue.get()
        # FIXME: check if it's actually a SYN ACK

        # ACK the SYN ACK
        hsack = TCP(sport=self.src_port, dport=self.dest_port, flags='A',
                    seq=synack.ack, ack=synack.seq + 1)
        self.outgoing_packets.append(hsack)

        # l_seq/r_seq: our and the remote side's sequence numbers.
        l_seq = hsack.seq
        r_seq = hsack.ack

        self.log.info("Opened connection to %s:%d", self.dest_host, self.dest_port)

        # Short timeout so the loop keeps alternating between the client
        # socket and the tunnel queue.
        self.sock.settimeout(0.001)
        while self.running:
            # Client -> tunnel direction.
            try:
                data = self.sock.recv(2048)
                if not data:
                    break
            except (socket.timeout, BrokenPipeError):
                data = None
            if data:
                packet = TCP(sport=self.src_port, dport=self.dest_port, flags='A',
                             seq=l_seq, ack=r_seq)
                packet /= Raw(load=data)
                self.outgoing_packets.append(packet)
                l_seq += len(data)

            # Tunnel -> client direction (non-blocking poll).
            try:
                packet = self.tunnel_in_queue.get(block=False)
                self.sock.send(bytes(packet.payload))
                assert packet.ack == l_seq
                r_seq = packet.seq
            except queue.Empty:
                pass
            except BrokenPipeError:
                break

        # Teardown: final ACK followed by FIN/ACK.
        lack = TCP(sport=self.src_port, dport=self.dest_port, flags='A',
                   seq=l_seq, ack=r_seq)
        self.outgoing_packets.append(lack)
        fack = TCP(sport=self.src_port, dport=self.dest_port, flags='FA',
                   seq=l_seq, ack=r_seq)
        self.outgoing_packets.append(fack)

        self.log.info("Closed connection to %s:%d", self.dest_host, self.dest_port)

    def receive(self, packet):
        # Called by SOCKS5Server with a TCP segment read off the tunnel.
        self.tunnel_in_queue.put(packet)
class SOCKS5Server:
    """Minimal SOCKS5 front-end whose traffic rides an OpenVPN tunnel.

    Listens on (host, port) for SOCKS clients; each accepted CONNECT
    spawns a SOCKS5Connection thread.  The instance itself is used as
    the pyopenvpn Client callback: every call drains inbound tunnel
    packets, flushes queued outbound segments, and accepts at most one
    new SOCKS client.
    """

    def __init__(self, host, port):
        self.log = logging.getLogger('SOCKS5')
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((host, port))
        self.sock.listen(100)
        self.server_addr = ipaddress.IPv4Address(host)
        self.server_port = port
        # local tunnel-side port -> SOCKS5Connection
        self.connections = dict()

    def find_free_port(self):
        """Return a random unused local port (1024-65534) for a new
        tunnel-side session."""
        usable = list(range(1024, 65535))
        random.shuffle(usable)
        for p in usable:
            if p not in self.connections:
                return p
        raise Exception("Too many connections, no free port")

    def handle_connection(self, sock, stream):
        """Parse a SOCKS5 request from *stream* (RFC 1928) and, for
        CONNECT, start a SOCKS5Connection; BIND/UDP are unsupported."""
        version = stream.read(1)[0]
        assert version == 5
        command = stream.read(1)[0]
        reserved = stream.read(1)[0]  # RFC 1928 reserved byte, unused
        address_type = stream.read(1)[0]
        if address_type == 1:
            # IPv4: four raw bytes.
            data = stream.read(4)
            address = '.'.join(str(b) for b in data)
        elif address_type == 3:
            # Domain name: length-prefixed, resolved locally.
            length = stream.read(1)[0]
            try:
                address = socket.gethostbyname(stream.read(length).decode('utf-8'))
            except socket.gaierror:
                # Reply 0x04: host unreachable.
                response = b'\x05\x04\x00\x01'
                response += self.server_addr.packed
                response += self.server_port.to_bytes(2, 'big')
                sock.sendall(response)
                return
        elif address_type == 4:
            # IPv6: sixteen raw bytes.
            data = stream.read(16)
            address = ':'.join('%x' % b for b in data)
        else:
            raise Exception("Got unsupported address type: 0x%x" % address_type)
        dest_host = address
        dest_port = int.from_bytes(stream.read(2), 'big')

        if command == 1:
            # CONNECT: reply "succeeded" and spawn the worker thread.
            response = b'\x05\x00\x00\x01'
            response += self.server_addr.packed
            response += self.server_port.to_bytes(2, 'big')
            sock.sendall(response)

            port = self.find_free_port()
            self.log.info("Opening connection from port %d to %s:%d",
                          port, dest_host, dest_port)
            c = SOCKS5Connection(self, sock, port, dest_host, dest_port)
            c.start()
            self.connections[port] = c
        elif command == 2:
            # BIND
            raise NotImplementedError()
        elif command == 3:
            # UDP
            raise NotImplementedError()
        else:
            # BUG FIX: the message previously interpolated address_type.
            raise Exception("Got unsupported command: 0x%x" % command)

    def __call__(self, client):
        """Per-loop tunnel callback: dispatch inbound TCP segments,
        flush outbound queues, accept at most one new SOCKS client."""
        # Drain everything the tunnel has for us.
        while True:
            incoming = client.recv_data()
            if not incoming:
                break
            ip = incoming
            tcp = incoming.payload
            if not isinstance(tcp, TCP):
                self.log.debug("Ignored: not TCP")
                continue
            local_port = tcp.dport
            if local_port not in self.connections:
                self.log.debug("Ignored: no connection at port %d", local_port)
                continue
            c = self.connections[local_port]
            if not c or not c.running:
                self.log.debug("Ignored: dead connection at port %d", local_port)
                continue
            if ip.src != c.dest_host:
                self.log.debug("Ignored: connection host not matching on port %d (%s / %s)",
                               local_port, ip.src, c.dest_host)
                continue
            data = bytes(tcp.payload)
            self.log.debug("Received %d bytes -> port %d", len(data), local_port)
            c.receive(tcp)

        # Flush outbound segments queued by the connection threads.
        for local_port, c in self.connections.items():
            for packet in c.outgoing_packets:
                self.log.debug("Sending packet from port %d, %r", local_port, packet)
                p = IP(src=client.tunnel_ipv4, dst=c.dest_host) / packet
                client.send_data(bytes(p))
            c.outgoing_packets = []

        # Accept at most one new SOCKS client per callback.
        try:
            self.sock.settimeout(0.1)
            rsock, _ = self.sock.accept()

            # Method negotiation: read the greeting, answer "no auth".
            hello = rsock.recv(2)
            assert hello[0] == 5
            methods = rsock.recv(hello[1])  # offered methods, ignored
            rsock.sendall(b'\x05\x00')

            buffer = rsock.recv(2048)
            print(repr(buffer))  # NOTE(review): debug leftover; confirm
            if buffer:
                self.handle_connection(rsock, io.BytesIO(buffer))
        except socket.timeout:
            pass
def main():
    """CLI entry point: parse arguments, then run a pyopenvpn Client
    with a SOCKS5Server instance as its per-loop callback."""
    logging.basicConfig(level=logging.DEBUG,
                        format="%(levelname)-5s:%(name)-8s: %(message)s")

    parser = ArgumentParser()
    parser.add_argument('config_file', help="OpenVPN configuration file")
    parser.add_argument('host')
    parser.add_argument('port', type=int, default=9000)
    # NOTE(review): -i/-W/-c are parsed but never read below —
    # presumably leftovers from a ping-style example; confirm before
    # removing.
    parser.add_argument('-i', dest='interval', default=1, metavar='interval', type=int)
    parser.add_argument('-W', dest='timeout', default=5, metavar='timeout', type=int)
    parser.add_argument('-c', dest='count', default=0, metavar='count', type=int)
    args = parser.parse_args()

    # SOCKS5Server doubles as the tunnel traffic callback.
    c = Client(Settings.from_file(args.config_file), SOCKS5Server(args.host, args.port))
    c.run()


if __name__ == '__main__':
    main()
|
alexandriagroup/fnapy | tests/offline/test_manager.py | Python | mit | 549 | 0.001825 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
| #
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
# Third-party modules
import pytest
# Projects modules
from fnapy.fnapy_manager import FnapyManager
def test_manager_raises_TypeError_with_invalid_connection():
    """FnapyManager should raise a TypeError when the connection is not a FnapyConnection"""
    with pytest.raises(TypeError):
        # A plain dict carries the right keys but is not a
        # FnapyConnection, so construction must fail.
        connection = {'partner_id': 'XXX', 'shop_id': 'XXX', 'key': 'XXX'}
        manager = FnapyManager(connection)
|
aarontuor/antk | test/test_transforms.py | Python | mit | 4,376 | 0.006627 | import antk.core.loader as loader
import numpy as np
import scipy.sparse as sps
"""
:any:`center`
:any:`l1normalize`
:any:`l2normalize`
:any:`pca_whiten`
:any:`tfidf`
:any:`unit_variance`
"""
# Shared fixtures: a 3x3 dense matrix and its CSR sparse counterpart,
# used by every normalization test below.
x = np.array([[0.0, 0.0, 6.0],
              [2.0, 4.0, 2.0],
              [2.0, 6.0, 0.0]])
y = sps.csr_matrix(x)
# numpy.testing.assert_array_almost_equal
def test_l1_dense_test_axis0():
    # Column sums of x are (4, 10, 8).
    expected = np.array([[0.0, 0.0, 0.75],
                         [0.5, 0.4, 0.25],
                         [0.5, 0.6, 0.0]])
    assert np.array_equal(loader.l1normalize(x, axis=0), expected)


def test_l1_sparse_test_axis0():
    expected = np.array([[0.0, 0.0, 0.75],
                         [0.5, 0.4, 0.25],
                         [0.5, 0.6, 0.0]])
    assert np.array_equal(loader.l1normalize(y, axis=0), expected)


def test_l1_dense_test_axis1():
    # Row sums of x are (6, 8, 8).
    expected = np.array([[0.0, 0.0, 1.0],
                         [0.25, 0.5, 0.25],
                         [0.25, 0.75, 0.0]])
    assert np.array_equal(loader.l1normalize(x, axis=1), expected)


def test_l1_sparse_test_axis1():
    expected = np.array([[0.0, 0.0, 1.0],
                         [0.25, 0.5, 0.25],
                         [0.25, 0.75, 0.0]])
    assert np.array_equal(loader.l1normalize(y, axis=1), expected)
# The l2 tests below are disabled: they wrap
# np.testing.assert_array_almost_equal (which returns None and raises on
# mismatch) in an outer `assert`, so they would always fail with
# `assert None`.  Re-enable by calling the np.testing helper directly.
# Note also that the "sparse" variant passes the dense `x`, not `y`.
# def test_l2_dense_test_axis0():
#     assert np.testing.assert_array_almost_equal(loader.l2normalize(x, axis=0),
#                                np.array([[0.0, 0.0, 3.0 / np.sqrt(10.0)],
#                                          [1.0 / np.sqrt(2.0), 2.0 / np.sqrt(13.0), 1.0 / np.sqrt(10.0)],
#                                          [1.0 / np.sqrt(2.0), 3.0 / np.sqrt(13.0), 0.0]]), decimal=5)
#
#
# def test_l2_sparse_test_axis0():
#     assert np.testing.assert_array_almost_equal(loader.l2normalize(x, axis=0),
#                                                 np.array([[0.0, 0.0, 3.0 / np.sqrt(10.0)],
#                                                           [1.0 / np.sqrt(2.0), 2.0 / np.sqrt(13.0),
#                                                            1.0 / np.sqrt(10.0)],
#                                                           [1.0 / np.sqrt(2.0), 3.0 / np.sqrt(13.0), 0.0]]), decimal=5)
def test_max_dense_test_axis0():
    # Column maxima of x are (2, 6, 6).
    expected = [[0.0, 0.0, 1.0],
                [1.0, 2.0 / 3.0, 1.0 / 3.0],
                [1.0, 1.0, 0.0]]
    assert np.array_equal(loader.maxnormalize(x, axis=0), expected)


def test_max_sparse_test_axis0():
    expected = [[0.0, 0.0, 1.0],
                [1.0, 2.0 / 3.0, 1.0 / 3.0],
                [1.0, 1.0, 0.0]]
    assert np.array_equal(loader.maxnormalize(y, axis=0), expected)


def test_max_dense_test_axis1():
    # Row maxima of x are (6, 4, 6).
    expected = [[0.0, 0.0, 1.0],
                [0.5, 1, 0.5],
                [1.0 / 3.0, 1.0, 0.0]]
    assert np.array_equal(loader.maxnormalize(x, axis=1), expected)


def test_max_sparse_test_axis1():
    expected = [[0.0, 0.0, 1.0],
                [0.5, 1, 0.5],
                [1.0 / 3.0, 1.0, 0.0]]
    assert np.array_equal(loader.maxnormalize(y, axis=1), expected)
def test_center_dense_test():
    # Global centering: the overall mean must vanish.
    np.testing.assert_array_almost_equal(
        loader.center(x, axis=None).mean(axis=None), 0.0)


def test_center_dense_test_axis0():
    # Per-column centering: the column means must sum to zero.
    col_means = loader.center(x, axis=0).mean(axis=0)
    np.testing.assert_array_almost_equal(np.sum(col_means), 0.0)


def test_center_dense_test_axis1():
    # Per-row centering: the row means must sum to zero.
    row_means = loader.center(x, axis=1).mean(axis=1)
    np.testing.assert_array_almost_equal(np.sum(row_means), 0.0)


def test_center_sparse_test():
    np.testing.assert_array_almost_equal(
        np.sum(loader.center(y, axis=None).mean(axis=None)), 0.0)


def test_center_sparse_test_axis0():
    col_means = loader.center(y, axis=0).mean(axis=0)
    np.testing.assert_array_almost_equal(np.sum(col_means), 0.0)


def test_center_sparse_test_axis1():
    row_means = loader.center(y, axis=1).mean(axis=1)
    np.testing.assert_array_almost_equal(np.sum(row_means), 0.0)
def test_unit_variance_dense_test():
    # Global scaling: overall standard deviation becomes 1.
    np.testing.assert_array_almost_equal(
        loader.unit_variance(x, axis=None).std(axis=None), 1.0)


def test_unit_variance_sparse_test_axis0():
    # Three columns, each scaled to unit std, so the stds sum to 3.
    col_stds = loader.unit_variance(y, axis=0).std(axis=0)
    np.testing.assert_array_almost_equal(np.sum(col_stds), 3.0)


def test_unit_variance_sparse_test_axis1():
    # Three rows, each scaled to unit std, so the stds sum to 3.
    row_stds = loader.unit_variance(y, axis=1).std(axis=1)
    np.testing.assert_array_almost_equal(np.sum(row_stds), 3.0)
|
praxeo/outcumbent | Outcumbent/settings.py | Python | gpl-2.0 | 2,140 | 0.000467 | """
Django settings for Outcumbent project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = ('templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3&@zwum@#!0f+g(k-pvlw#9n05t$kuz_5db58-02739t+u*u(r'
# SECURITY WARNING: don't run with debu | g turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'outcumbent',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Outcumbent.urls'
WSGI_APPLICATION = 'Outcumbent.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'outcumbentdb',
'USER': 'root',
#'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
patrick91/pycon | backend/blog/migrations/0003_auto_20191130_0913.py | Python | mit | 967 | 0.003102 | # Generated by Django 2.2.7 on 2019-11-30 09:13
from django.db import migrations
import i18n.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20190809_2128'),
]
operations = [
migrations.AlterField(
model_name='post',
name='content',
field=i18n.fields.I18nTextField(blank=True, verbose_name='content'),
),
m | igrations.AlterField(
model_name='post',
name='excerpt',
field=i18n.fields.I18nTextField(verbose_name='excerpt'),
),
migrations.AlterField(
model_name='post',
name='slug',
field=i18n.fields.I18nCharField(blank=True, max_length=200, verbose_name='slug'),
),
| migrations.AlterField(
model_name='post',
name='title',
field=i18n.fields.I18nCharField(max_length=200, verbose_name='title'),
),
]
|
majerteam/sqla_inspect | sqla_inspect/py3o.py | Python | gpl-3.0 | 12,289 | 0.000489 | # -*- coding: utf-8 -*-
# * Authors:
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
# * TJEBBES Gaston <g.t@majerti.fr>
"""
Py3o exporters
>>> model = Company.query().first()
>>> template = Template.query().first()
>>> odt_file_datas = compile_template(model, template.data_obj)
"""
from __future__ import absolute_import
from io import BytesIO
from xml.sax.saxutils import escape
from sqlalchemy.orm import (
ColumnProperty,
RelationshipProperty,
)
from genshi.core import Markup
from py3o.template import Template
from sqla_inspect.base import (
BaseSqlaInspector,
)
from sqla_inspect.export import (
format_value,
)
from sqla_inspect.ascii import (
force_unicode,
)
from sqla_inspect.py3o_tmpl import CONTENT_TMPL
def format_py3o_val(value):
    """Return *value* as py3o-ready markup.

    The value is coerced to unicode, XML-escaped, and newlines are turned
    into ODF line-break elements so they survive in the rendered document.
    """
    escaped = escape(force_unicode(value))
    return Markup(escaped.replace(u'\n', u'<text:line-break/>'))
class SqlaContext(BaseSqlaInspector):
"""
Provide a tool to build a context dict based on a given model. The datas are
built following the informations retrieved from the model's declaration.
Custom configuration can be achieved by customizing the info dict attribute
from each column.
config_key
The key in the info dict we will look for.
Actually handles the following informations :
exclude : should the column be excluded from the output
name : the key in the resulting dict
>>> serializer = SqlaContext(Company)
>>> company = Company.get(263)
>>> res = s.compile_obj(company)
:param model: a SQLA model
"""
config_key = 'py3o'
    def __init__(self, model, rels=None, **kw):
        """Build a serialization context for *model*.

        :param model: a SQLA model (or mapper for nested contexts)
        :param rels: list of primaryjoin strings already handled upstream;
            NOTE: when provided it is aliased (not copied) on purpose so
            nested contexts share the visited-relationship registry.
        """
        BaseSqlaInspector.__init__(self, model, **kw)
        # We store the relations already treated by storing the primaryjoin that
        # they use, since the backref uses the same join string, we can avoid
        # recursive collection
        self.rels = rels or []
        self.columns = self.collect_columns()
    def collect_columns(self):
        """
        Collect columns information from a given model.
        a column info contains
            the py3o informations
            exclude
                Should the column be excluded from the current context ?
            name
                the name of the key in the resulting py3o context of the
                column
        __col__
            The original column object
        __prop__
            In case of a relationship, the SqlaContext wrapping the given
            object
        """
        res = []
        for prop in self.get_sorted_columns():
            info_dict = self.get_info_field(prop)
            export_infos = info_dict.get('export', {}).copy()
            main_infos = export_infos.get(self.config_key, {}).copy()
            # Honour the generic export "exclude" flag unless the
            # py3o-specific config explicitly re-enables the column.
            if export_infos.get('exclude'):
                if main_infos.get('exclude', True):
                    continue
            # py3o-specific keys override the generic export keys.
            infos = export_infos
            infos.update(main_infos)
            # If the "name" key is not defined, default it to the column name.
            infos.setdefault('name', prop.key)
            infos['__col__'] = prop
            if isinstance(prop, RelationshipProperty):
                join = str(prop.primaryjoin)
                # Skip relationships whose join was already visited: the
                # backref shares the same join string, so this prevents
                # infinite recursion.
                if join in self.rels:
                    continue
                else:
                    self.rels.append(str(join))
                infos['__prop__'] = SqlaContext(
                    prop.mapper,
                    rels=self.rels[:]
                )
            res.append(infos)
        return res
def make_doc(self):
"""
Generate the doc for the current context in the form
{'key': 'label'}
"""
res = {}
for column in self.columns:
if isinstance(column['__col__'], ColumnProperty):
key = column['name']
label = column['__col__'].columns[0].info.get(
'colanderalchemy', {}
).get('title')
if label is None:
continue
res[key] = label
elif isinstance(column['__col__'], RelationshipProperty):
# 1- si la relation est directe (une AppOption), on override le
# champ avec la valeur (pour éviter des profondeurs)
# 2- si l'objet lié est plus complexe, on lui fait son propre
# chemin
# 3- si la relation est uselist, on fait une liste d'élément
# liés qu'on place dans une clé "l" et on place l'élément lié
# dans une clé portant le nom de son index
key = column['name']
label = column['__col__'].info.get(
'colanderalchemy', {}
).get('title')
if label is N | one:
continue
if column['__col__'].uselist:
subres = column['__prop__'].make_doc()
for subkey, value in subres.items():
| new_key = u"%s.first.%s" % (key, subkey)
res[new_key] = u"%s - %s (premier élément)" % (
label, value
)
new_key = u"%s.last.%s" % (key, subkey)
res[new_key] = u"%s - %s (dernier élément)" % (
label, value
)
else:
subres = column['__prop__'].make_doc()
for subkey, value in subres.items():
new_key = u"%s.%s" % (key, subkey)
res[new_key] = u"%s - %s" % (label, value)
print("------------------ Rendering the docs -------------------")
keys = res.keys()
keys.sort()
for key in keys:
value = res[key]
print(u"{0} : py3o.{1}".format(value, key))
return res
def gen_xml_doc(self):
"""
Generate the text tags that should be inserted in the content.xml of a
full model
"""
res = self.make_doc()
var_tag = """
<text:user-field-decl office:value-type="string"
office:string-value="%s" text:name="py3o.%s"/>"""
text_tag = """<text:p text:style-name="P1">
<text:user-field-get text:name="py3o.%s">%s</text:user-field-get>
</text:p>
"""
keys = res.keys()
keys.sort()
texts = ""
vars = ""
for key in keys:
value = res[key]
vars += var_tag % (value, key)
texts += text_tag % (key, value)
return CONTENT_TMPL % (vars, texts)
def _get_formatted_val(self, obj, attribute, column):
"""
Return the formatted value of the attribute "attribute" of the obj "obj"
regarding the column's description
:param obj obj: The instance we manage
:param str attribute: The string defining the path to access the end
attribute we want to manage
:param dict column: The column description dictionnary
:returns: The associated value
"""
attr_path = attribute.split('.')
val = None
tmp_val = obj
for attr in attr_path:
tmp_val = getattr(tmp_val, attr, None)
if tmp_val is None:
break
if tmp_val is not None:
val = tmp_val
value = format_value(column, val, self.config_key)
return format_py3o_val(value)
    def _get_column_value(self, obj, column):
        """
        Return a single cell's formatted value.

        Convenience wrapper around ``_get_formatted_val`` using the column's
        own attribute key as the lookup path.

        :param obj obj: The instance we manage
        :param dict column: The column description dictionary
        :returns: The associated formatted value
        """
        return self._get_formatted_val(obj, column['__col__'].key, column)
def _get_to_many_relationship_value(self, obj, column): |
naught101/sobol_seq | sobol_seq/__init__.py | Python | mit | 374 | 0.002674 | """
Sobol sequence generator.
https://github.com/naught101/sobol_seq
"""
from .sobol_seq import i4_sobol_generate, i4_uniform, i4_sobol, i4_sobol_generate_std_normal
from .sobol_seq import i4_bit_hi1, i4_bit_lo0, prime_ge
__all__ = ["i4_sobol_generate", "i4_uniform", "i4_sobol", "i4_bit_hi1",
           "i4_bit_lo0", "prime_ge", "i4_sobol_generate_std_normal"]
|
tcstewar/spinnbot | plot_lr_1.py | Python | gpl-2.0 | 3,783 | 0.011102 | import pylab
pylab.figure(figsize=(8,4))
pylab.axes((0.11, 0.13, 0.85, 0.8))
color=['k', 'b', 'g', 'r']
pylab.plot([0, 1, 2, 3, 4, 5, 6, 7],[0.45077721721559999, 0.40372168451659995, 0.38063819377489994, 0.36765218894180002, 0.36047701604800009, 0.34854098046839999, 0.33848337337579998, 0.32900642344309999], label='bump', color=color[0], linewidth=2)
pylab.errorbar([0, 1, 2, 3, 4, 5, 6, 7],[0.45077721721559999, 0.40372168451659995, 0.38063819377489994, 0.36765218894180002, 0.36047701604800009, 0.34854098046839999, 0.33848337337579998, 0.32900642344309999],yerr=[[-0.013265381779199947, -0.013437916614800094, -0.0044647911065000545, -0.001681967947199936, -0.0014084514488998945, -0.00077153864980000764, -0.00066725146120005796, -0.00046413902760006343],[-0.012064005172299996, -0.0098137054201999918, -0.0043855462533999301, -0.0016006322225000091, -0.0015216810068000575, -0.0009909929340999879, -0.00078496901829999688, -0.0004114867839999925]], color=color[0])
pylab.plot([0, 1, 2, 3, 4, 5, 6, 7],[0.41341129187800008, 0.37749449837800003, 0.34623771137129999, 0.3111087466461, 0.29583614024819999, 0.27249395370759999, 0.25184347208939994, 0.23262005669969996], color=color[1], linewidth=2, label='bump, vision')
pylab.errorbar([0, 1, 2, 3, 4, 5, 6, 7],[0.41341129187800008, 0.37749449837800003, 0.34623771137129999, 0.3111087466461, 0.29583614024819999, 0.27249395370759999, 0.25184347208939994, 0.23262005669969996],yerr=[[-0.027785266776099937, -0.015515430454699997, -0.0087409518771000005, -0.0054220716469000063, -0.0029500679904999605, -0.0019725006428999992, -0.0024776339118000035, -0.0016884565214000113],[-0.027177436092700125, -0.013854861098000104, -0.0078351510641000432, -0.0044292627604999857, -0.0033514177269999834, -0.0013415376437999615, -0.0019758213936999502, -0.0013266185552999743]], color=color[1])
pylab.plot([0, 1, 2, 3, 4, 5, 6, 7],[0.36044984271919994, 0.32797571307869999, 0.30232267850920003, 0.28995787213160007, 0.27707500186599998, 0.26403148906309992, 0.25519789108169999, 0.24432621429909998], color=color[2], linewidth=2, label='bump, movement')
pylab.errorbar([0, 1, 2, 3, 4, 5, 6, 7],[0.36044984271919994, 0.32797571307869999, 0.30232267850920003, 0.28995787213160007, 0.27707500186599998, 0.26403148906309992, 0.25519789108169999, 0.24432621429909998],yerr=[[-0.027047134775899984, -0.017672144615600016, -0.003892905421899906, -0.0040572456741999607, -0.0032339877449999999, -0.0027035312431000769, -0.0065918208782000387, -0.0028030154090999682],[-0.015704226333299987, -0.011571601761000028, -0.0034103611732000938, -0.0039626564557000421, -0.0047592739836000431, -0.0027847225884999327, -0.0058723607012999834, -0.0027977928138000208]], color=color[2])
pylab.errorbar([0, 1, 2, 3, 4, 5, 6, 7],[0.35800996797899998, 0.31101486767409997, 0.25402347910849998, 0.23721116673370002, 0.216113753666, 0.19624479101880002, 0.18897807754690002, 0.1771350849686],yerr=[[-0.036046588185100037, -0.0074319024362999864, -0.0041195536681000178, -0.0079736550924999761, -0.0037913736615999838, -0.0031418788213999627, -0.0022935152658999647, -0.0039125769192999704],[-0.028557083645299963, -0.0088217753404999932, -0.0043254259892999414, -0.0039052101355000202, -0.0023816627960999981, -0.0026435380838000233, -0.0024196880487000194, -0.0046501691346000074]], color=color[3])
# Positions 0-7 on the x axis stand in for the (log-spaced) neuron counts.
pylab.xticks([0,1,2,3,4,5,6,7], [10,20,50,100,200,500,1000,2000])
pylab.xlim(0,7)
pylab.legend(loc='upper right')
pylab.xlabel('number of neurons')
pylab.ylabel('error')
# Write the PNG to disk, then display the figure interactively.
pylab.savefig('lr_1.png', dpi=600)
pylab.show()
dementrock/nbgrader | nbgrader/formgrader/formgrade.py | Python | bsd-3-clause | 14,959 | 0.002206 | import json
import os
from functools import wraps
from flask import Flask, request, abort, redirect, url_for, render_template, \
send_from_directory, Blueprint, g, make_response
from ..api import MissingEntry
app = Flask(__name__, static_url_path='')
blueprint = Blueprint('formgrade', __name__)
def auth(f):
    """Decorator requiring authentication for a Flask view.

    ``app.auth.authenticate()`` may return True (allow the request),
    False (forbidden) or a response/redirect object that is sent back
    to the client as-is.
    """
    @wraps(f)
    def authenticated(*args, **kwargs):
        result = app.auth.authenticate()
        if result is False:
            abort(403)  # Forbidden
        if result is not True:
            return result  # Redirect or custom response
        return f(*args, **kwargs)
    return authenticated
def set_index(url, request):
    """Propagate the ``index`` query parameter from *request* onto *url*.

    Repairs an extraction artifact that had corrupted the format string
    (``?in | dex``) and the ``else`` line.
    """
    if 'index' in request.args:
        return "{}?index={}".format(url, request.args.get('index'))
    else:
        return url
@app.errorhandler(500)
def internal_server_error(e):
    # Render the formgrader's custom error page instead of Flask's default.
    return render_template(
        'gradebook_500.tpl',
        base_url=app.auth.base_url,
        error_code=500), 500
@app.errorhandler(502)
def upstream_server_error(e):
    # 502 reuses the 500 template with a different error code.
    return render_template(
        'gradebook_500.tpl',
        base_url=app.auth.base_url,
        error_code=502), 502
@blueprint.errorhandler(403)
def unauthorized(e):
    # Raised by the auth decorator when authentication fails outright.
    return render_template(
        'gradebook_403.tpl',
        base_url=app.auth.base_url,
        error_code=403), 403
@blueprint.url_defaults
def bp_url_defaults(endpoint, values):
    # Inject the name stored on flask.g into generated URLs so url_for()
    # callers don't have to pass it explicitly.
    name = getattr(g, 'name', None)
    if name is not None:
        values.setdefault('name', name)
@blueprint.url_value_preprocessor
def bp_url_value_preprocessor(endpoint, values):
    # Pop the name out of the matched URL values and stash it on flask.g
    # for the duration of the request (see bp_url_defaults above it is
    # re-injected when building URLs).
    g.name = values.pop('name')
@blueprint.route("/static/<path:filename>")
def static_proxy(filename):
return send_from_directory(os.path.join(app.root_path, 'static'), filename)
@blueprint.route("/fonts/<filename>")
def fonts(filename):
return redirect(url_for('.static_proxy', filename=os.path.join("components", "bootstrap", "fonts", filename)))
@blueprint.route("/submissions/components/<path:filename>")
@auth
def components(filename):
return redirect(url_for('.static_proxy', filename=os.path.join("components", filename)))
@blueprint.route("/mathjax/<path:filename>")
@auth
def mathjax(filename):
return send_from_directory(os.path.dirname(app.mathjax_url), filename)
@blueprint.route("/")
@auth
def home():
return redirect(url_for('.view_assignments'))
@blueprint.route("/assignments/")
@auth
def view_assignments():
assignments = []
for assignment in app.gradebook.assignments:
x = assignment.to_dict()
x["average_score"] = app.gradebook.average_assignment_score(assignment.name)
x["average_code_score"] = app.gradebook.average_assignment_code_score(assignment.name)
x["average_written_score"] = app.gradebook.average_assignment_written_score(assignment.name)
assignments.append(x)
return render_template(
"assignments.tpl",
assignments=assignments,
base_url=app.auth.base_url)
@blueprint.route("/students/")
@auth
def view_students():
students = app.gradebook.student_dicts()
students.sort(key=lambda x: x.get("last_name") or "no last name")
return render_template(
"students.tpl",
students=students,
base_url=app.auth.base_url)
@blueprint.route("/assignments/<assignment_id>/")
@auth
def view_assignment(assignment_id):
try:
assignment = app.gradebook.find_assignment(assignment_id)
except MissingEntry:
abort(404)
notebooks = []
for notebook in assignment.notebooks:
x = notebook.to_dict()
x["average_score"] = app.gradebook.average_notebook_score(notebook.name, assignment.name)
x["average_code_score"] = app.gradebook.average_notebook_code_score(notebook.name, assignment.name)
x["average_written_score"] = app.gradebook.average_notebook_written_score(notebook.name, assignment.name)
notebooks.append(x)
assignment = assignment.to_dict()
return render_template(
"assignment_notebooks.tpl",
assignment=assignment,
notebooks=notebooks,
base_url=app.auth.base_url)
@blueprint.route("/students/<student_id>/")
@auth
def view_student(student_id):
try:
student = app.gradebook.find_student(student_id)
except MissingEntry:
abort(404)
submissions = []
for assignment in app.gradebook.assignments:
try:
submission = app.gradebook.find_submission(assignment.name, student.id).to_dict()
except MissingEntry:
submission = {
"id": None,
"name": assignment.name,
"student": student.id,
"duedate": None,
"timestamp": None,
"extension": None,
"total_seconds_late": 0,
"score": 0,
"max_score": assignment.max_score,
"code_score": 0,
"max_code_score": assignment.max_code_score,
"written_score": 0,
"max_written_score": assignment.max_written_score,
"needs_manual_grade": False
}
submissions.append(submission)
submissions.sort(key=lambda x: x.get("duedate") or "no due date")
student = student.to_dict()
return render_template(
"student_assignments.tpl",
assignments=submissions,
student=student,
base_url=app.auth.base_url)
@blueprint.route("/assignments/<assignment_id>/<notebook_id>/")
@auth
def view_assignment_notebook(assignment_id, notebook_id):
try:
app.gradebook.find_notebook(notebook_id, assignment_id)
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submission_dicts(notebook_id, assignment_id)
submissions.sort(key=lambda x: x["id"])
for i, submission in enumerate(submissions):
submission["index"] = i
return render_template(
"notebook_submissions.tpl",
notebook_id=notebook_id,
assignment_id=assignment_id,
submissions=submissions,
base_url=app.auth.base_url)
@blueprint.route("/students/<student_id>/<assignment_id>/")
@auth
def view_student_assignment(student_id, assignment_id):
try:
assignment = app.gradebook.find_submission(assignment_id, student_id)
except MissingEntry:
abort(404)
submissions = [n.to_dict() for n in assignment.notebooks]
submissions.sort(key=lambda x: x['name'])
return render_template(
"student_submissions.tpl",
assignment_id=assignment_id,
student=assignment.student.to_dict(),
submissions=submissions,
base_url=app.auth.base_url
)
@blueprint.route("/submissions/<submission_id>/<path:path>")
@auth
def view_submission_files(submission_id, path):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
student_id = submission.student.id
except MissingEntry:
abort(404)
dirname = os.path.join(app.notebook_dir, app.notebook_dir_format.format(
nbgrader_step=app.nbgrader_step,
assignment_id=assignment_id,
student_id=student_id))
return send_from_directory(dirname, path)
@blueprint.route("/submissions/<submission_id>/next")
@auth
def view_next_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
notebook_id = submission.notebook.name
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submissions(notebook_id, assignment_id)
# find next submission
submission_ids = sorted([x.id for x in submissions])
ix = submission_ids.index(submission.id)
if ix == (len(submissions) - 1):
return redirect(url_for('.view_assignment_notebook', assignment_id=assignment_id, notebook_id=notebook_id))
else:
return redirect(set_index(
url_for('.view_submission', submission_id=submission_ids[ix + 1]), request))
@blueprint.route( |
nathanbjenx/cairis | cairis/bin/at2om.py | Python | apache-2.0 | 5,006 | 0.014183 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pydot
import os
__author__ = 'Shamal Faily'
def dotToObstacleModel(graph,contextName,originatorName):
goals = []
goalNames = set([])
obstacles = []
acs = {}
for node in graph.get_nodes():
nodeShape = node.get_shape()
nodeStyle = str(node.get_style())
if nodeShape == 'box' and nodeStyle == 'rounded':
obstacles.append(node.get_name())
elif nodeShape == 'box' and nodeStyle == 'None':
nodeName = node.get_name()
if (nodeName != 'node' and nodeName != 'edge'):
goals.append(node.get_name())
goalNames.add(node.get_name())
elif nodeShape == 'triangle':
acs[node.get_name()] = node.get_label()
xmlBuf = '<?xml version="1.0"?>\n<!DOCTYPE cairis_model PUBLIC "-//CAIRIS//DTD MODEL 1.0//EN" "http://cairis.org/dtd/cairis_model.dtd">\n\n<cairis_model>\n\n'
xmlBuf += '<cairis>\n <project_settings name="' + contextName + '">\n <contributors>\n <contributor first_name="None" surname="None" affiliation="' + originatorName + '" role="Scribe" />\n </contributors>\n </project_settings>\n <environment name="' + contextName + '" short_code="' + contextName + '">\n <definition>' + contextName + '</definition>\n <asset_values>\n <none>TBC</none>\n <low>TBC</low>\n <medium>TBC</medium>\n <high>TBC</high>\n </asset_values>\n </environment>\n</cairis>\n\n<goals>\n'
for g in goals:
xmlBuf += ' <goal name=' + g + ' originator="' + originatorName + '">\n <goal_environment name="' + contextName + '" category="Maintain" priority="Medium">\n <definition>' + g + '</definition>\n <fit_criterion>TBC</fit_criterion>\n <issue>None</issue>\n </goal_environment>\n </goal>\n'
for o in obstacles:
    xmlBuf += ' <obstacle name=' + o + ' originator="' + originatorName + '">\n <obstacle_environment name="' + contextName + '" category="Threat">\n <definition>' + o + '</definition>\n </obstacle_environment>\n </obstacle>\n'
xmlBuf += '</goals>\n\n'
fromAssocs = []
  toAssocs = {}
assocs = []
for e in graph.get_edge_list():
fromName = e.get_source()
toName = e.get_destination()
if fromName in acs:
if fromName not in toAssocs:
toAssocs[fromName] = [toName]
else:
toAssocs[fromName].append(toName)
elif toName in acs:
fromAssocs.append((fromName,toName))
else:
if fromName in goalNames:
assocs.append(' <goal_association environment="' + contextName + '" goal_name=' + fromName + ' goal_dim="goal" ref_type="obstruct" subgoal_name=' + toName + ' subgoal_dim="obstacle" alternative_id="0">\n <rationale>None</rationale>\n </goal_association>\n')
else:
assocs.append(' <goal_association environment="' + contextName + '" goal_name=' + fromName + ' goal_dim="obstacle" ref_type="resolve" subgoal_name=' + toName + ' subgoal_dim="goal" alternative_id="0">\n <rationale>None</rationale>\n </goal_association>\n')
for fromName,toName in fromAssocs:
for subGoalName in toAssocs[toName]:
assocs.append(' <goal_association environment="' + contextName + '" goal_name=' + fromName + ' goal_dim="obstacle" ref_type=' + acs[toName] + ' subgoal_name=' + subGoalName + ' subgoal_dim="obstacle" alternative_id="0">\n <rationale>None</rationale>\n </goal_association>\n')
xmlBuf += '<associations>\n'
for assoc in assocs:
xmlBuf += assoc
xmlBuf += '</associations>\n\n</cairis_model>'
return xmlBuf
def main(args=None):
  """Convert a Graphviz attack tree into a CAIRIS obstacle model file.

  :param args: optional argument list (defaults to sys.argv when None)
  """
  parser = argparse.ArgumentParser(description='Attack Tree to CAIRIS Model converter')
  parser.add_argument('dotFile',help='attack tree model to import (Dot format)')
  parser.add_argument('--context',dest='contextName',help='attack context')
  parser.add_argument('--author',dest='originatorName',help='author/s')
  parser.add_argument('--out',dest='outFile',help='output file (CAIRIS format)')
  # Honour the args parameter (it was previously accepted but ignored) so
  # the function is scriptable; None preserves the sys.argv behaviour.
  args = parser.parse_args(args)
  dotInstance = pydot.graph_from_dot_file(args.dotFile)
  xmlBuf = dotToObstacleModel(dotInstance[0],args.contextName,args.originatorName)
  # Context manager guarantees the output file is closed even on error.
  with open(args.outFile,'w') as f:
    f.write(xmlBuf)
|
freeserver/readerss | grabber/engines/Sqlalchemy.py | Python | agpl-3.0 | 3,081 | 0.00779 | from engines.base import BaseDB
from models.model import Feed, Entry
from sqlalchemy import create_engine, Column, String, Integer, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class SqlalchemyDB(BaseDB):
    """
    The SQLAlchemy access implementation of BaseDB.

    Items are exposed to callers as plain dicts (``instance.__dict__``);
    ORM instances never leave this class.
    """
    def setup_database(self):
        """Create all mapped tables if they do not exist yet."""
        Base.metadata.create_all(self.engine)
    def __init__(self, connection_string):
        """Create the engine and open a session for *connection_string*."""
        self.engine = create_engine(connection_string)
        Session = sessionmaker(self.engine)
        self.session = Session()
    def get_item(self, table, id=None, **filters):
        """Return the first matching row as a dict, or None when no row matches.

        An explicit *id* takes precedence over any other filters.
        """
        table = item_converter(table)
        if id:
            filters = {'id': id}
        item = self.session.query(table).filter_by(**filters).first()
        # Previously .first().__dict__ was dereferenced unconditionally and
        # raised AttributeError whenever nothing matched.
        return None if item is None else item.__dict__
    def get_items(self, table, **filters):
        """Return every matching row as a list of dicts."""
        table = item_converter(table)
        # Materialize the result so callers get a reusable list on Python 3
        # (map() there returns a one-shot iterator).
        return [item.__dict__ for item in
                self.session.query(table).filter_by(**filters).all()]
    def add_item(self, table, data):
        """Insert or update a single item described by the dict *data*."""
        item_orm = item_converter(table, data)
        merged_item = self.session.merge(item_orm)
        self.session.add(merged_item)
        self.session.commit()
    def add_items(self, table, datas):
        """Insert or update several items in one commit."""
        items_orm = map(lambda item: item_converter(table, item), datas)
        merged_items = map(lambda item: self.session.merge(item), items_orm)
        for merged_item in merged_items:
            self.session.add(merged_item)
        self.session.commit()
    def remove_item(self, table, id=None, **filters):
        """Delete the first matching row, if any, and commit.

        Bug fix: the old implementation handed the dict returned by
        get_item() to session.delete(), which only accepts ORM instances,
        and it never committed the deletion.
        """
        table = item_converter(table)
        if id:
            filters = {'id': id}
        item = self.session.query(table).filter_by(**filters).first()
        if item is not None:
            self.session.delete(item)
            self.session.commit()
def item_converter(table, data=None):
    """
    Map a table name to its ORM model, optionally instantiating it.

    :param table: the table name
    :param data: optional dict used to build an instance of the model
    :returns: the model class, or an instance of it when *data* is truthy
    :raises Exception: when *table* names no known model
    """
    if table == SQLAFeed.TYPE:
        model_cls = SQLAFeed
    elif table == SQLAEntry.TYPE:
        model_cls = SQLAEntry
    else:
        raise Exception("Table type not found: {0}".format(table))
    if data:
        return model_cls(**data)
    return model_cls
class SQLAFeed(Feed, Base):
    """SQLAlchemy mapping for the Feed model.

    Repairs an extraction artifact that had corrupted the ``title``
    column declaration (``C | olumn``).
    """
    __tablename__ = Feed.TYPE
    id = Column(String, primary_key=True)
    url = Column(String)
    title = Column(String)
    subtitle = Column(String)
    updated = Column(DateTime(timezone=True))
    published = Column(DateTime(timezone=True))
    link = Column(String)
    last_accessed = Column(DateTime(timezone=True))
    minimum_wait = Column(Integer)
    errors = Column(Integer)
class SQLAEntry(Entry, Base):
    """SQLAlchemy mapping for the Entry model."""
    __tablename__ = Entry.TYPE
    id = Column(String, primary_key=True)
    feed_id = Column(String, nullable=False) # needs relationship setting up
    title=Column(String)
    summary=Column(String)
    link=Column(String)
    updated=Column(DateTime(timezone=True))
    published=Column(DateTime(timezone=True))
DATABASE=SqlalchemyDB
|
pythononwheels/pow_devel | pythononwheels/start/stubs/dash_handler_template.py | Python | mit | 2,667 | 0.012748 |
from {{appname}}.handlers.powhandler import PowHandler
from {{appname}}.conf.config impor | t myapp
from {{appname}}.lib.application import app
import simplejson as json
import tornado.web
from tornado import gen
from {{appnam | e}}.pow_dash import dispatcher
# Please import your model here. (from yourapp.models.dbtype)
@app.add_route("/dash.*", dispatch={"get" :"dash"})
@app.add_route("/_dash.*", dispatch={"get" :"dash_ajax_json", "post": "dash_ajax_json"})
class Dash(PowHandler):
#
# Sample dash handler to embedd dash into PythonOnWheels
#
def dash(self, **kwargs):
"""
This is the place where dash is called.
dispatcher returns the HMTL including title, css, scripts and config via => dash.Dash.index()
(See: in pow_dash.py => myDash.index)
You can then insert the returned HTML into your template.
I do this below in the self.render/self.success call => see base_dash.bs4 template (mustache like syntax)
"""
print("processing dash method")
#external_stylesheets = see config.py dash section
retval = dispatcher(self.request, username="fake", session_id=1234, index=True )
#
# this is the render template call which embeds the dash code (dash_block=retval)
# from dispatcher (see above)
self.set_header('Content-Type', "text/html")
self.render("dash_index.tmpl", dash_block=retval)
# self.success(template="index.tmpl", dash_block=retval, data=res )
def dash_ajax_json(self):
"""
respond to the dash ajax json / react request's
"""
print(" processing dash_ajax method")
#
# now hand over to the dispatcher
#
retval = dispatcher(self.request, index=False, username="fake", session_id=1234, powapp=self.application)
#self.set_header('Content-Type', 'application/json')
self.write(retval)
# def dash_ajax_assets(self):
# """
# respond to the dash ajax assets/ react request's
# """
# print(" processing dash_ajax_assets method")
# #
# # now hand over to the dispatcher
# #
# """Handle Dash requests and guess the mimetype. Needed for static files."""
# url = request.path.split('?')[0]
# content_type, _encoding = mimetypes.guess_type(url)
# retval = dispatcher(self.request, index=False, username="fake", session_id=1234, powapp=self.application)
# self.set_header('Content-Type', content_type)
# self.write(retval) |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/legacy/functional/test_pagination.py | Python | gpl-3.0 | 6,052 | 0.004296 | # encoding: utf-8
import re
from nose.tools import assert_equal
from ckan.lib.create_test_data import CreateTestData
import ckan.model as model
from ckan.tests.legacy import TestController, url_for, setup_test_search_index
def scrape_search_results(response, object_type):
    """Extract the two-digit object numbers linked from a search results page.

    ``group_dataset`` is an alias that scrapes ``dataset`` links (datasets
    listed on a group page).
    """
    assert object_type in ('dataset', 'group_dataset', 'group', 'user')
    # != rather than "is not": identity comparison of string literals is an
    # implementation detail (and a SyntaxWarning on modern Pythons).
    if object_type != 'group_dataset':
        results = re.findall(r'href="/%s/%s_(\d\d)"' % (object_type, object_type),
                             str(response))
    else:
        object_type = 'dataset'
        results = re.findall(r'href="/%s/%s_(\d\d)"' % (object_type, object_type),
                             str(response))
    return results
def test_scrape_user():
    # Regression check: scrape_search_results() must pull the two-digit
    # suffixes out of /user/user_NN links.
    html = '''
<li class="username">
<img src="//gravatar.com/avatar/d41d8cd98f00b204e9800998ecf8427e?s=16&amp;d=http://test.ckan.net/images/icons/user.png" /> <a href="/user/user_00">user_00</a>
</li>
...
<li class="username">
<img src="//gravatar.com/avatar/d41d8cd98f00b204e9800998ecf8427e?s=16&amp;d=http://test.ckan.net/images/icons/user.png" /> <a href="/user/user_01">user_01</a>
</li>
'''
    res = scrape_search_results(html, 'user')
    assert_equal(res, ['00', '01'])
class TestPaginationPackage(TestController):
    """Pagination of dataset search results and group dataset listings."""

    @classmethod
    def setup_class(cls):
        setup_test_search_index()
        model.repo.init_db()
        # no. entities per page is hardcoded into the controllers, so
        # create enough of each here so that we can test pagination
        cls.num_packages_in_large_group = 51
        packages = []
        for i in range(cls.num_packages_in_large_group):
            packages.append({
                # CS: nasty_string ignore
                'name': u'dataset_%s' % str(i).zfill(2),
                'groups': u'group_00'
            })
        CreateTestData.create_arbitrary(packages)

    @classmethod
    def teardown_class(cls):
        # First argument renamed self -> cls: as a classmethod this
        # receives the class object, matching setup_class.
        model.repo.rebuild_db()

    def test_package_search_p1(self):
        res = self.app.get(url_for(controller='package', action='search', q='groups:group_00'))
        assert 'href="/dataset?q=groups%3Agroup_00&amp;page=2"' in res
        pkg_numbers = scrape_search_results(res, 'dataset')
        assert_equal(['50', '49', '48', '47', '46', '45', '44', '43', '42', '41', '40', '39', '38', '37', '36', '35', '34', '33', '32', '31'], pkg_numbers)

    def test_package_search_p2(self):
        res = self.app.get(url_for(controller='package', action='search', q='groups:group_00', page=2))
        assert 'href="/dataset?q=groups%3Agroup_00&amp;page=1"' in res
        pkg_numbers = scrape_search_results(res, 'dataset')
        assert_equal(['30', '29', '28', '27', '26', '25', '24', '23', '22', '21', '20', '19', '18', '17', '16', '15', '14', '13', '12', '11'], pkg_numbers)

    def test_group_datasets_read_p1(self):
        res = self.app.get(url_for(controller='group', action='read', id='group_00'))
        assert 'href="/group/group_00?page=2' in res, res
        pkg_numbers = scrape_search_results(res, 'group_dataset')
        assert_equal(['50', '49', '48', '47', '46', '45', '44', '43', '42', '41', '40', '39', '38', '37', '36', '35', '34', '33', '32', '31'], pkg_numbers)

    def test_group_datasets_read_p2(self):
        res = self.app.get(url_for(controller='group', action='read', id='group_00', page=2))
        assert 'href="/group/group_00?page=1' in res, res
        pkg_numbers = scrape_search_results(res, 'group_dataset')
        assert_equal(['30', '29', '28', '27', '26', '25', '24', '23', '22', '21', '20', '19', '18', '17', '16', '15', '14', '13', '12', '11'], pkg_numbers)
class TestPaginationGroup(TestController):
    """Pagination of the group index page."""

    @classmethod
    def setup_class(cls):
        # no. entities per page is hardcoded into the controllers, so
        # create enough of each here so that we can test pagination
        cls.num_groups = 22
        # CS: nasty_string ignore
        groups = [u'group_%s' % str(i).zfill(2) for i in range(0, cls.num_groups)]
        CreateTestData.create_arbitrary(
            [], extra_group_names=groups
        )

    @classmethod
    def teardown_class(cls):
        # First argument renamed self -> cls to match the classmethod
        # decorator (and setup_class).
        model.repo.rebuild_db()

    def test_group_index(self):
        res = self.app.get(url_for(controller='group', action='index'))
        assert 'href="/group?q=&amp;sort=&amp;page=2"' in res, res
        grp_numbers = scrape_search_results(res, 'group')
        assert_equal(['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20'], grp_numbers)
        res = self.app.get(url_for(controller='group', action='index', page=2))
        assert 'href="/group?q=&amp;sort=&amp;page=1"' in res
        grp_numbers = scrape_search_results(res, 'group')
        assert_equal(['21'], grp_numbers)
class TestPaginationUsers(TestController):
    """Pagination of the user index page."""

    @classmethod
    def setup_class(cls):
        # no. entities per page is hardcoded into the controllers, so
        # create enough of each here so that we can test pagination
        cls.num_users = 21
        # CS: nasty_string ignore
        users = [u'user_%s' % str(i).zfill(2) for i in range(cls.num_users)]
        CreateTestData.create_arbitrary(
            [], extra_user_names = users,
        )

    @classmethod
    def teardown_class(cls):
        # First argument renamed self -> cls to match the classmethod
        # decorator (and setup_class).
        model.repo.rebuild_db()

    def test_users_index(self):
        res = self.app.get(url_for(controller='user', action='index'))
        assert 'href="/user?q=&amp;order_by=name&amp;page=2"' in res
        user_numbers = scrape_search_results(res, 'user')
        assert_equal(['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19'], user_numbers)
        res = self.app.get(url_for(controller='user', action='index', page=2))
        assert 'href="/user?q=&amp;order_by=name&amp;page=1"' in res
        user_numbers = scrape_search_results(res, 'user')
        assert_equal(['20'], user_numbers)
|
siosio/intellij-community | python/helpers/pycharm/_jb_pytest_runner.py | Python | apache-2.0 | 1,933 | 0.005173 | # coding=utf-8
# Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
import pytest
from distutils import version
import sys
from _pytest.config import get_plugin_manager
from pkg_resources import iter_entry_points
from _jb_runner_tools import jb_patch_separator, jb_doc_args, JB_DISABLE_BUFFERING, start_protocol, parse_arguments, \
set_parallel_mode
from teamcity import pytest_plugin
if __name__ == '__main__':
path, targets, additional_args = parse_arguments()
sys.argv += additional_args
joined_targets = jb_patch_separator(targets, fs_glue="/", python_glue="::", fs_to_python_glue=".py::")
# When file is launched in pytest it should be file.py: you can't provide it as bare module
joined_targets = [t + ".py" if ":" not in t else t for t in joined_targets]
sys.argv += [path] if path else joined_targets
# plugin is discovered automatically in 3, but not in 2
# to prevent "plugin already registered" problem we check it first
plugins_to_load = []
if not get_plugin_manager().hasplugin("pytest-teamcity"):
if "pytest-teamcity" not in map(lambda e: e.name, iter_entry_points(group='pytest11', name=None)):
plugins_to_load.append(pytest_plugin)
args = sys.argv[1:]
if "--jb-show-summary" in args:
args.remove("--jb-show-summary")
elif version.LooseVersion(pytest.__version__) >= version.LooseVersion("6.0"):
args += ["--no-header", "--no-summary", "-q"]
if JB_DISABLE_BUFFERING and "-s" not in args:
args += ["-s"]
jb_doc_args("pytest", args)
class Plugin:
@staticmethod
def pytest_configure(config):
| if getattr(config.option, "numprocesses", None):
| set_parallel_mode()
start_protocol()
sys.exit(pytest.main(args, plugins_to_load + [Plugin]))
|
brhoades/holdem-bot | poker/poker.py | Python | mit | 2,684 | 0.003353 | # Heads Up Texas Hold'em Challenge bot
# Based on the Heads Up Omaha Challange - Starter Bot by Jackie <jackie@starapple.nl>
# Last update: 22 May, 2014
# @author Chris Parlette <cparlette@gmail.com>
# @version 1.0
# @license MIT License (http://opensource.org/licenses/MIT)
class Pocket(object):
    """A player's hole cards, iterable in the order they were supplied."""

    def __init__(self, cards):
        """Store the sequence of cards that makes up this pocket."""
        self.cards = cards

    def __iter__(self):
        """Yield each card so the pocket can be looped over directly."""
        for card in self.cards:
            yield card
class Ranker(object):
'''
Ranker class
'''
@staticmethod
def rank_five_cards(cards):
# List of all card values
values = sorted(['23456789TJQKA'.find(card.value) for card in cards])
# Checks if hand is a straight
is_straight = all([values[i] == values[0] + i for i in range(5)])
# Additional straight check
if not is_straight:
# Weakest straight
is_straight = all(values[i] == values[0] + i for i in range(4)) and values[4] == 12
# Rotate values as the ace is weakest in this case
values = values[1:] + values[:1]
# Checks if hand is a flush
is_flush = all([card.suit == cards[0].suit for card in cards])
# Get card value counts
value_count = {value: values.count(value) for value in values}
# Sort value counts by most occuring
sorted_value_count = sorted([(count, value) for value, count in value_count.items()], reverse = True)
| # Get all kinds (e.g. four of a kind, three of a kind, pair)
kinds = [value_count[0] for value_count in sorted_value_count]
# Get values for kinds
kind_values = [value_count[1] for value_count in sorted_value_count]
# Royal flush
if is_straight and is_flush and values[0] == 8:
return ['9'] + values |
# Straight flush
if is_straight and is_flush:
return ['8'] + kind_values
# Four of a kind
if kinds[0] == 4:
return ['7'] + kind_values
# Full house
if kinds[0] == 3 and kinds[1] == 2:
return ['6'] + kind_values
# Flush
if is_flush:
return ['5'] + kind_values
# Straight
if is_straight:
return ['4'] + kind_values
# Three of a kind
if kinds[0] == 3:
return ['3'] + kind_values
# Two pair
if kinds[0] == 2 and kinds[1] == 2:
return ['2'] + kind_values
# Pair
if kinds[0] == 2:
return ['1'] + kind_values
# No pair
return ['0'] + kind_values
|
chrisenytc/coffy | coffees/urls.py | Python | mit | 331 | 0.02719 | from django.c | onf.urls import patterns, url
from coffees import views
urlpatterns = patterns('',
# GET /api
url(r'^$', views.index, name='coffees_index'),
# POST /api/coffees
url(r'^coffees/$', views.create, name='coffees_create'),
# GET /api/<username>
url(r'^( | ?P<username>[\w-]+)/$', views.detail, name='coffees_detail'),
) |
hadim/profileextractor | tools/autocopyleft.py | Python | bsd-3-clause | 1,265 | 0.012648 | #-*- coding: utf-8 -*-
import sys, fileinput, os
# Configuration
path_src = "../src/"
fextension = [".py"]
date_copyright = "2011"
authors = "see AUTHORS"
project_name = "ProfileExtractor"
header_file = "license_header.txt"
def pre_append(line, file_name):
    """Prepend *line* (followed by a newline) to the file at *file_name*.

    The file is rewritten in place; its previous content is preserved
    unchanged after the inserted first line.

    Fixes over the original: the loop variable no longer shadows the
    ``line`` parameter, and ``with`` blocks guarantee the file handles are
    closed even if an I/O error occurs (fileinput left the handle open on
    exception).
    """
    with open(file_name, 'r') as fobj:
        original = fobj.read()
    with open(file_name, 'w') as fobj:
        fobj.write("%s\n%s" % (line, original))
def listdirectory(path, extension):
    """Recursively collect files under *path* whose suffix is in *extension*.

    ``extension`` is a container of suffixes (e.g. ``[".py"]``); matching
    paths are returned in os.walk order.
    """
    matches = []
    for dirpath, _subdirs, filenames in os.walk(path):
        matches.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if os.path.splitext(name)[1] in extension
        )
    return matches
if __name__ == '__main__':
f = open(header_file, 'r')
licence_head = f.readlines()
f.close()
files = listdirectory(path_src, fextension)
for f in files:
name = os.path.basename(f) |
str_lhead = ""
for l in licence_head:
l = l.replace("DATE", date_copyright)
l = l.replace("AUTHORS", authors)
l = l.replace(" | PROJECT_NAME", project_name)
l = l.replace("FILENAME", name)
str_lhead += l
pre_append(str_lhead, f)
|
jaberg/nengo | nengo/templates/gate.py | Python | mit | 3,030 | 0.020132 | title='Gate'
label='Gate'
icon='gate.png'
description="""<html>This template creates an ensemble that drives an inhibitory gate on an existing specified ensemble. </html>"""
params=[
('name','Name',str,'Name of the new gating ensemble'),
('gated','Name of gated ensemble',str,'Name of the existing ensemble to gate'),
('neurons','Number of neurons',int,'Number of neurons in the new gating ensemble'),
('pstc','Gating PSTC [s]', float, 'Post-synaptic time constant of the gating ensemble'),
]
import nef
import ca.nengo
def test_params(net, p):
gatedIsSet = False
nameIsTaken = False
nodeList = net.network.getNodes()
for i in nodeList:
if i.name == p['gated']:
gatedIsSet = True
elif i.name == p['name']:
nameIsTaken = True
if nameIsTaken: return 'That name is already taken'
if not gatedIsSet: return 'Must provide the name of an existing ensemble to be gated'
target=net.network.getNode(p['gated'])
if not isinstance(target, ca.nengo.model.impl.NetworkArrayImpl) and not isinstance(target, ca.nengo.model.nef.NEFEnsemble):
return 'The ensemble to be gated must be either an ensemble or a network array'
if p['neurons']<1: return 'The number of neurons must be greater than zero'
if p['pstc']<=0: return 'The post-synaptic time constant must be greater than zero'
from java.util import ArrayList
from java.util import HashMap
from ca.nengo.model.impl import NetworkArrayImpl
def make(net,name='Gate', gated='visual', neurons=40 ,pstc=0.01):
gate=net.make(name, neurons, 1, intercept=(-0.7, 0), encoders=[[-1]])
def addOne(x):
return [x[0]+1]
net.connect(gate, None, func=addOne, origin_name='xBiased', create_projection=False)
output=net.network.getNode(gated)
if isinstance(output,NetworkArrayImpl):
weights=[[-10]]*(output.nodes[0].neurons*len(output.nodes))
else:
weights=[[-10]]*output.neurons
count=0
while 'gate_%02d'%count in [t.name for t in output.terminations]:
count=count+1
oname = str('gate_%02d'%count)
output.addTermination(oname, weights, pstc, False)
orig = gate.getOrigin('xBiased')
term = output.getTermination(oname)
net.network.addProjection(orig, term)
if net.network.getMetaData("gate") == None:
net.network.setMetaData("gate", HashMap())
gates = net.network.getMetaData("gate")
gate=HashMap(4)
gate.put("name", name)
gate.put("gated", gated)
gate.put("neurons", neuron | s)
gate.put("pstc", pstc)
gates.put(name, gate)
if net.network.getMetaData("templates") == None:
net.network.setMetaData("templates | ", ArrayList())
templates = net.network.getMetaData("templates")
templates.add(name)
if net.network.getMetaData("templateProjections") == None:
net.network.setMetaData("templateProjections", HashMap())
templateproj = net.network.getMetaData("templateProjections")
templateproj.put(name, gated)
|
magenta/symbolic-music-diffusion | utils/train_utils.py | Python | apache-2.0 | 3,999 | 0.009002 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training utilities."""
import jax
import math
import numpy as np
from absl import logging
from flax import struct
from functools import partial
@struct.dataclass
class EarlyStopping:
"""Early stopping to avoid overfitting during training.
Attributes:
min_delta: Minimum delta between updates to be considered an
improvement.
patience: Number of steps of no improvement before stopping.
best_metric: Current best metric value.
patience_count: Number of steps since last improving update.
should_stop: Whether the training loop should stop to avoid
overfitting.
"""
min_delta: float = 0
patience: int = 0
best_metric: float = float('inf')
pati | ence_count: int = 0
should_stop: bool = False
def update(self, metric):
"""Update the state based on metric.
Returns:
Whether there was an improvement greater than min_delta from
the previous best_metric and the updated EarlyStop object.
"""
if math.isinf(
self.best_metric) or self.best_metric - metric > self.min_delta:
return True, self.replace(best_metric=metric, patience_count=0)
else:
| should_stop = self.patience_count >= self.patience or self.should_stop
return False, self.replace(patience_count=self.patience_count + 1,
should_stop=should_stop)
@struct.dataclass
class EMAHelper:
  """Exponential moving average of model parameters.

  Attributes:
    mu: Momentum parameter.
    params: Flax network parameters to update.
  """
  # NOTE(review): ``any`` below is the builtin, not ``typing.Any`` --
  # confirm that is intentional before changing it.
  mu: float
  params: any

  @jax.jit
  def update(self, model):
    # New EMA leaf = mu * old EMA leaf + (1 - mu) * current model leaf,
    # applied element-wise across the whole parameter pytree.
    # NOTE(review): jax.tree_multimap is deprecated/removed in newer JAX
    # releases (jax.tree_util.tree_map replaces it) -- confirm the pinned
    # JAX version before modernizing.
    ema_params = jax.tree_multimap(
        lambda p_ema, p: p_ema * self.mu + p * (1 - self.mu), self.params,
        model.params)
    # struct.dataclass instances are immutable: ``replace`` returns a new
    # helper carrying the updated averages.
    return self.replace(mu=self.mu, params=ema_params)
def log_metrics(metrics,
                step,
                total_steps,
                epoch=None,
                summary_writer=None,
                verbose=True):
  """Log metrics.

  Args:
    metrics: A dictionary of scalar metrics.
    step: The current step.
    total_steps: The total number of steps.
    epoch: The current epoch (optional).
    summary_writer: A TensorBoard summary writer (optional).
    verbose: Whether to flush values to stdout via logging.
  """
  parts = []
  for name, value in metrics.items():
    # Learning rate gets four decimals; everything else gets two.
    fmt = '{} {:5.4f} | ' if name == 'lr' else '{} {:5.2f} | '
    parts.append(fmt.format(name, value))
    if summary_writer is not None:
      # TensorBoard wants a globally increasing step across epochs.
      writer_step = step if epoch is None else total_steps * epoch + step
      summary_writer.scalar(name, value, writer_step)
  metrics_str = ''.join(parts)
  epoch_str = '' if epoch is None else '| epoch {:3d} '.format(epoch)
  if verbose:
    logging.info('{}| {:5d}/{:5d} steps | {}'.format(epoch_str, step,
                                                     total_steps, metrics_str))
def report_model(model):
  """Log number of trainable parameters and their memory footprint."""
  leaves = jax.tree_leaves(model.params)
  trainable_params = np.sum([leaf.size for leaf in leaves])
  footprint_bytes = np.sum(
      [leaf.size * leaf.dtype.itemsize for leaf in leaves])
  # The misspelling "paramters" is preserved verbatim: log text is
  # runtime output and must not change in a style-only rewrite.
  logging.info('Number of trainable paramters: {:,}'.format(trainable_params))
  logging.info('Memory footprint: %dMB', footprint_bytes / 2**20)
|
Sbalbp/DIRAC | DataManagementSystem/scripts/dirac-dms-put-and-register-request.py | Python | gpl-3.0 | 2,952 | 0.034553 | #!/bin/env python
""" create and put 'PutAndRegister' request with a single local file
warning: make sure the file you want to put is accessible from DIRAC production hosts,
i.e. put file on network fs (AFS or NFS), otherwise operation will fail!!!
"""
__RCSID__ = "$Id: $"
import os
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__,
'Usage:',
' %s [option|cfgfile] requestName LFN localFile targetSE' % Script.scriptName,
'Arguments:',
' requestName: a request name',
' LFN: logical file name'
' localFile: local file you want to put',
' targetSE: target SE' ] ) )
# # execution
if __name__ == "__main__":
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import DIRAC
from DIRAC import gLogger
args = Script.getPositionalArgs()
requestName = None
LFN = None
PFN = None
targetSE = None
if not len( args ) != 4:
Script.showHelp()
DIRAC.exit( 0 )
else:
requestName = args[0]
LFN = args[1]
PFN = args[2]
targetSE = args[3]
if not os.path.isabs(LFN):
gLogger.error( "LFN should be absolute path!!!" )
DIRAC.exit( -1 )
gLogger.info( "will create request '%s' with 'PutAndRegister' "\
"operation using %s pfn and %s target SE" % ( requestName, PFN, targetSE ) )
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagemen | tSystem.Client.ReqClient import ReqClient
from DIRAC.Core.Utilities.Adler import fileAdler
if not os.path.exists( PFN ):
gLogger.error( "%s does not exist" % PFN )
DIRAC.exit( -1 )
if not os.path.isfile( PFN ):
gLogger.error( | "%s is not a file" % PFN )
DIRAC.exit( -1 )
PFN = os.path.abspath( PFN )
size = os.path.getsize( PFN )
adler32 = fileAdler( PFN )
request = Request()
request.RequestName = requestName
putAndRegister = Operation()
putAndRegister.Type = "PutAndRegister"
putAndRegister.TargetSE = targetSE
opFile = File()
opFile.LFN = LFN
opFile.PFN = PFN
opFile.Size = size
opFile.Checksum = adler32
opFile.ChecksumType = "ADLER32"
putAndRegister.addFile( opFile )
reqClient = ReqClient()
putRequest = reqClient.putRequest( request )
if not putRequest["OK"]:
gLogger.error( "unable to put request '%s': %s" % ( requestName, putRequest["Message"] ) )
DIRAC.exit( -1 )
gLogger.always( "Request '%s' has been put to ReqDB for execution." )
gLogger.always( "You can monitor its status using command: 'dirac-rms-show-request %s'" % requestName )
DIRAC.exit( 0 )
|
scripnichenko/nova | nova/api/openstack/compute/quota_sets.py | Python | apache-2.0 | 8,030 | 0.000374 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import six
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack.compute.schemas import quota_sets
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import objects
fro | m nova import quota
ALIAS = "os-quota-sets"
QUOTAS = quota.QUOTAS
authorize = extensions.os_compute_authorizer(ALIAS)
class QuotaSetsController(wsgi.Controller):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict."""
if project_id:
| result = dict(id=str(project_id))
else:
result = {}
for resource in QUOTAS.resources:
if resource in quota_set:
result[resource] = quota_set[resource]
return dict(quota_set=result)
def _validate_quota_limit(self, resource, limit, minimum, maximum):
# NOTE: -1 is a flag value for unlimited
if limit < -1:
msg = (_("Quota limit %(limit)s for %(resource)s "
"must be -1 or greater.") %
{'limit': limit, 'resource': resource})
raise webob.exc.HTTPBadRequest(explanation=msg)
def conv_inf(value):
return float("inf") if value == -1 else value
if conv_inf(limit) < conv_inf(minimum):
msg = (_("Quota limit %(limit)s for %(resource)s must "
"be greater than or equal to already used and "
"reserved %(minimum)s.") %
{'limit': limit, 'resource': resource, 'minimum': minimum})
raise webob.exc.HTTPBadRequest(explanation=msg)
if conv_inf(limit) > conv_inf(maximum):
msg = (_("Quota limit %(limit)s for %(resource)s must be "
"less than or equal to %(maximum)s.") %
{'limit': limit, 'resource': resource, 'maximum': maximum})
raise webob.exc.HTTPBadRequest(explanation=msg)
    def _get_quotas(self, context, id, user_id=None, usages=False):
        """Fetch quotas for a project, or for one user within a project.

        When ``usages`` is True the full per-resource dicts are returned;
        otherwise each entry is collapsed to its bare ``limit`` value.
        """
        if user_id:
            values = QUOTAS.get_user_quotas(context, id, user_id,
                                            usages=usages)
        else:
            values = QUOTAS.get_project_quotas(context, id, usages=usages)

        if usages:
            return values
        else:
            return {k: v['limit'] for k, v in values.items()}
    @extensions.expected_errors(())
    def show(self, req, id):
        """Show quota limits for project *id*, optionally scoped to the
        ``user_id`` query parameter."""
        context = req.environ['nova.context']
        authorize(context, action='show', target={'project_id': id})
        # user_id arrives as a raw query-string parameter (?user_id=...).
        params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
        user_id = params.get('user_id', [None])[0]
        return self._format_quota_set(id,
            self._get_quotas(context, id, user_id=user_id))
    @extensions.expected_errors(())
    def detail(self, req, id):
        """Like show(), but include usage/reserved detail for each quota."""
        context = req.environ['nova.context']
        authorize(context, action='detail', target={'project_id': id})
        # NOTE(review): reads user_id via req.GET here, while show()/update()
        # parse QUERY_STRING by hand -- the results should be equivalent,
        # but confirm before unifying.
        user_id = req.GET.get('user_id', None)
        return self._format_quota_set(id, self._get_quotas(context, id,
                                                           user_id=user_id,
                                                           usages=True))
    @extensions.expected_errors(400)
    @validation.schema(quota_sets.update)
    def update(self, req, id, body):
        """Update quota limits for project *id* (optionally one user's).

        Validation and persistence are deliberately split into two passes
        so that a bad value in the request rejects the whole set before
        anything is written.
        """
        context = req.environ['nova.context']
        authorize(context, action='update', target={'project_id': id})
        project_id = id
        params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
        user_id = params.get('user_id', [None])[0]

        quota_set = body['quota_set']
        # 'force' is an admin override that skips the min/max checks below.
        force_update = strutils.bool_from_string(quota_set.get('force',
                                                               'False'))
        settable_quotas = QUOTAS.get_settable_quotas(context, project_id,
                                                     user_id=user_id)

        # NOTE(dims): Pass #1 - In this loop for quota_set.items(), we validate
        # min/max values and bail out if any of the items in the set is bad.
        valid_quotas = {}
        for key, value in six.iteritems(body['quota_set']):
            # Skip the 'force' flag itself and empty values (but keep 0,
            # which is a legitimate quota).
            if key == 'force' or (not value and value != 0):
                continue
            # validate whether already used and reserved exceeds the new
            # quota, this check will be ignored if admin want to force
            # update
            value = int(value)
            if not force_update:
                minimum = settable_quotas[key]['minimum']
                maximum = settable_quotas[key]['maximum']
                self._validate_quota_limit(key, value, minimum, maximum)
            valid_quotas[key] = value

        # NOTE(dims): Pass #2 - At this point we know that all the
        # values are correct and we can iterate and update them all in one
        # shot without having to worry about rolling back etc as we have done
        # the validation up front in the loop above.
        for key, value in valid_quotas.items():
            try:
                # Create first; fall back to update if the limit exists.
                objects.Quotas.create_limit(context, project_id,
                                            key, value, user_id=user_id)
            except exception.QuotaExists:
                objects.Quotas.update_limit(context, project_id,
                                            key, value, user_id=user_id)
        # Note(gmann): Removed 'id' from update's response to make it same
        # as V2. If needed it can be added with microversion.
        return self._format_quota_set(None, self._get_quotas(context, id,
                                                             user_id=user_id))
    @extensions.expected_errors(())
    def defaults(self, req, id):
        """Return the default quota limits (not project-specific values)."""
        context = req.environ['nova.context']
        authorize(context, action='defaults', target={'project_id': id})
        values = QUOTAS.get_defaults(context)
        return self._format_quota_set(id, values)
    # TODO(oomichi): Here should be 204(No Content) instead of 202 by v2.1
    # +microversions because the resource quota-set has been deleted completely
    # when returning a response.
    @extensions.expected_errors(())
    @wsgi.response(202)
    def delete(self, req, id):
        """Reset quotas for project *id* -- or for one of its users when a
        ``user_id`` query parameter is supplied -- back to the defaults."""
        context = req.environ['nova.context']
        authorize(context, action='delete', target={'project_id': id})
        params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
        user_id = params.get('user_id', [None])[0]
        if user_id:
            # Per-user delete: only that user's overrides are removed.
            QUOTAS.destroy_all_by_project_and_user(context,
                                                   id, user_id)
        else:
            QUOTAS.destroy_all_by_project(context, id)
class QuotaSets(extensions.V21APIExtensionBase):
    """Quotas management support."""

    name = "Quotas"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose /os-quota-sets with GET .../defaults and GET .../detail
        member actions."""
        resources = []
        res = extensions.ResourceExtension(ALIAS,
                                           QuotaSetsController(),
                                           member_actions={'defaults': 'GET',
                                                           'detail': 'GET'})
        resources.append(res)
        return resources

    def get_controller_extensions(self):
        # This extension contributes no controller extensions.
        return []
|
yymao/slackbots | calc.py | Python | mit | 1,617 | 0.004329 | import re
from urllib import quote_plus
import math
from common import escape
_globals = {'__builtins__':{}}
_locals = vars(math)
try:
import cosmo
import smhm
except ImportError:
p | ass
else:
for k in cosmo.__all__:
_locals[k] = cosmo.__dict__[k]
for k in smhm.__all__:
_locals[k] = smhm.__dict__[k]
_help_msg = '''Supports most simple math functions and the following cosmology functions:
- `cd`: comoving distance [Mpc]
- `ld`: luminosity distance [Mpc]
- `ad`: angular distance [Mpc]
- `age`: age of the universe [Gyr]
- `lookback`: lookback time [Gyr]
All cosmology functions have this call signature: `(z, om=0.3, ol=1-om, h=1)`. Note that h is d | efault to 1.
Also supports the mean stellar mass--halo mass relation from Behroozi+2013:
- `smhm`: stellar mass [Msun]
Call signature: `sm(hm, z)`
'''
_escape_pattern = r'[^A-Za-z0-9\-+*/%&|~!=()<>.,#]|\.(?=[A-Za-z_])'
_error_msg = '''Hmmm... something\'s wrong with the input expression.
Type `/calc help` to see supported cosmology functions.
Or just try asking <https://www.google.com/?#q={0}|Google>.'''
def program(data):
    """Handle a `/calc` Slack slash command.

    ``data['text']`` carries the user's expression.  Help keywords return
    the help message; otherwise the expression is sanitised and evaluated
    with the math/cosmology helpers in scope, and the escaped expression
    plus the formatted answer (or an error hint) is returned.
    """
    expr = data['text'].strip()
    if expr in ['', '-h', '--help', 'help']:
        return _help_msg
    # '^' is treated as exponentiation; characters outside the whitelist
    # pattern are stripped before evaluation.
    escaped_expr = re.sub(_escape_pattern, '', expr.replace('^', '**'))
    # SECURITY NOTE: this still eval()s user-supplied text.  Builtins are
    # emptied and the input is filtered, but eval on untrusted input is
    # inherently risky -- consider a real expression parser instead.
    try:
        ans = eval(escaped_expr, _globals, _locals)
    except Exception:
        # BUG FIX: was a bare ``except:``, which would also swallow
        # SystemExit and KeyboardInterrupt.
        ans = _error_msg.format(escape(quote_plus(expr, '')))
    else:
        if isinstance(ans, float):
            ans = '= {0:g}'.format(ans)
        else:
            ans = '= {0}'.format(ans)
    return '{0}\n{1}'.format(escape(expr), ans)
|
orbitfold/pyo | examples/tables/05_table_maker.py | Python | gpl-3.0 | 1,093 | 0.007319 | #!/usr/bin/env python
# encoding: utf-8
"""
Creates a new sound table from random chunks of a soundfile.
"""
from pyo import *
import random, os
s = Server(sr=44100, nchnls=2, buffersize=512, duplex=0).boot()
path = "../snds/baseballmajeur_m.aif"
dur = sndinfo(path)[1]
t = SndTable(path, start=0, stop=1)
amp = Fader(fadein=0.005, fadeout=0.005, dur=0, mul=0.4).play()
a = Looper(t, pitch=[1.,1.], dur=t.getDur(), xfade=5, mul=amp).out()
def addsnd():
start = random.uniform(0, dur*0.7)
duration = random.uniform(.1, .3)
pos = random.un | iform(0.05, t.getDur()-0.5)
cross = random.uniform(0.04, duration/2)
t.insert(path, pos=pos, crossfade=cross, start=start, stop=start+duration)
def delayed_generation():
    """Rebuild the sound table from a fresh random chunk, layer ten more
    random snippets on top, then restart the looper and fader.

    Runs via CallAfter so the table swap happens shortly after the fader
    has been stopped in gen().
    """
    # Pick a random slice from roughly the first 70% of the source file.
    start = random.uniform(0, dur*0.7)
    duration = random.uniform(.1, .3)
    t.setSound(path, start=start, stop=start+duration)
    for i in range(10):
        addsnd()
    # Resync the looper with the new table length and fade back in.
    a.dur = t.getDur()
    a.reset()
    amp.play()
amp.play()
caller = CallAfter(function=delayed_generation, time=0.005).stop()
def gen():
amp.stop()
| caller.play()
gen()
s.gui(locals()) |
davandev/davanserver | davan/config/no_private_config.py | Python | mit | 1,198 | 0.010017 | '''
Created on 1 nov. 2016
@author: davandev
Default configuration file used if not defined somewhere else
'''
FIBARO_USER = "my_user"
FIBARO_PASSWORD = "my_password"
TELEGRAM_TOKEN = 'my_telegram_token'
TELEGRAM_CHATID = {'chatid':'user3'}
TELEGRAM_USER_DAVID = 'my_telegram_user1'
TELEGRAM_U | SER_MIA = 'my_telegram_user2'
CAMERA_USER = "my_cam_user"
CAMERA_PASSWORD = "my_cam_password"
VOICERSS_TOKEN = "my_voicerss_token"
WEATHER_TOKEN = "my_weather_token"
WEATHER_STATION_ID = "my_weather_station_token"
USER_PIN = {'pin':'us | er'}
TELLDUS_PUBLIC_KEY = "my_telldus_key"
TELLDUS_PRIVATE_KEY = "my_telldus_key"
ROUTER_USER = "my_router_user"
ROUTER_PASSWORD = "my_router_password"
RECEIVER_BOT_TOKEN = "my_token"
GOOGLE_CALENDAR_TOKEN = "my_google_token"
DEVICES_UNKNOWN={}
DEVICES_FAMILY={}
DEVICES_FRIEND={}
DEVICES_HOUSE={}
NOKIA_CONSUMER_KEY ="consumer_key"
NOKIA_CONSUMER_SECRET = "consumer secret"
NOKIA_OAUTH_VERIFIER = "oauth verifier"
NOKIA_ACCESS_TOKEN = 'oauthtoken'
NOKIA_ACCESS_TOKEN_SECRET = 'oauthtokensecret'
NOKIA_USER_ID = 'user'
SL_API_KEYS = {
'PLATS': '',
'UPPSLAG': '',
'REALTID': '',
'SITEID': ''
}
TRADFRI_KEYS=""
|
VapourApps/va_master | va_master/consul_kv/initial_consul_data.py | Python | gpl-3.0 | 5,705 | 0.036284 | initial_consul_data = {
"update" : {
"providers/va_standalone_servers" : {"username": "admin", "servers": [], "sec_groups": [], "images": [], "password": "admin", "ip_address": "127.0.0.1", "networks": [], "sizes": [], "driver_name": "generic_driver", "location": "", "defaults": {}, "provider_name": "va_standalone_servers"},
"users" : [],
},
"overwrite" : {
"va_flavours" : {"va-small": {"num_cpus": 1, "max_memory": 1048576, "vol_capacity": 5, "memory": 1048576}, "debian": {"num_cpus": 1, "max_memory": 1048576, "vol_capacity": 5, "memory": 1048576}},
"service_presets/highstate_preset":{"name": "highstate", "script": "salt {server} state.highstate test=True | perl -lne 's\/^Failed:\\s+\/\/ or next; s\/\\s.*\/\/; pri | nt'"},
"service_presets/ping_preset":{"name": "ping_preset", "script" : "ping -c1 {address} > /dev/null", "interval": "30s", "timeout": "10s"},
"service_presets/tcp_preset":{"name": "TCP", "tcp": "{address}", "interval": "30s", "timeout": "10s"},
"managed_actions/ssh/root" : {
"actions" : [
{'name' : | 'reboot', 'type' : 'confirm'},
# {'name' : 'delete', 'type' : 'confirm'},
{'name' : 'remove_server', 'type' : 'confirm', 'kwargs' : ['datastore_handler', 'server_name'], 'requires_ssh' : False},
{'name' : 'stop', 'type' : 'confirm'},
{'name' : 'show_processes', 'type' : 'text', 'label' : 'Show processes'},
{'name' : 'show_usage', 'type' : 'text', 'label' : 'Show usage'},
{'name' : 'get_users', 'type' : 'text', 'label' : 'Get users'},
{'name' : 'restart_service', 'type' : 'form', 'label' : 'Restart service'}
]
},
"managed_actions/ssh/user" : { #Temporarily, we have all functions avialable for non-root users but we may change this in the future.
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'remove_server', 'type' : 'confirm', 'kwargs' : ['datastore_handler', 'server_name'], 'requires_ssh' : False},
{'name' : 'stop', 'type' : 'action'},
{'name' : 'show_processes', 'type' : 'text', 'label' : 'Show processes'},
{'name' : 'show_usage', 'type' : 'text', 'label' : 'Show usage'},
{'name' : 'get_users', 'type' : 'text', 'label' : 'Get users'},
{'name' : 'restart_service', 'type' : 'form', 'label' : 'Restart process'}
]
},
"managed_actions/winexe/administrator" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
]
},
"managed_actions/winexe/user" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/openstack" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/aws" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/lxc" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/digital_ocean" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/libvirt" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/century_link_driver" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/generic_driver" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/salt/" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'stop', 'type' : 'action'}
],
}
}
}
|
jarussi/riotpy | riotpy/managers/team.py | Python | bsd-3-clause | 1,661 | 0.002408 | # coding: utf-8
from base import Manager
from riotpy.resources.team import TeamResource
class TeamManager(Manager):
def get_team_list_by_summoner_ids(self, summoner_ids):
"""
Get a list of teams by summoner ids.
:param ids: list of summoner_ids to look or all the ids separated by `,`.
Ex: ['123', '1234'] or '5123,1234'
:return: A resources.team.TeamResource list
"""
if isinstance(summoner_ids, list):
| summoner_ids = ','.join(summoner_ids)
content = self._get('/api/lol/{}/{}/team/by-summoner/{}'.format(
self.api.reg | ion,
self.version,
summoner_ids)
)
teams = []
for team_dict in content.values():
# assuming he can only be in one team. Easy to change if mistaken
teams.append(self._dict_to_resource(team_dict[0], resource_class=TeamResource))
return teams
    def get_team_list_by_team_ids(self, team_ids):
        """
        Get a list of teams by team ids.

        :param team_ids: list of team ids to look up, or all the ids already
            joined in one string separated by `,`.
            Ex: ['123', '1234'] or '5123,1234'
        :return: A resources.team.TeamResource list
        """
        if isinstance(team_ids, list):
            # The endpoint expects a single comma-separated string of ids.
            team_ids = ','.join(team_ids)
        content = self._get('/api/lol/{}/{}/team/{}'.format(
            self.api.region,
            self.version,
            team_ids)
        )
        teams = []
        for team_dict in content.values():
            teams.append(self._dict_to_resource(team_dict, resource_class=TeamResource))
        return teams
|
cinghiopinghio/dotinstall | dotinstall/__init__.py | Python | gpl-3.0 | 354 | 0 | __productname__ = 'dotinstall'
_ | _version__ = '0.1'
__copyright__ = "Copyright (C) 2014 Cinghio Pinghio"
__author__ = "Cinghio Pinghio"
__author_email__ = "cinghio@linuxmail.org"
__description__ = "Install dotfiles"
__long_description__ = "Install dofile based on some rules"
__url__ = "cinghiopinghio...."
__license__ = "Licensed under the GNU GPL v | 3+."
|
gvanrossum/asyncio | examples/hello_coroutine.py | Python | apache-2.0 | 380 | 0 | """Print 'Hello World' every two seconds, using | a coroutine."""
import asyncio
@asyncio.coroutine
def greet_every_two_seconds():
    """Print 'Hello World' forever, pausing two seconds between prints.

    NOTE(review): ``@asyncio.coroutine`` / ``yield from`` is the legacy
    generator-based coroutine style (pre ``async``/``await``); the decorator
    was removed in Python 3.11, so this only runs on older interpreters.
    """
    while True:
        print('Hello World')
        yield from asyncio.sleep(2)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
try:
loop | .run_until_complete(greet_every_two_seconds())
finally:
loop.close()
|
nzlosh/st2 | st2actions/tests/unit/policies/test_concurrency_by_attr.py | Python | apache-2.0 | 17,676 | 0.004243 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
from mock import call
# Importing st2actions.scheduler relies on config being parsed :/
import st2tests.config as tests_config
tests_config.parse_args()
import st2common
from st2actions.scheduler import handler as scheduling_queue
from st2common.bootstrap.policiesregistrar import register_policy_types
from st2common.constants import action as action_constants
from st2common.models.db.action import LiveActionDB
from st2common.persistence.action import LiveAction
from st2common.persistence.execution_queue import ActionExecutionSchedulingQueue
from st2common.persistence.policy import Policy
from st2common.services import action as action_service
from st2common.services import coordination
from st2common.transport.liveaction import LiveActionPublisher
from st2common.transport.publishers import CUDPublisher
from st2common.bootstrap import runnersregistrar as runners_registrar
from st2tests import ExecutionDbTestCase, EventletTestCase
import st2tests.config as tests_config
from st2tests.fixturesloader import FixturesLoader
from st2tests.mocks.execution import MockExecutionPublisher
from st2tests.mocks.liveaction import MockLiveActionPublisherSchedulingQueueOnly
from st2tests.mocks.runners import runner
from six.moves import range
__all__ = ["ConcurrencyByAttributePolicyTestCase"]
PACK = "generic"
TEST_FIXTURES = {
"actions": ["action1.yaml", "action2.yaml"],
"policies": ["policy_3.yaml", "policy_7.yaml"],
}
NON_EMPTY_RESULT = {"data": "non-empty"}
MOCK_RUN_RETURN_VALUE = (
action_constants.LIVEACTION_STATUS_RUNNING,
NON_EMPTY_RESULT,
None,
)
SCHEDULED_STATES = [
action_constants.LIVEACTION_STATUS_SCHEDULED,
action_constants.LIVEACTION_STATUS_RUNNING,
action_constants.LIVEACTION_STATUS_SUCCEEDED,
]
@mock.patch(
"st2common.runners.base.get_runner", mock.Mock(return_value=runner.get_runner())
)
@mock.patch(
"st2actions.container.base.get_runner", mock.Mock(return_value=runner.get_runner())
)
@mock.patch.object(
CUDPublisher,
"publish_update",
mock.MagicMock(side_effect=MockExecutionPublisher.publish_update),
)
@mock.patch.object(CUDPublisher, "publish_create", mock.MagicMock(return_value=None))
class ConcurrencyByAttributePolicyTestCase(EventletTestCase, ExecutionDbTestCase):
@classmethod
def setUpClass(cls):
EventletTestCase.setUpClass()
ExecutionDbTestCase.setUpClass()
# Override the coordinator to use the noop driver otherwise the tests will be blocked.
tests_config.parse_args(coordinator_noop=True)
coordination.COORDINATOR = None
# Register runners
runners_registrar.register_runners()
# Register common policy types
register_policy_types(st2common)
loader = FixturesLoader()
loader.save_fixtures_to_db(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
@classmethod
def tearDownClass(cls):
# Reset the coordinator.
coordination.coordinator_teardown(coordination.COORDINATOR)
coordination.COORDINATOR = None
super(ConcurrencyByAttributePolicyTestCase, cls).tearDownClass()
# NOTE: This monkey patch needs to happen again here because during tests for some reason this
# method gets unpatched (test doing reload() or similar)
@mock.patch(
"st2actions.container.base.get_runner",
mock.Mock(return_value=runner.get_runner()),
)
def tearDown(self):
for liveaction in LiveAction.get_all():
action_service.update_status(
liveaction, action_constants.LIVEACTION_STATUS_CANCELED
)
@staticmethod
def _process_scheduling_queue():
for queued_req in ActionExecutionSchedulingQueue.get_all():
scheduling_queue.get_handler()._handle_execution(queued_req)
@mock.patch.object(
runner.MockAction | Runner,
"run",
mock.MagicMock(return_value=MOCK_RUN_RETURN_VALUE),
)
@mock.patch.object(
LiveActionPublisher,
"publish_state",
mock.MagicMock(
side_effect=MockLiveActionPublisherSchedulingQueueOnly.publish_state
),
)
def test_over_threshold_delay_executio | ns(self):
policy_db = Policy.get_by_ref("wolfpack.action-1.concurrency.attr")
self.assertGreater(policy_db.parameters["threshold"], 0)
self.assertIn("actionstr", policy_db.parameters["attributes"])
# Launch action executions until the expected threshold is reached.
for i in range(0, policy_db.parameters["threshold"]):
liveaction = LiveActionDB(
action="wolfpack.action-1", parameters={"actionstr": "foo"}
)
action_service.request(liveaction)
# Run the scheduler to schedule action executions.
self._process_scheduling_queue()
# Check the number of action executions in scheduled state.
scheduled = [
item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
]
self.assertEqual(len(scheduled), policy_db.parameters["threshold"])
# Assert the correct number of published states and action executions. This is to avoid
# duplicate executions caused by accidental publishing of state in the concurrency policies.
# num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
expected_num_exec = len(scheduled)
expected_num_pubs = expected_num_exec * 3
self.assertEqual(
expected_num_pubs, LiveActionPublisher.publish_state.call_count
)
self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
# Execution is expected to be delayed since concurrency threshold is reached.
liveaction = LiveActionDB(
action="wolfpack.action-1", parameters={"actionstr": "foo"}
)
liveaction, _ = action_service.request(liveaction)
expected_num_pubs += 1 # Tally requested state.
self.assertEqual(
expected_num_pubs, LiveActionPublisher.publish_state.call_count
)
# Run the scheduler to schedule action executions.
self._process_scheduling_queue()
# Since states are being processed asynchronously, wait for the
# liveaction to go into delayed state.
liveaction = self._wait_on_status(
liveaction, action_constants.LIVEACTION_STATUS_DELAYED
)
expected_num_exec += 0 # This request will not be scheduled for execution.
expected_num_pubs += 0 # The delayed status change should not be published.
self.assertEqual(
expected_num_pubs, LiveActionPublisher.publish_state.call_count
)
self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
# Execution is expected to be scheduled since concurrency threshold is not reached.
# The execution with actionstr "fu" is over the threshold but actionstr "bar" is not.
liveaction = LiveActionDB(
action="wolfpack.action-1", parameters={"actionstr": "bar"}
)
liveaction, _ = action_service.request(liveaction)
# Run the scheduler to schedule action executions.
self._process_scheduling_queue()
# Since states are being processed asynchronously, wait for the
# liveaction to go into scheduled state.
liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES)
expected_num_exec += 1 # This request |
mlperf/training_results_v0.7 | Intel/benchmarks/minigo/8-nodes-32s-cpx-tensorflow/intel_quantization/quantize_graph/quantize_graph_pad.py | Python | apache-2.0 | 2,437 | 0.000821 | # -*- coding: utf-8 -*-
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
from intel_quantization.quantize_graph.quantize_graph_base import QuantizeNodeBase
from intel_quantization.quantize_graph.quantize_graph_common import QuantizeGraphHelper as helper
class FuseNodeStartWithPad(QuantizeNodeBase):
def __init__(self, input_graph, output_node_names, perchannel,
start_node_name):
super(FuseNodeStartWithPad,
self).__init__(input_graph, output_node_names, perchannel,
start_node_name)
def has_relu(self, node_name):
for _, value in self.node_name_mapping.items():
if value.node.name == node_name:
break
if value.node.op in ("Relu", "Relu6"):
return True
return False
def _apply_pad_conv_fusion(self):
for _, value in self.node_name_mapping.items():
if value.node.op in ("Pad") and self.node_name_mapping[
value.
output[0]].node.op == "Conv2D" and self._find_relu_node(
value.node):
paddings_tensor = tensor_util.MakeNdarray(
self.node_name_mapping[value.node.input[1]].node.
attr["value"].tensor).flatten()
if any(paddings_tensor):
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(value.node)
self.add_output_graph_node(new_node)
else:
self.node_name_mapping[
value.output[0]].node.input[0] = value.node.input[0]
helper.set_attr_int_list(
self.node_name_mapping[value.output[0]].node,
"padding_list", paddings_tensor)
else:
| new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(value.node)
self.add_output_graph_node(new_node)
def get_longest_fuse(self):
return 2 # pad + conv
def apply_the_transform(self):
self._get_op_list()
self._apply_pad_conv_fusion()
self._reset_output_node_maps()
self.output_graph = self.remove_redundant_quanti | zation(
self.output_graph)
# self.remove_dead_nodes(self.output_node_names)
return self.output_graph
|
jordanemedlock/psychtruths | temboo/core/Library/Zendesk/Views/GetViewCount.py | Python | apache-2.0 | 3,674 | 0.004899 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetViewCount
# Returns the ticket count for a single view.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
######### | ######################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetViewCount(Choreography):
def __init__(self, temboo_session):
"""
Create a new inst | ance of the GetViewCount Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetViewCount, self).__init__(temboo_session, '/Library/Zendesk/Views/GetViewCount')
def new_input_set(self):
return GetViewCountInputSet()
def _make_result_set(self, result, path):
return GetViewCountResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetViewCountChoreographyExecution(session, exec_id, path)
class GetViewCountInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetViewCount
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
super(GetViewCountInputSet, self)._set_input('Email', value)
def set_ID(self, value):
"""
Set the value of the ID input for this Choreo. ((conditional, string) Retrieve a view count for the ID of the specified view.)
"""
super(GetViewCountInputSet, self)._set_input('ID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
super(GetViewCountInputSet, self)._set_input('Password', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
super(GetViewCountInputSet, self)._set_input('Server', value)
class GetViewCountResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetViewCount Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
"""
return self._output.get('Response', None)
class GetViewCountChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetViewCountResultSet(response, path)
|
xinghalo/DMInAction | src/mlearning/chap02-knn/dating/knn.py | Python | apache-2.0 | 2,943 | 0.043838 | #-*- coding: UTF-8 -*-
from numpy import *
import operator
def classifyPerson():
resultList = ['not at all','in small doses','in large doses']
percentTats = float(raw_input("percentage of time spent playing video games?"))
ffMiles = float(raw_input("frequent filter miles earned per year?"))
iceCream = float(raw_input("liters of ice cream consumed per year?"))
datingDataMat,datingLables = file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = autoNorm(datingDataMat)
inArr = array([ffMiles,percentTats,iceCream])
classifierResult = classify0((inArr-minVals)/ranges,normMat,datingLables,3)
print "You will probably like this person:", resultList[classifierResult-1]
def datingClassTest():
hoRatio = 0.10
datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = autoNorm(datingDataMat)
m = normMat.shape[0]
numTestVecs = int(m*hoRatio)
errorCount = 0.0
for i in range(numTestVecs):
classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
print "the classifier came back with: %d, the real answer is: %d" %(classifierResult,datingLabels[i])
if(classifierResult != datingLabels[i]) : errorCount+=1.0
print "total error rate is : %f " %(errorCount/float(numTestVecs))
def autoNorm(dataSet):
minValue = dataSet.min(0) # 寻找最小值
maxValue = dataSet.max(0) # 寻找最大值
ranges = maxValue - minValue # 最大值-最小值
normDataSet = zeros(shape(dataSet))
m = dataSet.shape[0]
normDataSet = dataSet - tile(minValue,(m,1))
normDataSet = normDataSet/tile(ranges,(m,1)) # 值 - 最小值 / 最大值 - 最小值
return normDataSet, ranges, minValue
def file2matrix(filename):
fr = open(filename)
arrayOLines = fr.readlines()
numberOfLines = len(arrayOLines) # 获得总的记录条数
returnMat = zeros((numberOfLines,3)) # 初始化矩阵,全都置为0
classLabelVector = []
index = 0
for line in arrayOLines:
line = line.strip()
listFromLine = line.split('\t')
returnMat[index,:] = listFromLine[0:3]
classLabelVector.append(int(listFromLine[-1]))
index += 1
# returnMat 返回全部的值
# classLabelVector 数据对应的标签
return returnMat,classLabelVector
def createDataSet():
group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels = ['A','A','B','B']
return group,labels
def classify0(inX, dataSet, labels, k):
# 获取数据集的大小, 4
dataSetSize = dataSet.shape[0]
# 复制输入的向量,然后做减法
diffMat = tile(inX, (dataSetSize,1)) - dataSet
# print diffMat
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances**0.5
sortedDistIndices = distances.argsort()
classCount = { | }
for i in range(k):
voteIlable = labels[sortedDistIndices[i]]
classCount[voteIlable] = classCount.get(voteIlable,0)+1
sortedClassCount = sorted( | classCount.iteritems(),key=operator.itemgetter(1),reverse=True)
return sortedClassCount[0][0]
|
marcopompili/django-galleries | django_galleries/urls.py | Python | bsd-3-clause | 571 | 0.005254 | """
Created on 19/mag/2013
@author: Marco Pompili
"""
from django.conf.urls import patterns, url
from django.views.generic import ListView
from .models import Gallery
urlpatterns = patterns('',
url(r'^$', ListView.as_view(
| queryset=Gallery.objects.all()[:],
context_object_name='django_galleries',
template_name='django_galleries/index.html'
), name='index'),
url('^(?P<pk>\d+)/$', 'django_galleri | es.views.gallery'),
) |
eli261/jumpserver | apps/common/mixins/api.py | Python | gpl-2.0 | 2,727 | 0.000368 | # -*- coding: utf-8 -*-
#
from django.http import JsonResponse
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from ..const import KEY_CACHE_RESOURCES_ID
__all__ = [
"JSONResponseMixin", "IDInCacheFilterMixin", "IDExportFilterMixin",
"IDInFilterMixin", "ApiMessageMixin"
]
class JSONResponseMixin(object):
"""JSON mixin"""
@staticmethod
def render_json_response(context):
return JsonResponse(context)
class IDInFilterMixin(object):
def filter_queryset(self, queryset):
queryset = super(IDInFilterMixin, self).filter_queryset(queryset)
id_list = self.request.query_params.get('id__in')
if id_list:
import json
try:
ids = json.loads(id_list)
except Exception as e:
return queryset
| if isinstance(ids, list):
queryset = queryset.filter(id__in=ids)
return queryset
class IDInCacheFilterMixin(object):
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
spm = self.request.query_params.get('spm')
if not spm:
return queryset
cache_key = KEY_CACHE_RESOURCES_ID.format(spm) |
resources_id = cache.get(cache_key)
if resources_id and isinstance(resources_id, list):
queryset = queryset.filter(id__in=resources_id)
return queryset
class IDExportFilterMixin(object):
def filter_queryset(self, queryset):
# 下载导入模版
if self.request.query_params.get('template') == 'import':
return []
else:
return super(IDExportFilterMixin, self).filter_queryset(queryset)
class ApiMessageMixin:
success_message = _("%(name)s was %(action)s successfully")
_action_map = {"create": _("create"), "update": _("update")}
def get_success_message(self, cleaned_data):
if not isinstance(cleaned_data, dict):
return ''
data = {k: v for k, v in cleaned_data.items()}
action = getattr(self, "action", "create")
data["action"] = self._action_map.get(action)
try:
message = self.success_message % data
except:
message = ''
return message
def dispatch(self, request, *args, **kwargs):
resp = super().dispatch(request, *args, **kwargs)
if request.method.lower() in ("get", "delete", "patch"):
return resp
if resp.status_code >= 400:
return resp
message = self.get_success_message(resp.data)
if message:
messages.success(request, message)
return resp
|
Dogcrafter/BotiBot | Utils.py | Python | gpl-2.0 | 3,000 | 0.035333 | #!/usr/local/bin/python3.4
# coding=utf-8
################################################################################################
# Name: Utils Klasse
#
# Beschreibung: Liest dyn. die Service Module ein,
# welche in configuration.json eingetragen wurden.
#
# Version: 1.0.0
# Author: Dogcrafter
# Author URI: https://blog.dogcrafter.de
# License: GPL2
# License URI: http://www.gnu.org/licenses/gpl-2.0.html
################################################################################################
# Changelog
# 1.0.0 - Initial release
################################################################################################
import json
import sys
import os
import imp
from inspect import getmembers, isfunction
class clUtils:
# Init
def __init__(self):
path = os.path.dirname(os.path.abspath(sys.argv[0]))
confFile = path + "/files/configuration.json"
self.__confData = self.openConfData(confFile)
self.__modules = self.setModules(path, self.__confData["modules"])
self.__helpTxt = ""
def openConfData(self,confFile):
with open(confFile) as data_file:
confData = json.load(data_file)
return confData
# get Bot configuration
def getFunctions(self):
return self.__functions
# get modules for import
def getModules(self):
return self.__modules
def setModules(self, path, inModules):
extModules = {}
i = 0
for module in inModules:
mod = self.importFromURI(path + '/services/' + module)
if mod is not None:
extModules[i] = mod
i = i + 1
return extModules
def importFromURI(self, uri, absl=False):
if not absl:
uri = os.path.normpath(os.path.join(os.path.dirname(__file__), uri))
path, fname = os.path.split(uri)
mname, ext = os.path.splitext(fname)
no_ext = os.path.join(path, mname)
#if os.path.exists(no_ext + '.pyc'):
# try:
# return imp.load_compiled(mname, no_ext + '.pyc')
# except:
# pass
if os.path.exists(no_ext + '.py'):
try:
return imp.load_source(mname, no_ext + '.py')
except:
print 'Import Fehler' ,no_ext + '.py'
pass
# get function list from module
def getFunctionsList(self,module):
functions_list = [o for o in getmembers(module) if isfunction(o[1])]
return functions_list
# get Help text
def getHelpTxt(self):
return self.__helpTxt
# set Help Text
def setHelpTxt(self,text):
self.__helpTxt = self.__helpTxt + text
def addCommandHandlerFromModules(self,dispatcher):
for module in self.__modules:
functions_list = self.getFunctionsList(self.__modules | [module])
i = 0
for func in functions_list:
# handlers
functionText = functions_list[i][0]
if functionText == "getHelpTxt":
self.setHelpTxt(getattr(self.__modules[module],functio | ns_list[i][0])())
else:
function = getattr(self.__modules[module],functions_list[i][0])
dispatcher.addTelegramCommandHandler(functionText,function)
i = i + 1
|
bospetersen/h2o-3 | h2o-py/h2o/transforms/preprocessing.py | Python | apache-2.0 | 3,037 | 0.016464 | from .transform_base import H2OTransformer
class H2OScaler(H2OTransformer):
"""
Standardize an H2OFrame by demeaning and scaling each column.
The default scaling will result in an H2OFrame with columns
having zero mean and unit variance. Users may specify the
centering and scaling values used in the standardization of
the H2OFrame.
"""
def __init__(self, center=True, scale=True):
"""
:param center: A boolean or list of numbers. If True, then columns will be demeaned before scaling.
If False, then columns will not be demeaned before scaling.
If centers is an array of numbers, then len(centers) must match the number of
columns in the dataset. Each value is removed fro | m the respective column
before scaling.
:param scale: A boolean or list of numbers. If True, then columns will be scaled by the column's standard deviation.
If False, then columns will not be scaled.
If scales is an array, then len(scales) must match the number of columns in
the dataset. Each column is scaled by the res | pective value in this array.
:return: An instance of H2OScaler.
"""
self.parms = locals()
self.parms = {k:v for k,v in self.parms.iteritems() if k!="self"}
if center is None or scale is None: raise ValueError("centers and scales must not be None.")
self._means=None
self._stds=None
@property
def means(self): return self._means
@property
def stds(self): return self._stds
def fit(self,X,y=None, **params):
"""
Fit this object by computing the means and standard deviations used by the transform
method.
:param X: An H2OFrame; may contain NAs and/or categoricals.
:param y: None (Ignored)
:param params: Ignored
:return: This H2OScaler instance
"""
if isinstance(self.parms["center"],(tuple,list)): self._means = self.parms["center"]
if isinstance(self.parms["scale"], (tuple,list)): self._stds = self.parms["scale"]
if self.means is None and self.parms["center"]: self._means = X.mean()
else: self._means = False
if self.stds is None and self.parms["scale"]: self._stds = X.sd()
else: self._stds = False
return self
def transform(self,X,y=None,**params):
"""
Scale an H2OFrame with the fitted means and standard deviations.
:param X: An H2OFrame; may contain NAs and/or categoricals.
:param y: None (Ignored)
:param params: (Ignored)
:return: A scaled H2OFrame.
"""
return X.scale(self.means,self.stds)._frame()
def inverse_transform(self,X,y=None,**params):
"""
Undo the scale transformation.
:param X: An H2OFrame; may contain NAs and/or categoricals.
:param y: None (Ignored)
:param params: (Ignored)
:return: An H2OFrame
"""
for i in X.ncol:
X[i] = self.means[i] + self.stds[i]*X[i]
return X |
Logic-gate/shuffelz | shuffelz.py | Python | gpl-2.0 | 4,742 | 0.033741 | #!/usr/bin/env python
'''# shufflez.py '''
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = ["A'mmer Almadani:Mad_Dev", "sysbase.org"]
__email__ = ["mad_dev@linuxmail.org", "mail@sysbase.org"]
import random
import urllib2
from search import GoogleSearch, SearchError
import time
from multiprocessing import Process
from threading import Timer
class shufflez:
def __init__(self):
self.word_list = 'lists/wordlist.txt'
self.websites = 'lists/websites.txt'
self.user_agent = 'lists/user_agent.txt'
def together(self, *functions):
process = []
for function in functions:
s = Process(target=function)
s.start()
process.append(s)
for s in process:
s.join()
def randomize(self, r, typ):
'''Return Random List
r (range): int
typ : word | site | user-agent
'''
lst = []
if typ == 'word':
list_to_parse = self.word_list
elif typ == 'site':
list_to_parse = self.websites
elif typ == 'user-agent':
list_to_parse = self.user_agent
a = open(list_to_parse, 'r')
for i in a.readlines():
lst.append(i)
random.shuffle(lst)
if typ == 'site':
return map(lambda x:x if 'http://' in x else 'http://' +x, lst)[0:int(r)]
else:
return lst[0:int(r)]
def append_to_list(self, typ, lst):
if typ == 'word':
l = self.word_list
elif typ == 'link':
l = self.websites
li = open(l, 'a')
for i in lst | :
li.write(i+'\n')
li.close()
def open_url(self, url, user_agent):
try:
header = { 'User-Agent' : str(user_agent) }
req = urllib2.Request(url, headers=header)
response = urllib2.urlo | pen(req)
print 'STATUS', response.getcode()
except:
pass
def google(self, term):
links_from_google = []
words_from_google = []
try:
gs = GoogleSearch(term)
gs.results_per_page = 10
results = gs.get_results()
for res in results:
words_from_google.append(res.title.encode('utf8'))
print '\033[92mGot new words from Google...appending to list\n\033[0m'
self.append_to_list('word', words_from_google)
links_from_google.append(res.url.encode('utf8'))
print '\033[92mGot new link from Google...appending to list\n\033[0m'
self.append_to_list('link', links_from_google)
except SearchError, e:
print "Search failed: %s" % e
mask = shufflez()
def random_websites():
count = random.randint(1,15)
for i, e, in zip(mask.randomize(10, 'site'), mask.randomize(10, 'user-agent')):
if count == random.randint(1,15):
break
else:
sleep_time = str(random.randint(1,5)) +'.'+ str(random.randint(1,9))
print 'VISITING', '\033[92m', i , '\033[0m', 'USING', '\033[94m', e, '\033[0m', 'SLEEPING FOR', '\033[95m', sleep_time, 'SECONDS', '\033[0m'
time.sleep(float(sleep_time))
mask.open_url(i, e)
print '\n'
def random_google():
count = random.randint(1,15)
for i in mask.randomize(10, 'word'):
if count == random.randint(1,15):
break
else:
sleep_time = str(random.randint(1,5)) +'.'+ str(random.randint(1,9))
print 'SEARCHING FOR', '\033[92m', i ,'\033[0m', 'SLEEPING FOR', '\033[95m', sleep_time, 'SECONDS', '\033[0m', '\n'
time.sleep(float(sleep_time))
mask.google(i)
#while True:
# try:
# mask.together(random_google(), random_websites())
# except KeyboardInterrupt:
# print 'Exit'
# break |
karllessard/tensorflow | tensorflow/python/saved_model/nested_structure_coder.py | Python | apache-2.0 | 17,543 | 0.006156 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that encodes (decodes) nested structures into (from) protos.
The intended use is to serialize everything ne | eded to restore a `Function` that
was saved into a SavedModel. This may include concrete function inputs and
outputs, signatures, function specs, etc.
Example use:
coder = nested_structure_coder.StructureCoder()
# Encode into proto.
signature | _proto = coder.encode_structure(function.input_signature)
# Decode into a Python object.
restored_signature = coder.decode_proto(signature_proto)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.distribute import values
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import row_partition
from tensorflow.python.util import compat
from tensorflow.python.util.compat import collections_abc
class NotEncodableError(Exception):
"""Error raised when a coder cannot encode an object."""
class StructureCoder(object):
"""Encoder and decoder for nested structures into protos."""
_codecs = []
@classmethod
def register_codec(cls, x):
cls._codecs.append(x)
@classmethod
def _get_encoders(cls):
return [(c.can_encode, c.do_encode) for c in cls._codecs]
@classmethod
def _get_decoders(cls):
return [(c.can_decode, c.do_decode) for c in cls._codecs]
def _map_structure(self, pyobj, coders):
for can, do in coders:
if can(pyobj):
recursion_fn = functools.partial(self._map_structure, coders=coders)
return do(pyobj, recursion_fn)
raise NotEncodableError(
"No encoder for object [%s] of type [%s]." % (str(pyobj), type(pyobj)))
def encode_structure(self, nested_structure):
"""Encodes nested structures composed of encodable types into a proto.
Args:
nested_structure: Structure to encode.
Returns:
Encoded proto.
Raises:
NotEncodableError: For values for which there are no encoders.
"""
return self._map_structure(nested_structure, self._get_encoders())
def can_encode(self, nested_structure):
"""Determines whether a nested structure can be encoded into a proto.
Args:
nested_structure: Structure to encode.
Returns:
True if the nested structured can be encoded.
"""
try:
self.encode_structure(nested_structure)
except NotEncodableError:
return False
return True
def decode_proto(self, proto):
"""Decodes proto representing a nested structure.
Args:
proto: Proto to decode.
Returns:
Decoded structure.
Raises:
NotEncodableError: For values for which there are no encoders.
"""
return self._map_structure(proto, self._get_decoders())
class _ListCodec(object):
"""Codec for lists."""
def can_encode(self, pyobj):
return isinstance(pyobj, list)
def do_encode(self, list_value, encode_fn):
encoded_list = struct_pb2.StructuredValue()
encoded_list.list_value.CopyFrom(struct_pb2.ListValue())
for element in list_value:
encoded_list.list_value.values.add().CopyFrom(encode_fn(element))
return encoded_list
def can_decode(self, value):
return value.HasField("list_value")
def do_decode(self, value, decode_fn):
return [decode_fn(element) for element in value.list_value.values]
StructureCoder.register_codec(_ListCodec())
def _is_tuple(obj):
return not _is_named_tuple(obj) and isinstance(obj, tuple)
def _is_named_tuple(instance):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
Returns:
True if `instance` is a `namedtuple`.
"""
if not isinstance(instance, tuple):
return False
return (hasattr(instance, "_fields") and
isinstance(instance._fields, collections_abc.Sequence) and
all(isinstance(f, six.string_types) for f in instance._fields))
class _TupleCodec(object):
"""Codec for tuples."""
def can_encode(self, pyobj):
return _is_tuple(pyobj)
def do_encode(self, tuple_value, encode_fn):
encoded_tuple = struct_pb2.StructuredValue()
encoded_tuple.tuple_value.CopyFrom(struct_pb2.TupleValue())
for element in tuple_value:
encoded_tuple.tuple_value.values.add().CopyFrom(encode_fn(element))
return encoded_tuple
def can_decode(self, value):
return value.HasField("tuple_value")
def do_decode(self, value, decode_fn):
return tuple(decode_fn(element) for element in value.tuple_value.values)
StructureCoder.register_codec(_TupleCodec())
class _DictCodec(object):
"""Codec for dicts."""
def can_encode(self, pyobj):
return isinstance(pyobj, dict)
def do_encode(self, dict_value, encode_fn):
encoded_dict = struct_pb2.StructuredValue()
encoded_dict.dict_value.CopyFrom(struct_pb2.DictValue())
for key, value in dict_value.items():
encoded_dict.dict_value.fields[key].CopyFrom(encode_fn(value))
return encoded_dict
def can_decode(self, value):
return value.HasField("dict_value")
def do_decode(self, value, decode_fn):
return {key: decode_fn(val) for key, val in value.dict_value.fields.items()}
StructureCoder.register_codec(_DictCodec())
class _NamedTupleCodec(object):
"""Codec for namedtuples.
Encoding and decoding a namedtuple reconstructs a namedtuple with a different
actual Python type, but with the same `typename` and `fields`.
"""
def can_encode(self, pyobj):
return _is_named_tuple(pyobj)
def do_encode(self, named_tuple_value, encode_fn):
encoded_named_tuple = struct_pb2.StructuredValue()
encoded_named_tuple.named_tuple_value.CopyFrom(struct_pb2.NamedTupleValue())
encoded_named_tuple.named_tuple_value.name = \
named_tuple_value.__class__.__name__
for key in named_tuple_value._fields:
pair = encoded_named_tuple.named_tuple_value.values.add()
pair.key = key
pair.value.CopyFrom(encode_fn(named_tuple_value._asdict()[key]))
return encoded_named_tuple
def can_decode(self, value):
return value.HasField("named_tuple_value")
def do_decode(self, value, decode_fn):
key_value_pairs = value.named_tuple_value.values
items = [(pair.key, decode_fn(pair.value)) for pair in key_value_pairs]
named_tuple_type = collections.namedtuple(value.named_tuple_value.name,
[item[0] for item in items])
return named_tuple_type(**dict(items))
StructureCoder.register_codec(_NamedTupleCodec())
class _Float64Codec(object):
"""Codec for floats."""
def can_encode(self, pyobj):
return isinstance(pyobj, float)
def do_encode(self, float64_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.float64_value = float64_value
|
kylexiaox/WechatWebShareJs | apiAccess.py | Python | apache-2.0 | 1,927 | 0.003114 | __author__ = 'kyle_xiao'
import tornado.httpclient
import urllib
import json
import hashlib
class AccessTicket(object):
    """Computes WeChat JS-SDK signatures for sharing a page URL.

    Construction fetches an access_token and a jsapi_ticket from the WeChat
    API (blocking network calls); `sign` then produces the SHA-1 signature
    for one concrete URL.

    Fixes: the `__init__` and `getAccessToken` definition lines were
    corrupted by stray ` | ` separators; `token == None` replaced by the
    `is None` identity test.
    """

    def __init__(self, timestamp, appId, key, nonceStr):
        """
        :param timestamp: unix timestamp embedded in the signature
        :param appId: WeChat application id
        :param key: WeChat application secret
        :param nonceStr: random nonce string embedded in the signature
        """
        self.appId = appId
        self.key = key
        # Fields participating in the signature; `url` is filled per call
        # by `sign`.
        self.ret = {
            'nonceStr': nonceStr,
            'jsapi_ticket': self.getTicket(),
            'timestamp': timestamp,
            'url': ""
        }

    def getAccessToken(self):
        """Fetch a WeChat access_token for this app (blocking HTTP call)."""
        # NOTE(review): urllib.urlencode is Python 2 API; under Python 3 this
        # would be urllib.parse.urlencode -- confirm target runtime.
        client = tornado.httpclient.HTTPClient()
        response = client.fetch("https://api.weixin.qq.com/cgi-bin/token?" +
                                urllib.urlencode(
                                    {"grant_type": "client_credential", "appid": self.appId, "secret": self.key}))
        body = json.loads(response.body)
        return body["access_token"]

    def getTicket(self, token=None):
        """Fetch the jsapi ticket, requesting an access token first if needed.

        :param token: optional pre-fetched access_token
        """
        if token is None:
            token = self.getAccessToken()
        client = tornado.httpclient.HTTPClient()
        response = client.fetch("https://api.weixin.qq.com/cgi-bin/ticket/getticket?" +
                                urllib.urlencode({"access_token": token, "type": "jsapi"}))
        body = json.loads(response.body)
        return body["ticket"]

    def sign(self, url):
        """Return the JS-SDK signature configuring `url` for WeChat share.

        :param url: full page URL being configured
        """
        self.ret["url"] = url
        # WeChat requires key=value pairs sorted by key, key names
        # lower-cased, joined with '&', then SHA-1 hashed.
        string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
        signature = hashlib.sha1(string.encode('ascii')).hexdigest()
        return signature
|
dchoruzy/python-stdnum | stdnum/iso7064/mod_11_10.py | Python | lgpl-2.1 | 2,129 | 0 | # mod_11_10.py - functions for performing the ISO 7064 Mod 11, 10 algorithm
#
# Copyright (C) 2010, 2011, 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""The ISO 7064 Mod 11, 10 algorithm.
The Mod 11, 10 algorithm uses a number of calculations modulo 11 and 10 to
determine a checksum.
For a module that can do generic Mod x+1, x calculations see the
:mod:`stdnum.iso7064.mod_37_36` module.
>>> calc_check_digit('79462')
'3'
>>> validate('794623')
'794623'
>>> calc_check_digit('00200667308')
'5'
>>> validate('002006673085')
'002006673085'
"""
from stdnum.exceptions impor | t *
def checksum(number):
    """Calculate the ISO 7064 Mod 11, 10 checksum.

    `number` is a string of digits; a valid number (including its check
    digit) yields a checksum of 1.
    """
    check = 5
    for n in number:
        # Hybrid step: substitute 10 for 0 before doubling (mod 11), then
        # add the next digit mod 10.
        check = (((check or 10) * 2) % 11 + int(n)) % 10
    return check


def calc_check_digit(number):
    """With the provided number, calculate the extra digit that should be
    appended to make it a valid number.

    (The `def` line of this function was corrupted by a stray ` | `
    separator in the source; reconstructed from its docstring usage.)
    """
    return str((1 - ((checksum(number) or 10) * 2) % 11) % 10)
def validate(number):
    """Checks whether the check digit is valid; returns `number` on success."""
    try:
        ok = checksum(number) == 1
    except Exception:
        # Anything that breaks the checksum loop means the format is wrong.
        raise InvalidFormat()
    if not ok:
        raise InvalidChecksum()
    return number


def is_valid(number):
    """Checks whether the check digit is valid, without raising."""
    try:
        validate(number)
    except ValidationError:
        return False
    return True
|
DIVERSIFY-project/SMART-GH | daemon-wservice/experiments/constants.py | Python | apache-2.0 | 2,244 | 0.028075 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
This module contains all the constants that our experiments need
"""
# Name -> (latitude, longitude) for the Dublin-area experiment locations.
# Fixes: the 'East Wall Road' key and the Baggot Street longitude were
# corrupted by stray ' | ' separators in the source.
GPS_LOCATIONS = {
    'Rathmines': (53.3265199, -6.2648571),
    'Santry': (53.3944773, -6.2468027),
    'Sandyford': (53.2698337, -6.2245713),
    'IKEA': (53.40741905, -6.27500737325097),
    'Monkstown': (53.2936533, -6.1539917),
    'Wadelai': (53.3867994, -6.2699323),
    'Dun Laoghaire': (53.2923448, -6.1360003),
    'Mountjoy Square': (53.3561389, -6.255421),
    'Sandymount': (53.3318342, -6.2153462),
    'Phoenix Park': (53.3588502, -6.33066374280722),
    'Ballycullen': (53.27358235, -6.32541157781442),
    'Clongriffin': (53.4026399, -6.151014),
    'Dundrum': (53.2891457, -6.2433756),
    'Beaumont': (53.3860585, -6.2329828),
    'Ballinteer': (53.2764266, -6.2528361),
    'Dublin Airport': (53.4286802, -6.25454977707029),
    'Limekiln': (53.3043734, -6.3326528),
    'Charlestown': (53.4039206, -6.3031688),
    'Citywest Road': (53.2770971, -6.4148524),
    'Ringsend': (53.3418611, -6.2267122),
    'Walkinstown Roundabout': (53.3200169, -6.3359576),
    'East Wall Road': (53.3551831, -6.229539),
    'Tallaght Village': (53.2896093, -6.3595578),
    'Clare Hall Estate': (53.3990349, -6.1625034),
    'Merrion Square': (53.3396823, -6.24916614558252),
    'Captain\'s Hill, Leixlip': (53.3697544, -6.4869624),
    'Baggot Street': (53.3329104, -6.2425717),
    'Blanchardstown': (53.3868998, -6.3775408),
    'Donnybrook': (53.3219341, -6.2361395),
    'Castleknock': (53.3729581, -6.3624744),
    'Stillorgan': (53.2888378, -6.198343),
    'Heuston': (53.34647135, -6.29405804549595)
}


def getLocation(constant):
    """Return the (lat, lon) pair for `constant`, or False if unknown.

    Bug fix: the original called GPS_LOCATIONS(constant, False) -- a dict is
    not callable, so every lookup raised TypeError. Use dict.get instead.
    """
    return GPS_LOCATIONS.get(constant, False)


def getAllLocations():
    """Return the full name -> (lat, lon) mapping."""
    return GPS_LOCATIONS
# Default request parameters for the local GraphHopper routing service.
APP_PARAMS = {
    'host_and_port': 'http://localhost:8080/',
    'service': 'restful-graphhopper-1.0/',
    'endpoint': 'route',
    'locale': 'en',
    'vehicle': 'car',
    'weighting': 'fastest',
}


def getAppParams(constant):
    """Return the value configured under `constant`, or False if absent."""
    return APP_PARAMS.get(constant, False)


def getAppConfig():
    """Return the whole application-parameter mapping."""
    return APP_PARAMS
|
winkidney/cmdtree | src/cmdtree/tests/functional/test_command.py | Python | mit | 1,660 | 0.000602 | import pytest
from cmdtree import INT, entry
from cmdtree import command, argument, option
@argument("host", help="server listen address")
@option("reload", is_flag=True, help="if auto-reload on")
@option("port", help="server port", type=INT, default=8888)
@command(help="run a http server on given address")
def run_server(host, reload, port):
return host, port, reload
@command(help="run a http server on given address")
@argument("host", help="server listen address")
@option("port", help="server port", type=INT, default=8888)
def order(port, host):
return host, port
def test_should_return_given_argument():
from cmdtree import entry
result = entry(
["run_server", "host", "--reload", "--port", "8888"]
)
assert result == ("host", 8888, True)
def test_should_reverse_decorator_order_has_no_side_effect():
from cmdtree import entry
result = entry(
["order", "host", "--port", "8888"]
)
assert result == ("host", 8888)
def test_should_option_order_not_cause_argument_miss():
    """Mixing @option before @argument must not drop the positional argument.

    (The `@option("kline")` decorator line was corrupted by a stray ` | `
    separator in the source; reconstructed from the `kline` parameter and
    the `--kline` flag used below.)
    """
    from cmdtree import entry

    @command("test_miss")
    @option("kline")
    @argument("script_path", help="file path of python _script")
    def run_test(script_path, kline):
        return script_path, kline

    assert entry(
        ["test_miss", "path", "--kline", "fake"]
    ) == ("path", "fake")
def test_should_double_option_order_do_not_cause_calling_error():
    """Two stacked @option decorators must still map flags to parameters.

    (The `def` keyword of this test was corrupted by a stray ` | `
    separator in the source.)
    """
    @command("test_order")
    @option("feed")
    @option("config", help="config file path for kline database")
    def hello(feed, config):
        return feed

    assert entry(
        ["test_order", "--feed", "fake"]
    ) == "fake"
PicOrb/docker-sensu-server | plugins/aux/check-boundary.py | Python | mit | 2,430 | 0.004527 | #!/usr/bin/python
from sensu_plugin import SensuPluginMetricJSON
import requests
#import os
import json
from sh import curl
from walrus import *
#from redis import *
import math
#from requests.auth import HTTPBasicAuth
import statsd
import warnings
from requests.packages.urllib3 import exceptions
# Shared clients: Redis-backed storage (walrus) and a statsd metrics sink.
db = Database(host='localhost', port=6379, db=0)
c = statsd.StatsClient('grafana', 8125)


class FooBarBazMetricJSON(SensuPluginMetricJSON):
    """Sensu metric plugin timing AIMS API endpoints and tracking percentiles.

    Each run authenticates, times one GET per endpoint, stores the sample in
    a Redis sorted set (a rotating window of up to 99 slots), forwards the
    timing to statsd, and recomputes latency percentiles from the samples.

    Fixes: the curl 'POST' argument and the `current is None or ...` test
    were corrupted by stray ` | ` separators in the source.
    """

    def run(self):
        endpoints = ['topology', 'remediations']
        positions = [30, 50, 99]  # percentiles to publish
        api = 'ecepeda-api.route105.net'
        # Authenticate and pull the session token out of the JSON response.
        token_curl = curl('https://{0}/aims/v1/authenticate'.format(api), '-s', '-k', '-X', 'POST', '-H', 'Accept: application/json', '--user', '2A6B0U16535H6X0D5822:$2a$12$WB8KmRcUnGpf1M6oEdLBe.GrfBEaa94U4QMBTPMuVWktWZf91AJk')
        headers = {'X-Iam-Auth-Token': json.loads(str(token_curl))['authentication']['token'], 'X-Request-Id': 'DEADBEEF'}
        for endpoint in endpoints:
            a = db.ZSet('measures_{0}'.format(endpoint))
            percentiles = db.Hash('percentiles_{0}'.format(endpoint))
            # `current` is a rotating slot index in [1, 99]; wrap after 99.
            current = percentiles['current']
            if current is None or int(current) > 99:
                current = 1
            url = 'https://{0}/assets/v1/67000001/environments/814C2911-09BB-1005-9916-7831C1BAC182/{1}'.format(api, endpoint)
            with warnings.catch_warnings():
                # Self-signed certificate on the endpoint; silence urllib3.
                warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
                r = requests.get(url, headers=headers, verify=False)
            # Overwrite this slot with the newest latency sample (microseconds).
            a.remove(current)
            a.add(current, r.elapsed.microseconds)
            c.timing(endpoint, int(r.elapsed.microseconds)/1000)
            # Drain the sorted set through its iterator.
            # NOTE(review): the bare except relies on StopIteration (Python 2
            # `iterator.next()`) to end the loop but also hides real errors.
            iterate = True
            elements = []
            iterator = a.__iter__()
            while iterate:
                try:
                    elem = iterator.next()
                    elements.append({'position': elem[0], 'time': elem[1]})
                except:
                    iterate = False
            if len(elements) > 0:
                for percentile in positions:
                    # Index of the requested percentile within the samples.
                    position = (percentile*.01) * len(elements) - 1
                    percentiles[percentile] = elements[int(math.ceil(position))]
            percentiles['current'] = int(current) + 1
            self.output(str(percentiles))
        self.warning(str(endpoints))


if __name__ == "__main__":
    f = FooBarBazMetricJSON()
|
xpharry/Udacity-DLFoudation | tutorials/reinforcement/gym/gym/scoreboard/client/__init__.py | Python | mit | 86 | 0 | import logging
import logging
import os

# Fix: the import line was corrupted by stray ' | ' separators
# ("from gy | m import er | ror").
from gym import error

# Module-level logger named after this package.
logger = logging.getLogger(__name__)
|
piantado/LOTlib | LOTlib/Legacy/Visualization/Stringification.py | Python | gpl-3.0 | 6,858 | 0.006707 | """
Functions for mappings FunctionNodes to strings
"""
from LOTlib.FunctionNode import isFunctionNode, BVUseFunctionNode, BVAddFunctionNode
import re
percent_s_regex = re.compile(r"%s")
def schemestring(x, d=0, bv_names=None):
    """Outputs a scheme string in (lambda (x) (+ x 3)) format.

    Arguments:
        x: We return the string for this FunctionNode (strings pass through).
        d: Current depth, passed down while recursing into arguments.
        bv_names: A dictionary from the uuids to nicer names.

    Fixes: `assert name is 'lambda'` compared string identity, which is
    unreliable for literals; an unreachable duplicated `x.args is None`
    branch was removed.
    """
    if isinstance(x, str):
        return x
    elif isFunctionNode(x):
        if bv_names is None:
            bv_names = dict()
        name = x.name
        if isinstance(x, BVUseFunctionNode):
            # Bound-variable uses print under their assigned nice name.
            name = bv_names.get(x.name, x.name)
        if x.args is None:
            return name
        elif isinstance(x, BVAddFunctionNode):
            assert name == 'lambda'
            # NOTE(review): map() renders as a list under Python 2; under
            # Python 3 '%s' would print a map object -- confirm Py2 target.
            return "(%s (%s) %s)" % (name, x.added_rule.name,
                                     map(lambda a: schemestring(a, d+1, bv_names=bv_names), x.args))
        else:
            return "(%s %s)" % (name, map(lambda a: schemestring(a, d+1, bv_names=bv_names), x.args))
def fullstring(x, d=0, bv_names=None):
    """
    A string mapping function that is for equality checking. This is necessary because pystring silently ignores
    FunctionNode.names that are ''. Here, we print out everything, including returntypes
    :param x: string or FunctionNode to render (strings pass through)
    :param d: current depth; used to generate bound-variable names
    :param bv_names: dictionary from bound-variable uuids to nicer names
    :return: the full string form, or None for other input types
    """
    if isinstance(x, str):
        return x
    elif isFunctionNode(x):
        if bv_names is None:
            bv_names = dict()
        if x.name == 'lambda':
            # On a lambda, we must add the introduced bv, and then remove it
            # again afterwards so sibling subtrees do not see it.
            bvn = ''
            if isinstance(x, BVAddFunctionNode) and x.added_rule is not None:
                bvn = x.added_rule.bv_prefix+str(d)
                bv_names[x.added_rule.name] = bvn
            assert len(x.args) == 1
            ret = 'lambda<%s> %s: %s' % ( x.returntype, bvn, fullstring(x.args[0], d=d+1, bv_names=bv_names) )
            if isinstance(x, BVAddFunctionNode) and x.added_rule is not None:
                try:
                    del bv_names[x.added_rule.name]
                except KeyError:
                    # Dump the node for debugging if the binding vanished.
                    x.fullprint()
            return ret
        else:
            name = x.name
            if isinstance(x, BVUseFunctionNode):
                # Bound-variable uses render under their assigned nice name.
                name = bv_names.get(x.name, x.name)
            if x.args is None:
                return "%s<%s>"%(name, x.returntype)
            else:
                return "%s<%s>(%s)" % (name,
                                       x.returntype,
                                       ', '.join(map(lambda a: fullstring(a, d=d+1, bv_names=bv_names), x.args)))
def pystring(x, d=0, bv_names=None):
    """Output a string that can be evaluated by python; gives bound variables names based on their depth.

    Args:
        x: string or FunctionNode to render (strings pass through).
        d: current depth; used to generate bound-variable names.
        bv_names: A dictionary from the uuids to nicer names.
    """
    if isinstance(x, str):
        return x
    elif isFunctionNode(x):
        if bv_names is None:
            bv_names = dict()
        if x.name == "if_": # this gets translated
            assert len(x.args) == 3, "if_ requires 3 arguments!"
            # This converts from scheme (if bool s t) to python (s if bool else t)
            b = pystring(x.args[0], d=d+1, bv_names=bv_names)
            s = pystring(x.args[1], d=d+1, bv_names=bv_names)
            t = pystring(x.args[2], d=d+1, bv_names=bv_names)
            return '( %s if %s else %s )' % (s, b, t)
        elif x.name == '':
            # Nameless wrapper nodes render as their single child.
            assert len(x.args) == 1, "Null names must have exactly 1 argument"
            return pystring(x.args[0], d=d, bv_names=bv_names)
        elif x.name == ',': # comma join
            return ', '.join(map(lambda a: pystring(a, d=d, bv_names=bv_names), x.args))
        elif x.name == "apply_":
            assert x.args is not None and len(x.args)==2, "Apply requires exactly 2 arguments"
            #print ">>>>", self.args
            return '( %s )( %s )' % tuple(map(lambda a: pystring(a, d=d, bv_names=bv_names), x.args))
        elif x.name == 'lambda':
            # On a lambda, we must add the introduced bv, and then remove it again afterwards
            bvn = ''
            if isinstance(x, BVAddFunctionNode) and x.added_rule is not None:
                bvn = x.added_rule.bv_prefix+str(d)
                bv_names[x.added_rule.name] = bvn
            assert len(x.args) == 1
            ret = 'lambda %s: %s' % ( bvn, pystring(x.args[0], d=d+1, bv_names=bv_names) )
            if isinstance(x, BVAddFunctionNode) and x.added_rule is not None:
                try:
                    del bv_names[x.added_rule.name]
                except KeyError:
                    # Dump the node for debugging if the binding vanished.
                    x.fullprint()
            return ret
        elif percent_s_regex.search(x.name): # If we match the python string substitution character %s, then format
            return x.name % tuple(map(lambda a: pystring(a, d=d+1, bv_names=bv_names), x.args))
        else:
            name = x.name
            if isinstance(x, BVUseFunctionNode):
                # Bound-variable uses render under their assigned nice name.
                name = bv_names.get(x.name, x.name)
            if x.args is None:
                return name
            else:
                return name+'('+', '.join(map(lambda a: pystring(a, d=d+1, bv_names=bv_names), x.args))+')'
def lambdastring(fn, d=0, bv_names=None):
    """
    A nicer printer for pure lambda calculus. This can use unicode for lambdas.

    :param fn: FunctionNode to render (None passes through as None)
    :param d: current depth; used to generate bound-variable names
    :param bv_names: dictionary from bound-variable uuids to nicer names

    Bug fix: the final guard called `percent_s_regex(fn.name)` -- a compiled
    regex Pattern is not callable, so that path raised TypeError. It now
    uses `percent_s_regex.search(...)` like pystring does.
    """
    if bv_names is None:
        bv_names = dict()
    if fn is None:  # just pass these through -- simplifies a lot
        return None
    elif fn.name == '':
        assert len(fn.args) == 1
        # NOTE(review): this recursion drops `d` and `bv_names`; a '' node
        # nested inside a lambda would lose its bindings -- confirm intended.
        return lambdastring(fn.args[0])
    elif isinstance(fn, BVAddFunctionNode):
        assert len(fn.args) == 1 and fn.name == 'lambda'
        # NOTE(review): `bvn` is unbound when added_rule is None -- the
        # original had the same hazard.
        if fn.added_rule is not None:
            bvn = fn.added_rule.bv_prefix + str(d)
            bv_names[fn.added_rule.name] = bvn
        return u"\u03BB%s.%s" % (bvn, lambdastring(fn.args[0], d=d+1, bv_names=bv_names))  # unicode version with lambda
        #return "L%s.%s" % (bvn, lambda_str(fn.args[0], d=d+1, bv_names=bv_names))
    elif fn.name == 'apply_':
        assert len(fn.args) == 2
        if fn.args[0].name == 'lambda':
            # Parenthesize an immediately-applied lambda.
            return "((%s)(%s))" % tuple(map(lambda a: lambdastring(a, d=d+1, bv_names=bv_names), fn.args))
        else:
            return "(%s(%s))" % tuple(map(lambda a: lambdastring(a, d=d+1, bv_names=bv_names), fn.args))
    elif isinstance(fn, BVUseFunctionNode):
        assert fn.args is None
        return bv_names[fn.name]
    else:
        assert fn.args is None
        assert not percent_s_regex.search(fn.name), "*** String formatting not yet supported for lambdastring"
        return str(fn.name)
sbesson/PyGithub | github/Topic.py | Python | lgpl-3.0 | 5,670 | 0.004938 | ############################ Copyrights and license ############################
# #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class Topic(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents topics as used by https://github.com/topics. The object reference can be found here https://docs.github.com/en/rest/reference/search#search-topics
    """

    def __repr__(self):
        # A topic is identified by its `name` in debug output.
        return self.get__repr__({"name": self._name.value})

    @property
    def name(self):
        """
        :type: string
        """
        return self._name.value

    @property
    def display_name(self):
        """
        :type: string
        """
        return self._display_name.value

    @property
    def short_description(self):
        """
        :type: string
        """
        return self._short_description.value

    @property
    def description(self):
        """
        :type: string
        """
        return self._description.value

    @property
    def created_by(self):
        """
        :type: string
        """
        return self._created_by.value

    @property
    def released(self):
        """
        :type: string
        """
        return self._released.value

    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        return self._created_at.value

    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        return self._updated_at.value

    @property
    def featured(self):
        """
        :type: bool
        """
        return self._featured.value

    @property
    def curated(self):
        """
        :type: bool
        """
        return self._curated.value

    @property
    def score(self):
        """
        :type: float
        """
        return self._score.value

    def _initAttributes(self):
        # Reset every attribute to the NotSet sentinel before loading data.
        self._name = github.GithubObject.NotSet
        self._display_name = github.GithubObject.NotSet
        self._short_description = github.GithubObject.NotSet
        self._description = github.GithubObject.NotSet
        self._created_by = github.GithubObject.NotSet
        self._released = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._featured = github.GithubObject.NotSet
        self._curated = github.GithubObject.NotSet
        self._score = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        # Copy each recognized key of the raw API payload into its typed
        # attribute wrapper; keys absent from the payload stay NotSet.
        if "name" in attributes:  # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "display_name" in attributes:  # pragma no branch
            self._display_name = self._makeStringAttribute(attributes["display_name"])
        if "short_description" in attributes:  # pragma no branch
            self._short_description = self._makeStringAttribute(
                attributes["short_description"]
            )
        if "description" in attributes:  # pragma no branch
            self._description = self._makeStringAttribute(attributes["description"])
        if "created_by" in attributes:  # pragma no branch
            self._created_by = self._makeStringAttribute(attributes["created_by"])
        if "released" in attributes:  # pragma no branch
            self._released = self._makeStringAttribute(attributes["released"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "featured" in attributes:  # pragma no branch
            self._featured = self._makeBoolAttribute(attributes["featured"])
        if "curated" in attributes:  # pragma no branch
            self._curated = self._makeBoolAttribute(attributes["curated"])
        if "score" in attributes:  # pragma no branch
            self._score = self._makeFloatAttribute(attributes["score"])
|
Senbjorn/mipt_lab_2016 | contest_7/task_2.py | Python | gpl-3.0 | 71 | 0.028169 | #task_2
# task_2: read a non-negative integer from stdin and print the number of
# 1-bits in its binary representation (digit sum of bin(a)[2:]).
# (Both lines were corrupted by stray ' | ' separators in the source.)
a = int(input())
print(sum(int(digit) for digit in bin(a)[2:]))
Haunter17/MIR_SU17 | exp3/exp3e/exp3e.py | Python | mit | 24,990 | 0.023729 | import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
# Functions for initializing neural nets parameters
def init_weight_variable(shape, nameIn):
    """Create a trainable weight tensor drawn from a truncated normal
    (stddev 0.1, float32), named `nameIn`."""
    return tf.Variable(
        tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32), name=nameIn)


def init_bias_variable(shape, nameIn):
    """Create a trainable bias tensor filled with 0.1, named `nameIn`."""
    return tf.Variable(
        tf.constant(0.1, shape=shape, dtype=tf.float32), name=nameIn)


def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`: unit strides, no padding."""
    return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def loadData(filepath):
    '''
    Load and return four variables from the file with path filepath
    X_train: input data for training
    y_train: labels for X_train
    X_val: input data for validation
    y_val: labels for X_val
    '''
    print('==> Experiment 2l')
    print('==> Loading data from {}'.format(filepath))
    # benchmark
    t_start = time.time()
    # reading data
    # NOTE(review): the HDF5 file is never closed explicitly; `del f` below
    # only drops the reference -- consider a `with` block.
    f = h5py.File(filepath)
    X_train = np.array(f.get('trainingFeatures'))
    y_train = np.array(f.get('trainingLabels'))
    X_val = np.array(f.get('validationFeatures'))
    y_val = np.array(f.get('validationLabels'))
    t_end = time.time()
    print('--Time elapsed for loading data: {t:.2f} \
        seconds'.format(t = t_end - t_start))
    del f
    print('-- Number of training samples: {}'.format(X_train.shape[0]))
    print('-- Number of validation samples: {}'.format(X_val.shape[0]))
    print('Shape of X_train: %s'%str(X_train.shape))
    print('Shape of y_train: %s'%str(y_train.shape))
    print('Shape of X_val: %s'%str(X_val.shape))
    print('Shape of y_val: %s'%str(y_val.shape))
    return [X_train, y_train, X_val, y_val]
#self, X_train, y_train, X_val, y_val, num_freq, filter_row, filter_col, k1, k2, learningRate, pooling_strategy):
# set up property that makes it only be set once
# we'll use this to avoid adding tensors to the graph multiple times
import functools
def lazy_property(function):
    """Property that computes `function` once per instance and caches it.

    The first access stores the result on the instance under a private
    '_cache_<name>' attribute; later accesses return the cached value
    without re-running `function` (used here to avoid adding the same
    tensors to the graph twice).
    """
    cache_name = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return wrapper
class Model:
    def __init__(self, num_freq, X_train, y_train, X_val, y_val, filter_row, filter_col, k1, num_hidden_nodes, learningRate, debug):
        '''
        Initializer for the model.

        num_freq: number of CQT frequency bins per frame
        X_train/y_train, X_val/y_val: data matrices and label vectors
        filter_row, filter_col: spatial size of the convolution kernel
        k1: number of convolution filters
        num_hidden_nodes: width of the fully-connected hidden layer
        learningRate: Adam learning rate
        debug: when True, also builds gradient-inspection tensors
        '''
        # store the data
        self.X_train, self.y_train, self.X_val, self.y_val = X_train, y_train, X_val, y_val
        # store the parameters sent to init that define our model
        self.num_freq, self.filter_row, self.filter_col, self.k1, self.num_hidden_nodes, self.learningRate, self.debug = num_freq, filter_row, filter_col, k1, num_hidden_nodes, learningRate, debug
        # find num_training_vec, total_features, num_frames, num_classes, and l from the shape of the data
        # and store them
        self.storeParamsFromData()
        # Set-up and store the input and output placeholders
        x = tf.placeholder(tf.float32, [None, self.total_features])
        y_ = tf.placeholder(tf.float32, [None, self.num_classes])
        self.x = x
        self.y_ = y_
        # Setup and store tensor that performs the one-hot encoding
        y_train_OHEnc = tf.one_hot(self.y_train.copy(), self.num_classes)
        y_val_OHEnc = tf.one_hot(self.y_val.copy(), self.num_classes)
        self.y_train_OHEnc = y_train_OHEnc
        self.y_val_OHEnc = y_val_OHEnc
        # create each lazy_property:
        # each lazy_property access builds its tensors into the graph once
        self.y_conv
        self.cross_entropy
        self.train_step
        self.accuracy
        # properties for use in debugging
        if self.debug:
            self.grads_and_vars
        # print to the user that the network has been set up, along with its properties
        print("Setting up Single Conv Layer Neural net with %g x %g filters, k1 = %g, followed by hidden layer with %g nodes, learningRate = %g"%(filter_row, filter_col, k1, num_hidden_nodes, learningRate))

    def storeParamsFromData(self):
        '''
        Calculate and store parameters from the raw data

        total_features: The number of CQT coefficients total (includes all context frames)
        num_training_vec: The number of training examples in your dataset
        num_frames: The number of context frames in each training example (total_features / num_freq)
        num_classes: The number of songs we're distinguishing between in our output
        l: The length of our second convolutional kernel - for now, its equal to num_frames
        '''
        # Neural-network model set-up
        # calculating some values which will be nice as we set up the model
        num_training_vec, total_features = self.X_train.shape
        num_frames = int(total_features / self.num_freq)
        print('-- Num frames: {}'.format(num_frames))
        # labels are assumed to be consecutive integers starting at 0
        num_classes = int(max(self.y_train.max(), self.y_val.max()) + 1)
        l = num_frames
        # store what will be helpful later
        self.total_features = total_features
        self.num_training_vec = num_training_vec
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.l = l
@lazy_property
def y_conv(self):
# reshape the input into the form of a spectrograph
x_image = tf.reshape(self.x, [-1, self.num_freq, self.num_frames, 1])
x_image = tf.identity(x_image, name="x_image")
# first convolutional layer parameters
self.W_conv1 = init | _weight_variable([self | .filter_row, self.filter_col, 1, self.k1], "W_conv1")
self.b_conv1 = init_bias_variable([self.k1], "b_conv1")
# tensor that computes the output of the first convolutional layer
h_conv1 = tf.nn.relu(conv2d(x_image, self.W_conv1) + self.b_conv1)
h_conv1 = tf.identity(h_conv1, name="h_conv_1")
# flatten out the output of the first convolutional layer to pass to the softmax layer
h_conv1_flat = tf.reshape(h_conv1, [-1, (self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1])
h_conv1_flat = tf.identity(h_conv1_flat, name="h_conv1_flat")
# go through a hidden layer
self.W_1 = init_weight_variable([(self.num_freq - self.filter_row + 1) * (self.num_frames - self.filter_col + 1) * self.k1, self.num_hidden_nodes], "W1")
self.b_1 = init_bias_variable([self.num_hidden_nodes], "b_1")
a_1 = tf.nn.relu(tf.matmul(h_conv1_flat, self.W_1) + self.b_1)
# softmax layer parameters
self.W_sm = init_weight_variable([self.num_hidden_nodes, self.num_classes], "W_sm")
self.b_sm = init_bias_variable([self.num_classes], "b_sm")
# the output of the layer - un-normalized and without a non-linearity
# since cross_entropy_with_logits takes care of that
y_conv = tf.matmul(a_1, self.W_sm) + self.b_sm
y_conv = tf.identity(y_conv, name="y_conv")
return y_conv # would want to softmax it to get an actual prediction
@lazy_property
def cross_entropy(self):
'''
Create a tensor that computes the cross entropy cost
Use the placeholder y_ as the labels, with input y_conv
Note that softmax_cross_entropy_with_logits takes care of normalizing
y_conv to make it a probability distribution
This tensor can be accessed using: self.cross_entropy
'''
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
cross_entropy = tf.identity(cross_entropy, name="cross_entropy")
return cross_entropy
@lazy_property
def optimizer(self):
'''
Create a tensor that represents the optimizer. This tensor can
be accessed using: self.optimizer
'''
optimizer = tf.train.AdamOptimizer(learning_rate = self.learningRate)
return optimizer
@lazy_property
def train_step(self):
'''
Creates a tensor that represents a single training step. This tensor
can be passed a feed_dict that has x and y_, and it will compute the gradients
and perform a single step.
This tensor can be accessed using: self.train_step
'''
return self.optimizer.minimize(self.cross_entropy)
@lazy_property
def accuracy(self):
'''
Create a tensor that computes the accuracy, using the placeholder y_ as the labeled data
and y_conv for the predictions of the network.
This tensor can be accessed using: self.accuracy
'''
correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
'''
Properties that we'll use for debugging |
{# Renders a Python expression checking every element of a list; the include
   path and the {{ variable }} reference were corrupted by stray ' | '
   separators (the |add: pipes are legitimate Django filter syntax). #}
all( {% include "LinkedInObject.IsInstance.py" with variable="element" type=type.name|add:"."|add:type.name only %} for element in {{ variable }} )
jorisvandenbossche/pandas | pandas/core/frame.py | Python | bsd-3-clause | 376,408 | 0.00059 | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
import functools
from io import StringIO
import itertools
import mmap
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Hashable,
Iterable,
Iterator,
Literal,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
lib,
properties,
)
from pandas._libs.hashtable import duplicated
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
AnyArrayLike,
ArrayLike,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
Dtype,
DtypeObj,
FilePathOrBuffer,
FillnaOptions,
FloatFormatType,
FormattersType,
Frequency,
IndexKeyFunc,
IndexLabel,
Level,
PythonFuncType,
Renamer,
Scalar,
StorageOptions,
Suffixes,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_ascending,
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_2d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_native,
maybe_downcast_to_dtype,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
infer_dtype_from_object,
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
pandas_dtype,
)
from pandas.core.dtypes.d | types import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
common as com,
generic,
nanops,
ops,
)
from pandas.core.accessor import CachedAccessor
from pandas.core.apply import (
reconstruct_ | func,
relabel_result,
)
from pandas.core.array_algos.take import take_2d_multi
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import (
extract_array,
sanitize_array,
sanitize_masked_array,
)
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.indexers import check_key_length
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
default_index,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import (
MultiIndex,
maybe_droplevels,
)
from pandas.core.indexing import (
check_bool_indexer,
convert_to_index_sliceable,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
)
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
dict_to_mgr,
mgr_to_mgr,
ndarray_to_mgr,
nested_data_to_arrays,
rec_array_to_mgr,
reorder_arrays,
to_arrays,
treat_as_nested,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.sorting import (
get_group_index,
lexsort_indexer,
nargsort,
)
from pandas.io.common import get_handle
from pandas.io.formats import (
console,
format as fmt,
)
from pandas.io.formats.info import (
BaseInfo,
DataFrameInfo,
)
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.resample import Resampler
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"inplace": """
inplace : bool, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
_numeric_only_doc = """numeric_only : bool or None, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
A named Series object is treated as a DataFrame with a single named column.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
.. warning::
If both key columns contain rows where the key is a null value, those
rows will be matched against each other. This is different from usual SQL
join behaviour and can lead to unexpected results.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or li |
Morgan-Stanley/treadmill | lib/python/treadmill/sproc/alert_monitor.py | Python | apache-2.0 | 4,428 | 0 | """Process alerts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import logging
import os
import os.path
import click
from treadmill import alert
from treadmill import appenv
from treadmill import dirwatch
from treadmill import fs
from treadmill import plugin_manager
_LOGGER = logging.getLogger(__name__)
_DEF_WAITING_PERIOD = 120
_DEF_MAX_QUEUE_LEN = 100
class _NoOpBackend:
    """Dummy default alert backend used when no plugin can be loaded.

    Mimics the plugin interface: alerts are only written to the log,
    never delivered anywhere.
    """
    @staticmethod
    def send_event(type_=None,
                   instanceid=None,
                   summary=None,
                   event_time=None,
                   on_success_callback=None,
                   **kwargs):
        """Log the alert described by the parameters.

        :param on_success_callback: invoked after logging so the caller
            treats the alert as delivered (the dirwatch handler uses this
            to remove the alert file).
        """
        # pylint: disable=unused-argument
        _LOGGER.critical(
            'Alert raised: %s:%s:\n %s %s', type_, instanceid, summary, kwargs
        )
        # Report success so the queued alert file gets cleaned up.
        if on_success_callback is not None:
            on_success_callback()
def _get_on_create_handler(alert_backend):
    """Build a dirwatch callback that forwards new alert files to *alert_backend*."""
    def _on_created(path):
        """Read the alert file at *path* and hand its contents to the backend."""
        # Dot-files are temporary files still being written - ignore them.
        if os.path.basename(path)[0] == '.':
            return None
        try:
            payload = alert.read(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # The file vanished before we could read it - nothing to send.
            return None
        # The alert file is removed only once the backend confirms delivery.
        return alert_backend.send_event(
            on_success_callback=lambda: fs.rm_safe(path),
            **payload
        )
    return _on_created
def _load_alert_backend(plugin_name):
    """Resolve the alert backend plugin, falling back to a no-op logger."""
    if plugin_name is None:
        return _NoOpBackend()
    try:
        return plugin_manager.load('treadmill.alert.plugins', plugin_name)
    except KeyError:
        # Keep the daemon running even with a misconfigured plugin name.
        _LOGGER.info('Alert backend %r could not been loaded.', plugin_name)
        return _NoOpBackend()
def _serve_forever(watcher, alerts_dir, max_queue_length, wait_interval):
    """Wait for and handle events until the end of time.

    Each iteration first retries the alerts already queued in *alerts_dir*,
    then waits up to *wait_interval* seconds for new filesystem events, and
    finally trims the queue down to *max_queue_length* files.
    """
    while True:
        # Retry alerts that failed to send (or arrived while not watching).
        _process_existing_alerts(alerts_dir, watcher.on_created)
        if watcher.wait_for_events(timeout=wait_interval):
            watcher.process_events()
        # Bound the backlog so the queue cannot grow without limit.
        _remove_extra_alerts(alerts_dir, max_queue_length)
def _process_existing_alerts(alerts_dir, process_func):
"""Retry sending the alerts in alerts_dir.
"""
for alert_file in os.listdir(alerts_dir):
process_func(os.path.join(alerts_dir, alert_file))
def _remove_extra_alerts(alerts_dir, max_queue_length=0):
    """Delete the oldest queued alerts, keeping at most *max_queue_length* files."""
    queued = sorted(os.listdir(alerts_dir))
    # Alert files sort by name, so the tail of the sorted list holds the
    # most recent entries.  A limit of 0 keeps nothing: every entry is a
    # removal candidate.
    extra = queued if max_queue_length == 0 else queued[:-max_queue_length]
    for name in extra:
        # Dot-files are in-flight temporary files - leave them alone.
        if name[0] != '.':
            fs.rm_safe(os.path.join(alerts_dir, name))
def init():
    """App main.

    Build the ``alert_monitor`` click command: it watches the cell's alerts
    directory and publishes each alert through the configured backend plugin.
    """
    @click.command(name='alert_monitor')
    @click.option(
        '--approot',
        type=click.Path(exists=True),
        envvar='TREADMILL_APPROOT',
        required=True
    )
    @click.option('--plugin', help='Alert backend to use', required=False)
    @click.option(
        '--max-queue-length',
        help='Keep at most that many files in alerts directory'
        ', default: {}'.format(_DEF_MAX_QUEUE_LEN),
        type=int,
        default=_DEF_MAX_QUEUE_LEN
    )
    @click.option(
        '--wait-interval',
        help='Time to wait between WT alerting retry attempts (sec)'
        ', default: {}'.format(_DEF_WAITING_PERIOD),
        type=int,
        default=_DEF_WAITING_PERIOD
    )
    def alert_monitor_cmd(approot, plugin, max_queue_length, wait_interval):
        """Publish alerts.
        """
        tm_env = appenv.AppEnvironment(root=approot)
        watcher = dirwatch.DirWatcher(tm_env.alerts_dir)
        # Forward each newly created alert file to the backend; the handler
        # removes the file once the backend reports success.
        watcher.on_created = _get_on_create_handler(
            _load_alert_backend(plugin)
        )
        _serve_forever(
            watcher, tm_env.alerts_dir, max_queue_length, wait_interval
        )
    return alert_monitor_cmd
jordanemedlock/psychtruths | temboo/core/Library/Amazon/S3/GetBucketLocation.py | Python | apache-2.0 | 4,330 | 0.005543 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetBucketLocation
# Returns the Region where the bucket is stored.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetBucketLocation(Choreography):
    """Choreo wrapper for the Amazon S3 GetBucketLocation operation,
    which returns the Region where a bucket is stored.
    """
    def __init__(self, temboo_session):
        """Create a new instance of the GetBucketLocation Choreo.

        *temboo_session* must be a TembooSession object holding a valid
        set of Temboo credentials.
        """
        super(GetBucketLocation, self).__init__(
            temboo_session, '/Library/Amazon/S3/GetBucketLocation')
    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return GetBucketLocationInputSet()
    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the Choreo-specific result set."""
        return GetBucketLocationResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        """Build the object used to track a running execution."""
        return GetBucketLocationChoreographyExecution(session, exec_id, path)
class GetBucketLocationInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetBucketLocation
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        super(GetBucketLocationInputSet, self)._set_input('AWSAccessKeyId', value)
    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        super(GetBucketLocationInputSet, self)._set_input('AWSSecretKeyId', value)
    def set_BucketName(self, value):
        """
        Set the value of the BucketName input for this Choreo. ((required, string) The name of the bucket associated with the location you want to retrieve.)
        """
        super(GetBucketLocationInputSet, self)._set_input('BucketName', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
        """
        super(GetBucketLocationInputSet, self)._set_input('ResponseFormat', value)
class GetBucketLocationResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetBucketLocation Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON response string into Python objects.
        # NOTE(review): the parameter name shadows the builtin ``str``; kept
        # as-is for compatibility with the generated Temboo API surface.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response', None)
    def get_LocationConstraint(self):
        """
        Retrieve the value for the "LocationConstraint" output from this Choreo execution. ((string) The Region returned by the choreo. Valid values: blank (Default US Classic Region AKA us-east-1), EU (AKA eu-west-1), us-west-1, us-west-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, sa-east-1.)
        """
        return self._output.get('LocationConstraint', None)
class GetBucketLocationChoreographyExecution(ChoreographyExecution):
    """Tracks a running execution of the GetBucketLocation Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw execution response in the Choreo-specific result set.
        return GetBucketLocationResultSet(response, path)
|
IgnitionProject/ignition | demo/dsl/riemann/euler.py | Python | bsd-3-clause | 356 | 0.002809 | from ignit | ion.dsl.riemann import *
# Demo: describe the 1-D Euler equations with the ignition Riemann DSL and
# generate a numerical solver kernel into ``euler_kernel.py``.
q = Conserved('q')
# Conserved fields: density, momentum and total energy.
rho, rhou, E = q.fields(['rho', 'rhou', 'E'])
# Velocity recovered from the conserved variables.
u = rhou / rho
gamma = Constant('gamma')
# Pressure closure.
# NOTE(review): the ideal-gas law is usually P = (gamma - 1)*(E - rho*u^2/2);
# confirm whether ``gamma`` here is meant to stand for (gamma - 1).
P = gamma * (E - .5 * u * rhou)
# Flux vector of the Euler system: [rho*u, P + rho*u^2, u*(E + P)].
f = [rhou,
P + u * rhou,
u * (E + P)]
# Older one-shot API, superseded by the Generator object below:
# generate(f, q, "euler_kernel.py")
G = Generator()
G.flux = f
G.conserved = q
# Eigen-decomposition of the flux Jacobian is computed numerically.
G.eig_method = "numerical"
G.write("euler_kernel.py")
cydenix/OpenGLCffi | OpenGLCffi/GL/EXT/SUN/mesh_array.py | Python | mit | 153 | 0.019608 | from OpenGLCffi.GL import params
@params(api='gl', prms=['mode', 'first', 'count', 'width'])
def glDrawMeshArraysSUN(mode, first, count, width):
    """Stub for the SUN_mesh_array extension entry point ``glDrawMeshArraysSUN``.

    The ``@params`` decorator performs the actual FFI dispatch; the body is
    intentionally empty.
    """
    pass
| |
kytos/python-openflow | tests/unit/v0x01/test_symmetric/test_hello.py | Python | mit | 506 | 0 | """Hello message tests."""
from pyof.v0x01.symmetric.hello import Hello
fro | m tests.unit.test_struct import TestStruct
class TestHello(TestStruct):
    """Hello message tests (also those in :class:`.TestDump`)."""
    @classmethod
    def setUpClass(cls):
        """Configure raw file and its object in parent class (TestDump)."""
        super().setUpClass()
        super().set_raw_dump_file('v0x01', 'ofpt_hello')
        # OFPT_HELLO with transaction id 1; the OpenFlow header is 8 bytes,
        # which is also the message's minimum size.
        super().set_raw_dump_object(Hello, xid=1)
        super().set_minimum_size(8)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.