| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
fqc/django_rest_test | django_rest_test/wsgi.py | Python | mit | 1,449 | 0.00069 |
"""
WSGI config for django_rest_test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
imp
|
ort os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "django_rest_test.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_rest_test.settings")
# This ap
|
plication object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
mick-d/nipype | nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py | Python | bsd-3-clause | 1,674 | 0.0227 |
# AUTO-GENERATED by tools/checkspec
|
s.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import MeasureImageSimilarity
def test_MeasureImageSimilarity_inputs():
input_map = dict(args=dict(argstr='%s',
),
dimension=dict(argstr
|
='--dimensionality %d',
position=1,
),
environ=dict(nohash=True,
usedefault=True,
),
fixed_image=dict(mandatory=True,
),
fixed_image_mask=dict(argstr='%s',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
metric=dict(argstr='%s',
mandatory=True,
),
metric_weight=dict(requires=['metric'],
usedefault=True,
),
moving_image=dict(mandatory=True,
),
moving_image_mask=dict(requires=['fixed_image_mask'],
),
num_threads=dict(nohash=True,
usedefault=True,
),
radius_or_number_of_bins=dict(mandatory=True,
requires=['metric'],
),
sampling_percentage=dict(mandatory=True,
requires=['metric'],
),
sampling_strategy=dict(requires=['metric'],
usedefault=True,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = MeasureImageSimilarity.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_MeasureImageSimilarity_outputs():
output_map = dict(similarity=dict(),
)
outputs = MeasureImageSimilarity.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
shanot/imp | modules/atom/test/test_clone.py | Python | gpl-3.0 | 1,208 | 0.000828 |
import IMP
import IMP.test
import IMP.core
import IMP.atom
class Tests(IMP.test.TestCase):
def test_bonded(self):
"""Check close and destroy Hierarchy """
m = IMP.Model()
mh = IMP.atom.read_pdb(self.get_input_file_name("mini.pdb"), m)
nump = len(m.get_particle_indexes())
mhc = IMP.atom.create_clone(mh)
nnump = len(m.get_particle_indexes())
self.assertEqual(nump * 2, nnump)
IMP.atom.destroy(mhc)
mhc = None
self.assertEqual(nump, len(m.get_particle_indexes())
|
)
IMP.atom.destroy(mh)
mh = None
|
self.assertEqual(0, len(m.get_particle_indexes()))
def test_destroy_child(self):
"""Destroy of a child should update the parent"""
m = IMP.Model()
mh = IMP.atom.read_pdb(self.get_input_file_name("mini.pdb"), m)
atoms = IMP.atom.get_by_type(mh, IMP.atom.ATOM_TYPE)
self.assertEqual(len(atoms), 68)
IMP.atom.destroy(atoms[0])
# This will fail if the atom is not removed from the parent residue
atoms = IMP.atom.get_by_type(mh, IMP.atom.ATOM_TYPE)
self.assertEqual(len(atoms), 67)
if __name__ == '__main__':
IMP.test.main()
|
guillaume-philippon/aquilon | lib/aquilon/worker/commands/update_machine.py | Python | apache-2.0 | 16,386 | 0.000305 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq update machine`."""
import re
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import (Chassis, ChassisSlot, Model, Machine,
Resource, BundleResource, Share, Filesystem)
from aquilon.aqdb.types import CpuType
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.hardware_entity import update_primary_ip
from aquilon.worker.dbwrappers.interface import set_port_group, generate_ip
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.resources import (find_resource,
get_resource_holder)
from aquilon.worker.templates import (PlenaryHostData,
PlenaryServiceInstanceToplevel)
from aquilon.worker.processes import DSDBRunner
_disk_map_re = re.compile(r'^([^/]+)/(?:([^/]+)/)?([^/]+):([^/]+)/(?:([^/]+)/)?([^/]+)$')
def parse_remap_disk(old_vmholder, new_vmholder, remap_disk):
result = {}
if not remap_disk:
return result
maps = remap_disk.split(",")
for map in maps:
res = _disk_map_re.match(map)
if not res:
raise ArgumentError("Invalid disk backend remapping "
"specification: '%s'" % map)
src_type, src_rg, src_name, dst_type, dst_rg, dst_name = res.groups()
src_cls = Resource.polymorphic_subclass(src_type,
"Invalid resource type")
dst_cls = Resource.polymorphic_subclass(dst_type,
"Invalid resource type")
if dst_cls not in (Share, Filesystem):
raise ArgumentError("%s is not a valid virtual disk backend "
"resource type." % dst_type)
src_backend = find_resource(src_cls, old_vmholder, src_rg, src_name)
dst_backend = find_resource(dst_cls, new_vmholder, dst_rg, dst_name)
result[src_backend] = dst_backend
return result
def get_metacluster(holder):
if hasattr(holder, "metacluster"):
return holder.metacluster
# vmhost
if hasattr(holder, "cluster") and holder.cluster:
return holder.cluster.metacluster
else:
# TODO vlocal still has clusters, so this case not tested yet.
return None
def update_disk_backing_stores(dbmachine, old_holder, new_holder, remap_disk):
if not old_holder:
old_holder = dbmachine.vm_container.holder.holder_object
if not new_holder:
new_holder = old_holder
disk_mapping = parse_remap_disk(old_holder, new_holder, remap_disk)
for dbdisk in dbmachine.disks:
old_bstore = dbdisk.backing_store
if isinstance(old_bstore.holder, BundleResource):
resourcegroup =
|
old_bstore.holder.resourcegroup.name
else:
resourcegroup = None
if old_bstore in disk_mapping:
new_bstore = disk_mapping[old_bstore]
else:
new_bstore = find_resource(old_bstore.__class__, new_holder,
resourcegroup, old_bstore.name,
|
error=ArgumentError)
dbdisk.backing_store = new_bstore
def update_interface_bindings(session, logger, dbmachine, autoip):
for dbinterface in dbmachine.interfaces:
old_pg = dbinterface.port_group
if not old_pg:
continue
old_net = old_pg.network
# Suppress the warning about PG mismatch - we'll update the addresses
# later
set_port_group(session, logger, dbinterface, old_pg.name,
check_pg_consistency=False)
logger.info("Updated {0:l} to use {1:l}.".format(dbinterface,
dbinterface.port_group))
new_net = dbinterface.port_group.network
if new_net == old_net or not autoip:
dbinterface.check_pg_consistency(logger=logger)
continue
for addr in dbinterface.assignments:
if addr.network != old_net:
continue
new_ip = generate_ip(session, logger, dbinterface, autoip=True,
network_environment=old_net.network_environment)
for dbdns_rec in addr.dns_records:
dbdns_rec.network = new_net
dbdns_rec.ip = new_ip
old_ip = addr.ip
addr.ip = new_ip
addr.network = new_net
logger.info("Changed {0:l} IP address from {1!s} to {2!s}."
.format(dbinterface, old_ip, new_ip))
dbinterface.check_pg_consistency(logger=logger)
def move_vm(session, logger, dbmachine, resholder, remap_disk,
allow_metacluster_change, autoip, plenaries):
old_holder = dbmachine.vm_container.holder.holder_object
if resholder:
new_holder = resholder.holder_object
else:
new_holder = old_holder
if new_holder != old_holder:
old_mc = get_metacluster(old_holder)
new_mc = get_metacluster(new_holder)
if old_mc != new_mc and not allow_metacluster_change:
raise ArgumentError("Moving VMs between metaclusters is "
"disabled by default. Use the "
"--allow_metacluster_change option to "
"override.")
plenaries.add(old_holder)
plenaries.add(new_holder)
dbmachine.vm_container.holder = resholder
if new_holder != old_holder or remap_disk:
update_disk_backing_stores(dbmachine, old_holder, new_holder, remap_disk)
if new_holder != old_holder or autoip:
update_interface_bindings(session, logger, dbmachine, autoip)
if hasattr(new_holder, 'location_constraint'):
dbmachine.location = new_holder.location_constraint
else:
dbmachine.location = new_holder.hardware_entity.location
class CommandUpdateMachine(BrokerCommand):
requires_plenaries = True
required_parameters = ["machine"]
def render(self, session, logger, plenaries, machine, model, vendor, serial, uuid,
clear_uuid, chassis, slot, clearchassis, multislot, vmhost,
cluster, metacluster, allow_metacluster_change, cpuname,
cpuvendor, cpucount, memory, ip, autoip, uri, remap_disk,
comments, **arguments):
dbmachine = Machine.get_unique(session, machine, compel=True)
oldinfo = DSDBRunner.snapshot_hw(dbmachine)
old_location = dbmachine.location
plenaries.add(dbmachine)
if dbmachine.vm_container:
plenaries.add(dbmachine.vm_container)
if dbmachine.host:
# Using PlenaryHostData directly, to avoid warnings if the host has
# not been configured yet
plenaries.add(dbmachine.host, cls=PlenaryHostData)
if clearchassis:
del dbmachine.chassis_slot[:]
if chassis:
dbchassis = Chassis.get_unique(session, chassis, compel=True)
dbmachine.location = dbchassis.location
if slot is None:
raise ArgumentError("Option --chassis requires --slot "
"information.")
self.adjust_slot(session, logger,
dbmachine, dbchassis, slot, multislot)
elif slot is not No
|
chuckatkins/legion | language/examples/mssp/gen_graph.py | Python | apache-2.0 | 8,232 | 0.008017 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import array
import math
import os
import random
import sys
import subprocess
def create_graph(nodes, edges, verbose):
if verbose: print('Creating random graph with {} nodes and {} edges...'.format(nodes, edges))
n1 = [ random.randint(0, nodes - 1) for x in xrange(edges) ]
n2 = [ random.randint(0, nodes - 1) for x in xrange(edges) ]
length = [ random.expovariate(1.0) for x in xrange(edges) ]
return { 'nodes': nodes,
'edges': edges,
'n1': n1,
'n2': n2,
'length': length }
def compute_subgraphs(n, p):
return [(x*(n/p) + min(x, n%p), ((x+1)*(n/p)-1) + min(x + 1, n%p)) for x in xrange(0, p)]
def find_subgraph_index(n, subgraphs):
s = [i for i, (start, end) in zip(xrange(len(subgraphs)), subgraphs) if start <= n and n <= end]
assert len(s) == 1
return s[0]
def find_subgraph(n, subgraphs):
return subgraphs[find_subgraph_index(n, subgraphs)]
def create_clustered_DAG_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
if verbose: print('Creating clustered DAG graph with {} nodes and {} edges...'.format(nodes, edges))
subgraphs = compute_subgraphs(nodes, nsubgraphs)
def make_edge():
n1 = random.randint(0, nodes - 1)
if random.randint(1, 100) <= cluster_factor:
s = find_subgraph(n1, subgraphs)
n2 = random.randint(*s)
else:
n2 = random.randint(min(n1, nodes-1), nodes-1)
return (n1, n2)
n1, n2 = zip(*(make_edge() for x in xrange(edges)))
length = [random.expovariate(1.0) for x in xrange(edges)]
return { 'nodes': nodes,
'edges': edges,
'n1': n1,
'n2': n2,
'length': length }
def create_clustered_geometric_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
if verbose: print('Creating clustered geometric graph with {} nodes and {} edges...'.format(nodes, edges))
blocks = int(math.sqrt(nsubgraphs))
assert blocks**2 == nsubgraphs
bounds = [((1.0*(i%blocks)/blocks, 1.0*(i%blocks + 1)/blocks),
(1.0*(i/blocks)/blocks, 1.0*(i/blocks + 1)/blocks))
for i in xrange(nsubgraphs)]
subgraphs = compute_subgraphs(nodes, nsubgraphs)
pos = [(random.uniform(*x), random.uniform(*y))
for (lo, hi), (x, y) in zip(subgraphs, bounds)
for _ in xrange(lo, hi+1)]
def make_edge():
n1 = random.randint(0, nodes - 1)
if random.randint(1, 100) <= cluster_factor:
s = find_subgraph(n1, subgraphs)
n2 = random.randint(*s)
else:
i = find_subgraph_index(n1, subgraphs)
ix, iy = i%blocks, i/blocks
if random.randint(0, 1) == 0:
s2 = subgraphs[((ix+1)%blocks) + iy*blocks]
else:
s2 = subgraphs[ix + ((iy+1)%blocks)*blocks]
n2 = random.randint(*s2)
return (n1, n2)
n1, n2 = zip(*(make_edge() for x in xrange(edges)))
length = [xlen + random.expovariate(1000/xlen if xlen > 0.0001 else 1)
for x in xrange(edges)
for xlen in [math.sqrt(sum((a - b)**2 for a, b in zip(pos[n1[x]], pos[n2[x]])))]]
return { 'nodes': nodes,
'edges': edges,
'n1': n1,
'n2': n2,
'length': length }
def metis_graph(g, metis, subgraphs, outdir, verbose):
if verbose: print('Running METIS...')
with open(os.path.join(outdir, 'graph.metis'), 'wb') as f:
f.write('{:3d} {:3d} 000\n'.format(g['nodes'], g['edges']))
for n in xrange(g['nodes']):
f.write(' '.join('{:3d} 1'.format(n2+1) for n1, n2 in zip(g['n1'], g['n2']) if n1 == n))
f.write('\n')
subprocess.check_call([metis, os.path.join(outdir, 'graph.metis'), str(subgraphs)])
with open(os.path.join(outdir, 'graph.metis.part.{}'.format(subgraphs)), 'rb') as f:
colors = [int(x) for x in f.read().split()]
mapping = dict(zip(sorted(xrange(g['nodes']), key = lambda x: colors[x]), range(g['nodes'])))
g['n1'] = [mapping[g['n1'][x]] for x in xrange(g['edges'])]
g['n2'] = [mapping[g['n2'][x]] for x in xrange(g['edges'])]
def sort_graph(g, verbose):
if verbose: print('Sorting graph...')
mapping = dict(zip(sorted(xrange(g['edges']), key = lambda x: (g['n1'][x], g['n2'][x])), range(g['edges'])))
g['n1'] = [g['n1'][mapping[x]] for x in xrange(g['edges'])]
g['n2'] = [g['n2'][mapping[x]] for x in xrange(g['edges'])]
g['length'] = [g['length'][mapping[x]] for x in xrange(g['edges'])]
def solve_graph(g, source, verbose):
if verbose: print('Solving graph...')
parent = [ -1 for x in xrange(g['nodes']) ]
dist = [ 1e100 for x in xrange(g['nodes']) ]
dist[source] = 0
while True:
count = 0
for n1, n2, length in zip(g['n1'], g['n2'], g['length']):
c2 = length + dist[n1]
if c2 < dist[n2]:
dist[n2] = c2
parent[n2] = n1
count += 1
#print 'count = {:d}'.format(count)
if count == 0:
break
# if verbose:
# for i, e in enumerate(zip(g['n1'], g['n2'], g['length'])):
# print('{:3d} {:3d} {:3d} {:5.3f}'.format(i, e[0], e[1], e[2]))
# for i, n in enumerate(zip(parent, dist)):
# print('{:3d} {:3d} {:5.3f}'.format(i, n[0], n[1]))
return dist
def write_graph(g, problems, outdir, verbose):
if verbose: print('Writing graph...')
with open(os.path.join(outdir, 'edges.dat'), 'wb') as f:
array.array('i', g['n
|
1']).tofile(f)
array.array('i', g['n2']).tofile(f)
array.array('f', g['length']).tofile(f)
with open(os.path.join(outdir, 'graph.dot'), 'wb') as f:
f.write('digraph {\n')
f.write('\n'.join('{} -> {} [ style = "{}"]'.format(e1, e2, 'dotted' if e2 <= e1 else 'solid'
|
) for e1, e2 in zip(g['n1'], g['n2'])))
f.write('\n}\n')
with open(os.path.join(outdir, 'graph.txt'), 'w') as f:
f.write('nodes {:d}\n'.format(g['nodes']))
f.write('edges {:d}\n'.format(g['edges']))
f.write('data edges.dat\n')
sources = random.sample(xrange(g['nodes']), problems)
for s in sources:
parents = solve_graph(g, s, verbose)
with open(os.path.join(outdir, 'result_{:d}.dat'.format(s)), 'wb') as f2:
array.array('f', parents).tofile(f2)
f.write('source {:d} result_{:d}.dat\n'.format(s, s))
if __name__ == '__main__':
p = argparse.ArgumentParser(description='graph generator')
p.add_argument('--nodes', '-n', type=int, default=10)
p.add_argument('--edges', '-e', type=int, default=20)
p.add_argument('--type', '-t', default='random', choices=['random', 'clustered_DAG', 'clustered_geometric'])
p.add_argument('--subgraphs', '-s', type=int, default=1)
p.add_argument('--cluster-factor', '-c', type=int, default=95)
p.add_argument('--problems', '-p', type=int, default=1)
p.add_argument('--randseed', '-r', type=int, default=12345)
p.add_argument('--metis-path', default='./metis-install/bin/gpmetis')
p.add_argument('--metis', '-m', action='store_true')
p.add_argument('--outdir', '-o', required=True)
p.add_argument('--verbose', '-v', action='store_true')
args = p.parse_args()
random.seed(args.randseed)
if args.type == 'random':
G = create_graph(args.nodes, args.edges, args.verbose)
elif args.type == 'clustered_DAG':
G = create_clustered_DAG_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
elif args.type == 'clustered_geometric':
G = create_clustered_geometric_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
else:
assert false
try:
os.mkdir(args.outdir)
except:
pass
assert os.path.isdir(args.outdir)
if args.metis:
assert os.path.isfile(args.metis_path)
metis_graph(G, args.metis_path, args.subgraphs, args.outdir, args.verbose)
sort_graph(G, args.verbose)
write
|
wubr2000/googleads-python-lib | examples/dfp/v201502/creative_template_service/get_all_creative_templates.py | Python | apache-2.0 | 1,984 | 0.008569 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific lang
|
uage governing permissions and
# limitations under the License.
"""This code example gets all creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of o
|
ur README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_template_service = client.GetService(
'CreativeTemplateService', version='v201502')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative templates by statement.
while True:
response = creative_template_service.getCreativeTemplatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for template in response['results']:
print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
'was found.' % (template['id'],
template['name'],
template['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
bjodah/PyLaTeX | examples/full.py | Python | mit | 2,838 | 0 |
#!/usr/bin/python
"""
This example demonstrates several features of PyLaTeX.
It includes plain equations, tables, equations using numpy objects, tikz plots,
and figures.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
# begin-doc-include
import numpy as np
from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, \
Plot, Figure, Package, Matrix
from pylatex.utils import italic
import os
if __name__ == '__main__':
image_filename = os.path.join(os.path.dirname(__file__), 'kitten.jpg')
doc = Document()
doc.packages.append(Package('geometry', options=['tmargin=1cm',
'lmargin=10cm']))
with doc.create(Section('The simple stuff')):
doc.append('Some regular text and some')
doc.append(italic('italic text. '))
doc.append('\nAlso some crazy characters: $&#{}')
with doc.create(Subsection('Math that is incorrect')):
doc.append(Math(data=['2*3', '=', 9]))
with doc.create(Subsection('Table of something')):
with doc.create(Tabular('rc|cl')) as table:
table.add_hline()
table.add_row((1, 2, 3, 4))
table.add_hline(1, 2)
table.add_empty_row()
table.add_row((4, 5, 6, 7))
a = np.array([[100, 10, 20]]).T
M = np.matrix([[2, 3, 4],
[0, 0, 1],
[0, 0, 2]])
with doc.create(Section('The fancy stuff')):
with doc.create(Subsection('Correct matrix equations')):
doc.append(Math(data=[Matrix(M), Matrix(a), '=', Matrix(M * a)]))
with doc.create(Subsection('Beautiful graphs')):
with doc.create(TikZ()):
plot_options = 'height=6cm, width=6cm, grid=major'
with doc.create(Axis(options=plot_options)) as plot:
plot.append(Plot(name='model', func='-x^5 - 242'))
coordinates = [
(-4.77778, 2027.60977),
(-3.55556, 347.84069),
(-2.33333, 22.58953),
(-1.11111, -493.50066),
(0.11111, 46.66082),
(1.33333, -205.56286),
(2.55556, -341.40638),
(3.77778, -1169.24780),
(5.00000, -3269.56775),
]
plot.append(Plot(name='estimate', coordinates=coordinates))
with doc.create(Subsection('Cute kitten pictures')):
with doc.crea
|
te(Figure(position='h!')) as kitten_pic:
kitten_pic.add_image(image_filename,
|
width='120px')
kitten_pic.add_caption('Look it\'s on its back')
doc.generate_pdf('full')
|
qedsoftware/commcare-hq | corehq/sql_accessors/migrations/0025_update_get_ledger_values_for_cases.py | Python | bsd-3-clause | 703 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.form_processor.models import CaseTransaction
from corehq.sql_db.operations import RawSQLMigration
|
, HqRunSQL
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {
'TRANSACTION_TYPE_FORM': CaseTransaction.TYPE_FORM
})
class Migration(migrations.Migration):
dependencies = [
('sql_accessors', '0024_update_save_ledger_values'),
]
operations = [
HqRunSQL(
"DROP FUNCTION IF EXISTS get_ledger_values_for_cases(TEXT[])",
"SELECT 1"
),
migrator.get_migration('get_ledger_values_
|
for_cases.sql'),
]
|
DOE-NEPA/geonode_2.0_to_2.4_migration | migrate_documents_document_modified.py | Python | gpl-2.0 | 2,363 | 0.025815 |
#!/usr/bin/python
import os
import psycopg2
import sys
import django_content_type_mapping
file = open("/home/" + os.getlogin() + "/.pgpass", "r")
pgpasses = []
for line in file:
pgpasses.append(line.rstrip("\n").split(":"))
file.close()
for pgpass in pgpasses:
#print str(pgpass)
if pgpass[0] == "54.236.235.110" and pgpass[3] == "geonode":
src_pgpass = pgpass
if pgpass[0] == "54.197.226.56" and pgpass[3] == "geonode":
dst_pgpass = pgpass
src = psycopg2.connect(host=src_pgpass[0], database="geonode2", user=src_pgpass[3], password=src_pgpass[4])
dst = psycopg2.connect(host=dst_pgpass[0], database="geonode", user=dst_pgpass[3], password=dst_pgpass[4])
src_cur = src.cursor()
dst_cur = dst.cursor()
src_cur.execute("select resourcebase_ptr_id, content_type_id, object_id, doc_file, extension, popular_count, share_count from documents_document")
for src_row in src_cur:
assignments = []
#resourcebase_ptr_id
assignments.append(src_row[0])
#title_en
assignments.append(None)
#abstract_en
assignments.append(None)
#purpose_en
assignments.append(None)
#constraints_other_en
assignments.append(None)
#supplemental_information_en
assignments.append(None)
#distribution_description_en
assignments.append(None)
#data_quality_statement_en
assignments.append(None)
#content_type_id
assignments.append(django_content_type_mapping.get_django_content_type_id(src_row[1]))
#object_id
assignments.append(src_row[2])
#doc_file
assignments.append(src_row[3])
#extension
assignments.append(src_row[4])
#doc_type
ass
|
ignments.append(None)
#doc_url
assignments.append(None)
try:
dst_cur.execute("insert into documents_document(resourcebase_ptr_id, title_en, abstract_en, purpose_en, constraints_other_en, supplemental_information_en, distribution_description_en, data_quality_statement_en, content_type_id, object_id, doc_file, extension, doc_type, doc_url) values (%s
|
, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", assignments)
dst.commit()
except Exception as error:
print
print type(error)
print str(error) + "select resourcebase_ptr_id, content_type_id, object_id, doc_file, extension, popular_count, share_count from documents_document"
print str(src_row)
dst.rollback()
dst.commit()
src_cur.close()
dst_cur.close()
src.close()
dst.close()
|
PlayCircular/play_circular | apps/actividades/admin_views.py | Python | agpl-3.0 | 2,927 | 0.02188 |
#coding=utf-8
# Copyright (C) 2014 by Víctor Romero Blanco <info at playcircular dot com>.
# http://playcircular.com/
# It's licensed under the AFFERO GENERAL PUBLIC LICENSE unless stated otherwise.
# You can get copies of the licen
|
ses here: http://www.affero.org/oagpl.html
# AFFERO GENERA
|
L PUBLIC LICENSE is also included in the file called "LICENSE".
from django.contrib import admin
from django.conf import settings
from configuracion.models import *
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseNotAllowed
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from grupos.models import *
from grupos.forms import *
from actividades.models import *
from usuarios.models import *
from django.core import serializers
from django.db.models import Q
###################################################################################################
@login_required
def recarga_actividad(request):
if request.is_ajax() and request.POST:
seleccionados = request.POST.get('seleccionados')
str_grupos = seleccionados.split(',')
id_grupos = []
for item in str_grupos:
numero = int(item)
id_grupos.append(numero)
if len(id_grupos) > 0:
n_grupos_administrados = Miembro.objects.filter(usuario=request.user,activo=True,nivel=u'Administrador').count()
try:
categorias = Idiomas_categoria.objects.filter((Q(categoria__grupo__in=id_grupos) | Q(categoria__superadmin=True)) & Q(idioma=request.LANGUAGE_CODE))
except Idiomas_categoria.DoesNotExist:
categorias = Idiomas_categoria.objects.filter(Q(categoria__grupo__in=id_grupos) | Q(categoria__superadmin=True)).order_by('-idioma_default')
if request.user.is_superuser or n_grupos_administrados > 0:
usuarios_qs = Miembro.objects.filter(grupo__in=id_grupos,activo=True).values_list('usuario', flat=True)
if request.user.is_superuser:
#El Superadmin puede publicar sin que pernezca a ningún grupo para que no lo controlen los Admin de los grupos
usuarios_qs = list(usuarios_qs) + [request.user.pk]
usuarios = User.objects.filter(pk__in=usuarios_qs).distinct()
else:
usuarios = User.objects.filter(pk=request.user.pk)
datos = list(usuarios) + list(categorias)
else:
datos = []
else:
datos = []
#se devuelven los anios en formato json, solo nos interesa obtener como json
data = serializers.serialize("json", datos, fields=('pk','username','nombre','categoria'))
return HttpResponse(data, mimetype="application/javascript")
###################################################################################################
|
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/kernel/tests/test_public_api.py | Python | apache-2.0 | 1,308 | 0.006881 |
"""Test the IPython.kernel public API
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------
|
-----------------------------------------------------------------
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.kernel import launcher, connect
from IPython import kernel
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_kms():
for base in ("", "Multi"):
KM = base + "KernelManag
|
er"
yield nt.assert_true(KM in dir(kernel), KM)
@dec.parametric
def test_kcs():
for base in ("", "Blocking"):
KM = base + "KernelClient"
yield nt.assert_true(KM in dir(kernel), KM)
@dec.parametric
def test_launcher():
for name in launcher.__all__:
yield nt.assert_true(name in dir(kernel), name)
@dec.parametric
def test_connect():
for name in connect.__all__:
yield nt.assert_true(name in dir(kernel), name)
|
miku/vy | vyapp/plugins/box.py | Python | mit | 875 | 0.009143 |
"""
"""
from traceback import format_exc as debug
from vyapp.stdout import Stdout
from vyapp.tools import exec_quiet, set_status_msg
from vyapp.ask import *
import sys
def redirect_stdout(area):
try:
sys.stdout.remove(area)
except ValueError:
pass
sys.stdout.append(Stdout(area))
set_status_msg('Output redirected to %s' % area.index('insert'))
def install(area):
area.install(('NORMAL', '<Control-W>', lambda event: event.widget.tag_delete_ranges(Stdout.TAG_CODE)),
('NORMAL', '<Control-Tab>
|
', lambda event: sys.stdout.restore()),
('NORMAL', '<Key-W>', lambda event: event.widget.tag_delete(Stdout.TAG_CODE)),
('NORMAL', '<Control-w>', lambda event: exec_quiet(sys.stdout.remove, event.widget)),
('NORMAL', '<Tab>', lambda event: redirect_stdout(event.widget)))
| |
sourlows/rating-cruncher | src/lib/click/core.py | Python | apache-2.0 | 65,670 | 0.000091 |
import os
import sys
import codecs
from contextlib import contextmanager
from itertools import repeat
from functools import update_wrapper
from .types import convert_type, IntRange, BOOL
from .utils import make_str, make_default_short_help, echo
from .exceptions import ClickException, UsageError, BadParameter, Abort, \
MissingParameter
from .termui import prompt, confirm
from .formatting import HelpFormatter, join_options
from .parser import OptionParser, split_opt
from ._compat import PY2, isidentifier, iteritems
_missing = object()
SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
sys.exit(1)
def batch(iterable, batch_size):
return list(zip(*repeat(iter(iterable), batch_size)))
def invoke_param_callback(callback, ctx, param, value):
code = getattr(callback, '__code__', None)
args = getattr(code, 'co_argcount', 3)
if args < 3:
# This will become a warning in Click 3.0:
from warnings import warn
warn(Warning('Invoked legacy parameter callback "%s". The new '
'signature for such callbacks starting with '
'click 2.0 is (ctx, param, value).'
% callback), stacklevel=3)
return callback(ctx, value)
return callback(ctx, param, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
"""Context manager that attaches extra information to exceptions that
fly.
"""
try:
yield
except BadParameter as e:
if e.ctx is None:
e.ctx = ctx
if param is not None and e.param is None:
e.param = param
raise
except UsageError as e:
if e.ctx is None:
e.ctx = ctx
raise
def iter_params_for_processing(invocation_order, declaration_order):
"""Given a sequence of parameters in the order as should be considered
for processing and an iterable of parameters that exist, this returns
a list in the correct order as they should be processed.
"""
def sort_key(item):
try:
idx = invocation_order.index(item)
except ValueError:
idx = float('inf')
return (not item.is_eager, idx)
return sorted(declaration_order, key=sort_key)
class Context(object):
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
the name of the script.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
"""
def __init__(self, command, parent=None, info_name=None, obj=None,
auto_envvar_prefix=None, default_map=None,
terminal_width=None, max_content_width=None,
resilient_parsing=False, allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None, help_option_names=None,
token_normalize_func=None, color=None):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed pa
|
rameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
if obj is None and parent is not None:
|
obj = parent.obj
#: the user object stored.
self.
|
neurodebian/pkg-neuron | share/lib/python/neuron/neuroml/rdxml.py | Python | gpl-2.0 | 1,545 | 0.029126 |
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
import xml2nrn
# module names derived from the namespace. Add new tags in proper namespace
import neuroml
import metadata
import morphml
import biophysics
class FileWrapper:
def __init__(self, source):
self.source = source
self.lineno = 0
def read(self, bytes):
s = self.source.readline()
self.lineno += 1
return s
# for each '{namespace}element' call the corresponding module.func
def handle(x2n, fw, event, node):
tag = node.tag.split('}')
# hopefully a namespace token corresponding to an imported module name
ns = tag[0].spli
|
t('/')[-2]
tag = ns+'.'+tag[1] #namespace.element should correspond to module.func
f = None
try:
if event == 'start':
f = eval(tag)
elif event == 'end':
f = eval(tag + '_end')
except:
pass
if f:
x2n.locator.lineno = fw.lineno
try:
f(x2n, node) # handle the element when it opens
except:
print tag,' failed at ', x2n.locator.getLineNumber()
elif event == 'start':
print 'ignore', node.tag # no function to handle
|
the element
return 0
return 1
def rdxml(fname, ho = None):
f = FileWrapper(open(fname))
x2n = xml2nrn.XML2Nrn()
ig = None
for event, elem in etree.iterparse(f, events=("start", "end")):
if ig != elem:
if handle(x2n, f, event, elem) == 0:
ig = elem
if (ho):
ho.parsed(x2n)
if __name__ == '__main__':
rdxml('temp.xml')
|
gpotter2/scapy | scapy/all.py | Python | gpl-2.0 | 1,320 | 0 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Aggregate top level objects from all Scapy modules.
"""
from scapy.base_classes import *
from scapy.config import *
from scapy.dadict import *
from scapy.data import *
from scapy.error impor
|
t *
from scapy.themes import *
from scapy.arch import *
from scapy.interfaces import *
from scapy.plist import *
from scapy.fields import *
from scapy.packet import *
from scapy.asn1fields import *
from scapy.asn1packet import *
from scapy.utils import *
from scapy.route import *
from scapy.sendrecv import *
from scapy.sessions import *
from scapy.supersocket import *
from scapy.volatile import
|
*
from scapy.as_resolvers import *
from scapy.automaton import *
from scapy.autorun import *
from scapy.main import *
from scapy.consts import *
from scapy.compat import raw # noqa: F401
from scapy.layers.all import *
from scapy.asn1.asn1 import *
from scapy.asn1.ber import *
from scapy.asn1.mib import *
from scapy.pipetool import *
from scapy.scapypipes import *
if conf.ipv6_enabled: # noqa: F405
from scapy.utils6 import * # noqa: F401
from scapy.route6 import * # noqa: F401
from scapy.ansmachine import *
|
testmana2/test | Helpviewer/Bookmarks/DefaultBookmarks_rc.py | Python | gpl-3.0 | 2,920 | 0.001712 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.4.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xf1\
\x00\
\x00\x09\x00\x78\x9c\xdd\x96\x51\x6f\x9b\x30\x10\xc7\xdf\xfb\x29\
\x3c\x1e\x9a\x4d\x15\xd0\x49\x7b\x98\x52\x48\x34\x92\x4c\xea\xd4\
\xaa\x54\x69\x55\xf5\xd1\x98\x0b\x71\x01\xdb\x35\x26\x09\xdf\x7e\
\x86\xb0\x96\xa4\x2c\xa4\x1d\x4f\xe3\xc5\xd8\x77\xbe\xdf\x9d\x8d\
\xff\xc6\x19\x6f\xd2\x04\xad\x40\x66\x94\x33\xd7\xf8\x6a\x9d\x1b\
\x08\x18\xe1\x21\x65\x91\x6b\xe4\x6a\x61\x7e\x37\xc6\xa3\x13\xe7\
\xd3\xf4\x66\x72\xf7\xe8\xcf\xd0\x26\x80\x44\xf7\xcb\x66\x77\xda\
\xe8\x04\xe9\xc7\x59\xf0\x24\x04\x89\xaa\x26\x74\x0d\xc6\x6b\x43\
\x65\x54\x54\x25\x30\xf2\x38\x8f\x53\x2c\xe3\x0c\x79\x58\x3a\xf6\
\x76\xf0\xd5\x29\xa8\xcd\x68\x29\x61\xe1\x1a\x4b\xa5\xc4\xd0\xb6\
\x41\x52\x62\xd2\x10\x2c\x51\xa8\x25\x67\xa6\x90\xfc\x09\x88\xca\
\x2c\x2e\x23\xbb\xc1\x68\x70\x66\x7a\x0a\x7a\x80\x00\xcd\xa9\x82\
\xb7\x1c\xfb\x0f\xa8\x93\xbd\x5e\xaf\x2d\x49\x75\xb5\x01\x66\x31\
\xe1\xa9\xc8\x95\x5e\x1e\x4b\xbf\xfd\x85\xec\x17\xb7\xea\x9d\xe4\
\x43\xeb\xd6\x88\xdc\x88\x9b\xbd\x09\xdc\x51\xc2\xb3\xb2\x28\xb7\
\xf7\x53\x6e\x0f\xde\x1e\xbb\x25\xf1\xa3\x98\x21\xac\x20\xe1\x42\
\x7f\x2e\x87\xe9\xd3\x17\xbf\x3e\xf8\x21\x27\x35\xff\x30\x94\x93\
\x3c\x05\xa6\xb0\xd2\xdf\x72\x1f\xdc\x20\xe1\xd1\x31\x60\x4f\xfb\
\xf5\xc1\x5b\x70\x99\xa7\xc7\x00\x7f\x96\x8e\x7d\x10\x45\x82\x19\
\xa8\x4e\xa4\x5f\xb9\xa1\x5b\xd5\x07\xf3\x59\x11\xbd\x49\x12\xda\
\x0e\xfc\x6e\x99\x93\xca\xaf\x1f\xa6\x89\x85\x68\xd5\x98\x1d\x
|
a4\
\xf9\xa3\xf6\x3a\x1a\xea\xd8\xdb\x03\xff\x7e\x05\xf0\x2b\xfd\xfb\
\xb8\x0a\x6c\xf5\xb3\xa3\xa4\x1a\x72\x85\x59\x94\xe3\x08\x4a\x5a\
|
\xd6\x93\x2a\x88\x42\xd0\x66\x12\x65\xbf\x33\x11\x1f\x93\xb8\xcc\
\xe3\x92\x85\xb0\x19\x22\xbf\xf0\x2f\x3f\xb8\xd4\x7b\xbd\xbd\x45\
\x2f\x20\x3b\x74\x5f\x5d\x03\xcb\xff\xdb\x0b\xeb\xdb\xbf\xa1\x9f\
\xf0\x0a\x67\x44\x52\xa1\x86\x09\x27\x95\x98\x5a\x95\x65\x90\x62\
\x9a\x28\x3e\x1c\xcf\xef\xbd\x5f\xb3\xc9\x9d\x3b\x40\x67\x28\xac\
\x45\xd7\xaa\x48\x7a\x60\x70\x8a\x53\x71\xe1\xdd\x4c\x1f\x2b\x3b\
\x64\x04\x0b\xf8\xbc\x13\xe9\xcb\x45\x7b\xf2\x73\x60\x21\xba\xa2\
\x2c\xee\xcc\xfb\x75\xf3\x1d\x7b\xfb\x23\xf3\x1b\xc5\xa5\x8d\x58\
\
"
qt_resource_name = b"\
\x00\x15\
\x0c\xd3\x2e\x3c\
\x00\x44\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x42\x00\x6f\x00\x6f\x00\x6b\x00\x6d\x00\x61\x00\x72\x00\x6b\x00\x73\x00\x2e\
\x00\x78\x00\x62\x00\x65\x00\x6c\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
heddle317/moto | moto/elb/models.py | Python | apache-2.0 | 15,471 | 0.001357 |
from __future__ import unicode_literals
import datetime
from boto.ec2.elb.attributes import (
LbAttributes,
ConnectionSettingAttribute,
ConnectionDrainingAttribute,
AccessLogAttribute,
CrossZoneLoadBalancingAttribute,
)
from boto.ec2.elb.policies import (
Policies,
OtherPolicy,
)
from moto.core import BaseBackend, BaseModel
from moto.ec2.models import ec2_backends
from .exceptions import (
LoadBalancerNotFoundError,
TooManyTagsError,
BadHealthCheckDefinition,
DuplicateLoadBalancerName,
)
class FakeHealthCheck(BaseModel):
def __init__(self, timeout, healthy_threshold, unhealthy_threshold,
interval, target):
self.timeout = timeout
self.healthy_threshold = healthy_threshold
self.unhealthy_threshold = unhealthy_threshold
self.interval = interval
self.target = target
if not target.startswith(('HTTP', 'TCP', 'HTTPS', 'SSL')):
raise BadHealthCheckDefinition
class FakeListener(BaseModel):
def __init__(self, load_balancer_port, instance_port, protocol, ssl_certificate_id):
self.load_balancer_port = load_balancer_port
self.instance_port = instance_port
self.protocol = protocol.upper()
self.ssl_certificate_id = ssl_certificate_id
self.policy_names = []
def __repr__(self):
return "FakeListener(lbp: %s, inp: %s, pro: %s, cid: %s, policies: %s)" % (self.load_balancer_port, self.instance_port, self.protocol, self.ssl_certificate_id, self.policy_names)
class FakeBackend(BaseModel):
def __init__(self, instance_port):
self.instance_port = instance_port
self.policy_names = []
def __repr__(self):
return "FakeBackend(inp: %s, policies: %s)" % (self.instance_port, self.policy_names)
class FakeLoadBalancer(BaseModel):
def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None):
self.name = name
self.health_check = None
self.instance_ids = []
self.zones = zones
self.listeners = []
self.backends = []
self.created_time = datetime.datetime.now()
self.scheme = scheme
self.attributes = FakeLoadBalancer.get_default_attributes()
self.policies = Policies()
self.policies.other_policies = []
self.policies.app_cookie_stickiness_policies = []
self.policies.lb_cookie_stickiness_policies = []
self.subnets = subnets or []
self.vpc_id = vpc_id or 'vpc-56e10e3d'
self.tags = {}
self.dns_name = "%s.us-east-1.elb.amazonaws.com" % (name)
for port in ports:
listener = FakeListener(
protocol=(port.get('protocol') or port['Protocol']),
load_balancer_port=(
port.get('load_balancer_port') or port['LoadBalancerPort']),
|
instance_port=(
port.get('instance_port') or port['InstancePort']),
ssl_certificate_id=port.get(
'ssl_certificate_id', port.get('SSLCertificateId')),
)
self.listeners.append(listener)
# it is unclear per the AWS documentation as to when or how backend
# information gets set, so let's guess
|
and set it here *shrug*
backend = FakeBackend(
instance_port=(
port.get('instance_port') or port['InstancePort']),
)
self.backends.append(backend)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
elb_backend = elb_backends[region_name]
new_elb = elb_backend.create_load_balancer(
name=properties.get('LoadBalancerName', resource_name),
zones=properties.get('AvailabilityZones', []),
ports=properties['Listeners'],
scheme=properties.get('Scheme', 'internet-facing'),
)
instance_ids = properties.get('Instances', [])
for instance_id in instance_ids:
elb_backend.register_instances(new_elb.name, [instance_id])
policies = properties.get('Policies', [])
port_policies = {}
for policy in policies:
policy_name = policy["PolicyName"]
other_policy = OtherPolicy()
other_policy.policy_name = policy_name
elb_backend.create_lb_other_policy(new_elb.name, other_policy)
for port in policy.get("InstancePorts", []):
policies_for_port = port_policies.get(port, set())
policies_for_port.add(policy_name)
port_policies[port] = policies_for_port
for port, policies in port_policies.items():
elb_backend.set_load_balancer_policies_of_backend_server(
new_elb.name, port, list(policies))
health_check = properties.get('HealthCheck')
if health_check:
elb_backend.configure_health_check(
load_balancer_name=new_elb.name,
timeout=health_check['Timeout'],
healthy_threshold=health_check['HealthyThreshold'],
unhealthy_threshold=health_check['UnhealthyThreshold'],
interval=health_check['Interval'],
target=health_check['Target'],
)
return new_elb
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name)
return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
elb_backend = elb_backends[region_name]
try:
elb_backend.delete_load_balancer(resource_name)
except KeyError:
pass
@property
def physical_resource_id(self):
return self.name
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'CanonicalHostedZoneName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"')
elif attribute_name == 'CanonicalHostedZoneNameID':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"')
elif attribute_name == 'DNSName':
return self.dns_name
elif attribute_name == 'SourceSecurityGroup.GroupName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"')
elif attribute_name == 'SourceSecurityGroup.OwnerAlias':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"')
raise UnformattedGetAttTemplateException()
@classmethod
def get_default_attributes(cls):
attributes = LbAttributes()
cross_zone_load_balancing = CrossZoneLoadBalancingAttribute()
cross_zone_load_balancing.enabled = False
attributes.cross_zone_load_balancing = cross_zone_load_balancing
connection_draining = ConnectionDrainingAttribute()
connection_draining.enabled = False
attributes.connection_draining = connection_draining
access_log = AccessLogAttribute()
access_log.enabled = False
attributes.access_log = access_log
connection_settings = ConnectionSettingAttribute()
connection_settings.idle_timeout = 60
attributes.connecting_settings = connection_settings
return attributes
def add_tag(self, key, value):
if len(self.tags) >= 10 and key not in self.tags:
raise TooManyTagsError()
self.tags[key] = value
def list_tags(self):
return self.tags
def remove_tag(self, key):
if key in self.tags:
del self.tags[key]
|
carpedm20/fbchat | tests/threads/test_group.py | Python | bsd-3-clause | 1,493 | 0 |
from fbchat import GroupData, User
def test_group_from_graphql(session):
data = {
"name": "Group ABC",
"thread_key": {"thread_fbid": "11223344"},
"image": None,
"is_group_thread": True,
"all_participants": {
"nodes": [
{"messaging_actor": {"__typename": "User", "id": "1234"}},
{"messaging_actor": {"__typename": "User", "id": "2345"}},
{"messaging_actor": {"__typename": "User", "id": "3456"}},
]
},
"customization_info": {
"participant_customizations": [],
"outgoing_bubble_color": None,
"emoji": "😀",
},
"thread_admins":
|
[{"id": "1234"}],
"group_approval_queue": {"nodes": []},
"approval_mode": 0,
"joinable_mode": {"mode": "0", "link": ""},
"event_reminders": {"nodes": []},
}
assert GroupData(
session=session,
id="11223344",
photo=None,
name="Group ABC",
last_active=None,
message_count=None,
plan=None,
participants=[
User(session=session, id="1234"),
User(session=sessio
|
n, id="2345"),
User(session=session, id="3456"),
],
nicknames={},
color="#0084ff",
emoji="😀",
admins={"1234"},
approval_mode=False,
approval_requests=set(),
join_link="",
) == GroupData._from_graphql(session, data)
|
switch-education/pxt | tests/pydecompile-test/baselines/enum_user_defined_bit_mask_bad_sequence.py | Python | mit | 148 | 0.027027 |
/// <ref
|
erence path="./testBlocks/enums.ts" />
enum EnumOfFlags {
W
|
= 1,
X = 1 << 1,
Z = 1 << 3
}
let userDefinedTest7 = EnumOfFlags.W
|
psy0rz/zfs_autobackup | zfs_autobackup/TreeHasher.py | Python | gpl-3.0 | 2,011 | 0.015415 |
import itertools
import os
class TreeHasher():
"""uses BlockHasher recursively on a directory tree
Input and output generators are in the format: ( relative-filepath, chunk_nr, hexdigest)
"""
def __init__(self, block_hasher):
"""
:type block_hasher: BlockHasher
"""
self.block_hasher=block_hasher
def generate(self, start_path):
"""Use BlockHasher on every file in a tree, yielding the results
note that it only checks the contents of actual files. It ignores metadata like permissions and mtimes.
It also ignores empty directories, symlinks and special files.
"""
def walkerror(e):
raise e
for (dirpath, dirnames, filenames) in os.walk(start_path, onerror=walkerror):
for f in filenames:
file_path=os.path.join(dirpath, f)
if (not os.path.islink(file_path)) and os.path.isfile(file_path):
for (chunk_nr, hash) in self.block_has
|
her.generate(file_path):
yield ( os.path.relpath(file_path,start_path), chunk_nr, hash )
def compare(self, start_path, g
|
enerator):
"""reads from generator and compares blocks
yields mismatches in the form: ( relative_filename, chunk_nr, compare_hexdigest, actual_hexdigest )
yields errors in the form: ( relative_filename, chunk_nr, compare_hexdigest, "message" )
"""
count=0
def filter_file_name( file_name, chunk_nr, hexdigest):
return ( chunk_nr, hexdigest )
for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
count=count+1
block_generator=itertools.starmap(filter_file_name, group_generator)
for ( chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(os.path.join(start_path,file_name), block_generator):
yield ( file_name, chunk_nr, compare_hexdigest, actual_hexdigest )
|
cadrian/microcosmos | src/net/cadrian/microcosmos/model/bugs/__init__.py | Python | gpl-3.0 | 1,120 | 0.000893 |
# Microcosmos: an antsy game
# Copyright (C) 2010 Cyril ADRIAN <cyril.adrian@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 exclusively.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this pro
|
gram. If not, see <http://www.gnu.org/licenses/>.
#
"""
The Bugs model package provides bugs and their specific behaviour.
"""
from net.cadrian.microcosmos.model.bugs.antFemales import AntFemale, Target as AntFemaleTarget
from net.cadrian.microcosmos.model.bugs.antQueens import AntQueen
from net.cadrian.microcosmos.model.bugs.antSoldiers import AntSoldier
from net.cadrian.microcosmos.model.bugs
|
.antWorkers import AntWorker
from net.cadrian.microcosmos.model.bugs.lice import Louse
|
ProgrammaBol/wiggler | wiggler/ui/resources.py | Python | gpl-3.0 | 10,546 | 0 |
import wx
import wiggler.ui.dialogs as dialogs
class ResourceManager(wx.Control):
def __init__(self, parent, resources, events):
wx.Control.__init__(self, parent)
self.parent = parent
self.events = events
self.resources = resources
self.events.subscribe(self, ['add_costume', 'del_costume',
'add_character', 'del_character',
'add_sheet', 'del_sheet',
'add_image', 'del_image',
'add_sprite', 'del_sprite',
'add_animation', 'del_animation',
'change_background'])
self.Bind(self.events.EVT_NOTICE, self.notice_handler)
def notice_handler(self, event):
if event.notice == 'change_background':
self.change_background()
elif event.notice == 'add_costume':
self.add_costume()
elif event.notice == 'del_costume':
self.del_costume()
elif event.notice ==
|
'add_sheet':
self.add_sheet()
elif event.notice == 'del_sheet':
self.del_sheet()
elif event.notice == 'add_image':
pass
elif event.notice == 'del_image':
pass
elif event.notice == 'add_character':
self.add_cha
|
racter()
elif event.notice == 'del_character':
self.del_character()
elif event.notice == 'add_animation':
pass
elif event.notice == 'del_animation':
pass
elif event.notice == 'add_sprite':
self.add_sprite()
elif event.notice == 'del_sprite':
self.del_sprite()
event.Skip()
def change_background(self):
dlg = dialogs.ChangeBackgroundDialog(self.parent)
res = dlg.ShowModal()
if res == wx.ID_OK:
back_type = dlg.back_type.GetValue()
back_spec = dlg.back_spec.GetValue()
self.resources.change_default_background(back_type, back_spec)
dlg.Destroy()
def add_sheet(self):
# definition_fields = Factory_sheet.definition_fields
# dialog with definition fields, source file with browse button
# resource with same name , overwrite ?
filename = dialogs.open_sheet(self.parent)
if filename is not None:
dia = dialogs.AddSheetDialog(None, -1, "Insert sheet details",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
try:
self.resources.add_resource(
'sheets', self.settings['name'],
{'colorkey': self.settings['colorkey'],
'abs_path': filename})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_sheet(self):
# LISTCTR with very large icons ?
# use resources.find_deps
# print self.resources.find_deps('sheets', 'master')
# name = 'testsheet'
# self.resources.remove_resource('sheets', name)
# and everything associated to IT!!!
dia = dialogs.DelSheetDialog(None, -1, "Delete sheet",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('sheets',
self.settings['sheet']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK |
wx.ICON_INFORMATION)
try:
self.resources.remove_resource('sheets',
self.settings['sheet'])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def add_costume(self):
        # dialog with definitions and an area selection on the sheet
dia = dialogs.AddCostumeDialog(None, -1, "Add a new costume",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
# print self.settings['name'], self.settings['rect'], \
# self.settings['sheet']
try:
self.resources.add_resource(
'costumes', self.settings['name'],
{'name': self.settings['name'],
'sheet': self.settings['sheet'],
'rect': self.settings['rect']})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_costume(self):
# LISTCTRL with large icons
dia = dialogs.DelCostumeDialog(None, -1, "Delete costume",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('costumes',
self.settings['costume']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK |
wx.ICON_INFORMATION)
try:
self.resources.remove_resource('costumes',
self.settings['costume'])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def add_sprite(self):
# dialog with definition, select from existing costumes,
# animations, sounds...
# or add empty
dia = dialogs.AddSpriteDialog(None, -1, "Add a new sprite",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
try:
self.resources.add_resource('sprites', self.settings['name'],
{'name': self.settings['name'],
'base_class': self.settings
['base_class'],
'costumes': self.settings
['costumes'],
'animations': [],
'sounds': [],
'self_sufficiency': 0,
'user_code': {'__init__': ''}})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_sprite(self):
        # LISTCTRL with name + sprite definition
dia = dialogs.DelSpriteDialog(None, -1, "Delete a sprite",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('sprites',
self.settings['sprite']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.Messag
|
nevil-brownlee/pypy-libtrace
|
test/pypy-test-cases/test-bpf-filter.py
|
Python
|
gpl-3.0
| 780
| 0.007692
|
#!/usr/bin/env python
# Thu, 13 Mar 14 (PDT)
# bpf-filter.rb: Create a packet filter,
# use it to print udp records from a trace
# Copyright (C) 2015, Nevil Brownlee, U Auckland | WAND
from plt_testing import *
t = get_example_trace('anon-v4.pcap')
filter = plt.filter('udp port 53') # Only want DNS packets
t.conf_filter(filter)
t.conf_snaplen(500)
#t.conf_promisc(True)
# Remember: on a live interface, must sudo to capture
# on a trace file, can't set promiscuous
nfp = 0; offset = 12
for pkt in t:
nfp += 1
udp = pkt.udp
test_println("%4d:" % (nfp), get_tag())
print_udp(pkt.udp, offset, get_tag("nfp:"+str(nfp)))
test_println('')
    if nfp == 4:
break
test_println("%d filtered packets" % nfp, get_tag())
|
runt18/nupic
|
src/nupic/algorithms/CLAClassifier.py
|
Python
|
agpl-3.0
| 25,671
| 0.006544
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This file implements the CLAClassifier."""
import array
from collections import deque
import itertools
import numpy
# This determines how large one of the duty cycles must get before each of the
# duty cycles are updated to the current iteration.
# This must be less than float32 size since storage is float32 size
DUTY_CYCLE_UPDATE_INTERVAL = numpy.finfo(numpy.float32).max / (2 ** 20)
g_debugPrefix = "CLAClassifier"
def _pFormatArray(array_, fmt="%.2f"):
"""Return a string with pretty-print of a numpy array using the given format
for each element"""
return "[ " + " ".join(fmt % x for x in array_) + " ]"
class BitHistory(object):
"""Class to store an activationPattern bit history."""
__slots__ = ("_classifier", "_id", "_stats", "_lastTotalUpdate",
"_learnIteration", "_version")
__VERSION__ = 2
def __init__(self, classifier, bitNum, nSteps):
"""Constructor for bit history.
Parameters:
---------------------------------------------------------------------
classifier: instance of the CLAClassifier that owns us
bitNum: activation pattern bit number this history is for,
used only for debug messages
nSteps: number of steps of prediction this history is for, used
only for debug messages
"""
# Store reference to the classifier
self._classifier = classifier
# Form our "id"
self._id = "{0:d}[{1:d}]".format(bitNum, nSteps)
# Dictionary of bucket entries. The key is the bucket index, the
# value is the dutyCycle, which is the rolling average of the duty cycle
self._stats = array.array("f")
# lastUpdate is the iteration number of the last time it was updated.
self._lastTotalUpdate = None
# The bit's learning iteration. This is updated each time store() gets
# called on this bit.
self._learnIteration = 0
# Set the version to the latest version.
# This is used for serialization/deserialization
self._version = BitHistory.__VERSION__
def store(self, iteration, bucketIdx):
"""Store a new item in our history.
This gets called for a bit whenever it is active and learning is enabled
Parameters:
--------------------------------------------------------------------
iteration: the learning iteration number, which is only incremented
when learning is enabled
bucketIdx: the bucket index to store
Save duty cycle by normalizing it to the same iteration as
the rest of the duty cycles which is lastTotalUpdate.
This is done to speed up computation in inference since all of the duty
cycles can now be scaled by a single number.
The duty cycle is brought up to the current iteration only at inference and
only when one of the duty cycles gets too large (to avoid overflow to
larger data type) since the ratios between the duty cycles are what is
important. As long as all of the duty cycles are at the same iteration
their ratio is the same as it would be for any other iteration, because the
update is simply a multiplication by a scalar that depends on the number of
steps between the last update of the duty cycle and the current iteration.
"""
# If lastTotalUpdate has not been set, set it to the current iteration.
if self._lastTotalUpdate is None:
self._lastTotalUpdate = iteration
# Get the duty cycle stored for this bucket.
statsLen = len(self._stats) - 1
if bucketIdx > statsLen:
self._stats.extend(itertools.repeat(0.0, bucketIdx - statsLen))
# Update it now.
# duty cycle n steps ago is dc{-n}
# duty cycle for current iteration is (1-alpha)*dc{-n}*(1-alpha)**(n)+alpha
dc = self._stats[bucketIdx]
# To get the duty cycle from n iterations ago that when updated to the
# current iteration would equal the dc of the current iteration we simply
# divide the duty cycle by (1-alpha)**(n). This results in the formula
# dc'{-n} = dc{-n} + alpha/(1-alpha)**n where the apostrophe symbol is used
# to denote that this is the new duty cycle at that iteration. This is
# equivalent to the duty cycle dc{-n}
denom = ((1.0 - self._classifier.alpha) **
(iteration - self._lastTotalUpdate))
if denom > 0:
dcNew = dc + (self._classifier.alpha / denom)
# This is to prevent errors associated with inf rescale if too large
if denom == 0 or dcNew > DUTY_CYCLE_UPDATE_INTERVAL:
exp = ((1.0 - self._classifier.alpha) **
(iteration - self._lastTotalUpdate))
for (bucketIdxT, dcT) in enumerate(self._stats):
dcT *= exp
self._stats[bucketIdxT] = dcT
# Reset time since last update
self._lastTotalUpdate = iteration
# Add alpha since now exponent is 0
dc = self._stats[bucketIdx] + self._classifier.alpha
else:
dc = dcNew
self._stats[bucketIdx] = dc
if self._classifier.verbosity >= 2:
print "updated DC for {0!s}, bucket {1:d} to {2:f}".format(self._id, bucketIdx, dc)
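  # Editor's note -- a sketch restating the rescaling identity the comments in
  # store() rely on (my reading of the code above, not an authoritative spec):
  # a duty cycle kept normalized to iteration t0 is brought up to iteration t by
  # multiplying by (1 - alpha)**(t - t0).  Storing dc + alpha / (1 - alpha)**(t - t0)
  # therefore becomes dc * (1 - alpha)**(t - t0) + alpha once that deferred rescale
  # is applied, so only the ratios matter until a value approaches
  # DUTY_CYCLE_UPDATE_INTERVAL and forces the full update.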
def infer(self, votes):
"""Look up and return the votes for each bucketIdx for this bit.
Parameters:
--------------------------------------------------------------------
votes: a numpy array, initialized to all 0's, that should be filled
in with the votes for each bucket. The vote for bucket index N
should go into votes[N].
"""
# Place the duty cycle into the votes and update the running total for
# normalization
total = 0
for (bucketIdx, dc) in enumerate(self._stats):
# Not updating to current iteration since we are normalizing anyway
if dc > 0.0:
votes[bucketIdx] = dc
total += dc
# Experiment... try normalizing the votes from each bit
if total > 0:
votes /= total
    if self._classifier.verbosity >= 2:
print "bucket votes for {0!s}:".format((self._id)), _pFormatArray(votes)
def __getstate__(self):
return dict((elem, getattr(self, elem)) for elem in self.__slots__)
def __setstate__(self, state):
version = 0
if "_version" in state:
version = state["_version"]
# Migrate from version 0 to version 1
if version == 0:
stats = state.pop("_stats")
assert isinstance(stats, dict)
maxBucket = max(stats.iterkeys())
|
self._stats = array.array("f", itertools.repeat(0.0, maxBucket + 1))
for (index, value) in stats.iteritems():
self._stats[index] = value
elif version == 1:
state.pop("_updateDutyCycles", None)
elif version == 2:
pass
else:
raise Exception("Error while deserializing {0!s}: Invalid version {1!s}".format(self.__class__, version))
for (attr, value) in state.iteritems():
setattr(self, attr, value)
self._version = BitHistory.__VERSION__
def write(self, proto):
proto.id = self._id
statsProto = proto.init("stats", len(self._stats))
for (bucketIdx, dutyCycle) in enumerate(self._stats):
statsProto[bucketIdx].index = bucketIdx
statsProto[bucketIdx].dutyCycle = dutyCycle
proto.lastTotalUpdate = self._lastTotalUpdate
proto.le
|
charlesccychen/beam
|
sdks/python/apache_beam/transforms/window.py
|
Python
|
apache-2.0
| 18,027
| 0.007544
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
__all__ = [
'TimestampCombiner',
'WindowFn',
'BoundedWindow',
'IntervalWindow',
'TimestampedValue',
'GlobalWindow',
'NonMergingWindowFn',
'GlobalWindows',
'FixedWindows',
'SlidingWindows',
'Sessions',
]
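# Editor's sketch (not part of this module): a minimal illustration of the
# windowing described in the module docstring, as it would appear in user code
# built on the public apache_beam API.  The pipeline and element values are
# hypothetical.
#
#   import apache_beam as beam
#
#   with beam.Pipeline() as p:
#       grouped = (p
#                  | beam.Create([('user', 1), ('user', 2)])
#                  | beam.WindowInto(beam.window.FixedWindows(60))  # 60-second windows
#                  | beam.GroupByKey())  # groups by key *and* window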
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
"""Determines how output timestamps of grouping operations are assigned."""
OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
# TODO(robertwb): Add this to the runner API or remove it.
OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'
@staticmethod
def get_impl(timestamp_combiner, window_fn):
if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
return timeutil.OutputAtEndOfWindowImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
return timeutil.OutputAtEarliestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
return timeutil.OutputAtLatestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
else:
raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):
"""An abstract windowing function defining a basic assign and merge."""
class AssignContext(object):
"""Context passed to WindowFn.assign()."""
def __init__(self, timestamp, element=None, window=None):
self.timestamp = Timestamp.of(timestamp)
self.element = element
self.window = window
@abc.abstractmethod
def assign(self, assign_context):
"""Associates windows to an element.
Arguments:
assign_context: Instance of AssignContext.
Returns:
An iterable of BoundedWindow.
"""
raise NotImplementedError
class MergeContext(object):
"""Context passed to WindowFn.merge() to perform merging, if any."""
def __init__(self, windows):
self.windows = list(windows)
def merge(self, to_be_merged, merge_result):
raise NotImplementedError
@abc.abstractmethod
def merge(self, merge_context):
"""Returns a window that is the result of merging a set of windows."""
raise NotImplementedError
  def is_merging(self):
    """Returns whether this WindowFn merges windows."""
return True
@abc.abstractmethod
def get_window_coder(self):
raise NotImplementedError
def get_transformed_output_time(self, window, input_timestamp): # pylint: disable=unused-argument
"""Given input time and output window, returns output time for window.
    If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
Windowing, the output timestamp for the given window will be the earliest
of the timestamps returned by get_transformed_output_time() for elements
of the window.
Arguments:
window: Output window of element.
input_timestamp: Input timestamp of element as a timeutil.Timestamp
object.
Returns:
Transformed timestamp.
"""
# By default, just return the input timestamp.
return input_timestamp
urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
"""A window for timestamps in range (-infinity, end).
Attributes:
end: End of window.
"""
def __init__(self, end):
self.end = Timestamp.of(end)
def max_timestamp(self):
return self.end.predecessor()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
# Order first by endpoint, then arbitrarily
return self.end != other.end or hash(self) != hash(other)
def __lt__(self, other):
if self.end != other.end:
return self.end < other.end
return hash(self) < hash(other)
def __le__(self, other):
if self.end != other.end:
return self.end <= other.end
return hash(self) <= hash(other)
def __gt__(self, other):
if self.end != other.end:
return self.end > other.end
return hash(self) > hash(other)
def __ge__(self, other):
if self.end != other.end:
return self.end >= other.end
return hash(self) >= hash(other)
def __hash__(self):
raise NotImplementedError
def __repr__(self):
return '[?, %s)' % float(self.end)
class IntervalWindow(BoundedWindow):
"""A window for timestamps in range [start, end).
Attributes:
start: Start of window as seconds since Unix epoch.
end: End of window as seconds since Unix epoch.
"""
def __init__(self, start, end):
super(IntervalWindow, self).__init__(end)
self.start = Timestamp.of(start)
def __hash__(self):
return hash((self.start, self.end))
def __eq__(self, other):
return (self.start == other.start
and self.end == other.end
and type(self) == type(o
|
rparrapy/sugar
|
extensions/cpsection/updater/view.py
|
Python
|
gpl-2.0
| 16,181
| 0
|
# Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
from gettext import ngettext
import locale
import logging
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import Icon, CellRendererIcon
from jarabe.controlpanel.sectionview import SectionView
from jarabe.model.update import updater
from jarabe.model import bundleregistry
_DEBUG_VIEW_ALL = True
class ActivityUpdater(SectionView):
def __init__(self, model, alerts):
SectionView.__init__(self)
self._model = updater.get_instance()
self._id_progresss = self._model.connect('progress',
self.__progress_cb)
self._id_updates = self._model.connect('updates-available',
self.__updates_available_cb)
self._id_error = self._model.connect('error',
self.__error_cb)
self._id_finished = self._model.connect('finished',
self.__finished_cb)
self.set_spacing(style.DEFAULT_SPACING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._top_label = Gtk.Label()
self._top_label.set_line_wrap(True)
self._top_label.set_justify(Gtk.Justification.LEFT)
self._top_label.props.xalign = 0
self.pack_start(self._top_label, False, True, 0)
self._top_label.show()
separator = Gtk.HSeparator()
self.pack_start(separator, False, True, 0)
separator.show()
self._bottom_label = Gtk.Label()
self._bottom_label.set_line_wrap(True)
self._bottom_label.set_justify(Gtk.Justification.LEFT)
self._bottom_label.props.xalign = 0
self._bottom_label.set_markup(
_('Software updates correct errors, eliminate security '
'vulnerabilities, and provide new features.'))
self.pack_start(self._bottom_label, False, True, 0)
self._bottom_label.show()
self._update_box = None
self._progress_pane = None
state = self._model.get_state()
if state in (updater.STATE_IDLE, updater.STATE_CHECKED):
self._refresh()
elif state in (updater.STATE_CHECKING, updater.STATE_DOWNLOADING,
updater.STATE_UPDATING):
self._switch_to_progress_pane()
self._progress_pane.set_message(_('Update in progress...'))
self.connect('destroy', self.__destroy_cb)
def __destroy_cb(self, widget):
self._model.disconnect(self._id_progresss)
self._model.disconnect(self._id_updates)
self._model.disconnect(self._id_error)
self._model.disconnect(self._id_finished)
self._model.clean()
def _switch_to_update_box(self, updates):
if self._update_box in self.get_children():
return
        if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box is None:
self._update_box = UpdateBox(updates)
self._update_box.refresh_button.connect(
'clicked',
self.__refresh_button_clicked_cb)
self._update_box.install_button.connect(
|
'clicked',
self.__install_button_clicked_cb)
self.pack_start(self._update_box, expand=True, fill=True, padding=0)
self._update_box.show()
def _switch_to_progress_pane(self):
if self._progress_pane in self.get_children():
return
if self._model.get_state() == updater.STATE_CHECKING:
top_message = _('Checking for updates...')
else:
top_message = _('Installing updates...')
self._top_label.set_markup('<big>%s</big>' % top_message)
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
if self._progress_pane is None:
self._progress_pane = ProgressPane()
self._progress_pane.cancel_button.connect(
'clicked',
self.__cancel_button_clicked_cb)
self.pack_start(
self._progress_pane, expand=True, fill=False, padding=0)
self._progress_pane.show()
def _clear_center(self):
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
def __progress_cb(self, model, state, bundle_name, progress):
if state == updater.STATE_CHECKING:
if bundle_name:
message = _('Checking %s...') % bundle_name
else:
message = _('Looking for updates...')
elif state == updater.STATE_DOWNLOADING:
message = _('Downloading %s...') % bundle_name
elif state == updater.STATE_UPDATING:
message = _('Updating %s...') % bundle_name
self._switch_to_progress_pane()
self._progress_pane.set_message(message)
self._progress_pane.set_progress(progress)
def __updates_available_cb(self, model, updates):
logging.debug('ActivityUpdater.__updates_available_cb')
available_updates = len(updates)
if not available_updates:
top_message = _('Your software is up-to-date')
else:
top_message = ngettext('You can install %s update',
'You can install %s updates',
available_updates)
top_message = top_message % available_updates
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
if not available_updates:
self._clear_center()
else:
self._switch_to_update_box(updates)
def __error_cb(self, model, updates):
logging.debug('ActivityUpdater.__error_cb')
top_message = _('Can\'t connect to the activity server')
self._top_label.set_markup('<big>%s</big>' % top_message)
self._bottom_label.set_markup(
_('Verify your connection to internet and try again, '
'or try again later'))
self._clear_center()
def __refresh_button_clicked_cb(self, button):
self._refresh()
def _refresh(self):
self._model.check_updates()
def __install_button_clicked_cb(self, button):
self._model.update(self._update_box.get_bundles_to_update())
def __cancel_button_clicked_cb(self, button):
self._model.cancel()
def __finished_cb(self, model, installed_updates, failed_updates,
cancelled):
num_installed = len(installed_updates)
logging.debug('ActivityUpdater.__finished_cb')
top_message = ngettext('%s update was installed',
'%s updates were installed', num_installed)
top_message = top_message % num_installed
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
self._clear_center()
def undo(self):
self._model.cancel()
class ProgressPane
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.py
|
Python
|
mit
| 509
| 0.043222
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.iff"
result.attribute_template_id = -1
	result.stfName("craft_item_ingredients_n","armor_mandalorian_belt")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
fidencio/sssd
|
src/sbus/codegen/sbus_Introspection.py
|
Python
|
gpl-3.0
| 9,617
| 0
|
#
# Authors:
# Pavel Brezina <pbrezina@redhat.com>
#
# Copyright (C) 2017 Red Hat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from collections import OrderedDict
import xml.etree.ElementTree as etree
class Introspectable:
class Element(object):
""" This is a basic introspectable object. This class will make
sure that the given xml element is of correct type and provide
some helper functions to simplify work of the children.
Children objects must implement TagName attribute, which contains
the name of the expected xml tag.
All introspectable objects contain the following properties:
- name : str -- name of the object
- annotations : OrderedDict -- available annotations
"""
def __init__(self, element):
self.check(element, self.TagName)
self.element = element
self.name = element.attrib["name"]
self.annotations = self.find(SBus.Annotation)
def find(self, object_class):
return Introspectable.FindElements(self.element, object_class)
def check(self, element, tagname):
if element.tag != tagname:
raise ValueError('Unexpected tag name "%s" (%s expected)!'
% (element.tag, tagname))
if "name" not in element.attrib:
raise ValueError('Missing attribute name!')
def getAttr(self, name, default_value):
return self.element.attrib.get(name, default_value)
def getExistingAttr(self, name):
if name not in self.element.attrib:
raise ValueError('Element %s name="%s" is missing attribute %s'
% (self.TagName, self.name, name))
return self.element.attrib[name]
    class Invokable(Element):
""" This is a base class for invokable objects -- methods and signals.
        Invokable objects have the following additional attributes:
        - input : OrderedDict -- input signature and arguments
- output : OrderedDict -- output signature and arguments
"""
def __init__(self, element):
            super(Introspectable.Invokable, self).__init__(element)
self.key = self.getAttr("key", None)
self.arguments = self.find(SBus.Argument)
input = self.getInputArguments()
output = self.getOutputArguments()
self.input = SBus.Signature(input, self.annotations)
self.output = SBus.Signature(output, self.annotations)
return
def getInputArguments(self):
return self.getArguments("in")
def getOutputArguments(self):
return self.getArguments("out")
def getArguments(self, type):
args = OrderedDict()
for name, arg in self.arguments.items():
if type == "in" and arg.isInput():
args[name] = arg
continue
if type == "out" and arg.isOutput():
args[name] = arg
continue
return args
@staticmethod
def Introspect(path):
root = etree.parse(path).getroot()
return Introspectable.FindElements(root, SBus.Interface)
@staticmethod
def FindElements(parent, object_class):
dict = OrderedDict()
for child in parent:
if child.tag != object_class.TagName:
continue
object = object_class(child)
if object.name in dict:
raise ValueError('%s name="%s" is already present '
'in the same parent element\n'
% (object_class.TagName, object.name))
dict[object.name] = object
"""
Arguments can't be sorted and annotations order should be left on
the author of introspection. Otherwise we want to sort the dictionary
alphabetically based on keys.
"""
if object_class in [SBus.Argument, SBus.Annotation]:
return dict
return OrderedDict(sorted(dict.items()))
class SBus:
class Interface(Introspectable.Element):
TagName = "interface"
def __init__(self, element):
super(SBus.Interface, self).__init__(element)
self.methods = self.find(SBus.Method)
self.signals = self.find(SBus.Signal)
self.properties = self.find(SBus.Property)
return
class Method(Introspectable.Invokable):
TagName = "method"
def __init__(self, element):
super(SBus.Method, self).__init__(element)
class Signal(Introspectable.Invokable):
TagName = "signal"
def __init__(self, element):
super(SBus.Signal, self).__init__(element)
class Property(Introspectable.Invokable):
TagName = "property"
def __init__(self, element):
self.name = element.attrib["name"]
self.element = element
self.access = self.getExistingAttr("access")
self.type = self.getExistingAttr("type")
super(SBus.Property, self).__init__(element)
if self.key is not None:
raise ValueError('Keying is not supported on properties: %s '
% self.name)
def getInputArguments(self):
if not self.isWritable():
return {}
return {"value": SBus.Argument.Create("value", self.type, "in")}
def getOutputArguments(self):
if not self.isReadable():
return {}
return {"value": SBus.Argument.Create("value", self.type, "out")}
def isReadable(self):
return self.access == "read" or self.access == "readwrite"
def isWritable(self):
return self.access == "write" or self.access == "readwrite"
class Annotation(Introspectable.Element):
TagName = "annotation"
def __init__(self, element):
super(SBus.Annotation, self).__init__(element)
self.value = self.getAttr("value", None)
return
@staticmethod
def Find(annotations, name, default_value):
if name in annotations:
annotation = annotations[name]
if annotation.value is None:
return default_value
return annotation.value
return default_value
@staticmethod
def FindBool(annotations, name, Assume=False):
assume = "true" if Assume else "false"
value = SBus.Annotation.Find(annotations, name, assume)
if value.lower() == "true":
return True
else:
return False
@staticmethod
def CheckIfTrue(names, annotations):
for name in names:
if SBus.Annotation.FindBool(annotations, name, False):
return True
return False
@staticmethod
def CheckIfFalse(names, annotations):
for name in names:
if not SBus.Annotation.FindBool(annotations, name, True):
return False
return True
@staticmethod
def AtleastOneIsSet(names, annotations):
for name in names:
value = SBus.Annotation.Find(annotations, name, None)
if value is not None:
return T
|
GHubgenius/clusterd
|
src/platform/jboss/fingerprints/JBoss5JMX.py
|
Python
|
mit
| 182
| 0.005495
|
from src.platform.jboss.interfaces import JMXInterface
class FPrint(JMXInterface):
def __init__(self):
super(FPrint, self).__init__()
self.version = "5.0"
|
pydanny/django-easy-profiles
|
test_project/urls.py
|
Python
|
mit
| 223
| 0.004484
|
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    (r'^profiles/', include('easy_profiles.urls')),
(r'^admin/', include(admin.site.urls)),
)
|
HaraldWeber/client
|
src/modvault/utils.py
|
Python
|
gpl-3.0
| 15,457
| 0.012292
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
import os
import sys
import urllib2
import re
import shutil
from PyQt4 import QtCore, QtGui
from util import strtodate, datetostr, now, PREFSFILENAME
import util
import logging
from vault import luaparser
import warnings
import cStringIO
import zipfile
logger = logging.getLogger(__name__)
MODFOLDER = os.path.join(util.PERSONAL_DIR, "My Games", "Gas Powered Games", "Supreme Commander Forged Alliance", "Mods")
MODVAULT_DOWNLOAD_ROOT = "http://content.faforever.com/faf/vault/"
installedMods = [] # This is a global list that should be kept intact. So it should be cleared using installedMods[:] = []
class ModInfo(object):
def __init__(self, **kwargs):
self.name = "Not filled in"
self.version = 0
self.folder = ""
self.__dict__.update(kwargs)
def setFolder(self, localfolder):
self.localfolder = localfolder
self.absfolder = os.path.join(MODFOLDER, localfolder)
self.mod_info = os.path.join(self.absfolder, "mod_info.lua")
|
def update(self):
self.setFolder(self.localfolder)
if isinstance(self.version, int):
self.totalname = "%s v%d" % (self.name, self.version)
elif isinstance(self.version, float):
|
s = str(self.version).rstrip("0")
self.totalname = "%s v%s" % (self.name, s)
else:
raise TypeError, "version is not an int or float"
def to_dict(self):
out = {}
for k,v in self.__dict__.items():
if isinstance(v, (unicode, str, int, float)) and not k[0] == '_':
out[k] = v
return out
def __str__(self):
return '%s in "%s"' % (self.totalname, self.localfolder)
def getAllModFolders(): #returns a list of names of installed mods
mods = []
if os.path.isdir(MODFOLDER):
mods = os.listdir(MODFOLDER)
return mods
def getInstalledMods():
installedMods[:] = []
for f in getAllModFolders():
m = None
if os.path.isdir(os.path.join(MODFOLDER,f)):
try:
m = getModInfoFromFolder(f)
except:
continue
else:
try:
m = getModInfoFromZip(f)
except:
continue
if m:
installedMods.append(m)
logger.debug("getting installed mods. Count: %d" % len(installedMods))
return installedMods
def modToFilename(mod):
return mod.absfolder
def isModFolderValid(folder):
return os.path.exists(os.path.join(folder,"mod_info.lua"))
def iconPathToFull(path):
"""
    Converts a path supplied in the icon field of mod_info into an absolute path to that file.
So "/mods/modname/data/icons/icon.dds" becomes
"C:\Users\user\Documents\My Games\Gas Powered Games\Supreme Commander Forged Alliance\Mods\modname\data\icons\icon.dds"
"""
if not (path.startswith("/mods") or path.startswith("mods")):
logger.info("Something went wrong parsing the path %s" % path)
return ""
return os.path.join(MODFOLDER, os.path.normpath(path[5+int(path[0]=="/"):])) #yay for dirty hacks
def fullPathToIcon(path):
p = os.path.normpath(os.path.abspath(path))
return p[len(MODFOLDER)-5:].replace('\\','/')
def getIcon(name):
img = os.path.join(util.CACHE_DIR, name)
if os.path.isfile(img):
logger.debug("Using cached preview image for: " + name)
return img
return None
def getModInfo(modinfofile):
modinfo = modinfofile.parse({"name":"name","uid":"uid","version":"version","author":"author",
"description":"description","ui_only":"ui_only",
"icon":"icon"},
{"version":"1","ui_only":"false","description":"","icon":"","author":""})
modinfo["ui_only"] = (modinfo["ui_only"] == 'true')
if not "uid" in modinfo:
logger.warn("Couldn't find uid for mod %s" % modinfo["name"])
return None
#modinfo["uid"] = modinfo["uid"].lower()
try:
modinfo["version"] = int(modinfo["version"])
except:
try:
modinfo["version"] = float(modinfo["version"])
except:
modinfo["version"] = 0
logger.warn("Couldn't find version for mod %s" % modinfo["name"])
return (modinfofile, modinfo)
def parseModInfo(folder):
if not isModFolderValid(folder):
return None
modinfofile = luaparser.luaParser(os.path.join(folder,"mod_info.lua"))
return getModInfo(modinfofile)
modCache = {}
def getModInfoFromZip(zfile):
'''get the mod info from a zip file'''
if zfile in modCache:
return modCache[zfile]
r = None
if zipfile.is_zipfile(os.path.join(MODFOLDER,zfile)) :
zip = zipfile.ZipFile(os.path.join(MODFOLDER,zfile), "r", zipfile.ZIP_DEFLATED)
if zip.testzip() == None :
for member in zip.namelist() :
filename = os.path.basename(member)
if not filename:
continue
if filename == "mod_info.lua":
modinfofile = luaparser.luaParser("mod_info.lua")
modinfofile.iszip = True
modinfofile.zip = zip
r = getModInfo(modinfofile)
if r == None:
logger.debug("mod_info.lua not found in zip file %s" % zfile)
return None
f, info = r
if f.error:
logger.debug("Error in parsing mod_info.lua in %s" % zfile)
return None
m = ModInfo(**info)
print zfile
m.setFolder(zfile)
m.update()
modCache[zfile] = m
return m
def getModInfoFromFolder(modfolder): # modfolder must be local to MODFOLDER
if modfolder in modCache:
return modCache[modfolder]
r = parseModInfo(os.path.join(MODFOLDER,modfolder))
if r == None:
logger.debug("mod_info.lua not found in %s folder" % modfolder)
return None
f, info = r
if f.error:
logger.debug("Error in parsing %s/mod_info.lua" % modfolder)
return None
m = ModInfo(**info)
m.setFolder(modfolder)
m.update()
modCache[modfolder] = m
return m
def getActiveMods(uimods=None): # returns a list of ModInfo's containing information of the mods
"""uimods:
None - return all active mods
True - only return active UI Mods
False - only return active non-UI Mods
"""
active_mods = []
try:
if not os.path.exists(PREFSFILENAME):
logger.info("No game.prefs file found")
return []
l = luaparser.luaParser(PREFSFILENAME)
l.loweringKeys = False
modlist = l.parse({"active_mods":"active_mods"},{"active_mods":{}})["active_mods"]
if l.error:
logger.info("Error in reading the game.prefs file")
return []
uids = [uid for uid,b in modlist.items() if b == 'true']
#logger.debug("Active mods detected: %s" % str(uids))
allmods = []
for m in installedMods:
if ((uimods == True and m.ui_only) or (uimods == False and not m.ui_only) or uimods == None):
allmods.append(m)
active_mods = [m for m in allmods if m.uid in uids]
#log
|
lordakshaya/pyexcel
|
examples/example_usage_of_internal_apis/simple_usage/series.py
|
Python
|
bsd-3-clause
| 2,753
| 0.006902
|
"""
series.py
:copyright: (c) 2014-2015 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
This shows how to use **SeriesReader** to get the data in various ways
But you can use them with **Reader** class as well
"""
import os
from pyexcel.ext import ods3
from pyexcel import SeriesReader
from pyexcel.utils import to_dict, to_array
from pyexcel.filters import OddRowFilter, EvenColumnFilter
from pyexcel import Writer
import json
def main(base_dir):
# print all in json
#
# Column 1 Column 2 Column 3
# 1 4 7
# 2 5 8
# 3 6 9
reader = SeriesReader(os.path.join(base_dir,"example_series.ods"))
data = to_dict(reader)
print(json.dumps(data))
# output:
# {"Column 2": [4.0, 5.0, 6.0], "Column 3": [7.0, 8.0, 9.0], "Column 1": [1.0, 2.0, 3.0]}
# get the column headers
print(reader.colnames)
# [u'Column 1', u'Column 2', u'Column 3']
# get the content in one dimensional array
data = to_array(reader.enumerate())
print(data)
# [1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0]
# get the content in one dimensional array
# in reverse order
data = to_array(reader.reverse())
print(data)
# get the content in one dimensional array
# but iterate it vertically
data = to_array(reader.vertical())
print(data)
# [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
# get the content in one dimensional array
    # but iterate it vertically in reverse
# order
data = to_array(reader.rvertical())
print(data)
#[9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]
# get a two dimensional array
data = to_array(reader.rows())
print(data)
#[[1.0, 4.0, 7.0], [2.0, 5.0, 8.0], [3.0, 6.0, 9.0]]
    # get a two dimensional array in reverse
# order
data = to_array(reader.rrows())
print(data)
    # [[3.0, 6.0, 9.0], [2.0, 5.0, 8.0], [1.0, 4.0, 7.0]]
# get a two dimensional array but stack columns
data = to_array(reader.columns())
print(data)
# [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
# get a two dimensional array but stack columns
# in reverse order
data = to_array(reader.rcolumns())
print(data)
#[[7.0, 8.0, 9.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]]
# filter out odd rows and even columns
reader.filter(OddRowFilter())
reader.filter(EvenColumnFilter())
data = to_dict(reader)
print(data)
# {u'Column 3': [8.0], u'Column 1': [2.0]}
# and you can write the filtered results
# into a file
w = Writer("example_series_filter.xls")
w.write_reader(reader)
w.close()
if __name__ == '__main__':
main(os.getcwd())
|
geography-munich/sciprog
|
material/sub/jrjohansson/scripts/hello-world.py
|
Python
|
apache-2.0
| 45
| 0
|
#!/usr/bin/env python
print("Hello world!")
| |
ep1cman/workload-automation
|
wlauto/devices/linux/odroidxu3_linux/__init__.py
|
Python
|
apache-2.0
| 1,073
| 0.001864
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import LinuxDevice, Parameter
class OdroidXU3LinuxDevice(LinuxDevice):
name = "odroidxu3_linux"
    description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
core_modules = [
'odroidxu3-fan',
]
parameters = [
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
]
abi = 'armeabi'
|
davogler/POSTv3
|
customers/migrations/0010_auto_20170124_2322.py
|
Python
|
mit
| 412
| 0.002427
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customers', '0009_recipient_type'),
]
operations = [
migrations.AlterModelOptions(
name='recipient',
            options={'ordering': ['last_name'], 'verbose_name_plural': 'Recipients'},
),
]
|
jr55662003/My_Rosalind_solution
|
RSTR.py
|
Python
|
gpl-3.0
| 1,176
| 0.008547
|
'''
Given: A positive integer N≤100000, a number x between 0 and 1, and a DNA string s of length at most 10 bp.
Return: The probability that if N random DNA strings having the same length as s are constructed with GC-content x
(see “Introduction to Random Strings”), then at least one of the strings equals s.
We allow for the same random string to be created more than once.
'''
# P(at least one of the strings equals s) = 1 - P(none of the strings equals s)
def random_motif_match(N, x, s):
s_construct = {"A": (1 - x) / 2,
"T": (1 - x) / 2,
"C": x / 2,
"G": x / 2}
prob = 1
# probability of exactly equals to s
for b in s:
prob *= s_construct[b]
return 1 - (1 - prob) ** N
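# Editor's sketch: a tiny hand-checked case of the complement rule above
# (hypothetical values, not from the Rosalind dataset).  With N=1 and
# GC-content x=0.6, G and C each have probability 0.3, so s="GC" is produced
# with probability 0.3 * 0.3 = 0.09 and the answer is 1 - (1 - 0.09)**1 = 0.09.
assert abs(random_motif_match(1, 0.6, "GC") - 0.09) < 1e-9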
if __name__ == "__main__":
with open("data/rosalind_rstr.txt", "r") as f:
lines = f.readlines()
N = int(lines[0].rstrip().split(" ")[0])
x = float(lines[0].rstrip().split(" ")[1])
s = lines[1].rstrip()
with open("data/output_rstr.txt", "w") as o:
o.write(str(random_motif_match(N, x, s)))
print(random_motif_match(N, x, s))
|
OS2World/APP-INTERNET-torpak_2
|
Tools/scripts/byext.py
|
Python
|
mit
| 3,894
| 0.002311
|
#! /usr/bin/env python
"""Show file statistics by extension."""
import os
import sys
class Stats:
def __init__(self):
self.stats = {}
def statargs(self, args):
for arg in args:
if os.path.isdir(arg):
self.statdir(arg)
elif os.path.isfile(arg):
self.statfile(arg)
else:
                sys.stderr.write("Can't find %s\n" % arg)
self.addstats("<???>", "unknown", 1)
def statdir(self, dir):
self.addstats("<dir>", "dirs", 1)
try:
names = os.listdir(dir)
except os.error, err:
            sys.stderr.write("Can't list %s: %s\n" % (dir, err))
            self.addstats("<dir>", "unlistable", 1)
return
names.sort()
for name in names:
if name.startswith(".#"):
continue # Skip CVS temp files
if name.endswith("~"):
                continue # Skip Emacs backup files
full = os.path.join(dir, name)
if os.path.islink(full):
self.addstats("<lnk>", "links", 1)
elif os.path.isdir(full):
self.statdir(full)
else:
self.statfile(full)
def statfile(self, file):
head, ext = os.path.splitext(file)
head, base = os.path.split(file)
if ext == base:
ext = "" # E.g. .cvsignore is deemed not to have an extension
ext = os.path.normcase(ext)
if not ext:
ext = "<none>"
self.addstats(ext, "files", 1)
try:
f = open(file, "rb")
except IOError, err:
sys.stderr.write("Can't open %s: %s\n" % (file, err))
self.addstats(ext, "unopenable", 1)
return
data = f.read()
f.close()
self.addstats(ext, "bytes", len(data))
if '\0' in data:
self.addstats(ext, "binary", 1)
return
if not data:
self.addstats(ext, "empty", 1)
#self.addstats(ext, "chars", len(data))
lines = data.splitlines()
self.addstats(ext, "lines", len(lines))
del lines
words = data.split()
self.addstats(ext, "words", len(words))
def addstats(self, ext, key, n):
d = self.stats.setdefault(ext, {})
d[key] = d.get(key, 0) + n
def report(self):
exts = self.stats.keys()
exts.sort()
# Get the column keys
columns = {}
for ext in exts:
            columns.update(self.stats[ext])
cols = columns.keys()
cols.sort()
colwidth = {}
colwidth["ext"] = max([len(ext) for ext in exts])
minwidth = 6
self.stats["TOTAL"] = {}
for col in cols:
total = 0
cw = max(minwidth, len(col))
for ext in exts:
|
value = self.stats[ext].get(col)
if value is None:
w = 0
else:
w = len("%d" % value)
total += value
cw = max(cw, w)
cw = max(cw, len(str(total)))
colwidth[col] = cw
self.stats["TOTAL"][col] = total
exts.append("TOTAL")
for ext in exts:
self.stats[ext]["ext"] = ext
cols.insert(0, "ext")
def printheader():
for col in cols:
print "%*s" % (colwidth[col], col),
print
printheader()
for ext in exts:
for col in cols:
value = self.stats[ext].get(col, "")
print "%*s" % (colwidth[col], value),
print
printheader() # Another header at the bottom
def main():
args = sys.argv[1:]
if not args:
args = [os.curdir]
s = Stats()
s.statargs(args)
s.report()
if __name__ == "__main__":
main()
|
leeseuljeong/leeseulstack_neutron
|
neutron/api/v2/attributes.py
|
Python
|
apache-2.0
| 28,875
| 0.000035
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
:param strict: Specifies whether additional keys are allowed to be present.
:return: True, if keys in the dictionary correspond to the specification.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed."
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
return msg
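# Editor's sketch: illustrative calls for the helper above (values are
# hypothetical).  A matching dict yields None; a mismatch yields the error
# string; with strict=False extra keys are tolerated.
#   _verify_dict_keys(['start', 'end'], {'start': '10.0.0.2', 'end': '10.0.0.254'})  # None
#   _verify_dict_keys(['start', 'end'], {'start': '10.0.0.2'})  # error message
#   _verify_dict_keys(['start'], {'start': '10.0.0.2', 'gw': '10.0.0.1'}, strict=False)  # None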
def is_attr_set(attribute):
return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
msg = _validate_string(data, max_len=max_len)
if msg:
return msg
if not data.strip():
return _("'%s' Blank strings are not permitted") % data
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
if not isinstance(data, basestring):
msg = _("'%s' is not a valid string") % data
LOG.debug(msg)
return msg
if max_len is not None and len(data) > max_len:
msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
{'data': data, 'max_len': max_len})
LOG.debug(msg)
return msg
def _validate_boolean(data, valid_values=None):
try:
convert_to_boolean(data)
except n_exc.InvalidInput:
msg = _("'%s' is not a valid boolean value") % data
LOG.debug(msg)
return msg
def _validate_range(data, valid_values=None):
"""Check that integer value is within a range provided.
Test is inclusive. Allows either limit to be ignored, to allow
checking ranges where only the lower or upper limit matter.
It is expected that the limits provided are valid integers or
the value None.
"""
min_value = valid_values[0]
max_value = valid_values[1]
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if min_value is not UNLIMITED and data < min_value:
msg = _("'%(data)s' is too small - must be at least "
"'%(limit)d'") % {'data': data, 'limit': min_value}
LOG.debug(msg)
return msg
if max_value is not UNLIMITED and data > max_value:
msg = _("'%(data)s' is too large - must be no larger than "
"'%(limit)d'") % {'data': data, 'limit': max_value}
LOG.debug(msg)
return msg
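# Editor's sketch: hypothetical uses of the range check above.  None means the
# value passed; a string is the failure message; UNLIMITED skips that bound.
#   _validate_range('5', [1, 10])        # None ('5' is coerced to int)
#   _validate_range(0, [1, UNLIMITED])   # "'0' is too small - must be at least '1'"
#   _validate_range(7, [UNLIMITED, 5])   # "'7' is too large - must be no larger than '5'"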
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if re.search('\s', data):
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
try:
        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
except Exception:
valid_mac = False
# TODO(arosen): The code in this file should be refactored
    # so it catches the correct exceptions. _validate_no_whitespace
# raises AttributeError if data is None.
if not valid_mac:
msg = _("'%s' is not a valid MAC address") % data
LOG.debug(msg)
return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is None:
return
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
try:
netaddr.IPAddress(_validate_no_whitespace(data))
except Exception:
msg = _("'%s' is not a valid IP address") % data
LOG.debug(msg)
return msg
def _validate_ip_pools(data, valid_values=None):
"""Validate that start and end IP addresses are present.
In addition to this the IP addresses will also be validated
"""
if not isinstance(data, list):
msg = _("Invalid data format for IP pool: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['start', 'end']
for ip_pool in data:
msg = _verify_dict_keys(expected_keys, ip_pool)
if msg:
LOG.debug(msg)
return msg
for k in expected_keys:
msg = _validate_ip_address(ip_pool[k])
if msg:
LOG.debug(msg)
return msg
def _validate_fixed_ips(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for fixed IP: '%s'") % data
LOG.debug(msg)
return msg
ips = []
for fixed_ip in data:
if not isinstance(fixed_ip, dict):
msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
LOG.debug(msg)
return msg
if 'ip_address' in fixed_ip:
# Ensure that duplicate entries are not set - just checking IP
# suffices. Duplicate subnet_id's are legitimate.
fixed_ip_address = fixed_ip['ip_address']
if fixed_ip_address in ips:
msg = _("Duplicate IP address '%s'") % fixed_ip_address
else:
msg = _validate_ip_address(fixed_ip_address)
if msg:
LOG.debug(msg)
return msg
ips.append(fixed_ip_address)
if 'subnet_id' in fixed_ip:
msg = _validate_uuid(fixed_ip['subnet_id'])
if msg:
LOG.debug(msg)
return msg
def _validate_ip_or_hostname(host):
ip_err = _validate_ip_address(host)
if not ip_err:
return
name_err = _validate_hostname(host)
if not name_err:
return
msg = _("%(host)s is not a valid IP or hostname. Details: "
"%(ip_err)s, %(name_err)s") % {'ip_err': ip_err, 'host': host,
'name_err': name_err}
return msg
def _validate_nameservers(data, valid_values=None):
if not hasattr(data, '__iter__'):
msg = _("Invalid data format fo
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/food/shared_dish_cho_nor_hoola.py
|
Python
|
mit
| 452
| 0.04646
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
	result.template = "object/draft_schematic/food/shared_dish_cho_nor_hoola.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
jwacalex/MULTEX-EAST-PoS-Tagger
|
mte.py
|
Python
|
lgpl-3.0
| 12,810
| 0.009446
|
"""
A reader for corpora whose documents are in MTE format.
"""
import os
from functools import reduce
from nltk import compat
from nltk.corpus.reader import concat, TaggedCorpusReader
lxmlAvailable = False
try:
from lxml import etree
lxmlAvailable = True
except ImportError:
#first try c version of ElementTree
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
import re
def xpath(root, path, ns):
if lxmlAvailable:
return root.xpath(path, namespaces=ns)
else:
return root.findall(path, ns)
class MTEFileReader:
"""
Class for loading the content of the multext-east corpus. It
parses the xml files and does some tag-filtering depending on the
given method parameters.
"""
ns = {'tei': 'http://www.tei-c.org/ns/1.0', 'xml': 'http://www.w3.org/XML/1998/namespace'}
tag_ns = '{http://www.tei-c.org/ns/1.0}'
xml_ns = '{http://www.w3.org/XML/1998/namespace}'
def __init__(self, file_path):
tree = etree.parse(file_path)
self.__root = xpath(tree.getroot(), './tei:text/tei:body', self.ns)[0]
@classmethod
def _words(self, text_root):
return [w.text for w in xpath(text_root, './/*', self.ns) if
w.tag == self.tag_ns + "w" or w.tag == self.tag_ns + "c"]
@classmethod
def _sents(self, text_root):
return [MTEFileReader._words(s) for s in xpath(text_root, './/tei:s', self.ns)]
@classmethod
def _paras(self, text_root):
return [MTEFileReader._sents(p) for p in xpath(text_root, './/tei:p', self.ns)]
@classmethod
def _lemma_words(self, text_root):
return [(w.text, w.attrib['lemma']) for w in xpath(text_root, './/tei:w', self.ns)]
@classmethod
def _tagged_words(self, text_root, tags=""):
if tags is None or tags == "":
return [(w.text, w.attrib['ana']) for w in xpath(text_root, './/tei:w', self.ns)]
else:
tags = re.compile('^' + re.sub("-",".",tags) + '.*$')
return [(w.text, w.attrib['ana']) for w in xpath(text_root, './/tei:w', self.ns)
if tags.match(w.attrib['ana'])]
@classmethod
def _lemma_sents(self, text_root):
return [MTEFileReader._lemma_words(s) for s in xpath(text_root, './/tei:s', self.ns)]
@classmethod
def _tagged_sents(self, text_root, tags=""):
# double list comprehension to remove empty sentences in case there is a sentence only containing punctuation marks
return [t for t in [MTEFileReader._tagged_words(s, tags) for s in xpath(text_root, './/tei:s', self.ns)] if len(t) > 0]
@classmethod
def _lemma_paras(self, text_root):
return [MTEFileReader._lemma_sents(p) for p in xpath(text_root, './/tei:p', self.ns)]
@classmethod
def _tagged_paras(self, text_root, tags=""):
return [t for t in [MTEFileReader._tagged_sents(p, tags) for p in xpath(text_root, './/tei:p', self.ns)] if len(t) > 0]
def words(self):
return MTEFileReader._words(self.__root)
def sents(self):
return MTEFileReader._sents(self.__root)
def paras(self):
return MTEFileReader._paras(self.__root)
def lemma_words(self):
return MTEFileReader._lemma_words(self.__root)
def tagged_words(self, tags=""):
return MTEFileReader._tagged_words(self.__root, tags)
def lemma_sents(self):
return MTEFileReader._lemma_sents(self.__root)
def tagged_sents(self, tags=""):
return MTEFileReader._tagged_sents(self.__root)
def lemma_paras(self):
return MTEFileReader._lemma_paras(self.__root)
def tagged_paras(self, tags=""):
return MTEFileReader._tagged_paras(self.__root)
class MTETagConverter:
"""
Class for converting msd tags to universal tags, more conversion
options are currently not implemented.
"""
mapping_msd_universal = {
'A': 'ADJ', 'S': 'ADP', 'R': 'ADV', 'C': 'CONJ',
'D': 'DET', 'N': 'NOUN', 'M': 'NUM', 'Q': 'PRT',
'P': 'PRON', 'V': 'VERB', '.': '.', '-': 'X'}
@staticmethod
def msd_to_universal(tag):
"""
        This function converts the annotation from the MULTEXT-East tagset to the universal tagset
as described in Chapter 5 of the NLTK-Book
Unknown Tags will be mapped to X. Punctuation marks are not supported in MSD tags, so
"""
indicator = tag[0] if not tag[0] == "#" else tag[1]
if not indicator in MTETagConverter.mapping_msd_universal:
indicator = '-'
return MTETagConverter.mapping_msd_universal[indicator]
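    # Illustrative doctest-style check (not from the original source): only the
    # part-of-speech indicator at the start of an MSD tag matters, and a
    # leading '#' is skipped.
    # >>> MTETagConverter.msd_to_universal("Ncmsn")
    # 'NOUN'
    # >>> MTETagConverter.msd_to_universal("#Vmip3s")
    # 'VERB'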
class MTECorpusReader(TaggedCorpusReader):
"""
Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East.
MULTEXT-East contains part-of-speech-tagged words with a quite precise tagging
scheme. These tags can be converted to the Universal tagset
"""
def __init__(self, root=None, fileids=None, encoding='utf8'):
"""
Construct a new MTECorpusreader for a set of documents
located at the given root directory. Example usage:
>>> root = '/...path to corpus.../'
>>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP
:param root: The root directory for this corpus. (default points to location in multext config file)
:param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml)
        :param encoding: The encoding of the given files (default is utf8)
"""
TaggedCorpusReader.__init__(self, root, fileids, encoding)
def __fileids(self, fileids):
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
# filter wrong userinput
fileids = filter(lambda x : x in self._fileids, fileids)
        # filter multext-east source files that are not compatible with the TEI P5 specification
fileids = filter(lambda x : x not in ["oana-bg.xml", "oana-mk.xml"], fileids)
if not fileids:
print("No valid multext-east file specified")
return fileids
def readme(self):
"""
Prints some information about this corpus.
:return: the content of the attached README file
:rtype: str
"""
return self.open("00README.txt").read()
def raw(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a single string.
:rtype: str
"""
return concat([self.open(f).read() for f in self.__fileids(fileids)])
def words(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should
|
be used.
:return: the given file(s) as a list of words and punctuation symbols.
:rtype: list(str)
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).words() for f in self.__fileids(fileids)], [])
def sents(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of sentences or utterances,
|
each encoded as a list of word strings
:rtype: list(list(str))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).sents() for f in self.__fileids(fileids)], [])
def paras(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of paragraphs, each encoded as a list
of sentences, which are in turn encoded as lists of word string
:rtype: list(list(list(str)))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).paras() for f in self.__fileids(fileids)], [])
def lemma_words(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of words, the corr
|
charlesthomas/coinshot
|
setup.py
|
Python
|
mit
| 1,206
| 0.000829
|
#!/usr/bin/env python
from setuptools import setup
NAME = 'coinshot'
DESCRIPTION = 'simple python module for pushover.net'
VERSION = open('VERSION').read().strip()
LONG_DESC = open('README.rst').read()
LICENSE = "MIT License"
setup(
name=NAME,
version=VERSION,
author='Charles Thomas',
author_email='ch@rlesthom.as',
packages=['coinshot'],
url='https://github.com/charlesthomas/%s' % NAME,
license=LICENSE,
description=DESCRIPTION,
long_description=LONG_DESC,
long_description_content_type='text/x-rst',
install_requires=["simplejson >= 3.3.0"],
scripts=['bin/shoot'],
classifiers=['Development Status :: 5 - Production/S
|
table',
|
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Communications',
'Topic :: Software Development :: Libraries :: Python Modules']
)
|
lnhubbell/tweetTrack
|
streamScript/domain/get_tweets_by_user.py
|
Python
|
mit
| 5,107
| 0.000196
|
import os
import tweepy
from query_db import query_db, send_user_queries_to_db, read_in_bb_file
from our_keys.twitter_keys import my_keys
from itertools import chain, repeat
u"""Reads in a file of cities and their bounding boxes. Queries the
database to get a list of all unique users who have tweeted from that
city. Queries Twitter api to get 200 tweets from each user, then inserts
200 tweets for up to 100 users per city into a separate database table
called "Tweet200."""
ROOT_DIR = os.path.abspath(os.getcwd())
def get_twitter_api():
u"""Gets twitter keys from key file."""
for our_set, our_keys in my_keys.items():
auth = tweepy.OAuthHandler(
our_keys['consumer_key'],
our_keys['consumer_secret']
)
auth.set_access_token(
our_keys['access_key'],
our_keys['access_secret']
)
print "Hi, I'm the key generator: ", our_keys['access_key']
yield tweepy.API(auth)
def get_unique_handles(vals):
u"""Takes in a list of tweets from a given city. Returns a dict of
unique user handles for each location."""
users = {}
for tweet in vals:
name = tweet[1]
if name in users:
users[name] += 1
else:
users[name] = 1
heavy_users = []
for user in users:
if users[user] > 2:
heavy_users.append(user)
return heavy_users
def format_tweet_history(history, user, city):
u"""Formats tweets pieces to be fed to sql query.
History is a list-like set of tweets. User is the screen name
as a string. City is the string name of the city we querried for."""
tweet_history = []
for tweet in history:
screen_name = user
text = tweet.text
if len(text) > 150:
print text
created_at = tweet.created_at.strftime('%m/%d/%Y, %H:%M')
location = tweet.geo
location_lat = None
location_lng = None
if location:
location_lat = location['coordinates'][0]
location_lng = location['coordinates'][1]
hashtags = []
# if location:
tweet = (
screen_name, text, location_lat, location_lng,
created_at, hashtags, city
)
tweet_history.append(tweet)
return tweet_history
def check_list_low_tweeters():
with open(ROOT_DIR + "text/stop_names.txt", 'r') as a_file:
names = a_file.read().split("\n")
return names
def query_twitter_for_histories(users
|
, city=None, cap=100, data_collection=True):
u"""Calls function to return a dict of cities and the unique users for each
city. Iterates over the dict to extract the tweet text/locations/timestamps
for each tweet, bundles results into DB-friendly tuples. Returns a list of
lists of tuples."""
api_generator = get_twitter_api()
api_generator = chain.from_iterable(repeat(tuple(api_generator), 1000))
|
api = api_generator.next()
city_tweets = []
user_count = 0
too_low_count = 0
for user in users:
if user_count > cap:
break
if user in check_list_low_tweeters() and data_collection is True:
continue
history = []
# tweet_history = []
try:
history = api.user_timeline(screen_name=user, count=200)
except tweepy.error.TweepError as err:
print "Tweepy Error: ", err.message
api = api_generator.next()
continue
if len(history) >= 200 or not data_collection:
user_count += 1
tweet_history = format_tweet_history(history, user, city)
# if len(tweet_history):
city_tweets.append(tweet_history)
print user_count
else:
print "Too few tweets in this user's history."
with open(ROOT_DIR + "text/stop_names.txt", 'a') as a_file:
a_file.write(user)
a_file.write("\n")
too_low_count += 1
total = user_count + too_low_count
print "total requests: ", total
return city_tweets
def process_each_city():
u"""Calls functions to insert user data into Tweet200 table."""
bb_dict = read_in_bb_file()
for city, values in bb_dict.items():
with open(ROOT_DIR + "text/stop_cities.txt", "r") as ffff:
stop_cities = ffff.read()
if city not in stop_cities:
vals = query_db(city, values)
print "Now checking ", city
handles = get_unique_handles(vals)
print city, len(handles)
if len(handles) >= 200:
print "Now querying twitter for histories"
tweets = query_twitter_for_histories(handles, city)
if len(tweets) >= 100:
send_user_queries_to_db(tweets, city)
else:
print "Not enough users with twitter histories in ", city
if __name__ == "__main__":
while True:
try:
process_each_city()
except Exception:
print "I got an exception"
continue
|
facebookresearch/ParlAI
|
parlai/agents/safe_local_human/safe_local_human.py
|
Python
|
mit
| 4,597
| 0.00087
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Agent that gets the local keyboard input in the act() function.
Applies safety classifier(s) to process user and partner messages.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.message import Message
from parlai.utils.misc import display_messages
from parlai.utils.strings import colorize
from parlai.agents.local_human.local_human import LocalHumanAgent
from parlai.utils.safety import OffensiveStringMatcher, OffensiveLanguageClassifier
OFFENSIVE_USER_REPLY = '[ Sorry, could not process that message. Please try again. ]'
OFFENSIVE_BOT_REPLY = (
'[ Unsafe model reply detected. Clearing agent history. Please try again. ]'
)
class SafeLocalHumanAgent(LocalHumanAgent):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
agent = parser.add_argument_group('Safe Local Human Arguments')
agent.add_argument(
'--safety',
type=str,
default='all',
choices={'none', 'string_matcher', 'classifier', 'all'},
help='Apply safety filtering to messages',
)
super().add_cmdline_args(parser, partial_opt=partial_opt)
return parser
def __init__(self, opt, shared=None):
super().__init__(opt)
self.id = 'safeLocalHuman'
self._init_safety(opt)
def _init_safety(self, opt):
"""
Initialize safety modules.
"""
if opt['safety'] == 'string_matcher' or opt['safety'] == 'all':
self.offensive_string_matcher = OffensiveStringMatcher()
if opt['safety'] == 'classifier' or opt['safety'] == 'all':
self.offensive_classifier = OffensiveLanguageClassifier()
self.self_offensive = False
def check_offensive(self, text):
"""
Check if text is offensive using string matcher and classifier.
"""
if text == '':
return False
if (
hasattr(self, 'offensive_string_matcher')
and text in self.offensive_string_matcher
):
return True
if hasattr(self, 'offensive_classifier') and text in self.offensive_classifier:
return True
return False
def observe(self, msg):
"""
Observe bot reply if and only if it passes.
"""
if self.self_offensive:
# User was offensive, they must try again
re
|
turn
# Now check if bot was offensive
bot_offensive = self.check_offensive(msg.get('text', ''))
if not bot_offensive:
# View bot message
print(
display_messages(
[msg],
add_fields=self.opt.get('display_add_fields', ''),
prettify=self.opt.get('di
|
splay_prettify', False),
verbose=self.opt.get('verbose', False),
)
)
msg.force_set('bot_offensive', False)
else:
msg.force_set('bot_offensive', True)
print(OFFENSIVE_BOT_REPLY)
def get_reply(self):
reply_text = input(colorize('Enter Your Message:', 'field') + ' ')
reply_text = reply_text.replace('\\n', '\n')
return reply_text
def act(self):
# get human reply
reply = Message(
{
'id': self.getID(),
'label_candidates': self.fixedCands_txt,
'episode_done': False,
}
)
reply_text = self.get_reply()
# check if human reply is offensive
self.self_offensive = self.check_offensive(reply_text)
while self.self_offensive:
print(OFFENSIVE_USER_REPLY)
reply_text = self.get_reply()
# check if human reply is offensive
self.self_offensive = self.check_offensive(reply_text)
# check for episode done
if '[DONE]' in reply_text or self.opt.get('single_turn', False):
raise StopIteration
# set reply text
reply['text'] = reply_text
# check if finished
if '[EXIT]' in reply_text:
self.finished = True
raise StopIteration
return reply
|
metu-kovan/human_model
|
src/animator.py
|
Python
|
gpl-3.0
| 8,441
| 0.050468
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('human_model')
import rospy
import json
import tf
import numpy
from abc import ABCMeta,abstractmethod
from tf.transformations import quaternion_multiply as quatMult,quatern
|
ion_conjugate
from collections import deque,defaultdict,OrderedDict
"""Module for converting tf data to construct a human model"""
def Vec(*args):
"""returns a vector (numpy float array) with the length of number of given arguments"""
return numpy.array(args,dtype=float)
|
def normalize(v):
"""returns unit vector or quaternion"""
return v/numpy.linalg.norm(v)
def quatRotatePoint(q,p,o=Vec(0,0,0)):
"""returns point p rotated around quaternion q with the origin o (default (0,0,0)"""
return quatMult(
quatMult(q,numpy.append(p-o,(0,))),
quaternion_conjugate(q)
)[:3]+o
def calculateQuaternion(v1,v2):
"""calculates the quaternion for rotating v1 to v2. Note that both v1 and v2 must be unit vector"""
cross=numpy.cross(v1,v2)
return normalize(numpy.append(cross,(1+numpy.dot(v1,v2),)))
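# Illustrative sanity check (not part of the original script): rotating the x
# axis onto the y axis is a 90 degree rotation about z, so
# calculateQuaternion(Vec(1,0,0), Vec(0,1,0)) is approximately
# (0, 0, sin(45deg), cos(45deg)) = (0, 0, 0.707, 0.707), and quatRotatePoint
# with that quaternion maps Vec(1,0,0) to roughly Vec(0,1,0).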
class AveragePosition(object):
"""Example Position Class
Calculates the average of the last n positions lazily
Calculated value can be accessed or changed via pos attribute:
p=AveragePosition(10)
p.pos+=Vec(1,2,3)
print(p.pos)
    If an alternative position class is needed, it must define these
    functions:
@property
def pos(self):
...
@pos.setter
def pos(self,p):
...
def append(self,p):
...
"""
def __init__(self,n=100):
self.transformations=deque((Vec(0,0,0),),n)
self.calculated=None
@property
def pos(self):
if self.calculated is None:
self.calculated=numpy.average(self.transformations,0)
return self.calculated
@pos.setter
def pos(self,p):
self.calculated=p
"""appends the given position p to the deque, and resets the calculated
average value"""
def append(self,p):
self.calculated=None
self.transformations.append(p)
class JointTree(object):
"""Recursive data structure to define joint tree.It have following attributes:
length:distance to the parent (if not fixed frame)
fixedFrame:fixates the point to the fixedFrame+the displacement of the tree
invert:inverts the rotating axis for connected limb
displacement:used to preserve the position of the node with respect to its parent(resets on new position)
limbPos:position of the limb(resets on new position)
limbRot:orientation of the limb(resets on new position)
"""
def toDict(self,ordered=False):
"""Converts tree to dictionary which can be exported as JSON,if ordered is true
it returns an OrderedDict instead of dictionary and preserves the order of attributes"""
d=OrderedDict if ordered else dict
return d(
((
self.name,
d((
('length',self.length),
('invert',self.invert),
('fixedFrame',None if self.fixedFrame is None else tuple(self.fixedFrame)),
('children',tuple(i.toDict(ordered) for i in self.children)),
))
),))
@staticmethod
def fromDict(dictionary,pos):
"""converts a dictionary to JointTree"""
(k,v)=next(iter(dictionary.items()))
return JointTree(k,pos,**v)
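    # A minimal dictionary in the shape consumed by fromDict (and produced by
    # toDict) might look like this -- the names and numbers are purely illustrative:
    # {"SpineBase": {"length": 0, "invert": False, "fixedFrame": [0, 0, 1],
    #                "children": [{"SpineShoulder": {"length": 0.5, "invert": False,
    #                                                "fixedFrame": None, "children": []}}]}}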
def __init__(self,name,posFunc,**kwargs):
"""gets the name of the node and a function takes no argument,returns a Position class
(e.g. lambda : AveragePosition(10). It takes these optional arguments with the following default values:
length=0
invert=False
fixedFrame=None
children=[] (it can contain either dictionary or JointTree)
"""
self.name=name
self.currentPos=posFunc()
self.length=kwargs.get("length",0)
self.invert=kwargs.get("invert",False)
fixedFrame=kwargs.get("fixedFrame",None)
self.fixedFrame=None if fixedFrame is None else Vec(*fixedFrame)
self.children=[]
children=kwargs.get("children",[])
try:
if isinstance(children[0],dict):
for i in children:
(k,v)=next(iter(i.items()))
self.addChild(JointTree(k,posFunc,**v))
else:
for i in children:
self.addChild(i)
except IndexError:
pass
self.parent=None
self.__uncalculate()
def __uncalculate(self):
self.displacement=Vec(0,0,0)
self.limbPos=Vec(0,0,0)
self.limbRot=Vec(0,0,0,1)
def __iter__(self):
"""iterates over tree depth-first order"""
yield self
for i in self.children:
for j in iter(i):
yield j
def __getitem__(self,name):
"""returns the node with the given name, it raises a KeyError if there is no match"""
for c in self:
if c.name==name:
return c
raise KeyError("There is no node in tree with '{}' name".format(name))
def addChild(self,child):
"""adds new node to the tree"""
child.parent=self
self.children.append(child)
def collectPosition(self,ls):
"""gets the position of the joints from tf.TransformListener ls. It does nothing if there is no sent pose"""
try:
(trans,_)=ls.lookupTransform('/world',self.name,rospy.Time(0))
except tf.Exception as e:
return
self.currentPos.append(Vec(*trans))
self.__uncalculate()
def setPosition(self):
"""calculates the position of the joint"""
if self.fixedFrame is not None:
self.displacement+=self.fixedFrame-self.currentPos.pos
self.currentPos.pos+=self.displacement
elif self.parent is not None:
n=self.currentPos.pos+self.displacement
p=self.parent.currentPos.pos
n=normalize(n-p)*self.length+p
self.displacement=n-self.currentPos.pos
self.currentPos.pos=n
for i in self.children:
i.displacement+=self.displacement
self.displacement=Vec(0,0,0)
def connectLimbs(self):
"""calculates the pose of the limbs"""
p=self.currentPos.pos
for i in self.children:
c=i.currentPos.pos
i.limbPos=(p+c)/2
v2=normalize((p-c) if not i.invert else (c-p))
i.limbRot=calculateQuaternion(Vec(0,0,1),v2)
def sendPoses(self,br):
"""sends the pose of joints and limbs to given tf.TransformBroadcaster"""
br.sendTransform(self.currentPos.pos,(0,0,0,1),rospy.Time.now(),self.name+'_link','/world')
for i in self.children:
br.sendTransform(i.limbPos,i.limbRot,rospy.Time.now(),"{}_{}".format(self.name,i.name),'/world')
def applyDisplacement(self,displacement):
"""applies the given displacement to the parent and all of its children"""
for i in self:
i.currentPos.pos+=displacement
i.limbPos+=displacement
if __name__ == '__main__':
rospy.init_node('animator')
treeDict=json.loads(rospy.get_param("/tree"))
tree=JointTree.fromDict(treeDict,lambda : AveragePosition(10))
br = tf.TransformBroadcaster()
ls = tf.TransformListener()
rate = rospy.Rate(50.0)
while not rospy.is_shutdown():
for i in tree:
i.collectPosition(ls)
for i in tree:
i.setPosition()
(o,r,l) = ("SpineShoulder","ShoulderRight","ShoulderLeft")
            #these three are a special condition: they are aligned on a straight line
            #Also note that the z value of ShoulderRight and ShoulderLeft equals that of SpineShoulder
if i.name==o:
r=i[r]
l=i[l]
cr=r.currentPos.pos+r.displacement
cl=l.currentPos.pos+l.displacement
cr[2]=i.currentPos.pos[2]
cl[2]=i.currentPos.pos[2]
k=i.currentPos.pos-(cr+cl)/2
cr+=k
cl+=k
r.displacement=cr-r.currentPos.pos
l.displacement=cl-l.currentPos.pos
for i in tree:
i.connectLimbs()
#calculates the Orientation of Torso (Upper and Lower) and connected joints
q1=tree["SpineShoulder"].limbRot
q2=calculateQuaternion(Vec(0,1,0),normalize(tree["ShoulderRight"].currentPos.pos-tree["ShoulderLeft"].currentPos.pos))
tree["SpineShoulder"].limbRot=quatMult(q2,q1)
tree["ShoulderRight"].applyDisplacement(quatRotatePoint(q1,tree["ShoulderRight"].currentPos.pos,tree["SpineShoulder"].currentPos.pos)-tree["ShoulderRight"].currentPos.pos)
tree["ShoulderLeft"].applyDisplacement(quatRotatePoint(q1,tree["ShoulderLeft"].currentPos.pos,tree["SpineShoulder"].currentPos.pos)-tree["ShoulderLeft"].currentPos.pos)
v=tree["HipRight"].currentPos.pos-tree["HipLeft"].currentPos.pos
q2=calculateQuaternion(Vec(0,1,0),normalize(v))
q=quatMult(q2,q1)
tree["SpineBase"].limbRot=q
tree["HipRight"].applyDisplacement(quatRotatePoint(q,tree["SpineBase"].currentPos.pos+Vec(0.01,tree["HipRight"].length,-0.05),tree["SpineBase"].currentPos.pos)-tree["HipRight"].c
|
henrist/aqmt
|
aqmt/calc_window.py
|
Python
|
mit
| 2,753
| 0.001453
|
#!/usr/bin/env python3
#
# This file generates an estimation of window size for the
# two queues for _each_ sample. It will not be exact, and
# it's correctness will vary with the variation of queue delay
# in the queue.
#
# The results are saved to:
# - derived/window
# each line formatted as: <sample id> <window ecn in bits> <window nonecn in bits>
#
# Dependency:
# - calc_queuedelay.py (for per sample queue stats)
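#
# Rough illustration of the estimate (made-up numbers, not from a real test):
# with a sampled rate of 10 Mbit/s and an effective RTT of 20 ms (base RTT plus
# the average queue delay for that sample), the estimated window is
# 10_000_000 b/s * 0.020 s = 200_000 bits for that sample.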
import os
import sys
def get_rates(rate_file):
rates = []
with open(rate_file, 'r') as f:
for line in f:
# skip comments
if line[0] == '#':
continue
# format of rate file:
# <sample id> <sample time> <rate in b/s>
rates.append(int(line.split()[2]))
return rates
def get_rtts_with_queue(queue_file, base_rtt):
rtts = []
with open(queue_file, 'r') as f:
for line in f:
# skip comments
if line[0] == '#':
continue
# format of queue file:
# <sample time> <average_in_us> ...
# the average might be '-' if it is unknown
queue_avg = line.split()[1]
queue_avg = 0 if queue_avg == '-' else float(queue_avg)
# add rtt and normalize to seconds
# base rtt is in ms
rtts.append((queue_avg / 1000 + base_rtt) / 1000)
return rtts
def calc_window(rates, rtts_s):
windows = []
# all data should have same amount of samples
for i, rate in enumerate(rates):
rtt = rtts_s[i] # rtt in seconds
windows.append(rate * rtt)
return windows
def write_window(file, window_ecn_list, window_nonecn_list):
with open(file, 'w') as f:
f.write('#sample_id window_ecn_in_bits window_nonecn_in_bits\n')
for i, window_ecn in enumerate(window_ecn_list):
window_nonecn = window_nonecn_list[i]
f.write('%d %d %d\n' % (i, window_ecn, window_nonecn))
def process_test(folder, base_rtt_ecn_ms, base_rtt_nonecn_ms):
write_window(
folder + '/derived/window',
calc_window(
get_rates(folder + '/ta/rate_ecn'),
get_rtts_with_queue(folder + '/derived/queue_ecn_samplestats', base_rtt_ecn_ms),
),
calc_window(
get_rates(folder + '/ta/rate_nonecn'),
get_rtts_with_queue(folder + '/derived/queue_nonecn_samplestats', base_rtt_nonecn_ms),
),
)
if __name__ == '__main__':
if len(sys.argv) < 4:
print('U
|
sage: %s <test_folde
|
r> <rtt_ecn_ms> <rtt_nonecn_ms>' % sys.argv[0])
sys.exit(1)
process_test(
sys.argv[1],
float(sys.argv[2]),
float(sys.argv[3]),
)
print('Generated win')
|
spaam/svtplay-dl
|
lib/svtplay_dl/service/vimeo.py
|
Python
|
mit
| 1,817
| 0.003302
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Vimeo(Service, OpenGraphThumbMixin):
supported_domains = ["vimeo.com", "player.vimeo.com"]
def get(self):
data = self.get_urldata()
match_cfg_url = re.search('data-config-url="([^"]+)" data-fallback-url', data)
match_clip_page_cfg = re.search(r"vimeo\.clip_page_config\s*=\s*({.+?});", data)
if match_cfg_url:
player_url = match_cfg_url.group(1).replace("&", "&")
elif match_clip_page_cfg:
page_config = json.loads(match_clip_page_cfg.group(1))
player_url = page_config["player"]["config_url"]
else:
yield ServiceError(f"Can't find video file for: {self.url}")
return
player_data = self.http.request("get", player_url).text
if player_data:
jsondata = json.loads(player_data)
if ("hls" in jsondata["request"]["files"]) and ("fastly_skyfire" i
|
n jso
|
ndata["request"]["files"]["hls"]["cdns"]):
hls_elem = jsondata["request"]["files"]["hls"]["cdns"]["fastly_skyfire"]
yield from hlsparse(self.config, self.http.request("get", hls_elem["url"]), hls_elem["url"], output=self.output)
avail_quality = jsondata["request"]["files"]["progressive"]
for i in avail_quality:
yield HTTP(copy.copy(self.config), i["url"], i["height"], output=self.output)
else:
yield ServiceError("Can't find any streams.")
return
|
Thingee/cinder
|
cinder/scheduler/weights/capacity.py
|
Python
|
apache-2.0
| 3,311
| 0.000302
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weighers that weigh hosts by their capacity, including following two
weighers:
1. Capacity Weigher. Weigh hosts by their available capacity.
The default is to spread volumes across all hosts evenly. If you prefer
stacking, you can set the 'capacity_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
2. Allocated Capacity Weigher. Weigh hosts by their allocated capacity.
The default behavior is to place new volume to the host allocated the least
space. This weigher is intended to simulate the behavior of SimpleScheduler.
If you prefer to place volumes to host allocated the most space, you can
set the 'allocated_capacity_weight_multiplier' option to a positive number
and the weighing has the opposite effect of the default.
"""
import math
from oslo.config import cfg
from cinder.openstack.common.scheduler import weights
capacity_weight_opts = [
cfg.FloatOpt('capacity_weight_multiplier',
default=1.0,
help='Multiplier used for weighing volume capacity. '
'Negative numbers mean to stack vs spread.'),
cfg.FloatOpt('allocated_capacity_weight_multiplier',
default=-1.0,
help='Multiplier used for weighing volume capacity. '
'Negative numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(capacity_weight_opts)
class CapacityWeigher(weights.BaseHostWeigher):
def _weight_multiplier(self):
"""Override the weight multiplier."""
return CONF.capacity_weight_multiplier
def _weigh_object(self, host_state, weight_properties):
"""Higher weights win. We want spread
|
ing to be the default."""
reserved = float(host_state.reserved_percentage) / 100
free_space = host_state.free_capacity_gb
if free_space == 'infinite' or free_space == 'unknown':
#(zhiteng) 'infinite' and 'unknown' are treated the same
# here, for sorting purpose.
free = float('inf')
else:
free = math.floor(host_state.free_capacity_gb * (1 - reserved))
        return free
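        # Worked example with illustrative numbers: a backend reporting
        # free_capacity_gb=100 and reserved_percentage=10 weighs in at
        # floor(100 * (1 - 0.10)) = 90, so with the default positive multiplier
        # it is preferred over a backend with only 50 GB free.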
class AllocatedCapacityWeigher(weights.BaseHost
|
Weigher):
def _weight_multiplier(self):
"""Override the weight multiplier."""
return CONF.allocated_capacity_weight_multiplier
def _weigh_object(self, host_state, weight_properties):
# Higher weights win. We want spreading (choose host with lowest
# allocated_capacity first) to be the default.
allocated_space = host_state.allocated_capacity_gb
return allocated_space
|
tropp/acq4
|
acq4/analysis/scripts/beamProfiler.py
|
Python
|
mit
| 976
| 0.009221
|
from PyQt4 import QtCore
import acq4.Manager
import acq4.util.imageAnalysis as imageAnalysis
run = True
man = acq4.Manager.getManager()
cam = man.getDevice('Camera')
frames = []
def collect(frame):
global frames
frames.append(frame)
cam.sigNewFrame.connect(collect)
def measure():
if len(frames) == 0:
QtCore.QTimer.singleShot(100, measure)
return
global run
if run:
global frames
|
frame = frames[-1]
frames = []
img = frame.data()
w,h = img.shape
img = img[2*w/5:3*w/5, 2*h/5:3*h/5]
w,h = img.shape
fit = imageAnalysis.fitGaussian2D(img, [100, w/2., h/2., w/4., 0])
# convert sigma to full width at 1/e
fit[0][3] *= 2 * 2**0.5
print "WIDTH:", fit[0]
|
[3] * frame.info()['pixelSize'][0] * 1e6, "um"
print " fit:", fit
else:
global frames
frames = []
QtCore.QTimer.singleShot(2000, measure)
measure()
|
ithinksw/philo
|
philo/contrib/julian/__init__.py
|
Python
|
isc
| 89
| 0.033708
|
"""
This ver
|
sion of julian is currently in development and
|
is not considered stable.
"""
|
asvetlov/optimization-kaunas-2017
|
2.py
|
Python
|
apache-2.0
| 463
| 0.006479
|
import timeit
import pyximport; pyximpor
|
t.install()
from mod2 import cysum, cysum2
def pysum(start, step, count):
ret = start
for i in range(count):
ret += step
return ret
print('Python',
timeit.timeit('pysum(0, 1, 100)', 'from __main__ import pysum'))
print('Cython', ti
|
meit.timeit('cysum(0, 1, 100)', 'from __main__ import cysum'))
print('Cython with types',
timeit.timeit('cysum2(0, 1, 100)', 'from __main__ import cysum2'))
|
SoftwareIntrospectionLab/MininGit
|
pycvsanaly2/extensions/FileTypes.py
|
Python
|
gpl-2.0
| 6,480
| 0.00571
|
# Copyright (C) 2008 LibreSoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Carlos Garcia Campos <carlosgc@gsyc.escet.urjc.es>
from pycvsanaly2.Database import (SqliteDatabase, MysqlDatabase,
TableAlreadyExists, statement)
from pycvsanaly2.extensions import (Extension, register_extension,
ExtensionRunError)
from pycvsanaly2.extensions.file_types import guess_file_type
from pycvsanaly2.utils import to_utf8, uri_to_filename
class DBFileType(object):
id_counter = 1
__insert__ = """INSERT INTO file_types (id, file_id, type)
values (?, ?, ?)"""
def __init__(self, id, type, file_id):
if id is None:
self.id = DBFileType.id_counter
DBFileType.id_counter += 1
else:
self.id = id
self.type = to_utf8(type)
self.file_id = file_id
class FileTypes(Extension):
def __init__(self):
self.db = None
def __create_table(self, cnn):
cursor = cnn.cursor()
if isinstance(self.db, SqliteDatabase):
import sqlite3.dbapi2
try:
cursor.execute("CREATE TABLE file_types (" +
"id integer primary key," +
"file_id integer," +
"type varchar" +
")")
except sqlite3.dbapi2.OperationalError:
cursor.close()
raise TableAlreadyExists
except:
raise
elif isinstance(self.db, MysqlDatabase):
import MySQLdb
try:
cursor.execute("CREATE TABLE file_types (" +
"id INT primary key," +
"file_id integer REFERENCES files(id)," +
"type mediumtext" +
") CHARACTER SET=utf8")
except MySQLdb.OperationalError, e:
if e.args[0] == 1050:
cursor.close()
raise TableAlreadyExists
raise
except:
raise
cnn.commit()
cursor.close()
def __create_indices(self, cnn):
cursor = cnn.cursor()
if isinstance(self.db, MysqlDatabase):
import MySQLdb
try:
cursor.execute("create index parent_id on file_links(parent_id)")
except MySQLdb.OperationalError, e:
if e.args[0] != 1061:
cursor.close()
|
raise
try:
cursor.execute("create index repository_id on files(repository_id)")
except MySQLdb.OperationalError, e:
if e.args[0] != 1061:
cursor.close()
raise
cursor.close()
def __get
|
_files_for_repository(self, repo_id, cursor):
query = "SELECT ft.file_id from file_types ft, files f " + \
"WHERE f.id = ft.file_id and f.repository_id = ?"
cursor.execute(statement(query, self.db.place_holder), (repo_id,))
files = [res[0] for res in cursor.fetchall()]
return files
def run(self, repo, uri, db):
self.db = db
path = uri_to_filename(uri)
if path is not None:
repo_uri = repo.get_uri_for_path(path)
else:
repo_uri = uri
cnn = self.db.connect()
cursor = cnn.cursor()
cursor.execute(statement("SELECT id from repositories where uri = ?",
db.place_holder), (repo_uri,))
repo_id = cursor.fetchone()[0]
files = []
try:
self.__create_table(cnn)
except TableAlreadyExists:
cursor.execute(statement("SELECT max(id) from file_types",
db.place_holder))
id = cursor.fetchone()[0]
if id is not None:
DBFileType.id_counter = id + 1
files = self.__get_files_for_repository(repo_id, cursor)
except Exception, e:
raise ExtensionRunError(str(e))
self.__create_indices(cnn)
query = """select distinct f.id fid, f.file_name fname
from files f
where f.repository_id = ?
and not exists (select id from file_links where parent_id = f.id)"""
cursor.execute(statement(query, db.place_holder), (repo_id,))
write_cursor = cnn.cursor()
rs = cursor.fetchmany()
while rs:
types = []
for file_id, file_name in rs:
if file_id in files:
continue
type = guess_file_type(file_name)
types.append(DBFileType(None, type, file_id))
if types:
file_types = [(type.id, type.file_id, type.type) \
for type in types]
write_cursor.executemany(statement(DBFileType.__insert__,
self.db.place_holder),
file_types)
rs = cursor.fetchmany()
cnn.commit()
write_cursor.close()
cursor.close()
cnn.close()
def backout(self, repo, uri, db):
update_statement = """delete from file_types where
file_id in (select id from files f
where f.repository_id = ?)"""
self._do_backout(repo, uri, db, update_statement)
register_extension("FileTypes", FileTypes)
|
jacquesqiao/Paddle
|
python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
|
Python
|
apache-2.0
| 1,823
| 0
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
#
|
limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestFakeQuantizeOp(OpTest):
def setUp(self):
self.op_type = "fake_quantize"
self.attrs = {
'bit_length': 8,
'quantiz
|
e_type': 'abs_max',
'window_size': 10000
}
self.inputs = {
'X': np.random.random((10, 10)).astype("float32"),
'InScales': np.zeros(self.attrs['window_size']).astype("float32"),
'InCurrentIter': np.zeros(1).astype("float32"),
'InMovingScale': np.zeros(1).astype("float32")
}
self.scale = {
'abs_max': np.max(np.abs(self.inputs['X'])).astype("float32")
}
self.outputs = {
'Out': np.round(self.inputs['X'] / self.scale['abs_max'] * (
(1 << (self.attrs['bit_length'] - 1)) - 1)),
'OutScales': np.zeros(self.attrs['window_size']).astype("float32"),
'OutMovingScale':
np.array([self.scale['abs_max']]).astype("float32"),
'OutCurrentIter': np.zeros(1).astype("float32")
}
def test_check_output(self):
self.check_output()
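    # Quick illustration of the abs_max scheme exercised above (values are made
    # up, not taken from the random inputs): with bit_length=8 the integer range
    # is [-127, 127], so an input containing 0.5 and -1.0 has scale max(|X|)=1.0
    # and quantizes to round(0.5 * 127) = 64 and round(-1.0 * 127) = -127.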
if __name__ == "__main__":
unittest.main()
|
Veeenz/Telegram-DMI-Bot
|
classes/StringParser.py
|
Python
|
gpl-3.0
| 328
| 0.036585
|
# -*- coding: utf-8 -*-
import re
class StringParser(object):
@staticmethod
def removeCFU(stringToParse):
updatedString = re.sub('\s?[0-9] CFU.*', '', stringToParse)
return updatedString
@stat
|
icmethod
def startsWithUpper(stringToPa
|
rse):
stringToParse = stringToParse[0].upper()+stringToParse[1:]
return stringToParse
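    # Illustrative behaviour (not part of the original class):
    # StringParser.removeCFU("Analisi Matematica 9 CFU - corso A") -> "Analisi Matematica"
    # StringParser.startsWithUpper("fisica generale") -> "Fisica generale"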
|
luisfer85/newspaper2
|
newspaper2/newspaper2/news/migrations/0003_event_publish_date.py
|
Python
|
apache-2.0
| 520
| 0.001923
|
# -*- coding: utf-8 -*-
|
# Generated by Django 1.11.5 on 2017-09-22 14:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0002_event'),
]
operations = [
migrations.AddField(
model_name='event',
name='publish_date',
field=models.DateTimeField(default='2017-09-22 16:45', verbose_name='publish_date'),
preserve_default=False,
),
|
]
|
vorwerkc/pymatgen
|
pymatgen/electronic_structure/boltztrap.py
|
Python
|
mit
| 103,362
| 0.001887
|
"""
This module provides classes to run and analyze boltztrap on pymatgen band
structure objects. Boltztrap is a software interpolating band structures and
computing materials properties from this band structure using Boltzmann
semi-classical transport theory.
Boltztrap has been developed by Georg Madsen.
http://www.icams.de/content/research/software-development/boltztrap/
You need version 1.2.3 or higher
References are::
Madsen, G. K. H., and Singh, D. J. (2006).
BoltzTraP. A code for calculating band-structure dependent quantities.
Computer Physics Communications, 175, 67-71
"""
import logging
import math
import os
import subprocess
import tempfile
import time
import numpy as np
from monty.dev import requires
from monty.json import MSONable, jsanitize
from monty.os import cd
from monty.os.path import which
from scipy import constants
from scipy.spatial import distance
from pymatgen.core.lattice import Lattice
from pymatgen.core.units import Energy, Length
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine, Kpoint
from pymatgen.electronic_structure.core import Orbital
from pymatgen.electronic_structure.dos import CompleteDos, Dos, Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "August 23, 2013"
class BoltztrapRunner(MSONable):
"""
This class is used to run Boltztrap on a band structure object.
"""
@requires(
which("x_trans"),
"BoltztrapRunner requires the executables 'x_trans' to be in "
"the path. Please download the Boltztrap at http://"
"www.icams.de/content/research/software-development/boltztrap/ "
"and follow the instructions in the README to compile "
"Bolztrap accordingly. Then add x_trans to your path",
)
def __init__(
self,
bs,
nelec,
dos_type="HISTO",
energy_grid=0.005,
lpfac=10,
run_type="BOLTZ",
band_nb=None,
tauref=0,
tauexp=0,
tauen=0,
soc=False,
doping=None,
energy_span_around_fermi=1.5,
scissor=0.0,
kpt_line=None,
spin=None,
cond_band=False,
tmax=1300,
tgrid=50,
symprec=1e-3,
cb_cut=10,
timeout=7200,
):
"""
Args:
bs:
A band structure object
nelec:
the number of electrons
dos_type:
two options for the band structure integration: "HISTO"
(histogram) or "TETRA" using the tetrahedon method. TETRA
typically gives better results (especially for DOSes)
but takes more time
energy_grid:
the energy steps used for the integration (eV)
lpfac:
the number of interpolation points in the real space. By
default 10 gives 10 time more points in the real space than
the number of kpoints given in reciprocal space
run_type:
type of boltztrap usage. by default
- BOLTZ: (default) compute transport coefficients
- BANDS: interpolate all bands contained in the energy range
specified in energy_span_around_fermi variable, along specified
k-points
- DOS: compute total and partial dos (custom BoltzTraP code
needed!)
- FERMI: compute fermi surface or more correctly to
get certain bands interpolated
band_nb:
indicates a band number. Used for Fermi Surface interpolation
(run_type="FERMI")
spin:
specific spin component (1: up, -1: down) of the band selected
in FERMI mode (mandatory).
cond_band:
if a conduction band is specified in FERMI mode,
set this variable as True
tauref:
reference relaxation time. Only set to a value different than
zero if we want to model beyond the constant relaxation time.
tauexp:
exponent for the energy in the non-constant relaxation time
approach
tauen:
reference energy for the non-constant relaxation time approach
soc:
results from spin-orbit coupling (soc) computations give
typically non-polarized (no spin up or down) results but single
electron occupations. If the band structure comes from a soc
computation, you should set soc to True (default False)
doping:
the fixed doping levels you want to compute. Boltztrap provides
both transport values depending on electron chemical potential
(fermi energy) and for a series of fixed carrier
concentrations. By default, this is set to 1e16 to 1e22 in
increments of factors of 10.
energy_span_around_fermi:
usually the interpolation is not needed on the entire energy
range but on a specific range around the fermi level.
This energy gives this range in eV. by default it is 1.5 eV.
If DOS or BANDS type are selected, this range is automatically
set to cover the entire energy range.
scissor:
scissor to apply to the band gap (eV). This applies a scissor
operation moving the band edges without changing the band
shape. This is useful to correct the often underestimated band
gap in DFT. Default is 0.0 (no scissor)
kpt_line:
list of fractional coordinates of kpoints as arrays or list of
Kpoint objects for BANDS mode calculation (standard path of
high symmetry k-points is automatically set as default)
tmax:
Maximum temperature (K) for calculation (default=1300)
tgrid:
Temperature interval for calculation (default=50)
symprec: 1e-3 is the default in pymatgen. If the kmesh has been
generated using a different symprec, it has to be specified
to avoid a "factorization error" in BoltzTraP calculation.
If a kmesh that spans the whole Brillouin zone has been used,
or to disable all the symmetries, set symprec to None.
cb_cut: by default 10% of the highest conduction bands are
removed because they are often not accurate.
Tune cb_cut to change the percentage (0-100) of bands
that are removed.
timeout: overall time limit (in seconds): mainly to avoid infinite
loop when trying to find Fermi levels.
"""
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
|
self.error = []
self.run_type = run_type
self.band_nb = band_nb
self.spin = spin
self.cond_band = cond_band
self.tauref = tauref
self.tauexp = tauexp
self.tauen = tauen
self.soc = soc
self.kpt_line = kpt_line
self.cb_cut = cb_cut / 100.0
if isinstance(doping, list) and len(doping) > 0:
self.doping = doping
else:
self.doping = []
for d in [1e16, 1e17, 1e18, 1e1
|
9, 1e20, 1e21]:
self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])
self.doping.append(1e22)
self.energy_span_around_fermi = energy_span_around_fermi
self.scissor = scissor
self.tmax = tmax
self.tgrid = tgrid
self._sy
|
jnosal/seth
|
seth/tests/test_versioning.py
|
Python
|
mit
| 6,814
| 0.001761
|
from seth import versioning
from seth.tests import IntegrationTestBase
from seth.classy.rest import generics
class DefaultVersioningResource(generics.GenericApiView):
def get(self, **kwargs):
return {}
class NotShowVersionResource(generics.GenericApiView):
display_version = False
def get(self, **kwargs):
return {}
class BaseVersioningTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
config.register_resource(DefaultVersioningResource, '/test_basic')
config.register_resource(NotShowVersionResource, '/test_do_not_display_version')
def test_default_setup(self):
r = self.app.get('/test_basic')
self.assertEqual(r.status_int, 200)
self.assertIn('API-Version', r.headers.keys())
self.assertEqual(r.headers['API-Version'], '1.0')
def test_do_not_display_version(self):
r = self.app.get('/test_do_not_display_version')
self.assertEqual(r.status_int, 200)
self.assertNotIn('API-Version', r.headers.keys())
class CustomVersioningPoliciesTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class NoGetVersionInfoPolicy(versioning.BaseVersioningPolicy):
default_version = '2.0'
class NoGetVersionInfonResource(generics.GenericApiView):
versioning_policy = NoGetVersionInfoPolicy
def get(self, **kwargs):
return {}
config.register_resource(NoGetVersionInfonResource, '/test_no_get_version_info')
class AnotherVersionPolicy(versioning.BaseVersioningPolicy):
default_version = '2.0'
def get_version_info(self, request, *args, **kwargs):
return '2.0'
class AnotherVersionResource(generics.GenericApiView):
versioning_policy = AnotherVersionPolicy
def get(self, **kwargs):
return {}
config.register_resource(AnotherVersionResource, '/test_another_version')
class PredefineVersionPolicy(versioning.BaseVersioningPolicy):
default_version = None
def get_default_version(self, request):
return '666'
def get_version_info(self, request, *args, **kwargs):
return '666'
class PredefineVersionResource(generics.GenericApiView):
versioning_policy = PredefineVersionPolicy
def get(self, **kwargs):
return {}
config.register_resource(PredefineVersionResource, '/test_predefine')
def test_raises_NotImplementedError_if_get_version_info_is_not_provided(self):
self.assertRaises(NotImplementedError, lambda: self.app.get('/test_no_get_version_info'))
def test_another_version_set(self):
r = self.app.get('/test_another_version')
self.assertEqual(r.status_int, 200)
self.assertIn('API-Version', r.headers.keys())
self.assertEqual(r.headers['API-Version'], '2.0')
def test_predefine_version(self):
r = self.app.get('/test_predefine')
self.assertEqual(r.status_int, 200)
self.assertIn('API-Version', r.headers.keys())
self.assertEqual(r.headers['API-Version'], '666')
class CheckParamsVersionPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class CheckQueryParamsResource(generics.GenericApiView):
versioning_policy = versioning.CheckQueryParamsVersioningPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResource, '/test_query_par
|
ams')
class AllowVersionOnePolicy(versioning.CheckQueryParamsVersioningPolicy):
default_version = '22.0'
def get_allowed_version(self):
return ['5.0']
class CheckQueryParamsResourceSecond(generics.GenericApiView):
versioning_policy = AllowVersionOnePolicy
def g
|
et(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_allow_version')
def test_no_version_in_query_params_all_versions_allowed(self):
r = self.app.get('/test_query_params')
self.assertEqual(r.status_int, 200)
def test_wrong_version_in_query_params_all_versions_allowed(self):
r = self.app.get('/test_query_params?version=2.0')
self.assertEqual(r.status_int, 200)
def test_correct_version_in_query_params_all_versions_allowed(self):
r = self.app.get('/test_query_params?version=1.0')
self.assertEqual(r.status_int, 200)
def test_allow_default_version(self):
r = self.app.get('/test_allow_version?version=22.0')
self.assertEqual(r.status_int, 200)
def test_allowed_versions(self):
r = self.app.get('/test_allow_version?version=5.0')
self.assertEqual(r.status_int, 200)
def test_wrong_version_in_query_params_allowed_are_set(self):
r = self.app.get('/test_allow_version?version=1.0', expect_errors=True)
self.assertEqual(r.status_int, 404)
def test_no_version_in_query_params_allowed_are_set(self):
r = self.app.get('/test_allow_version', expect_errors=True)
self.assertEqual(r.status_int, 404)
class CheckHeaderVersionPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class AllowVersionOnePolicy(versioning.CheckHeaderVersioningPolicy):
default_version = '22.0'
def get_allowed_version(self):
return ['5.0']
class CheckQueryParamsResourceSecond(generics.GenericApiView):
versioning_policy = AllowVersionOnePolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_allow_header')
def test_allow_default_version(self):
r = self.app.get('/test_allow_header', headers={'Api-Version': '22.0'})
self.assertEqual(r.status_int, 200)
def test_allowed_versions(self):
r = self.app.get('/test_allow_header', headers={'Api-Version': '5.0'})
self.assertEqual(r.status_int, 200)
def test_wrong_version_in_headers(self):
r = self.app.get('/test_allow_header', headers={'Api-Version': '666.0'}, expect_errors=True)
self.assertEqual(r.status_int, 404)
def test_no_header_in_request(self):
r = self.app.get('/test_allow_header', expect_errors=True)
self.assertEqual(r.status_int, 404)
def test_wrong_header_set(self):
r = self.app.get('/test_allow_header', headers={'Api-WRONG': '22.0'}, expect_errors=True)
self.assertEqual(r.status_int, 404)
|
SickGear/SickGear
|
lib/soupsieve_py3/css_types.py
|
Python
|
gpl-3.0
| 8,916
| 0.001682
|
"""CSS selector structure items."""
import copyreg
from collections.abc import Hashable, Mapping
__all__ = (
'Selector',
'SelectorNull',
'SelectorTag',
'SelectorAttribute',
'SelectorContains',
'SelectorNth',
'SelectorLang',
'SelectorList',
'Namespaces',
'CustomSelectors'
)
SEL_EMPTY = 0x1
SEL_ROOT = 0x2
SEL_DEFAULT = 0x4
SEL_INDETERMINATE = 0x8
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
SEL_PLACEHOLDER_SHOWN = 0x400
class Immutable(object):
"""Immutable."""
__slots__ = ('_hash',)
def __init__(self, **kwargs):
"""Initialize."""
temp = []
for k, v in kwargs.items():
temp.append(type(v))
temp.append(v)
super(Immutable, self).__setattr__(k, v)
super(Immutable, self).__setattr__('_hash', hash(tuple(temp)))
@classmethod
def __base__(cls):
"""Get base class."""
return cls
def __eq__(self, other):
"""Equal."""
return (
isinstance(other, self.__base__()) and
all([getattr(other, key) == getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __ne__(self, other):
"""Equal."""
return (
not isinstance(other, self.__base__()) or
any([getattr(other, key) != getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __hash__(self):
"""Hash."""
return self._hash
def __setattr__(self, name, value):
"""Prevent mutability."""
raise AttributeError("'{}' is immutable".format(self.__class__.__name__))
def __repr__(self): # pragma: no cover
"""Representation."""
return "{}({})".format(
self.__base__(), ', '.join(["{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1]])
)
__str__ = __repr__
class ImmutableDict(Mapping):
"""Hashable, immutable dictionary."""
def __init__(self, *args, **kwargs):
"""Initialize."""
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if (
is_dict and not all([isinstance(v, Hashable) for v in arg.values()]) or
not is_dict and not all([isinstance(k, Hashable) and isinstance(v, Hashable) for k, v in arg])
):
raise TypeError('All values must be hashable')
self._d = dict(*args, **kwargs)
self._hash = hash(tuple([(type(x), x, type(y), y) for x, y in sorted(self._d.items())]))
def __iter__(self):
"""Iterator."""
return iter(self._d)
def __len__(self):
"""Length."""
return len(self._d)
def __getitem__(self, key):
"""Get item: `namespace['key']`."""
return self._d[key]
def __hash__(self):
"""Hash."""
return self._hash
def __repr__(self): # pragma: no cover
"""Representation."""
return "{!r}".format(self._d)
__str__ = __repr__
class Namespaces(ImmutableDict):
"""Namespaces."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('Namespace keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('Namespace keys and values must be Unicode strings')
super(Namespaces, self).__init__(*args, **kwargs)
class CustomSelectors(ImmutableDict):
"""Custom selectors."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('CustomSelectors keys and values must be Unicode strings
|
')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise Type
|
Error('CustomSelectors keys and values must be Unicode strings')
super(CustomSelectors, self).__init__(*args, **kwargs)
class Selector(Immutable):
"""Selector."""
__slots__ = (
'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
)
def __init__(
self, tag, ids, classes, attributes, nth, selectors,
relation, rel_type, contains, lang, flags
):
"""Initialize."""
super(Selector, self).__init__(
tag=tag,
ids=ids,
classes=classes,
attributes=attributes,
nth=nth,
selectors=selectors,
relation=relation,
rel_type=rel_type,
contains=contains,
lang=lang,
flags=flags
)
class SelectorNull(Immutable):
"""Null Selector."""
def __init__(self):
"""Initialize."""
super(SelectorNull, self).__init__()
class SelectorTag(Immutable):
"""Selector tag."""
__slots__ = ("name", "prefix", "_hash")
def __init__(self, name, prefix):
"""Initialize."""
super(SelectorTag, self).__init__(
name=name,
prefix=prefix
)
class SelectorAttribute(Immutable):
"""Selector attribute rule."""
__slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")
def __init__(self, attribute, prefix, pattern, xml_type_pattern):
"""Initialize."""
super(SelectorAttribute, self).__init__(
attribute=attribute,
prefix=prefix,
pattern=pattern,
xml_type_pattern=xml_type_pattern
)
class SelectorContains(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "_hash")
def __init__(self, text):
"""Initialize."""
super(SelectorContains, self).__init__(
text=text
)
class SelectorNth(Immutable):
"""Selector nth type."""
__slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")
def __init__(self, a, n, b, of_type, last, selectors):
"""Initialize."""
super(SelectorNth, self).__init__(
a=a,
n=n,
b=b,
of_type=of_type,
last=last,
selectors=selectors
)
class SelectorLang(Immutable):
"""Selector language rules."""
__slots__ = ("languages", "_hash",)
def __init__(self, languages):
"""Initialize."""
super(SelectorLang, self).__init__(
languages=tuple(languages)
)
def __iter__(self):
"""Iterator."""
return iter(self.languages)
def __len__(self): # pragma: no cover
"""Length."""
return len(self.languages)
def __getitem__(self, index): # pragma: no cover
"""Get item."""
return self.languages[index]
class SelectorList(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
def __init__(self, selectors=tuple(), is_not=False, is_html=False):
"""Initialize."""
super(SelectorList, self).__init__(
selectors=tuple(selectors),
is_not=is_not,
is_html=is_html
)
|
danbob123/gplearn
|
gplearn/skutils/tests/test_validation.py
|
Python
|
bsd-3-clause
| 14,136
| 0.000283
|
"""Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal, assert_warns
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from gplearn.skutils.testing import assert_raises_regexp
from gplearn.skutils import as_float_array, check_array, check_symmetric
from gplearn.skutils import check_X_y
from gplearn.skutils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from gplearn.skutils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length)
from gplearn.skutils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
for copy in (True, False):
Y = check_array(X, accept_sparse='csr', copy=copy, order='C')
assert_true(Y.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature
|
mywulin/functest
|
functest/tests/unit/cli/test_cli_base.py
|
Python
|
apache-2.0
| 3,933
| 0
|
#!/usr/bin/env python
# Copyright (c) 2016 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import os
import unittest
import mock
from click.testing import CliRunner
with mock.patch('functest.cli.commands.cli_testcase.CliTestcase.__init__',
mock.Mock(return_value=None)), \
mock.patch('functest.cli.commands.cli_tier.CliTier.__init__',
mock.Mock(return_value=None)):
os.environ['OS_AUTH_URL'] = ''
from functest.cli import cli_base
class CliBaseTesting(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
self._openstack = cli_base.OPENSTACK
self._env = cli_base.ENV
self._testcase = cli_base.TESTCASE
self._tier = cli_base.TIER
def test_os_check(self):
with mock.patch.object(self._openstack, 'check') as mock_method:
result = self.runner.invoke(cli_base.os_check)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_os_show_credentials(self):
with mock.patch.object(self._openstack, 'show_credentials') \
as mock_method:
result = self.runner.invoke(cli_base.os_show_credentials)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_env_show(self):
with mock.patch.object(self._env, 'show') as mock_method:
result = self.runner.invoke(cli_base.env_show)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_testcase_list(self):
with mock.patch.object(self._testcase, 'list') as mock_method:
result = self.runner.invoke(cli_base.testcase_list)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_testcase_show(self):
with mock.patch.object(self._testcase, 'show') as mock_method:
result = self.runner.invoke(cli_base.testcase_show, ['testname'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_testcase_run(self):
with mock.patch.object(self._testcase, 'run') as mock_method:
result = self.runner.invoke(cli_base.testcase_run,
['testname', '--noclean'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_list(self):
with mock.patch.object(self._tier, 'list') as mock_method:
result = self.runner.invoke(cli_base.tier_list)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_show(self):
with mock.patch.object(self._tier, 'show') as mock_method:
result = self.runner.invoke(cli_base.tier_show, ['tiername'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_gettests(self):
with mock.patch.object(self._tier, 'gettests') as mock_method:
result = self.runner.invoke(cli_base.tier_gettests, ['tiername'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_run(self):
with mock.patch.object(self._tier, 'run') as mock_method:
result = self.runner.invoke(cli_base.tier_run,
['tiername', '--noclean'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
|
buguen/pylayers
|
pylayers/antprop/examples/ex_antenna5.py
|
Python
|
lgpl-3.0
| 2,472
| 0.029126
|
from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluates the antenna vsh coefficient with a downsampling factor of 2
4 : evaluates the relative error of reconstruction (vsh3) for various values of order l
5 : displays the results
"""
filename = 'S1R1.mat'
A = Antenna(filename,'ant/UWBAN/Matfile')
B = Antenna(filename,'ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
B.Ftheta = B.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
B.Fphi = B.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
A = vsh(A,dsf)
B = vsh(B,dsf)
tn = []
tet = []
tep = []
te = []
tmse = []
l = 20
A.C.s1tos2(l)
B.C.s1tos2(l)
u = np.shape(A.C.Br.s2)
Nf = u[0]
Nk = u[1]
tr = np.arange(2,Nk)
A.C.s2tos3_new(Nk)
B.C.s2tos3(1e-6)
UA = np.sum(A.C.Cr.s3*np.conj(A.C.Cr.s3),axis=0)
UB = np.sum(B.C.Cr.s3*np.conj(B.C.Cr.s3),axis=0)
ua = A.C.Cr.ind3
ub = B.C.Cr.ind3
da ={}
db ={}
for k in range(Nk):
da[str(ua[k])]=UA[k]
db[str(ub[k])]=UB[k]
tu = []
for t in sort(da.keys()):
tu.append(da[t] - db[t])
errelTha,errelPha,errela = A.errel(l,20,dsf,typ='s3')
errelThb,errelPhb,errelb = B.errel(l,20,dsf,typ='s3')
print "a: nok",errela,errelPha,errelTha
print "b: ok ",errelb,errelPhb,errelThb
for r in tr:
E = A.C.s2tos3_new(r)
errelTh,errelPh,errel = A.errel(l,20,dsf,typ='s3')
print 'r : ',r,errel,E
tet.append(errelTh)
tep.append(errelPh)
te.append(errel)
#
line1 = plt.plot(array(tr),10*log10(array(tep)),'b')
line2 = plt.plot(array(tr),10*log10(array(tet)),'r')
line3 = plt.plot(array(tr),10*log10(array(te)),'g')
#
plt.xlabel('order l')
plt.ylabel(u'$\epsilon_{rel}$ (dB)',fontsize=18)
plt.title('Evolution of reconstruction relative error wrt order')
plt.legend((u'$\epsilon_{rel}^{\phi}$',u'$\epsilon_{rel}^{\\theta}$',u'$\epsilon_{rel}^{total}$'))
plt.legend((line1,line2,line3),('a','b','c'))
plt.show()
plt.legend(('errel_phi','errel_theta','errel'))
|
Kingdread/qutebrowser
|
qutebrowser/browser/tabhistory.py
|
Python
|
gpl-3.0
| 5,900
| 0.003559
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities related to QWebHistory."""
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QUrl
from qutebrowser.utils import utils, qtutils
HISTORY_STREAM_VERSION = 2
BACK_FORWARD_TREE_VERSION = 2
class TabHistoryItem:
"""A single item in the tab history.
Attributes:
url: The QUrl of this item.
original_url: The QUrl of this item which was originally requested.
title: The title as string of this item.
active: Whether this item is the item currently navigated to.
user_data: The user data for this item.
"""
def __init__(self, url, title, *, original_url=None, active=False,
user_data=None):
self.url = url
if original_url is None:
self.original_url = url
else:
self.original_url = original_url
self.title = title
self.active = active
self.user_data = user_data
def __repr__(self):
return utils.get_repr(self, constructor=True, url=self.url,
original_url=self.original_url, title=self.title,
active=self.active, user_data=self.user_data)
def _encode_url(url):
"""Encode an QUrl suitable to pass to QWebHistory."""
data = bytes(QUrl.toPercentEncoding(url.toString(), b':/#?&+=@%*'))
return data.decode('ascii')
def _serialize_item(i, item, stream):
"""Serialize a single WebHistoryItem into a QDataStream.
Args:
i: The index of the current item.
item: The WebHistoryItem to write.
stream: The QDataStream to write to.
"""
### Source/WebCore/history/qt/HistoryItemQt.cpp restoreState
## urlString
stream.writeQString(_encode_url(item.url))
## title
stream.writeQString(item.title)
## originalURLString
stream.writeQString(_encode_url(item.original_url))
### Source/WebCore/history/HistoryItem.cpp decodeBackForwardTree
## backForwardTreeEncodingVersion
stream.writeUInt32(BACK_FORWARD_TREE_VERSION)
## size (recursion stack)
stream.writeUInt64(0)
## node->m_documentSequenceNumber
# If two HistoryItems have the same document sequence number, then they
# refer to the same instance of a document. Traversing history from one
# such HistoryItem to another preserves the document.
stream.writeInt64(i + 1)
## size (node->m_documentState)
stream.writeUInt64(0)
## node->m_formContentType
# info used to repost form data
stream.writeQString(None)
## hasFormData
stream.writeBool(False)
## node->m_itemSequenceNumber
# If two HistoryItems have the same item sequence number, then they are
# clones of one another. Traversing history from one such HistoryItem to
# another is a no-op. HistoryItem clones are created for parent and
# sibling frames when only a subframe navigates.
stream.writeInt64(i + 1)
## node->m_referrer
stream.writeQString(None)
## node->m_scrollPoint (x)
try:
stream.writeInt32(item.user_data['scroll-pos'].x())
except (KeyError, TypeError):
stream.writeInt32(0)
## node->m_scrollPoint (y)
try:
stream.writeInt32(item.user_data['scroll-pos'].y())
except (KeyError, TypeError):
stream.writeInt32(0)
## node->m_pageScaleFactor
stream.writeFloat(1)
## hasStateObject
# Support for HTML5 History
stream.writeBool(False)
## node->m_target
stream.writeQString(None)
### Source/WebCore/history/qt/HistoryItemQt.cpp restoreState
## validUserData
# We could restore the user data here, but we prefer to use the
# QWebHistoryItem API for that.
stream.writeBool(False)
def serialize(items):
"""Serialize a list of QWebHistoryItems to a data stream.
Args:
items: An iterable of WebHistoryItems.
Return:
A (stream, data, user_data) tuple.
stream: The reseted QDataStream.
data: The QByteArray with the raw data.
user_data: A list with each item's user data.
Warning:
If 'data' goes out of scope, reading from 'stream' will result in a
segfault!
"""
data = QByteArray()
stream = QDataStream(data, QIODevice.ReadWrite)
user_data = []
current_idx = None
for i, item in enumerate(items):
if item.active:
if current_idx is not None:
raise ValueError("Multiple active items ({} and {}) "
"found!".format(current_idx, i))
else:
current_idx = i
if items:
if current_idx is None:
raise ValueError("No active item found!")
else:
current_idx = 0
### Source/WebKit/qt/Api/qwebhistory.cpp operator<<
stream.writeInt(HISTORY_STREAM_VERSION)
stream.writeInt(len(items))
stream.writeInt(current_idx)
for i, item in enumerate(items):
_serialize_item(i, item, stream)
user_data.append(item.user_data)
stream.device().reset()
qtutils.check_qdatastream(stream)
return stream, data, user_data
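# --- Illustrative sketch added by the editor; not part of the qutebrowser source. ---
# Minimal, hypothetical use of serialize() with the names defined above. It needs a
# working PyQt5 install; the URLs and titles are made up. Per the docstring's warning,
# 'data' must stay referenced for as long as 'stream' is read.
if __name__ == '__main__':  # pragma: no cover
    items = [
        TabHistoryItem(QUrl('https://example.com/'), 'Example', active=True),
        TabHistoryItem(QUrl('https://example.com/page'), 'Page two'),
    ]
    stream, data, user_data = serialize(items)
    print(data.size(), user_data)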
|
sdpython/pyquickhelper
|
_unittests/ut_pycode/test_venv_helper.py
|
Python
|
mit
| 902
| 0
|
"""
@brief test tree node (time=50s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from pyquickhelper.pycode.venv_helper import create_virtual_env
class TestVenvHelper(ExtTestCase):
def test_venv_empty(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if __name__ != "__main__":
# does not accept virtual environment
return
temp = get_temp_folder(__file__, "temp_venv_empty")
out = create_virtual_env(temp, fLOG=fLOG)
fLOG("-----")
fLOG(out)
fLOG("-----")
pyt = os.path.join(temp, "Scripts")
self.assertExists(pyt)
lo = os.listdir(pyt)
self.assertNotEmpty(lo)
if __name__ == "__main__":
unittest.main()
|
mprochnow/mpdav
|
mpdav/file_backend.py
|
Python
|
gpl-3.0
| 12,064
| 0.001078
|
# coding: utf-8
#
# This file is part of mpdav.
#
# mpdav is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpdav is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpdav. If not, see <http://www.gnu.org/licenses/>.
import md5
import mimetypes
import os.path
import shutil
import time
import multi_status
import response
import status
BLOCK_SIZE = 8192 # just an assumption
def epoch2iso8601(ts):
t = time.localtime(ts)
tz = (time.altzone if t.tm_isdst else time.timezone) / 3600 * -1
return time.strftime("%Y-%m-%dT%H:%M:%S", t) + "%+.02d:00" % tz
def epoch2iso1123(ts):
return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(ts))
class FileIterator(object):
def __init__(self, filename):
self.filename = filename
def __iter__(self):
remaining = os.stat(self.filename).st_size
f = open(self.filename, "rb")
while remaining > 0:
r = min(remaining, BLOCK_SIZE)
yield f.read(r)
remaining -= r
f.close()
class FileBackend(object):
def __init__(self, root, show_hidden=False, base_path="/"):
self.root = os.path.abspath(root)
self.show_hidden = show_hidden
self.base_path = base_path.rstrip("/")
def propfind(self, path, depth, request_xml):
# TODO implement support for allprop
paths = self._build_paths(path, depth)
return multi_status.MultiStatus(self._get_properties(paths, request_xml))
def _build_paths(self, path, depth):
path = path.strip("/")
path = os.path.abspath(os.path.join(self.root, path))
if path.startswith(self.root) and os.path.exists(path):
paths = [path]
if os.path.isdir(path) and depth == 1:
for p in os.listdir(path):
if self._show(p):
paths.append(os.path.join(path, p))
for i, p in enumerate(paths):
if os.path.isdir(p) and p[-1] != "/":
paths[i] = p + "/"
return paths
raise IOError
def _show(self, filename):
return self.show_hidden or not filename.startswith(".")
def _get_properties(self, paths, request_xml):
result = []
for p in paths:
prop_stat = multi_status.PropStat(status.OK)
try:
st = os.stat(p)
fs_st = os.statvfs(p.encode("utf-8"))
except:
continue
name = self._build_displayname(p)
is_dir = os.path.isdir(p)
for property_ in request_xml.find("{DAV:}propfind", "{DAV:}prop"):
if property_ == "{DAV:}resourcetype":
prop_stat.add_resourcetype(is_dir)
elif property_ == "{DAV:}creationdate":
prop_stat.add_creationdate(epoch2iso8601(st.st_ctime))
elif property_ == "{DAV:}displayname":
prop_stat.add_displayname(name)
elif property_ == "{DAV:}getcontentlength":
if not is_dir:
prop_stat.add_getcontentlength(st.st_size)
elif property_ == "{DAV:}getcontenttype":
if not is_dir:
ct = mimetypes.guess_type(p)[0] or "application/octet-stream"
prop_stat.add_getcontenttype(ct)
elif property_ == "{DAV:}getetag":
prop_stat.add_getetag(md5.new("%s%s" % (name.encode("utf-8"), st.st_mtime)).hexdigest())
elif property_ == "{DAV:}getlastmodified":
prop_stat.add_getlastmodified(epoch2iso1123(st.st_mtime))
elif property_ == "{DAV:}quota-available-bytes":
prop_stat.add_quota_available_bytes(fs_st.f_bavail * fs_st.f_frsize)
elif property_ == "{DAV:}quota-used-bytes":
prop_stat.add_quota_used_bytes((fs_st.f_blocks - fs_st.f_bavail) * fs_st.f_frsize)
else:
print "Request for not supported property %s" % property_
href = self.base_path + p[len(self.root):]
result.append(multi_status.Response(href, prop_stat))
return result
def _build_displayname(self, path):
cut = len(self.root)
return os.path.basename(os.path.normpath(path[cut:]))
def head(self, path):
return self.get(path, False)
def get(self, path, with_body=True):
filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
if not filename.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif not os.path.exists(filename):
return response.Response(status.NOT_FOUND)
if os.path.isdir(filename):
body = None
content_length = "0"
if with_body:
body = self._get_collection(filename)
content_length = str(len(body))
return response.Response(status.OK,
{"Content-Type": "text/html",
"Content-Length": content_length},
[body] if with_body else None)
else:
st = os.stat(filename)
headers = {"Content-Type": mimetypes.guess_type(filename)[0] or "application/octet-stream",
"Content-Length": str(st.st_size)}
return response.Response(status.OK,
headers,
FileIterator(filename) if with_body else None)
def _get_collection(self, path):
filenames = os.listdir(path)
directories = [f for f in filenames if self._show(f) and os.path.isdir(os.path.join(path, f))]
files = [f for f in filenames if self._show(f) and os.path.isfile(os.path.join(path, f))]
directories.sort(key=lambda d: d.lower())
files.sort(key=lambda f: f.lower())
filenames = directories + files
result = u"""\
<html>
<head>
<title>Content of %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head>
<body>
<ul style="padding:0;margin:0;list-style-type:none;">
""" % os.path.basename(path)
tplDirectory = """<li><a href="%s">[%s]</a></li>\n"""
tplFile = """<li><a href="%s">%s</a></li>\n"""
for f in filenames:
p = os.path.join(path, f)
href = self.base_path + p[len(self.root):]
if os.path.isdir(p):
result += tplDirectory % (href, f)
else:
result += tplFile % (href, f)
result += """\
</ul>
</body>
</html>
"""
return result.encode("utf-8")
def put(self, path, content_length, body):
filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
if not filename.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif os.path.isdir(filename):
return response.Response(status.NOT_ALLOWED)
elif not os.path.isdir(os.path.dirname(filename)):
return response.Response(status.CONFLICT)
created = not os.path.exists(filename)
f = open(filename, "wb")
if content_length:
remaining = content_length
while remaining > 0:
buf = body.read(min(remaining, BLOCK_SIZE))
if len(buf):
f.write(buf)
remaining -= len(buf)
else:
break
f.close()
if created:
return response.Response(status.CREATED)
else:
return response
|
befelix/GPy
|
GPy/likelihoods/link_functions.py
|
Python
|
bsd-3-clause
| 4,850
| 0.008454
|
# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
class GPTransformation(object):
"""
Link function class for doing non-Gaussian likelihoods approximation
:param Y: observed output (Nx1 numpy.darray)
.. note:: Y values allowed depend on the likelihood_function used
"""
def __init__(self):
pass
def transf(self,f):
"""
Gaussian process tranformation function, latent space -> output space
"""
raise NotImplementedError
def dtransf_df(self,f):
"""
derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d2transf_df2(self,f):
"""
second derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d3transf_df3(self,f):
"""
third derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
def _to_dict(self):
return {}
@staticmethod
def from_dict(input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
link_class = input_dict.pop('class')
import GPy
link_class = eval(link_class)
return link_class._from_dict(link_class, input_dict)
@staticmethod
def _from_dict(link_class, input_dict):
return link_class(**input_dict)
class Identity(GPTransformation):
"""
.. math::
g(f) = f
"""
def transf(self,f):
return f
def dtransf_df(self,f):
return np.ones_like(f)
def d2transf_df2(self,f):
return np.zeros_like(f)
def d3transf_df3(self,f):
return np.zeros_like(f)
def to_dict(self):
input_dict = super(Identity, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Identity"
return input_dict
class Probit(GPTransformation):
"""
.. math::
g(f) = \\Phi^{-1} (mu)
"""
def transf(self,f):
return std_norm_cdf(f)
def dtransf_df(self,f):
return std_norm_pdf(f)
def d2transf_df2(self,f):
return -f * std_norm_pdf(f)
def d3transf_df3(self,f):
return (safe_square(f)-1.)*std_norm_pdf(f)
def to_dict(self):
input_dict = super(Probit, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Probit"
return input_dict
class Cloglog(GPTransformation):
"""
Complementary log-log link
.. math::
p(f) = 1 - e^{-e^f}
or
f = \log (-\log(1-p))
"""
def transf(self,f):
ef = safe_exp(f)
return 1-np.exp(-ef)
def dtransf_df(self,f):
ef = safe_exp(f)
return np.exp(f-ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
return -np.exp(f-ef)*(ef-1.)
def d3transf_df3(self,f):
ef = safe_exp(f)
ef2 = safe_square(ef)
three_times_ef = safe_three_times(ef)
r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
return r_val
class Log(GPTransformation):
"""
.. math::
g(f) = \\log(\\mu)
"""
def transf(self,f):
return safe_exp(f)
def dtransf_df(self,f):
return safe_exp(f)
def d2transf_df2(self,f):
return safe_exp(f)
def d3transf_df3(self,f):
return safe_exp(f)
class Log_ex_1(GPTransformation):
"""
.. math::
g(f) = \\log(\\exp(\\mu) - 1)
"""
def transf(self,f):
return scipy.special.log1p(safe_exp(f))
def dtransf_df(self,f):
ef = safe_exp(f)
return ef/(1.+ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
return aux*(1.-aux)
def d3transf_df3(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
daux_df = aux*(1.-aux)
return daux_df - (2.*aux*daux_df)
class Reciprocal(GPTransformation):
def transf(self,f):
return 1./f
def dtransf_df(self, f):
f2 = safe_square(f)
return -1./f2
def d2transf_df2(self, f):
f3 = safe_cube(f)
return 2./f3
def d3transf_df3(self,f):
f4 = safe_quad(f)
return -6./f4
class Heaviside(GPTransformation):
"""
.. math::
g(f) = I_{x \\geq 0}
"""
def transf(self,f):
#transformation goes here
return np.where(f>0, 1, 0)
def dtransf_df(self,f):
raise NotImplementedError("This function is not differentiable!")
def d2transf_df2(self,f):
raise NotImplementedError("This function is not differentiable!")
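# --- Illustrative check added by the editor; not part of the GPy source. ---
# Finite-difference sanity check of Probit: dtransf_df should match the numerical
# derivative of transf. Assumes std_norm_cdf/std_norm_pdf accept numpy arrays, as
# they are used elsewhere in this module; np is already imported above.
if __name__ == '__main__':  # pragma: no cover
    probit = Probit()
    f = np.linspace(-3.0, 3.0, 13)
    eps = 1e-6
    numeric = (probit.transf(f + eps) - probit.transf(f - eps)) / (2.0 * eps)
    assert np.allclose(numeric, probit.dtransf_df(f), atol=1e-6)
    print('Probit derivative check passed')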
|
limodou/uliweb
|
uliweb/contrib/develop/__init__.py
|
Python
|
bsd-2-clause
| 318
| 0.012579
|
def dev_nav(active=None):
from uliweb import settings
out = "<span>"
for i in settings.MENUS_DEVELOP.nav:
if active!=i["name"]:
out += "<a href='%s'>%s<a> "%(i["link"],i["title"])
else:
out += "<strong>%s</strong> "%(i["title"])
out += "</span>"
return out
|
tommy-u/enable
|
examples/savage/toggle_demo.py
|
Python
|
bsd-3-clause
| 673
| 0.002972
|
import os
from traits.api import HasTraits
from traitsui.api import View, Item
from enable.savage.trait_defs.ui.svg_button import SVGButton
pause_icon = os.path.join(os.path.dirname(__file__), 'player_pause.svg')
resume_icon = os.path.join(os.path.dirname(__file__), 'player_play.svg')
class SVGDemo(HasTraits):
pause = SVGButton('Pause', filename=pause_icon,
toggle_filename=resume_icon,
toggle_state=True,
toggle_label='Resume',
toggle_tooltip='Resume',
tooltip='Pause', toggle=True)
trait_view = View(Item('pause'))
SVGDemo().configure_traits()
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/idlelib/rpc.py
|
Python
|
gpl-2.0
| 21,137
| 0.000899
|
"""RPC Implementation, originally written for the Python Idle IDE
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| socketserver.BaseRequestHandler | | SocketIO |
+---------------------------------+ +-------------+
^ | register() |
| | unregister()|
| +-------------+
| ^ ^
| | |
| + -------------------+ |
| | |
+-------------------------+ +-----------------+
| RPCHandler | | RPCClient |
| [attribute of RPCServer]| | |
+-------------------------+ +-----------------+
The RPCServer handler class is expected to provide register/unregister methods.
RPCHandler inherits the mix-in class SocketIO, which provides these methods.
See the Idle run.main() docstring for further information on how this was
accomplished in Idle.
"""
import builtins
import copyreg
import io
import marshal
import os
import pickle
import queue
import select
import socket
import socketserver
import struct
import sys
import threading
import traceback
import types
def unpickle_code(ms):
"Return code object from marshal string ms."
co = marshal.loads(ms)
assert isinstance(co, types.CodeType)
return co
def pickle_code(co):
"Return unpickle function and tuple with marshalled co code object."
assert isinstance(co, types.CodeType)
ms = marshal.dumps(co)
return unpickle_code, (ms,)
def dumps(obj, protocol=None):
"Return pickled (or marshalled) string for obj."
# IDLE passes 'None' to select pickle.DEFAULT_PROTOCOL.
f = io.BytesIO()
p = CodePickler(f, protocol)
p.dump(obj)
return f.getvalue()
class CodePickler(pickle.Pickler):
dispatch_table = {types.CodeType: pickle_code}
dispatch_table.update(copyreg.dispatch_table)
BUFSIZE = 8*1024
LOCALHOST = '127.0.0.1'
class RPCServer(socketserver.TCPServer):
def __init__(self, addr, handlerclass=None):
if handlerclass is None:
handlerclass = RPCHandler
socketserver.TCPServer.__init__(self, addr, handlerclass)
def server_bind(self):
"Override TCPServer method, no bind() phase for connecting entity"
pass
def server_activate(self):
"""Override TCPServer method, connect() instead of listen()
Due to the reversed connection, self.server_address is actually the
address of the Idle Client to which we are connecting.
"""
self.socket.connect(self.server_address)
def get_request(self):
"Override TCPServer method, return already connected socket"
return self.socket, self.server_address
def handle_error(self, request, client_address):
"""Override TCPServer method
Error message goes to __stderr__. No error message if exiting
normally or socket raised EOF. Other exceptions not handled in
server code will cause os._exit.
"""
try:
raise
except SystemExit:
raise
except:
erf = sys.__stderr__
print('\n' + '-'*40, file=erf)
print('Unhandled server exception!', file=erf)
print('Thread: %s' % threading.current_thread().name, file=erf)
print('Client Address: ', client_address, file=erf)
print('Request: ', repr(request), file=erf)
traceback.print_exc(file=erf)
print('\n*** Unrecoverable, server exiting!', file=erf)
print('-'*40, file=erf)
os._exit(0)
#----------------- end class RPCServer --------------------
objecttable = {}
request_queue = queue.Queue(0)
response_queue = queue.Queue(0)
class SocketIO(object):
nextseq = 0
def __init__(self, sock, objtable=None, debugging=None):
self.sockthread = threading.current_thread()
if debugging is not None:
self.debugging = debugging
self.sock = sock
if objtable is None:
objtable = objecttable
self.objtable = objtable
self.responses = {}
self.cvars = {}
def close(self):
sock = self.sock
self.sock = None
if sock is not None:
sock.close()
def exithook(self):
"override for specific exit action"
os._exit(0)
def debug(self, *args):
if not self.debugging:
return
s = self.location + " " + str(threading.current_thread().name)
for a in args:
s = s + " " + str(a)
print(s, file=sys.__stderr__)
def register(self, oid, object):
self.objtable[oid] = object
def unregister(self, oid):
try:
del self.objtable[oid]
except KeyError:
pass
def localcall(self, seq, request):
self.debug("localcall:", request)
try:
how, (oid, methodname, args, kwargs) = request
except TypeError:
return ("ERROR", "Bad request format")
if oid not in self.objtable:
return ("ERROR", "Unknown object id: %r" % (oid,))
obj = self.objtable[oid]
if methodname == "__methods__":
methods = {}
_getmethods(obj, methods)
return ("OK", methods)
if methodname == "__attributes__":
attributes = {}
_getattributes(obj, attributes)
return ("OK", attributes)
if not hasattr(obj, methodname):
return ("ERROR", "Unsupported method name: %r" % (methodname,))
method = getattr(obj, methodname)
try:
if how == 'CALL':
ret = method(*args, **kwargs)
if isinstance(ret, RemoteObject):
ret = remoteref(ret)
return ("OK", ret)
elif how == 'QUEUE':
request_queue.put((seq, (method, args, kwargs)))
return("QUEUED", None)
else:
return ("ERROR", "Unsupported message type: %s" % how)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except OSError:
raise
except Exception as ex:
return ("CALLEXC", ex)
except:
msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
" Object: %s \n Method: %s \n Args: %s\n"
print(msg % (oid, method, args), file=sys.__stderr__)
traceback.print_exc(file=sys.__stderr__)
return ("EXCEPTION", None)
def remotecall(self, oid, methodname, args, kwargs):
self.debug("remotecall:asynccall: ", oid, methodname)
seq = self.asynccall(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def remotequeue(self, oid, methodname, args, kwargs):
self.debug("remotequeue:asyncqueue: ", oid, methodname)
seq = self.asyncqueue(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def asynccall(self, oid, methodname, args, kwargs):
request = ("CALL", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.current_thread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncqueue(self, oid, methodname, args, kwargs):
request = ("QUEUE", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.current_thread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
|
okanasik/JdeRobot
|
src/tools/visualStates/samples/goforward/goforward.py
|
Python
|
gpl-3.0
| 4,201
| 0.035468
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import easyiceconfig as EasyIce
import jderobotComm as comm
import sys, signal
sys.path.append('/usr/local/share/jderobot/python/visualHFSM_py')
import traceback, threading, time
from automatagui import AutomataGui, QtGui, GuiSubautomata
from jderobot import MotorsPrx
from jderobot import LaserPrx
class Automata():
def __init__(self):
self.lock = threading.Lock()
self.displayGui = False
self.StatesSub1 = [
"GoForward",
"GoBack",
]
self.sub1 = "GoForward"
self.run1 = True
def calculate_obstacle(self):
self.laserData = self.KobukiLaser.getLaserData()
min_dist = 1000
for i in range(len(self.laserData.values)):
if self.laserData.values[i] < min_dist:
min_dist = self.laserData.values[i]
if min_dist < 1.0:
return True
else:
return False
def startThreads(self):
self.t1 = threading.Thread(target=self.subautomata1)
self.t1.start()
def createAutomata(self):
guiSubautomataList = []
# Creating subAutomata1
guiSubautomata1 = GuiSubautomata(1,0, self.automataGui)
guiSubautomata1.newGuiNode(1, 0, 69, 163, 1, 'GoForward')
guiSubautomata1.newGuiNode(2, 0, 255, 117, 0, 'GoBack')
guiSubautomata1.newGuiTransition((69, 163), (255, 117), (139, 78), 1, 1, 2)
guiSubautomata1.newGuiTransition((255, 117), (69, 163), (189, 196), 2, 2, 1)
guiSubautomataList.append(guiSubautomata1)
return guiSubautomataList
def shutDown(self):
self.run1 = False
def runGui(self):
app = QtGui.QApplication(sys.argv)
self.automataGui = AutomataGui()
self.automataGui.setAutomata(self.createAutomata())
self.automataGui.loadAutomata()
self.startThreads()
self.automataGui.show()
app.exec_()
def subautomata1(self):
self.run1 = True
cycle = 100
t_activated = False
t_fin = 0
while(self.run1):
totala = time.time() * 1000000
# Evaluation if
if(self.sub1 == "GoForward"):
if(self.calculate_obstacle()):
self.sub1 = "GoBack"
if self.displayGui:
self.automataGui.notifySetNodeAsActive('GoBack')
elif(self.sub1 == "GoBack"):
if(not self.calculate_obstacle()):
self.sub1 = "GoForward"
if self.displayGui:
self.automataGui.notifySetNodeAsActive('GoForward')
# Actuation if
if(self.sub1 == "GoForward"):
self.KobukiMotors.sendV(0.5)
self.KobukiMotors.sendW(0.0)
elif(self.sub1 == "GoBack"):
self.KobukiMotors.sendV(-0.3)
self.KobukiMotors.sendW(0.2)
totalb = time.time() * 1000000
msecs = (totalb - totala) / 1000;
if(msecs < 0 or msecs > cycle):
msecs = cycle
else:
msecs = cycle - msecs
time.sleep(msecs / 1000)
if(msecs < 33 ):
time.sleep(33 / 1000);
def connectToProxys(self):
self.ic = EasyIce.initialize(sys.argv)
self.ic,self.node = comm.init(self.ic)
# Contact to KobukiMotors
self.KobukiMotors = comm.getMotorsClient(self.ic, 'automata.KobukiMotors')
if(not self.KobukiMotors):
raise Exception('could not create client with KobukiMotors')
print('KobukiMotors connected')
# Contact to KobukiLaser
self.KobukiLaser = comm.getLaserClient(self.ic, 'automata.KobukiLaser')
if(not self.KobukiLaser):
raise Exception('could not create client with KobukiLaser')
print('KobukiLaser connected')
def destroyIc(self):
self.KobukiMotors.stop()
self.KobukiLaser.stop()
comm.destroy(self.ic, self.node)
def start(self):
if self.displayGui:
self.guiThread = threading.Thread(target=self.runGui)
self.guiThread.start()
else:
self.startThreads()
def join(self):
if self.displayGui:
self.guiThread.join()
self.t1.join()
def readArgs(self):
for arg in sys.argv:
splitedArg = arg.split('=')
if splitedArg[0] == '--displaygui':
if splitedArg[1] == 'True' or splitedArg[1] == 'true':
self.displayGui = True
print('runtime gui enabled')
else:
self.displayGui = False
print('runtime gui disabled')
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_DFL)
automata = Automata()
try:
automata.connectToProxys()
automata.readArgs()
automata.start()
automata.join()
sys.exit(0)
except:
traceback.print_exc()
automata.destroyIc()
sys.exit(-1)
|
cloudtools/awacs
|
awacs/iotanalytics.py
|
Python
|
bsd-2-clause
| 2,141
| 0.000467
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS IoT Analytics"
prefix = "iotanalytics"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
BatchPutMessage = Action("BatchPutMessage")
CancelPipelineReprocessing = Action("CancelPipelineReprocessing")
CreateChannel = Action("CreateChannel")
CreateDataset = Action("CreateDataset")
CreateDatasetContent = Action("CreateDatasetContent")
CreateDatastore = Action("CreateDatastore")
CreatePipeline = Action("CreatePipeline")
DeleteChannel = Action("DeleteChannel")
DeleteDataset = Action("DeleteDataset")
DeleteDatasetContent = Action("DeleteDatasetContent")
DeleteDatastore = Action("DeleteDatastore")
DeletePipeline = Action("DeletePipeline")
DescribeChannel = Action("DescribeChannel")
DescribeDataset = Action("DescribeDataset")
DescribeDatastore = Action("DescribeDatastore")
DescribeLoggingOptions = Action("DescribeLoggingOptions")
DescribePipeline = Action("DescribePipeline")
GetDatasetContent = Action("GetDatasetContent")
ListChannels = Action("ListChannels")
ListDatasetContents = Action("ListDatasetContents")
ListDatasets = Action("ListDatasets")
ListDatastores = Action("ListDatastores")
ListPipelines = Action("ListPipelines")
ListTagsForResource = Action("ListTagsForResource")
PutLoggingOptions = Action("PutLoggingOptions")
RunPipelineActivity = Action("RunPipelineActivity")
SampleChannelData = Action("SampleChannelData")
StartPipelineReprocessing = Action("StartPipelineReprocessing")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateChannel = Action("UpdateChannel")
UpdateDataset = Action("UpdateDataset")
UpdateDatastore = Action("UpdateDatastore")
UpdatePipeline = Action("UpdatePipeline")
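# --- Illustrative sketch added by the editor; not part of the awacs source. ---
# One way the actions above can be combined into an IAM policy statement, assuming
# awacs.aws exposes Allow and Statement as in the project README. The region,
# account id and dataset path are made up.
if __name__ == "__main__":  # pragma: no cover
    from awacs.aws import Allow, Statement
    read_datasets = Statement(
        Effect=Allow,
        Action=[ListDatasets, DescribeDataset, GetDatasetContent],
        Resource=[ARN(resource="dataset/*", region="us-east-1", account="123456789012")],
    )
    # read_datasets can now be attached to a policy document.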
|
BirkbeckCTP/janeway
|
jenkins/janeway_settings.py
|
Python
|
agpl-3.0
| 160
| 0.00625
|
INSTALLED_APPS= ["django_nose"]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-xunit',
'--xunit-file=jenkins/nosetests.xml',
]
| |
anchore/anchore-engine
|
anchore_engine/analyzers/syft/handlers/python.py
|
Python
|
apache-2.0
| 2,570
| 0.002724
|
import os
from anchore_engine.analyzers.syft.handlers.common import save_entry_to_findings
from anchore_engine.analyzers.utils import dig
def save_entry(findings, engine_entry, pkg_key=None):
if not pkg_key:
pkg_name = engine_entry.get("name", "")
pkg_version = engine_entry.get(
"version", engine_entry.get("latest", "")
) # rethink this... ensure it's right
pkg_key = engine_entry.get(
"location",
"/virtual/pypkg/site-packages/{}-{}".format(pkg_name, pkg_version),
)
save_entry_to_findings(findings, engine_entry, "pkgs.python", pkg_key)
def translate_and_save_entry(findings, artifact):
"""
Handler function to map syft results for the python package type into the engine "raw" document format.
"""
if "python-package-cataloger" not in artifact["foundBy"]:
# engine only includes python findings for egg and wheel installations (with rich metadata)
return
site_pkg_root = artifact["metadata"]["sitePackagesRootPath"]
name = artifact["name"]
# anchore engine always uses the name, however, the name may not be a top-level package
# instead default to the first top-level package unless the name is listed among the
# top level packages explicitly defined in the metadata. Note that the top-level package
# is optional!
pkg_key_names = dig(artifact, "metadata", "topLevelPackages", force_default=[])
pkg_key_name = None
for key_name in pkg_key_names:
if name in key_name:
pkg_key_name = name
else:
pkg_key_name = key_name
if not pkg_key_name:
pkg_key_name = name
pkg_key = os.path.join(site_pkg_root, pkg_key_name)
origin = dig(artifact, "metadata", "author", force_default="")
email = dig(artifact, "metadata", "authorEmail", default=None)
if email:
origin += " <%s>" % email
files = []
for file in dig(artifact, "metadata", "files", force_default=[]):
files.append(os.path.join(site_pkg_root, file["path"]))
# craft the artifact document
pkg_value = {
"name": name,
"version": artifact["version"],
"latest": artifact["version"],
"files": files,
"origin": origin,
"license": dig(artifact, "metadata", "license", force_default=""),
"location": site_pkg_root,
"type": "python",
"cpes": artifact.get("cpes", []),
}
# inject the artifact document into the "raw" analyzer document
save_entry(findings, pkg_value, pkg_key)
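# --- Illustrative sketch added by the editor; not part of the anchore-engine source. ---
# A minimal, made-up syft artifact run through the handler above. Package name,
# version and paths are hypothetical, and a plain dict is assumed to be an
# acceptable findings accumulator for save_entry_to_findings().
if __name__ == "__main__":  # pragma: no cover
    findings = {}
    artifact = {
        "foundBy": "python-package-cataloger",
        "name": "requests",
        "version": "2.27.1",
        "cpes": [],
        "metadata": {
            "sitePackagesRootPath": "/usr/lib/python3.9/site-packages",
            "files": [{"path": "requests/__init__.py"}],
        },
    }
    translate_and_save_entry(findings, artifact)
    print(findings)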
|
matrix-org/synapse
|
contrib/experiments/cursesio.py
|
Python
|
apache-2.0
| 4,229
| 0.000473
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
import curses.wrapper
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ""
self.stdscr = stdscr
self.logLine = ""
self.callback = callback
self._setup()
def _setup(self):
self.stdscr.nodelay(1) # Make non blocking
self.rows, self.cols = self.stdscr.getmaxyx()
self.lines = []
curses.use_default_colors()
self.paintStatus(self.statusText)
self.stdscr.refresh()
def set_callback(self, callback):
self.callback = callback
def fileno(self):
"""We want to select on FD 0"""
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
"""add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
def print_log(self, text):
self.logLine = text
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
i = i + 1
index = index - 1
self.printLogLine(self.logLine)
self.stdscr.refresh()
def paintStatus(self, text):
if len(text) > self.cols:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def doRead(self):
"""Input is ready!"""
curses.noecho()
c = self.stdscr.getch() # read a character
if c == curses.KEY_BACKSPACE:
self.searchText = self.searchText[:-1]
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ""
self.print_line(">> %s" % text)
try:
if self.callback:
self.callback.on_line(text)
except Exception as e:
self.print_line(str(e))
self.stdscr.refresh()
elif isprint(c):
if len(self.searchText) == self.cols - 2:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(
self.rows - 1,
0,
self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
)
self.paintStatus(self.statusText + " %d" % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
def logPrefix(self):
return "CursesStdIO"
def close(self):
"""clean up"""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
class Callback:
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
screen.set_callback(callback)
stdscr.refresh()
reactor.addReader(screen)
reactor.run()
screen.close()
if __name__ == "__main__":
curses.wrapper(main)
|
m42e/jirash
|
lib/jirashell.py
|
Python
|
mit
| 48,170
| 0.003405
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# A Jira shell (using the Jira XML-RPC API).
#
# <https://confluence.atlassian.com/display/JIRA042/Creating+a+XML-RPC+Client>
# <http://docs.atlassian.com/software/jira/docs/api/rpc-jira-plugin/latest/com/atlassian/jira/rpc/xmlrpc/XmlRpcService.html>
#
__version__ = "1.6.0"
import warnings
warnings.filterwarnings("ignore", module="wstools.XMLSchema", lineno=3107)
# Ignore this:
# /opt/local/lib/python2.6/xmlrpclib.py:612: DeprecationWarning: The xmllib module is obsolete.
warnings.filterwarnings("ignore", module="xmlrpclib", lineno=612)
import getpass
import os
import sys
import logging
from pprint import pprint
import json
import xmlrpclib
import time
import codecs
import operator
import webbrowser
import re
TOP = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, os.path.join(TOP, "deps"))
import cmdln
import requests
# This is a total hack for <https://github.com/trentm/jirash/issues/2>.
# It ensures that utf-8 is used for implicit string conversion deep
# in httplib.py for Python 2.7 (which changed from 2.6 resulting in
# that conversion).
if sys.version_info >= (2, 7):
reload(sys)
sys.setdefaultencoding('utf-8')
#---- globals and config
log = logging.getLogger("jirash")
#---- exceptions
class JiraShellError(Exception):
pass
class JiraShellUsageError(JiraShellError):
pass
#---- monkey-patching
def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search):
# decode non-ascii string (if possible)
if unicode and encoding and is8bit(data):
data = unicode(data, encoding, 'replace')
return data
xmlrpclib._decode = _decode
def _isint(s):
try:
int(s)
except ValueError:
return False
else:
return True
#---- Jira API
class Jira(object):
def __init__(self, jira_url, username, password):
self.jira_url = jira_url
self.username = username
self.password = password
self.server = xmlrpclib.ServerProxy(jira_url + '/rpc/xmlrpc',
verbose=False)
self.auth = self.server.jira1.login(username, password)
# WARNING: if we allow a longer jira shell session, then caching
# might need invalidation.
self.cache = {}
_soap_server = None
_soap_auth = None
def _get_soap_server(self):
try:
import pyexpat
except ImportError:
msg = ("Your Python (%s) doesn't have the 'pyexpat' module "
"needed to call the Jira SOAP API. You must install that "
"and retry." % sys.executable)
how = howto_install_pyexpat()
if how:
msg += " You could try `%s`." % how
raise JiraShellUsageError(msg)
import SOAPpy
from StringIO import StringIO
if not self._soap_server:
soap_url = self.jira_url + '/rpc/soap/jirasoapservice-v2?wsdl'
try:
oldStdout = sys.stdout
sys.stdout = StringIO() # trap log output from WSDL parsing
self._soap_server = SOAPpy.WSDL.Proxy(soap_url)
finally:
sys.stdout = oldStdout
self._soap_auth = self._soap_server.login(
self.username, self.password)
return self._soap_server, self._soap_auth
def _jira_soap_call(self, methodName, args):
server, auth = self._get_soap_server()
authedArgs = [auth] + args
out = getattr(server, methodName)(*authedArgs)
typeName = out._typeName()
if typeName == "struct":
return out._asdict()
elif typeName == "typedArray":
outList = [item._asdict() for item in out._aslist()]
return outList
else:
raise JiraShellError("unknown SOAPpy outparam type: '%s'" % typeName)
def _jira_rest_call(self, method, path, **kwargs):
"""Typical kwargs (from `requests`) are:
- params
- data
- headers
"""
url = self.jira_url + '/rest/api/2' + path
r = requests.request(method, url, auth=(self.username, self.password),
**kwargs)
return r
def filters(self):
if "filters" not in self.cache:
filters = self.server.jira1.getFavouriteFilters(self.auth)
filters.sort(key=operator.itemgetter("name"))
self.cache["filters"] = filters
return self.cache["filters"]
def user(self, username):
return self.server.jira1.getUser(self.auth, username)
def projects(self):
if "projects" not in self.cache:
projects = self.server.jira1.getProjectsNoSchemes(self.auth)
projects = [p for p in projects if "Archived" not in p["name"]]
projects.sort(key=operator.itemgetter("key"))
self.cache["projects"] = projects
return self.cache["projects"]
def project(self, key):
projects = self.projects()
for p in projects:
if p["key"] == key:
return p
else:
raise JiraShellError("unknown project: %r" % key)
def priorities(self):
if "priorities" not in self.cache:
priorities = self.server.jira1.getPriorities(self.auth)
self.cache["priorities"] = priorities
return self.cache["priorities"]
def priority(self, priority_id):
assert isinstance(priority_id, str)
for p in self.priorities():
if p["id"] == priority_id:
return p
else:
raise JiraShellError("unknown priority: %r" % priority_id)
def issue_link_types(self):
if "issue_link_types" not in self.cache:
res = self._jira_rest_call("GET", "/issueLinkType")
if res.status_code != 200:
raise JiraShellError("error getting issue link types: %s"
% res.text)
self.cache["issue_link_types"] = res.json()["issueLinkTypes"]
return self.cache["issue_link_types"]
def link(self, link_type_name, inward_issue_key, outward_issue_key):
"""Link issue.
E.g. making PROJ-123 a dup of PROJ-100 would be:
<jira>.link('Duplicate', 'PROJ-123', 'PROJ-100')
where 'Duplicate' is the link type "name" (as from `.link_types()`).
"""
data = {
"type": {
"name": link_type_name
},
"inwardIssue": {
"key": inward_issue_key
},
"outwardIssue": {
"key": outward_issue_key
}
}
res = self._jira_rest_call('POST', '/issueLink',
headers={'content-type': 'application/json'},
data=json.dumps(data))
if res.status_code != 201:
raise JiraShellError('error linking (%s, %s, %s): %s %s'
% (link_type_name, inward_issue_key, outward_issue_key,
res.status_code, res.text))
def issue(self, key):
#XXX
# It's right under 'issuelinks' in each issue's JSON representation. Example:
#
#https://jira.atlassian.com/rest/api/latest/issue/JRA-9?fields=summary,issuelinks
return self.server.jira1.getIssue(self.auth, key)
def issues_from_filter(self, filter):
"""Return all issues for the given filter.
@param filter {String} Filter (saved search) to use. The given
argument can be the filter id, name, or a unique substring or
multi-term substring (e.g. 'foo bar' would match 'Filter foo
and bar') of the name.
"""
# Find the filter.
filterObj = None
filters = self.filters()
# - if int, then try id match first
if _isint(filter):
filter = int(filter)
for f in filters:
if int(f["id"]) == filter:
filterObj = f
break
else:
raise JiraShellError("no filter with id %r" % filter)
if not filterObj:
# - try full name match
for f in filters:
if f["name"] == filte
|
dol-sen/portage
|
repoman/pym/repoman/tests/runTests.py
|
Python
|
gpl-2.0
| 1,959
| 0.009188
|
#!/usr/bin/env python
# runTests.py -- Portage Unit Test Functionality
# Copyright 2006-2017 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os, sys
import os.path as osp
import grp
import platform
import pwd
import signal
def debug_signal(signum, frame):
import pdb
pdb.set_trace()
if platform.python_implementation() == 'Jython':
debug_signum = signal.SIGUSR2 # bug #424259
else:
debug_signum = signal.SIGUSR1
signal.signal(debug_signum, debug_signal)
# Pretend that the current user's uid/gid are the 'portage' uid/gid,
# so things go smoothly regardless of the current user and global
# user/group configuration.
os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
# Insert our parent dir so we can do shiny import "tests"
# This line courtesy of Marienz and Pkgcore ;)
repoman_pym = osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__))))
sys.path.insert(0, repoman_pym)
# Add in the parent portage python modules
portage_pym = osp.dirname(osp.dirname(repoman_pym))+'/pym'
sys.path.insert(0, portage_pym)
# import our centrally initialized portage instance
from repoman._portage import portage
|
portage._internal_caller = True
# Ensure that we don't instantiate portage.settings, so that tests should
# work the same regardless of global configuration file state/existence.
portage._disable_legacy_globals()
if os.environ.get('NOCOLOR') in ('yes', 'true'):
portage.output.nocolor()
import repoman.tests as tests
from portage.const import PORTAGE_BIN_PATH
path =
|
os.environ.get("PATH", "").split(":")
path = [x for x in path if x]
insert_bin_path = True
try:
insert_bin_path = not path or \
not os.path.samefile(path[0], PORTAGE_BIN_PATH)
except OSError:
pass
if insert_bin_path:
path.insert(0, PORTAGE_BIN_PATH)
os.environ["PATH"] = ":".join(path)
if __name__ == "__main__":
sys.exit(tests.main())
|
PMEAL/OpenPNM
|
tests/unit/io/STLTest.py
|
Python
|
mit
| 1,388
| 0
|
import os
import py
import pytest
import numpy as np
import openpnm as op
from openpnm.models.misc import from_neighbor_pores
@pytest.mark.skip(reason="'netgen' is only available on conda")
class STLTest:
|
def setup_class(self):
np.random.seed(10)
self.net = op.network.Cubic(shape=[2, 2, 2])
self.
|
net["pore.diameter"] = 0.5 + np.random.rand(self.net.Np) * 0.5
Dt = from_neighbor_pores(target=self.net, prop="pore.diameter") * 0.5
self.net["throat.diameter"] = Dt
self.net["throat.length"] = 1.0
def teardown_class(self):
os.remove(f"{self.net.name}.stl")
os.remove("custom_stl.stl")
def test_export_data_stl(self):
op.io.to_stl(network=self.net)
assert os.path.isfile(f"{self.net.name}.stl")
op.io.to_stl(network=self.net, filename="custom_stl")
assert os.path.isfile("custom_stl.stl")
if __name__ == '__main__':
# All the tests in this file can be run with 'playing' this file
t = STLTest()
self = t # For interacting with the tests at the command line
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print(f'Running test: {item}')
try:
t.__getattribute__(item)()
except TypeError:
t.__getattribute__(item)(tmpdir=py.path.local())
t.teardown_class()
|
sorenh/cc
|
nova/__init__.py
|
Python
|
apache-2.0
| 1,336
| 0.000749
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. Yo
|
u may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distrib
|
uted under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova` -- Cloud IaaS Platform
===================================
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
from exception import *
|
RedHatInsights/insights-core
|
insights/parsers/etc_udev_rules.py
|
Python
|
apache-2.0
| 1,878
| 0.001065
|
"""
EtcUdevRules - file ``/etc/udev/rules.d/``
============================
|
==============
This module is similar to the :py:mod:`insights.parsers.udev_rules` module,
but parses the ``.rules`` files under the ``/etc/udev/rules.d/`` directory instead.
The parsers included in this mo
|
dule are:
UdevRules40Redhat - file ``/etc/udev/rules.d/40-redhat.rules``
--------------------------------------------------------------
"""
from insights import parser
from insights.core import LogFileOutput
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.etc_udev_40_redhat_rules)
class UdevRules40Redhat(LogFileOutput):
"""
.. warning::
This parser is deprecated, please use
:py:class:`insights.parsers.udev_rules.UdevRules40Redhat` instead.
Read the content of ``/etc/udev/rules.d/40-redhat.rules`` file.
.. note::
        The syntax of the `.rules` file is complex, and no rule currently
        requires the serialized parsed result.  The only existing rule is
        expected to check the syntax of some specific lines, so
        :class:`insights.core.LogFileOutput` is used as the base class here.
Sample input::
# do not edit this file, it will be overwritten on update
# CPU hotadd request
SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"
# Memory hotadd request
SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end"
PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end"
LABEL="memory_hotplug_end"
Examples:
>>> 'LABEL="memory_hotplug_end"' in udev_rules.lines
True
"""
def __init__(self, *args, **kwargs):
        deprecated(UdevRules40Redhat, "Import UdevRules40Redhat from insights.parsers.udev_rules instead.")
super(UdevRules40Redhat, self).__init__(*args, **kwargs)
|
avisingh599/NeuralModels
|
character-rnn/char-rnn.py
|
Python
|
mit
| 1,663
| 0.048707
|
import numpy as np
import theano
from theano import tensor as T
from generateTrainDataonText import createTrain
from neuralmodels.utils import permute
from neuralmodels.loadcheckpoint import *
from neuralmodels.costs import softmax_loss
from neuralmodels.models import *
from neuralmodels.predictions import OutputMaxProb, OutputSampleFromDis
|
crete
from neuralmodels.layers import *
def text_prediction(class_ids_reverse,p_labels):
N = p_labels.shape[1]
T = p_labels.shape[0]
text_output = []
for i in range(N):
t = ''
for j in p_labels[:,i]:
t = t + class_ids_reverse[j]
text_output.append(t)
return text_output
if __name__ == '__main__':
num_sam
|
ples = 10000
num_validation = 100
num_train = num_samples - num_validation
len_samples = 300
epochs = 30
batch_size = 100
learning_rate_decay = 0.97
decay_after=5
[X,Y,num_classes,class_ids_reverse] = createTrain('shakespeare_input.txt',num_samples,len_samples)
inputD = num_classes
outputD = num_classes
permutation = permute(num_samples)
X = X[:,permutation]
Y = Y[:,permutation]
X_tr = X[:,:num_train]
Y_tr = Y[:,:num_train]
X_valid = X[:,num_train:]
Y_valid = Y[:,num_train:]
# Creating network layers
layers = [OneHot(num_classes),LSTM(),LSTM(),LSTM(),softmax(num_classes)]
trY = T.lmatrix()
# Initializing network
rnn = RNN(layers,softmax_loss,trY,1e-3)
# Fitting model
rnn.fitModel(X_tr,Y_tr,1,'checkpoints/',epochs,batch_size,learning_rate_decay,decay_after)
# Printing a generated sentence
out = rnn.predict_language_model(X_valid[:,:1],1000,OutputSampleFromDiscrete)
# Print the sentence here
text_produced = text_prediction(class_ids_reverse,out)
|
kyunooh/pingdumb
|
pingdumb/conf.py
|
Python
|
apache-2.0
| 2,422
| 0.002064
|
import getpass
import json
import getopt
from genericpath import isfile
from os.path import sep
from pingdumb.main_module import url_type
def read_config():
f_path = "." + sep + "pingdumb.json"
if not isfile(f_path):
f = open(f_path, 'w')
conf = {
"url": "jellyms.kr",
"smtpServer": "smtp.gmail.com:587",
"smtpUser": "",
"toEmail": "",
"interval": 300,
}
f.write(json.dumps(conf))
f.close()
return conf
else:
f = open(f_path, 'r+b')
conf = json.loads(f.read().decode('utf-8'))
f.close()
return conf
def write_config(conf):
if 'smtpPw' in conf:
del conf['smtpPw']
f_path = "." + sep + "pingdumb.json"
f = open(f_path, 'w')
f.truncate()
f.write(json.dumps(conf))
f.close()
def input_conf(message, default):
value = input(message)
if not value:
return default
return value
def set_config():
configure = read_config()
url_for_test = input_conf(
"URL to test? (" + configure["url"] + ")", configure["url"]
)
url_for_test = url_type(url_for_test)
recv_mail = input_conf(
"Receive mail? (" + configure["toEmail"] + ")",
configure["toEmail"]
)
s_server = input_conf(
"SMTP server? (" + configure["smtpServer"] + ")",
configure["smtpServer"]
)
s_user = input_conf(
"SMTP Server username? (" + configure["smtpUser"] + ")",
configure["smtpUser"]
)
s_pw = getpass.getpass("SMTP Server password?", "")
interval = input_conf(
"interval of seconds? (" + str(configure["interval"]) + ")",
configure["interval"]
)
interval = int(interval)
configure["url"] = url_for_test
configure["toEmail"] = recv_mail
configure["smtpServer"
|
] = s_server
configure["smtpUser"] = s_user
configure["smtpPw"] = s_pw
configure["interval"] = interval
return configure
def configure_to_tuple():
configure = read
|
_config()
return configure["url"], configure["smtpServer"], \
configure["smtpUser"], configure["toEmail"], configure["interval"]
def extract_password_with_argv(argv):
opts, args = getopt.getopt(argv, 'p')
for o, a in opts:
if o == "-p":
return getpass.getpass("SMTP Server password", "")
|
Jonekee/chromium.src
|
native_client_sdk/src/build_tools/tests/easy_template_test.py
|
Python
|
bsd-3-clause
| 3,559
| 0.006182
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import difflib
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import easy_template
class EasyTemplateTestCase(unittest.TestCase):
def _RunTest(self, template, expected, template_dict):
src = cStringIO.StringIO(template)
dst = cStringIO.StringIO()
easy_template.RunTemplate(src, dst, template_dict)
if dst.getvalue() != expected:
expected_
|
lines = expected.splitlines
|
(1)
actual_lines = dst.getvalue().splitlines(1)
diff = ''.join(difflib.unified_diff(
expected_lines, actual_lines,
fromfile='expected', tofile='actual'))
self.fail('Unexpected output:\n' + diff)
def testEmpty(self):
self._RunTest('', '', {})
def testNewlines(self):
self._RunTest('\n\n', '\n\n', {})
def testNoInterpolation(self):
template = """I love paris in the
the springtime [don't you?]
{this is not interpolation}.
"""
self._RunTest(template, template, {})
def testSimpleInterpolation(self):
self._RunTest(
'{{foo}} is my favorite number',
'42 is my favorite number',
{'foo': 42})
def testLineContinuations(self):
    template = "Line 1 \\\nLine 2\n"
self._RunTest(template, template, {})
def testIfStatement(self):
template = r"""
[[if foo:]]
foo
[[else:]]
not foo
[[]]"""
self._RunTest(template, "\n foo\n", {'foo': True})
self._RunTest(template, "\n not foo\n", {'foo': False})
def testForStatement(self):
template = r"""[[for beers in [99, 98, 1]:]]
{{beers}} bottle{{(beers != 1) and 's' or ''}} of beer on the wall...
[[]]"""
expected = r"""99 bottles of beer on the wall...
98 bottles of beer on the wall...
1 bottle of beer on the wall...
"""
self._RunTest(template, expected, {})
def testListVariables(self):
template = r"""
[[for i, item in enumerate(my_list):]]
{{i+1}}: {{item}}
[[]]
"""
self._RunTest(template, "\n1: Banana\n2: Grapes\n3: Kumquat\n",
{'my_list': ['Banana', 'Grapes', 'Kumquat']})
def testListInterpolation(self):
template = "{{', '.join(growing[0:-1]) + ' and ' + growing[-1]}} grow..."
self._RunTest(template, "Oats, peas, beans and barley grow...",
{'growing': ['Oats', 'peas', 'beans', 'barley']})
self._RunTest(template, "Love and laughter grow...",
{'growing': ['Love', 'laughter']})
def testComplex(self):
template = r"""
struct {{name}} {
[[for field in fields:]]
[[ if field['type'] == 'array':]]
{{field['basetype']}} {{field['name']}}[{{field['size']}}];
[[ else:]]
{{field['type']}} {{field['name']}};
[[ ]]
[[]]
};"""
expected = r"""
struct Foo {
std::string name;
int problems[99];
};"""
self._RunTest(template, expected, {
'name': 'Foo',
'fields': [
{'name': 'name', 'type': 'std::string'},
{'name': 'problems', 'type': 'array', 'basetype': 'int', 'size': 99}]})
def testModulo(self):
self._RunTest('No expression %', 'No expression %', {})
self._RunTest('% before {{3 + 4}}', '% before 7', {})
self._RunTest('{{2**8}} % after', '256 % after', {})
self._RunTest('inside {{8 % 3}}', 'inside 2', {})
self._RunTest('Everywhere % {{8 % 3}} %', 'Everywhere % 2 %', {})
if __name__ == '__main__':
unittest.main()
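# --- Editor's illustrative sketch (not part of the original test file) ---
# The {{expression}} half of the template syntax exercised above can be
# reproduced in a few lines with re.sub; the names below are invented, and the
# real easy_template module also handles the [[statement]] blocks, which this
# sketch skips.
import re

def expand_expressions(template, env):
    """Replace every {{expr}} with str(eval(expr, env))."""
    return re.sub(r'\{\{(.*?)\}\}', lambda m: str(eval(m.group(1), dict(env))), template)

print(expand_expressions('{{foo}} is my favorite number', {'foo': 42}))
# -> 42 is my favorite number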
|
Tuteria/Recruitment-test
|
config/wsgi.py
|
Python
|
mit
| 1,461
| 0
|
"""
WSGI config for Tuteria-Application-Test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is u
|
sed by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(applica
|
tion)
|
ageek/confPyNotebooks
|
sklearn-scipy-2013/solutions/08B_digits_clustering.py
|
Python
|
gpl-2.0
| 767
| 0.003911
|
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits

digits = load_digits()
kmeans = KMeans(n_clusters=10)
clusters = kmeans.fit_predict(digits.data)
print(kmeans.cluster_centers_.shape)
#------------------------------------------------------------
# visualize the cluster centers
fig = plt.figure(figsize=(8, 3))
for i in range(10):
ax = fig.add_subplot(2, 5, 1 + i)
ax.imshow(kmeans.cluster_centers_[i].reshape((8, 8)),
cmap=plt.cm.binary)
from sklearn.manifold
|
import Isomap
X_iso = Isomap(n_neighbors=10).fit_transform(digits.data)
|
#------------------------------------------------------------
# visualize the projected data
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].scatter(X_iso[:, 0], X_iso[:, 1], c=clusters)
ax[1].scatter(X_iso[:, 0], X_iso[:, 1], c=digits.target)
|
l33tdaima/l33tdaima
|
p748e/shortest_completing_word.py
|
Python
|
mit
| 2,378
| 0.009672
|
from typing import List
from collections import defaultdict, Counter
class Solution:
def shortestCompletingWordV1(self, licensePlate: str, words: List[str]) -> str:
# build the signature of licensePlate
sig = defaultdict(int)
for c in licensePlate.upper():
if c.isalpha():
sig[c] += 1
# search for the min length word matching the signature
ans = ''
for word in words:
wsig = sig.copy()
for c in word:
cu = c.upper()
if cu not in wsig:
continue
wsig[cu] -= 1
if wsig[cu] == 0:
del wsig[cu]
if len(wsig) == 0 and (len(word) < len(ans) or ans == ''):
ans = word
break
return ans
def shortestCompletingWordV2(self, licensePlate: str, words: List[str]) -> str:
"""
        In the first line, just fi
|
lter out all non-letter characters from the plate and lower-case what remains.
        In the second line, build a Counter for each word and use the Counter intersection operator & to get the count of letters shared between the word and the plate.
        If that intersection equals the plate's Counter, the word contains every required letter; among those words, take the shortest one.
        This is slower than V1, though.
"""
pc = Counter(filter(lambda x : x.isalpha(), li
|
censePlate.lower()))
return min([w for w in words if Counter(w) & pc == pc], key=len)
# TESTS
tests = [
{
'licensePlate': "1s3 PSt",
'words': ["step", "steps", "stripe", "stepple"],
'expected': "steps"
},
{
'licensePlate': "1s3 456",
'words': ["looks", "pest", "stew", "show"],
'expected': "pest"
},
{
'licensePlate': "AN87005",
'words': ["participant","individual","start","exist","above","already","easy","attack","player","important"],
'expected': "important"
}
]
for t in tests:
sol = Solution()
actual = sol.shortestCompletingWordV2(t['licensePlate'], t['words'])
    print('Shortest completing word matching', t['licensePlate'], 'in', t['words'], '->', actual)
assert(actual == t['expected'])
assert(t['expected'] == sol.shortestCompletingWordV2(t['licensePlate'], t['words']))
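# --- Editor's illustrative sketch (not part of the original solution) ---
# Standalone demonstration of the Counter-intersection idea behind V2:
# `Counter(word) & plate_counter` keeps the smaller count for every letter, so
# the intersection equals the plate's Counter exactly when the word supplies
# every required letter often enough.
from collections import Counter
plate_counter = Counter("spst")  # letters of the plate "1s3 PSt", lower-cased
print((Counter("steps") & plate_counter) == plate_counter)  # True  -> "steps" completes the plate
print((Counter("step") & plate_counter) == plate_counter)   # False -> only one 's' available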
|
uclouvain/OSIS-Louvain
|
base/migrations/0107_learningunit_learning_container.py
|
Python
|
agpl-3.0
| 577
| 0.001733
|
# -*- coding: utf-8 -*-
# Generated by
|
Django 1.9 on 2017-04-28 15:02
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0106_a
|
uto_20170428_1119'),
]
operations = [
migrations.AddField(
model_name='learningunit',
name='learning_container',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.LearningContainer'),
),
]
|
nathanielvarona/airflow
|
airflow/utils/dag_processing.py
|
Python
|
apache-2.0
| 49,728
| 0.002413
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_DAG_CODE
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
if TYPE_CHECKING:
import pathlib
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""Processes a DAG file. See SchedulerJob.process_file() for more details."""
@abstractmethod
def start(self) -> None:
"""Launch the process to process the file"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""Terminate (and then kill) the process launched to process the file"""
raise NotImplementedError()
@abstractmethod
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
""":return: the PID of the process launched to process the given file"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Optional[Tuple[int, int]]:
"""
        The number of DAGs found and the number of import errors
:return: result of running SchedulerJob.process_file() if available. Otherwise, none
:rtype: Optional[Tuple[int, int]]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self) -> datetime:
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self) -> str:
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def waitable_handle(self):
"""A "waitable" handle that can be passed to ``multiprocessing.connection.wait()``"""
raise NotImplementedError()
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: ([str, List[CallbackRequest], Optional[List[str]], bool]) -> (
AbstractDagFileProcessorProcess
)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
    :type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[
[str, List[CallbackRequest], Optional[List[str]], bool], AbstractDagFileProcessorProcess
],
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
# Pipe for commu
|
nicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool =
|
False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._par
|
ActiveState/code
|
recipes/Python/439094_get_IP_address_associated_network_interface/recipe-439094.py
|
Python
|
mit
| 357
| 0.011204
|
import socket
import fcntl
import struct
def get_ip_address(ifname):
s = s
|
ocket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
>>> get_ip_address('lo')
'127.0
|
.0.1'
>>> get_ip_address('eth0')
'38.113.228.130'
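# --- Editor's illustrative sketch (not part of the original recipe) ---
# On Python 3 the ioctl payload must be bytes, so the interface name needs an
# explicit encode; the rest is unchanged (0x8915 is SIOCGIFADDR, Linux only).
import socket
import fcntl
import struct

def get_ip_address_py3(ifname):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed = fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15].encode('utf-8'))
    )
    return socket.inet_ntoa(packed[20:24])

# get_ip_address_py3('lo') -> '127.0.0.1'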
|
de-vri-es/qtile
|
test/test_fakescreen.py
|
Python
|
mit
| 15,532
| 0.000064
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Sebastien Blot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.manager
import libqtile.config
from libqtile import layout, bar, widget
from libqtile.config import Screen
LEFT_ALT = 'mod1'
WINDOWS = 'mod4'
FONTSIZE = 13
CHAM1 = '8AE234'
CHAM3 = '4E9A06'
GRAPH_KW = dict(line_width=1,
graph_color=CHAM3,
fill_color=CHAM3 + '.3',
border_width=1,
border_color=CHAM3
)
# screens look like this
# 600 300
# |-------------|-----|
# | 480| |580
# | A | B |
# |----------|--| |
# | 400|--|-----|
# | C | |400
# |----------| D |
# 500 |--------|
# 400
#
# Notice there is a hole in the middle
# also D goes down below the others
class FakeScreenConfig(object):
auto_fullscreen = True
main = None
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Max(),
layout.RatioTile(),
layout.Tile(),
]
floating_layout = libqtile.layout.floating.Floating()
keys = []
mouse = []
fake_screens = [
Screen(
bottom=bar.Bar(
[
widget.GroupBox(this_screen_border=CHAM3,
borderwidth=1,
fontsize=FONTSIZE,
padding=1, margin_x=1, margin_y=1),
widget.AGroupBox(),
widget.Prompt(),
widget.Sep(),
widget.WindowName(fontsize=FONTSIZE, margin_x=6),
widget.Sep(),
widget.CPUGraph(**GRAPH_KW),
widget.MemoryGraph(**GRAPH_KW),
widget.SwapGraph(foreground='20C020', **GRAPH_KW),
widget.Sep(),
widget.Systray(),
widget.Sep(),
widget.Clock(format='%H:%M:%S %d.%m.%Y',
fontsize=FONTSIZE, padding=6),
],
24,
background="#555555"
),
left=bar.Gap(16),
right=bar.Gap(20),
x=0, y=0, width=600, height=480
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
bottom=bar.Gap(24),
left=bar.Gap(12),
x=600, y=0, width=300, height=580
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
bottom=bar.Gap(16),
right=bar.Gap(40),
x=0, y=480, width=500, height=400
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
left=bar.Gap(20),
right=bar.Gap(24),
x=500, y=580, width=400, height=400
),
]
screens = fake_screens
xephyr_config = {
"xinerama": False,
"two_screens": False,
"width": 900,
"height": 980
}
fakescreen_config = pytest.mark.parametrize("xephyr, qtile", [(xephyr_config, FakeScreenConfig)], indirect=True)
@fakescreen_config
def test_basic(qtile):
qtile.testWindow("zero")
assert qtile.c.layout.info()["clients"] == ["zero"]
assert qtile.c.screen.info() == {
'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
qtile.c.to_screen(1)
qtile.testWindow("one")
assert qtile.c.layout.info()["clients"] == ["one"]
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
qtile.c.to_screen(2)
qtile.testXeyes()
assert qtile.c.screen.info() == {
'y': 480, 'x': 0, 'index': 2, 'width': 500, 'height': 400}
qtile.c.to_screen(3)
qtile.testXclock()
assert qtile.c.screen.info() == {
'y': 580, 'x': 500, 'index': 3, 'width': 400, 'height': 400}
@fakescreen_config
def test_gaps(qtile):
g = qtile.c.screens()[0]["gaps"]
assert g["bottom"] == (0, 456, 600, 24)
assert g["left"] == (0, 0,
|
16, 456)
assert g["right"] == (580, 0, 20, 456)
g = qtile.c.screens()[1]["gaps"]
assert g["top"] == (600, 0, 300, 30)
assert g["bottom"] == (600, 556, 300, 24)
assert g["left"] == (600, 30, 12, 526)
g = qtile.c.screens()[2]["gaps"]
assert g["to
|
p"] == (0, 480, 500, 30)
assert g["bottom"] == (0, 864, 500, 16)
assert g["right"] == (460, 510, 40, 354)
g = qtile.c.screens()[3]["gaps"]
assert g["top"] == (500, 580, 400, 30)
assert g["left"] == (500, 610, 20, 370)
assert g["right"] == (876, 610, 24, 370)
@fakescreen_config
def test_maximize_with_move_to_screen(qtile):
"""Ensure that maximize respects bars"""
qtile.testXclock()
qtile.c.window.toggle_maximize()
assert qtile.c.window.info()['width'] == 564
assert qtile.c.window.info()['height'] == 456
assert qtile.c.window.info()['x'] == 16
assert qtile.c.window.info()['y'] == 0
assert qtile.c.window.info()['group'] == 'a'
# go to second screen
qtile.c.to_screen(1)
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
assert qtile.c.group.info()['name'] == 'b'
qtile.c.group['a'].toscreen()
assert qtile.c.window.info()['width'] == 288
assert qtile.c.window.info()['height'] == 526
assert qtile.c.window.info()['x'] == 612
assert qtile.c.window.info()['y'] == 30
assert qtile.c.window.info()['group'] == 'a'
@fakescreen_config
def test_float_first_on_second_screen(qtile):
qtile.c.to_screen(1)
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
qtile.testXclock()
# I don't know where y=30, x=12 comes from...
assert qtile.c.window.info()['float_info'] == {
'y': 30, 'x': 12, 'width': 164, 'height': 164
}
qtile.c.window.toggle_floating()
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == 612
assert qtile.c.window.info()['y'] == 30
assert qtile.c.window.info()['group'] == 'b'
assert qtile.c.window.info()['float_info'] == {
'y': 30, 'x': 12, 'width': 164, 'height': 164
}
@fakescreen_config
def test_float_change_screens(qtile):
# add some eyes, and float clock
qtile.testXeyes()
qtile.testXclock()
qtile.c.window.t
|
thof/decapromolist
|
src/get_subcategories.py
|
Python
|
gpl-3.0
| 3,880
| 0.001804
|
# Copyright (C) 2015 https://github.com/thof
#
# This file is part of decapromolist.
#
# decapromolist is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implie
|
d warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import urllib2
from lxml import html
from utils import Utils
class GetSubcategories:
def getCategories2(self):
dataCat = []
headers = {'User-agent': 'Mozilla/5.0'}
req = u
|
rllib2.Request('https://www.decathlon.pl/pl/menu-load-sub-categories?categoryId=394904', None, headers)
req = urllib2.urlopen(req)
content = req.read().decode('UTF-8')
response = html.fromstring(content)
for cat in response.xpath('//a'):
url = cat.attrib['href']
start = url.find('-')+1
subId = url[start:url.find('-', start)]
# subId = cat.attrib['data-secondary-category-id']
subName = cat.text
data = {'subId': int(subId), 'url': Utils.getConfig()['siteURL'] + url, 'subName': subName}
dataCat.append(data)
return dataCat
def getCategories(self):
categories = []
catUrl = []
content = urllib2.urlopen(Utils.getConfig()['siteURL']).read()
response = html.fromstring(content)
for cat in response.xpath('//li/@primarycategoryid'):
if cat not in categories:
categories.append(cat)
for cat in categories:
url = "{}/pl/getSubNavigationMenu?primaryCategoryId={}".format(Utils.getConfig()['siteURL'], cat)
catUrl.append(url)
return catUrl
def getSubcategories(self, catUrl):
dataCat = []
for url in catUrl:
content = urllib2.urlopen(url).read()
jsonData = json.loads(content)
for cat in jsonData['category']['categories']:
for subcat in cat['categories']:
data = {'id': int(cat['id']), 'name': cat['label'], 'subId': int(subcat['id']),
'subName': subcat['label'], 'url': Utils.getConfig()['siteURL'] + subcat['uri']}
dataCat.append(data)
return dataCat
@staticmethod
def getThirdLevelCat(catUrl):
dataCat = []
for url in catUrl:
content = urllib2.urlopen(url).read()
jsonData = json.loads(content)
for cat in jsonData['category']['categories']:
data = {'id': int(jsonData['category']['id']), 'name': jsonData['category']['label'],
'subId': int(cat['id']), 'subName': cat['label']}
if cat['uri'].find(Utils.getConfig()['siteURL']) == -1:
data['url'] = Utils.getConfig()['siteURL'] + cat['uri']
else:
data['url'] = cat['uri']
data['subId'] = int(cat['uri'][cat['uri'].find("C-")+2:cat['uri'].find("-", cat['uri'].find("C-")+2)])
dataCat.append(data)
return dataCat
def saveSubcategories(self, dataCat):
Utils.renameFile(Utils.getConfig()['subcatFile'])
Utils.saveJsonFile(Utils.getConfig()['subcatFile'], dataCat)
if __name__ == "__main__":
proc = GetSubcategories()
# catUrl = proc.getCategories()
# dataCat = proc.getSubcategories(catUrl)
dataCat = proc.getCategories2()
proc.saveSubcategories(dataCat)
print "Done"
|
ikeikeikeike/django-impala-backend
|
impala/introspection.py
|
Python
|
mit
| 310
| 0
|
from django.db.backends import B
|
aseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("SHOW TABLES")
return [row[0] for
|
row in cursor.fetchall()]
|
jhamman/rasmlib
|
rasmlib/analysis/plugins/sample.py
|
Python
|
gpl-3.0
| 865
| 0.001156
|
"""
2x2 plotting analysis plugin for 2 datasets
| ------------- | ------------- |
| contours from | pcolor from |
| both datasets | dataset 1 |
| ------------- | ------------- |
| pcolor diff | pcolor from |
| both datasets | dataset 2 |
| ------------- | ------------- |
colorbar location = bottom
"""
class Samp
|
leException(Exception):
pass
_NDATASETS = 2
_NPANNELS = 4
def run(cases, compares, domain, **kwargs):
"""plugin run function"""
case_names = cases.keys()
compare_names = compares.keys()
dsets = cases.values()+compares.values()
if len(dsets) != _NDATASETS:
raise SampleException('Inco
|
rrect number of datasets provided')
# get_monthly_means(*dsets)
# get_seasonal_means()
# get_annual_means()
# get_full_means()
return
def __plot():
return
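# --- Editor's illustrative sketch (not part of the original plugin) ---
# One way the 2x2 panel layout described in the module docstring could be laid
# out with matplotlib; the data and variable names here are invented, and the
# real plugin would presumably do this inside __plot().
import numpy as np
import matplotlib.pyplot as plt

d1 = np.random.rand(10, 10)
d2 = np.random.rand(10, 10)

fig, axes = plt.subplots(2, 2, figsize=(8, 6))
axes[0, 0].contour(d1)
axes[0, 0].contour(d2, linestyles='dashed')
axes[0, 0].set_title('contours from both datasets')
im = axes[0, 1].pcolormesh(d1)
axes[0, 1].set_title('pcolor from dataset 1')
axes[1, 0].pcolormesh(d1 - d2)
axes[1, 0].set_title('pcolor diff of both datasets')
axes[1, 1].pcolormesh(d2)
axes[1, 1].set_title('pcolor from dataset 2')
fig.colorbar(im, ax=axes.ravel().tolist(), orientation='horizontal')
plt.show()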
|
plum-umd/java-sketch
|
java_sk/rewrite/desugar.py
|
Python
|
mit
| 1,242
| 0.002415
|
# import logging
from ast.visit import visit as v
from ast.node import Node
from ast.body.methoddeclaration import MethodDeclaration
from ast.stmt.minrepeatstmt import MinrepeatStmt
class Desugar(object):
def __init__(self):
self._cur_mtd = None
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
@v.when(Node)
def visit(self, node):
for c in node.childrenNodes: c.accept(self)
@v.when(MethodDeclaration)
def visit(self, node):
self._cur_mtd = node
for c in node.childrenNodes: c.accept(self)
@v.when(MinrepeatStmt)
def visit(self, node):
raise NotImplementedError
# Old impl
# @v.when(Statement)
# def visit(self, node):
# if node.kind == C.S.MINREPEAT:
# b = '\n'.join(map(str, node.b))
# body = u""
# for i in xrange(9): # TODO: parameterize
#
|
body += u"""
# if (??) {{ {} }}
# """.format(b)
# logging.debug(
# "desugaring minrepeat @ {}".format(self._cur_mtd.name))
# return to_statements(self
|
._cur_mtd, body)
# return [node]
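# --- Editor's illustrative sketch (not part of the original visitor) ---
# The "dispatch by node type" pattern used above via the @v.on / @v.when
# decorators is the same idea as the standard library's singledispatchmethod
# (Python 3.8+), shown here with made-up node classes.
from functools import singledispatchmethod

class FakeNode: pass
class FakeMethodDeclaration(FakeNode): pass

class SketchVisitor:
    @singledispatchmethod
    def visit(self, node):
        print("generic node:", type(node).__name__)

    @visit.register
    def _(self, node: FakeMethodDeclaration):
        print("method declaration:", type(node).__name__)

SketchVisitor().visit(FakeMethodDeclaration())  # -> method declaration: FakeMethodDeclaration
SketchVisitor().visit(FakeNode())               # -> generic node: FakeNode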
|
kfdm/gntp
|
test/subscribe.py
|
Python
|
mit
| 347
| 0.020173
|
# -*- coding: utf-8 -*-
# Simple script to test sending UTF8 text with the GrowlNotifier class
import logging
logging.basicConfig(leve
|
l=logging.DEBUG)
|
from gntp.notifier import GrowlNotifier
import platform
growl = GrowlNotifier(notifications=['Testing'],password='password',hostname='ayu')
growl.subscribe(platform.node(),platform.node(),12345)
|
soar-telescope/goodman
|
goodman_pipeline/images/tests/test_goodman_ccd.py
|
Python
|
bsd-3-clause
| 471
| 0
|
from __future__ import absolute_import
from unittest import TestCase, skip
from ..goodman_ccd import get_args, MainApp
class MainApp
|
Test(TestCase):
def setUp(self):
self.main_app = MainApp()
def test___call__(self):
self.assertRaises(SystemExit, self.main_app)
def test___call___show_version(self):
arguments = ['--version']
args = get_args(arguments=arguments)
self.assertRa
|
ises(SystemExit, self.main_app, args)
|
mbiokyle29/geno-browser
|
runserver.py
|
Python
|
mit
| 58
| 0
|
#!flask/bin/python
from gb import app
|
app.run(debug=Tr
|
ue)
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/database/postgresql/postgresql_schema.py
|
Python
|
gpl-3.0
| 10,842
| 0.002583
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema from a remote host
description:
- Add or remove PostgreSQL schema from a remote host.
version_added: "2.3"
options:
name:
description:
- Name of the schema to add or remove.
required: true
database:
description:
- Name of the database to connect to.
default: postgres
login_user:
description:
- The username used to authenticate with.
login_password:
description:
- The password used to authenticate with.
login_host:
description:
- Host running the database.
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
owner:
description:
- Name of the role to set as owner of the schema.
port:
description:
- Database port to connect to.
default: 5432
session_role:
version_added: "2.8"
description: |
Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
state:
description:
- The schema state.
default: present
choices: [ "present", "absent" ]
cascade_drop:
description:
- Drop schema with CASCADE to remove child objects
type: bool
default: false
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
default: prefer
choices: ["disable", "allow", "prefer", "require", "verify-ca", "verify-full"]
version_added: '2.8'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
version_added: '2.8'
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before
using this module.
requirements: [ psycopg2 ]
author:
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
- Thomas O'Donnell (@andytom)
'''
EXAMPLES = '''
# Create a new schema with name "acme"
- postgresql_schema:
name: acme
# Create a new schema "acme" with a user "bob" who will own it
- postgresql_schema:
name: acme
owner: bob
# Drop schema "acme" with cascade
- postgresql_schema:
name: acme
    state: absent
cascade_drop: yes
'''
RETURN = '''
schema:
description: Name of the schema
returned: success, changed
type: str
sample: "acme"
'''
import traceback
PSYCOPG2_IMP_ERR = None
try:
import psycopg2
import psycopg2.extras
except ImportError:
PSYCOPG2_IMP_ERR = traceback.format_exc()
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
query = "ALTER SCHEMA %s OWNER TO %s" % (
pg_quote_identifier(schema, 'schema'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
return True
def get_schema_info(cursor, schema):
query = """
SELECT schema_owner AS owner
FROM information_schema.schemata
WHERE schema_name = %(schema)s
"""
cursor.execute(query, {'schema': schema})
return cursor.fetchone()
def schema_exists(cursor, schema):
query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s"
cursor.execute(query, {'schema': schema})
return cursor.rowcount == 1
def schema_delete(cursor, schema, cascade):
if schema_exists(cursor, schema):
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
if cascade:
query += " CASCADE"
cursor.execute(query)
return True
else:
return False
def schema_create(cursor, schema, owner):
if not schema_exists(cursor, schema):
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
if owner:
query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
query = ' '.join(query_fragments)
cursor.execute(query)
return True
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return set_owner(cursor, schema, owner)
else:
return False
def schema_matches(cursor, schema, owner):
if not schema_exists(cursor, schema):
return False
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return False
else:
return True
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"),
schema=dict(required=True, aliases=['name']),
owner=dict(default=""),
|
database=dict(default="postgres"),
cascade_drop=dic
|
t(type="bool", default=False),
state=dict(default="present", choices=["absent", "present"]),
ssl_mode=dict(default='prefer', choices=[
'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']),
ssl_rootcert=dict(default=None),
session_role=dict(),
),
supports_check_mode=True
)
if not postgresqldb_found:
module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
schema = module.params["schema"]
owner = module.params["owner"]
state = module.params["state"]
sslrootcert = module.params["ssl_rootcert"]
cascade_drop = module.params["cascade_drop"]
session_role = module.params["session_role"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"database": "database",
"ssl_mode": "sslmode",
"ssl_rootcert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"
|
NinjaMSP/crossbar
|
crossbar/controller/test/test_run.py
|
Python
|
agpl-3.0
| 42,995
| 0.000442
|
#####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
import json
import os
import sys
from six import PY3
from twisted.internet.selectreactor import SelectReactor
from twisted.internet.task import LoopingCall
from crossbar.controller import cli
from .test_cli import CLITestBase
# Turn this to `True` to print the stdout/stderr of the Crossbars spawned
DEBUG = False
def make_lc(self, reactor, func):
if DEBUG:
self.stdout_length = 0
self.stderr_length = 0
def _(lc, reactor):
if DEBUG:
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
if self.stdout.getvalue()[self.stdout_length:]:
print(self.stdout.getvalue()[self.stdout_length:],
file=sys.__stdout__)
if self.stderr.getvalue()[self.stderr_length:]:
print(self.stderr.getvalue()[self.stderr_length:],
file=sys.__stderr__)
self.stdout_length = len(stdout)
self.stderr_length = len(stderr)
return func(lc, reactor)
lc = LoopingCall(_)
lc.a = (lc, reactor)
lc.clock = reactor
lc.start(0.1)
return lc
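# --- Editor's illustrative sketch (not part of the original test module) ---
# Minimal version of the primitive make_lc() builds on: a LoopingCall runs a
# callable at a fixed interval on a reactor until something stops it. Wrapped
# in a function so nothing executes at import time; the names are invented.
def _looping_call_demo():
    from twisted.internet.selectreactor import SelectReactor
    from twisted.internet.task import LoopingCall
    demo_reactor = SelectReactor()
    ticks = {"n": 0}

    def tick():
        ticks["n"] += 1
        if ticks["n"] >= 3:
            lc.stop()
            demo_reactor.stop()

    lc = LoopingCall(tick)
    lc.clock = demo_reactor  # drive the loop from this reactor, as make_lc() does
    demo_reactor.callWhenRunning(lc.start, 0.1)
    demo_reactor.run()
    return ticks["n"]  # -> 3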
class ContainerRunningTests(CLITestBase):
def setUp(self):
CLITestBase.setUp(self)
# Set up the configuration directories
self.cbdir = os.path.abspath(self.mktemp())
os.mkdir(self.cbdir)
self.config = os.path.abspath(os.path.join(self.cbdir, "config.json"))
self.code_location = os.path.abspath(self.mktemp())
os.mkdir(self.code_location)
def _start_run(self, config, app, stdout_expected, stderr_expected,
end_on):
with open(self.config, "wb") as f:
f.write(json.dumps(config, ensure_ascii=False).encode('utf8'))
with open(self.code_location + "/myapp.py", "w") as f:
f.write(app)
reactor = SelectReactor()
make_lc(self, reactor, end_on)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
out = self.stdout.getvalue()
err = self.stderr.getvalue()
for i in stdout_expected:
if i not in out:
self.fail(u"Error: '{}' not in:\n{}".format(i, out))
for i in stderr_expected:
if i not in err:
self.fail(u"Error: '{}' not in:\n{}".format(i, err))
def test_start_run(self):
"""
A basic start, that enters the reactor.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
|
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
|
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_run_guest(self):
"""
A basic start of a guest.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
|
openstack/nomad
|
cyborg/api/controllers/base.py
|
Python
|
apache-2.0
| 2,260
| 0
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import
|
datetime
import inspect
import pecan
import wsme
from wsme import types as wtypes
from pecan import rest
class APIBase(wtypes.Base):
created_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is created"""
updated_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is updated"""
def as_dict(self):
"""Render this object as a dict of its fields."""
return dict((k, getattr(self, k))
f
|
or k in self.fields
if hasattr(self, k) and getattr(self, k) != wsme.Unset)
class CyborgController(rest.RestController):
def _handle_patch(self, method, remainder, request=None):
"""Routes ``PATCH`` _custom_actions."""
# route to a patch_all or get if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('patch_all', 'patch')
if controller:
return controller, []
pecan.abort(404)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# route to custom_action
match = self._handle_custom_action(method, remainder, request)
if match:
return match
# finally, check for the regular patch_one/patch requests
controller = self._find_controller('patch_one', 'patch')
if controller:
return controller, remainder
pecan.abort(405)
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/space/chassis/shared_hutt_medium_s02.py
|
Python
|
mit
| 458
| 0.048035
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SE
|
E THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_sche
|
matic/space/chassis/shared_hutt_medium_s02.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
rfguri/vimfiles
|
bundle/ycm/third_party/ycmd/ycmd/completers/javascript/tern_completer.py
|
Python
|
mit
| 22,105
| 0.026646
|
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
from future.utils import iterkeys
from future import standard_library
standard_library.install_aliases()
import logging
import os
import requests
import threading
import traceback
from subprocess import PIPE
from ycmd import utils, responses
from ycmd.completers.completer import Completer
from ycmd.completers.completer_utils import GetFileContents
_logger = logging.getLogger( __name__ )
PATH_TO_TERN_BINARY = os.path.abspath(
os.path.join(
os.path.dirname( __file__ ),
'..',
'..',
'..',
'third_party',
'tern_runtime',
'node_modules',
'tern',
'bin',
'tern' ) )
PATH_TO_NODE = utils.PathToFirstExistingExecutable( [ 'node' ] )
# host name/address on which the tern server should listen
# note: we use 127.0.0.1 rather than localhost because on some platforms
# localhost might not be correctly configured as an alias for the loopback
# address. (ahem: Windows)
SERVER_HOST = '127.0.0.1'
def ShouldEnableTernCompleter():
"""Returns whether or not the tern completer is 'installed'. That is whether
or not the tern submodule has a 'node_modules' directory. This is pretty much
the only way we can know if the user added '--tern-completer' on
install or manually ran 'npm install' in the tern submodule directory."""
if not PATH_TO_NODE:
_logger.warning( 'Not using Tern completer: unable to find node' )
return False
_logger.info( 'Using node binary from: ' + PATH_TO_NODE )
installed = os.path.exists( PATH_TO_TERN_BINARY )
if not installed:
_logger.info( 'Not using Tern completer: not installed at ' +
PATH_TO_TERN_BINARY )
return False
return True
def GlobalConfigExists( tern_config ):
"""Returns whether or not the global config file with the supplied path
exists. This method primarily exists to allow testability and simply returns
whether the supplied file exists."""
return os.path.exists( tern_config )
def FindTernProjectFile( starting_directory ):
  for folder in utils.PathsToAllParentFolders( starting_directory ):
    tern_project = os.path.join( folder, '.tern-project' )
    if os.path.exists( tern_project ):
      return tern_project

  # As described here: http://ternjs.net/doc/manual.html#server a global
  # .tern-config file is also supported for the Tern server. This can provide
  # meaningful defaults (for libs, and possibly also for require paths), so
  # don't warn if we find one. The point is that if the user has a .tern-config
  # set up, then she has deliberately done so and a ycmd warning is unlikely
  # to be anything other than annoying.
  tern_config = os.path.expanduser( '~/.tern-config' )
  if GlobalConfigExists( tern_config ):
    return tern_config

  return None
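# Illustrative sketch only: FindTernProjectFile never parses the project file,
# it just reports the first '.tern-project' found while walking up from the
# starting directory, falling back to a global '~/.tern-config'. A minimal
# '.tern-project' is assumed to be JSON along these lines (the example values
# are hypothetical and not taken from this repo):
#
#   {
#     "libs": [ "browser" ],
#     "loadEagerly": [ "src/**/*.js" ]
#   }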
class TernCompleter( Completer ):
  """Completer for JavaScript using tern.js: http://ternjs.net.

  The protocol is defined here: http://ternjs.net/doc/manual.html#protocol"""

  def __init__( self, user_options ):
    super( TernCompleter, self ).__init__( user_options )

    self._server_keep_logfiles = user_options[ 'server_keep_logfiles' ]

    # Used to ensure that starting/stopping of the server is synchronised
    self._server_state_mutex = threading.RLock()

    self._do_tern_project_check = False

    with self._server_state_mutex:
      self._server_stdout = None
      self._server_stderr = None
      self._Reset()
      self._StartServer()
  def _WarnIfMissingTernProject( self ):
    # The Tern server will operate without a .tern-project file. However, it
    # does not operate optimally, and will likely lead to issues reported that
    # JavaScript completion is not working properly. So we raise a warning if we
    # aren't able to detect some semblance of manual Tern configuration.

    # We do this check after the server has started because the server does
    # have nonzero use without a project file, however limited. We only do this
    # check once, though, because the server can only handle one project at a
    # time. This doesn't catch opening a file which is not part of the project
    # or any of those things, but we can only do so much. We'd like to enhance
    # ycmd to handle this better, but that is a FIXME for now.
    if self._ServerIsRunning() and self._do_tern_project_check:
      self._do_tern_project_check = False

      tern_project = FindTernProjectFile( os.getcwd() )
      if not tern_project:
        _logger.warning( 'No .tern-project file detected: ' + os.getcwd() )
        raise RuntimeError( 'Warning: Unable to detect a .tern-project file '
                            'in the hierarchy before ' + os.getcwd() +
                            ' and no global .tern-config file was found. '
                            'This is required for accurate JavaScript '
                            'completion. Please see the User Guide for '
                            'details.' )
      else:
        _logger.info( 'Detected .tern-project file at: ' + tern_project )
  def _GetServerAddress( self ):
    return 'http://' + SERVER_HOST + ':' + str( self._server_port )
  def ComputeCandidatesInner( self, request_data ):
    query = {
      'type': 'completions',
      'types': True,
      'docs': True,
      'filter': False,
      'caseInsensitive': True,
      'guess': False,
      'sort': False,
      'includeKeywords': False,
      'expandWordForward': False,
      'omitObjectPrototype': False
    }

    completions = self._GetResponse( query,
                                     request_data[ 'start_codepoint' ],
                                     request_data ).get( 'completions', [] )

    def BuildDoc( completion ):
      doc = completion.get( 'type', 'Unknown type' )
      if 'doc' in completion:
        doc = doc + '\n' + completion[ 'doc' ]
      return doc

    return [ responses.BuildCompletionData( completion[ 'name' ],
                                            completion.get( 'type', '?' ),
                                            BuildDoc( completion ) )
             for completion in completions ]
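  # Sketch of the assumed wire format, inferred only from the fields read
  # above rather than from any documentation shipped with this repo: the
  # 'completions' response is expected to look roughly like
  #
  #   { "completions": [ { "name": "forEach",
  #                        "type": "fn(f: fn(elt: ?, i: number))",
  #                        "doc": "Calls f for each element in the array." } ] }
  #
  # Each entry becomes one ycmd candidate: 'name' is the insertion text,
  # 'type' the menu annotation, and BuildDoc concatenates 'type' and 'doc'
  # for the detailed info popup.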
  def OnFileReadyToParse( self, request_data ):
    self._WarnIfMissingTernProject()

    # Keep the Tern server up to date with the file data. We do this by sending
    # an empty request just containing the file data.
    try:
      self._PostRequest( {}, request_data )
    except:
      # The server might not be ready yet or might not be running at all. In
      # any case, just ignore this; we'll hopefully get another parse request
      # soon.
      pass
  def GetSubcommandsMap( self ):
    return {
      'RestartServer':  ( lambda self, request_data, args:
                          self._RestartServer() ),
      'StopServer':     ( lambda self, request_data, args:
                          self._StopServer() ),
      'GoToDefinition': ( lambda self, request_data, args:
                          self._GoToDefinition( request_data ) ),
      'GoTo':           ( lambda self, request_data, args:
                          self._GoToDefinition( request_data ) ),
      'GoToReferences': ( lambda self, request_data, args:
                          self._GoToReferences( request_data ) ),
      'GetType':        ( lambda self, request_data, args:
cvanoort/USDrugUseAnalysis | Report1/Code/afu_use30.py | Python | isc | 2,851 | 0.020694
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from scipy.optimize import curve_fit
def countKey(key,listDataDicts):
    outDict = {}
    for row in listDataDicts:
        try:
            outDict[row[key]] += 1
        except KeyError:
            outDict[row[key]] = 1
    return outDict
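# Minimal usage sketch for countKey (hypothetical rows, not survey data):
#   rows = [{'CIGTRY': '15'}, {'CIGTRY': '15'}, {'CIGTRY': '18'}]
#   countKey('CIGTRY', rows)   # -> {'15': 2, '18': 1}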
def avgUse30Days(key, listDataDicts):
    totalDays = 0
    numberUsers = 0
    for person in listDataDicts:
        if int(person[key]) < 31:
            totalDays += int(person[key])
            numberUsers += 1
    return (1.0*totalDays/numberUsers)
def avgUse30DaysWithZeros(key, listDataDicts):
    totalDays = 0
    numberUsers = 0
    for person in listDataDicts:
        if ( int(person[key]) < 31 ):
            totalDays += int(person[key])
            numberUsers += 1
        elif ( int(person[key]) == 93 ):
            numberUsers += 1
        else:
            pass
    return (1.0*totalDays/numberUsers)
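# The two averages differ only in the denominator: values below 31 are taken as
# actual days of use, and 93 is assumed here to be the survey code for "did not
# use in the past 30 days". avgUse30Days drops those respondents, while
# avgUse30DaysWithZeros keeps them as zero-day users. Hypothetical example:
#   rows = [{'MJDAY30A': '10'}, {'MJDAY30A': '93'}]
#   avgUse30Days('MJDAY30A', rows)          # -> 10.0
#   avgUse30DaysWithZeros('MJDAY30A', rows) # -> 5.0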
def powerLaw(x,a,b):
    return a*(x**(-b))

def expDecay(x,a,b):
    return a*np.exp(b*x)
listDataDicts = []
with open('34933-0001-Data.tsv', 'rb') as tsvFile:
    tsvReader = csv.DictReader(tsvFile, delimiter='\t')
    for row in tsvReader:
        listDataDicts.append(row)
ageFirstUseKeys = ['CIGTRY', 'SNUFTRY', 'CHEWTRY', 'CIGARTRY', 'ALCTRY', 'MJAGE', 'COCAGE', 'HERAGE', 'HALLAGE', 'INHAGE', 'ANALAGE', 'TRANAGE', 'STIMAGE', 'SEDAGE']
useLast30Keys = ['CIG30USE','SNF30USE','CHW30USE','CGR30USE','ALCDAYS','MJDAY30A','COCUS30A','HER30USE','HAL30USE','INHDY30A','PRDAYPMO','TRDAYPMO','STDAYPMO','SVDAYPMO']
xdata = []
ydata = []
for person in listDataDicts:
    for i in range(len(ageFirstUseKeys)):
        if (int(person[ageFirstUseKeys[i]]) < 900) and (int(person[useLast30Keys[i]]) < 31):
            xdata.append(int(person[ageFirstUseKeys[i]]))
            ydata.append(int(person[useLast30Keys[i]]))
slope,intercept,rValue,pValue,stdErr = stats.linregress(xdata,ydata)
print "Drug First Use Age vs Usage Frequency Linear Regression"
print "Slope: %f, Intercept: %f, RSQ-Value: %f, P-Value: %f, Standard Error: %f,\n 95%% Confidence Interval: %f +- %f\n" %(slope,intercept,rValue*rValue,pValue,stdErr, slope, 1.96*stdErr)
'''# Curve fit with a power law
xfit = range(90)
popt1, pcov1 = curve_fit(powerLaw, xdata, ydata)
print "Power Law Curve fit: ",popt1,np.sqrt(np.diag(pcov1)),"\n"
fitLiney1 = np.zeros(len(xfit))
for i in range(len(xfit)):
fitLiney1[i] = powerLaw( xfit[i], popt1[0], popt1[1] )
'''
xdata2 = [ x for x in range(89) ]
ydata2 = [ (x*slope + intercept) for x in range(89) ]
plt.plot(xdata,ydata,'b.',xdata2,ydata2,'r-')
plt.title("Age of First Use vs Usage in the Last 30 Days")
plt.xlabel("Age of First Use")
plt.ylabel("Usage in the Past 30 Days")
plt.legend(["Data","Linear Fit"])
plt.xlim(0,90)
plt.ylim(0,31)
plt.tight_layout()
plt.show()