| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
"""
WSGI config for django_rest_test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "django_rest_test.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_rest_test.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| fqc/django_rest_test | django_rest_test/wsgi.py | Python | mit | 1,449 | 0.00069 |
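The WSGI config above leaves middleware wiring as a commented-out hint. A minimal sketch of such a wrapper, reusing the hypothetical `HelloWorldApplication` name from that comment (it is not an existing module), could look like this:

# Hypothetical sketch, not part of the project above: a trivial WSGI middleware
# that wraps the Django application and delegates every request to it.
from django.core.wsgi import get_wsgi_application

class HelloWorldApplication(object):
    def __init__(self, app):
        self.app = app  # the Django WSGI application being wrapped

    def __call__(self, environ, start_response):
        # A real middleware could inspect or modify ``environ`` here first.
        return self.app(environ, start_response)

application = HelloWorldApplication(get_wsgi_application())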
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import MeasureImageSimilarity
def test_MeasureImageSimilarity_inputs():
input_map = dict(args=dict(argstr='%s',
),
dimension=dict(argstr='--dimensionality %d',
position=1,
),
environ=dict(nohash=True,
usedefault=True,
),
fixed_image=dict(mandatory=True,
),
fixed_image_mask=dict(argstr='%s',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
metric=dict(argstr='%s',
mandatory=True,
),
metric_weight=dict(requires=['metric'],
usedefault=True,
),
moving_image=dict(mandatory=True,
),
moving_image_mask=dict(requires=['fixed_image_mask'],
),
num_threads=dict(nohash=True,
usedefault=True,
),
radius_or_number_of_bins=dict(mandatory=True,
requires=['metric'],
),
sampling_percentage=dict(mandatory=True,
requires=['metric'],
),
sampling_strategy=dict(requires=['metric'],
usedefault=True,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = MeasureImageSimilarity.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_MeasureImageSimilarity_outputs():
output_map = dict(similarity=dict(),
)
outputs = MeasureImageSimilarity.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| mick-d/nipype | nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py | Python | bsd-3-clause | 1,674 | 0.0227 |
import IMP
import IMP.test
import IMP.core
import IMP.atom
class Tests(IMP.test.TestCase):
def test_bonded(self):
"""Check close and destroy Hierarchy """
m = IMP.Model()
mh = IMP.atom.read_pdb(self.get_input_file_name("mini.pdb"), m)
nump = len(m.get_particle_indexes())
mhc = IMP.atom.create_clone(mh)
nnump = len(m.get_particle_indexes())
self.assertEqual(nump * 2, nnump)
IMP.atom.destroy(mhc)
mhc = None
self.assertEqual(nump, len(m.get_particle_indexes()))
IMP.atom.destroy(mh)
mh = None
self.assertEqual(0, len(m.get_particle_indexes()))
def test_destroy_child(self):
"""Destroy of a child should update the parent"""
m = IMP.Model()
mh = IMP.atom.read_pdb(self.get_input_file_name("mini.pdb"), m)
atoms = IMP.atom.get_by_type(mh, IMP.atom.ATOM_TYPE)
self.assertEqual(len(atoms), 68)
IMP.atom.destroy(atoms[0])
# This will fail if the atom is not removed from the parent residue
atoms = IMP.atom.get_by_type(mh, IMP.atom.ATOM_TYPE)
self.assertEqual(len(atoms), 67)
if __name__ == '__main__':
IMP.test.main()
| shanot/imp | modules/atom/test/test_clone.py | Python | gpl-3.0 | 1,208 | 0.000828 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq update machine`."""
import re
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import (Chassis, ChassisSlot, Model, Machine,
Resource, BundleResource, Share, Filesystem)
from aquilon.aqdb.types import CpuType
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.hardware_entity import update_primary_ip
from aquilon.worker.dbwrappers.interface import set_port_group, generate_ip
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.resources import (find_resource,
get_resource_holder)
from aquilon.worker.templates import (PlenaryHostData,
PlenaryServiceInstanceToplevel)
from aquilon.worker.processes import DSDBRunner
_disk_map_re = re.compile(r'^([^/]+)/(?:([^/]+)/)?([^/]+):([^/]+)/(?:([^/]+)/)?([^/]+)$')
def parse_remap_disk(old_vmholder, new_vmholder, remap_disk):
result = {}
if not remap_disk:
return result
maps = remap_disk.split(",")
for map in maps:
res = _disk_map_re.match(map)
if not res:
raise ArgumentError("Invalid disk backend remapping "
"specification: '%s'" % map)
src_type, src_rg, src_name, dst_type, dst_rg, dst_name = res.groups()
src_cls = Resource.polymorphic_subclass(src_type,
"Invalid resource type")
dst_cls = Resource.polymorphic_subclass(dst_type,
"Invalid resource type")
if dst_cls not in (Share, Filesystem):
raise ArgumentError("%s is not a valid virtual disk backend "
"resource type." % dst_type)
src_backend = find_resource(src_cls, old_vmholder, src_rg, src_name)
dst_backend = find_resource(dst_cls, new_vmholder, dst_rg, dst_name)
result[src_backend] = dst_backend
return result
def get_metacluster(holder):
if hasattr(holder, "metacluster"):
return holder.metacluster
# vmhost
if hasattr(holder, "cluster") and holder.cluster:
return holder.cluster.metacluster
else:
        # TODO vlocal still has clusters, so this case is not tested yet.
return None
def update_disk_backing_stores(dbmachine, old_holder, new_holder, remap_disk):
if not old_holder:
old_holder = dbmachine.vm_container.holder.holder_object
if not new_holder:
new_holder = old_holder
disk_mapping = parse_remap_disk(old_holder, new_holder, remap_disk)
for dbdisk in dbmachine.disks:
old_bstore = dbdisk.backing_store
if isinstance(old_bstore.holder, BundleResource):
resourcegroup = old_bstore.holder.resourcegroup.name
else:
resourcegroup = None
if old_bstore in disk_mapping:
new_bstore = disk_mapping[old_bstore]
else:
new_bstore = find_resource(old_bstore.__class__, new_holder,
resourcegroup, old_bstore.name,
error=ArgumentError)
dbdisk.backing_store = new_bstore
def update_interface_bindings(session, logger, dbmachine, autoip):
for dbinterface in dbmachine.interfaces:
old_pg = dbinterface.port_group
if not old_pg:
continue
old_net = old_pg.network
# Suppress the warning about PG mismatch - we'll update the addresses
# later
set_port_group(session, logger, dbinterface, old_pg.name,
check_pg_consistency=False)
logger.info("Updated {0:l} to use {1:l}.".format(dbinterface,
dbinterface.port_group))
new_net = dbinterface.port_group.network
if new_net == old_net or not autoip:
dbinterface.check_pg_consistency(logger=logger)
continue
for addr in dbinterface.assignments:
if addr.network != old_net:
continue
new_ip = generate_ip(session, logger, dbinterface, autoip=True,
network_environment=old_net.network_environment)
for dbdns_rec in addr.dns_records:
dbdns_rec.network = new_net
dbdns_rec.ip = new_ip
old_ip = addr.ip
addr.ip = new_ip
addr.network = new_net
logger.info("Changed {0:l} IP address from {1!s} to {2!s}."
.format(dbinterface, old_ip, new_ip))
dbinterface.check_pg_consistency(logger=logger)
def move_vm(session, logger, dbmachine, resholder, remap_disk,
allow_metacluster_change, autoip, plenaries):
old_holder = dbmachine.vm_container.holder.holder_object
if resholder:
new_holder = resholder.holder_object
else:
new_holder = old_holder
if new_holder != old_holder:
old_mc = get_metacluster(old_holder)
new_mc = get_metacluster(new_holder)
if old_mc != new_mc and not allow_metacluster_change:
raise ArgumentError("Moving VMs between metaclusters is "
"disabled by default. Use the "
"--allow_metacluster_change option to "
"override.")
plenaries.add(old_holder)
plenaries.add(new_holder)
dbmachine.vm_container.holder = resholder
if new_holder != old_holder or remap_disk:
update_disk_backing_stores(dbmachine, old_holder, new_holder, remap_disk)
if new_holder != old_holder or autoip:
update_interface_bindings(session, logger, dbmachine, autoip)
if hasattr(new_holder, 'location_constraint'):
dbmachine.location = new_holder.location_constraint
else:
dbmachine.location = new_holder.hardware_entity.location
class CommandUpdateMachine(BrokerCommand):
requires_plenaries = True
required_parameters = ["machine"]
def render(self, session, logger, plenaries, machine, model, vendor, serial, uuid,
clear_uuid, chassis, slot, clearchassis, multislot, vmhost,
cluster, metacluster, allow_metacluster_change, cpuname,
cpuvendor, cpucount, memory, ip, autoip, uri, remap_disk,
comments, **arguments):
dbmachine = Machine.get_unique(session, machine, compel=True)
oldinfo = DSDBRunner.snapshot_hw(dbmachine)
old_location = dbmachine.location
plenaries.add(dbmachine)
if dbmachine.vm_container:
plenaries.add(dbmachine.vm_container)
if dbmachine.host:
# Using PlenaryHostData directly, to avoid warnings if the host has
# not been configured yet
plenaries.add(dbmachine.host, cls=PlenaryHostData)
if clearchassis:
del dbmachine.chassis_slot[:]
if chassis:
dbchassis = Chassis.get_unique(session, chassis, compel=True)
dbmachine.location = dbchassis.location
if slot is None:
raise ArgumentError("Option --chassis requires --slot "
"information.")
self.adjust_slot(session, logger,
dbmachine, dbchassis, slot, multislot)
elif slot is not None:
dbchassis = None
for dbslot in dbmachine.chassis_slot:
if dbchassis and dbslot.chassis != dbchassis:
raise ArgumentError("Machine in multiple chassis, please "
"use --chassis argument.")
dbchassis = dbslot.chassis
if not dbchassis:
raise ArgumentError("Option --slot requires --chassis "
"information.")
self.adjust_slot(session, logger,
dbmachine, dbchassis, slot, multislot)
dblocation = get_location(session, **arguments)
if dblocation:
loc_clear_chassis = False
for dbslot in dbmachine.chassis_slot:
dbcl = dbslot.chassis.location
if dbcl != dblocation:
if chassis or slot is not None:
raise ArgumentError("{0} conflicts with chassis {1!s} "
"location {2}."
.format(dblocation, dbslot.chassis,
dbcl))
else:
loc_clear_chassis = True
if loc_clear_chassis:
del dbmachine.chassis_slot[:]
dbmachine.location = dblocation
if model:
# If overriding model, should probably overwrite default
# machine specs as well.
dbmodel = Model.get_unique(session, name=model, vendor=vendor,
compel=True)
if not dbmodel.model_type.isMachineType():
raise ArgumentError("The update_machine command cannot update "
"machines of type %s." %
dbmodel.model_type)
# We probably could do this by forcing either cluster or
# location data to be available as appropriate, but really?
# Failing seems reasonable.
if dbmodel.model_type != dbmachine.model.model_type and \
(dbmodel.model_type.isVirtualMachineType() or
dbmachine.model.model_type.isVirtualMachineType()):
raise ArgumentError("Cannot change machine from %s to %s." %
(dbmachine.model.model_type,
dbmodel.model_type))
old_nic_model = dbmachine.model.nic_model
new_nic_model = dbmodel.nic_model
if old_nic_model != new_nic_model:
for iface in dbmachine.interfaces:
if iface.model == old_nic_model:
iface.model = new_nic_model
dbmachine.model = dbmodel
if cpuname or cpuvendor:
dbcpu = Model.get_unique(session, name=cpuname, vendor=cpuvendor,
model_type=CpuType.Cpu, compel=True)
dbmachine.cpu_model = dbcpu
if cpucount is not None:
dbmachine.cpu_quantity = cpucount
if memory is not None:
dbmachine.memory = memory
if serial is not None:
dbmachine.serial_no = serial
if comments is not None:
dbmachine.comments = comments
if uuid:
q = session.query(Machine)
q = q.filter_by(uuid=uuid)
existing = q.first()
if existing:
raise ArgumentError("{0} is already using UUID {1!s}."
.format(existing, uuid))
dbmachine.uuid = uuid
elif clear_uuid:
dbmachine.uuid = None
if uri and not dbmachine.model.model_type.isVirtualMachineType():
raise ArgumentError("URI can be specified only for virtual "
"machines and the model's type is %s" %
dbmachine.model.model_type)
if uri is not None:
dbmachine.uri = uri
# FIXME: For now, if a machine has its interface(s) in a portgroup
# this command will need to be followed by an update_interface to
# re-evaluate the portgroup for overflow.
# It would be better to have --pg and --autopg options to let it
# happen at this point.
if cluster or vmhost or metacluster:
if not dbmachine.vm_container:
raise ArgumentError("Cannot convert a physical machine to "
"virtual.")
resholder = get_resource_holder(session, logger, hostname=vmhost,
cluster=cluster,
metacluster=metacluster,
compel=False)
move_vm(session, logger, dbmachine, resholder, remap_disk,
allow_metacluster_change, autoip, plenaries)
elif remap_disk:
update_disk_backing_stores(dbmachine, None, None, remap_disk)
if ip:
if dbmachine.host:
for srv in dbmachine.host.services_provided:
si = srv.service_instance
plenaries.add(si, cls=PlenaryServiceInstanceToplevel)
update_primary_ip(session, logger, dbmachine, ip)
if dbmachine.location != old_location and dbmachine.host:
for vm in dbmachine.host.virtual_machines:
plenaries.add(vm)
vm.location = dbmachine.location
session.flush()
# Check if the changed parameters still meet cluster capacity
        # requirements
if dbmachine.cluster:
dbmachine.cluster.validate()
if allow_metacluster_change and dbmachine.cluster.metacluster:
dbmachine.cluster.metacluster.validate()
if dbmachine.host and dbmachine.host.cluster:
dbmachine.host.cluster.validate()
for dbinterface in dbmachine.interfaces:
dbinterface.check_pg_consistency(logger=logger)
# The check to make sure a plenary file is not written out for
# dummy aurora hardware is within the call to write(). This way
# it is consistent without altering (and forgetting to alter)
# all the calls to the method.
with plenaries.transaction():
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.update_host(dbmachine, oldinfo)
dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
return
def adjust_slot(self, session, logger,
dbmachine, dbchassis, slot, multislot):
for dbslot in dbmachine.chassis_slot:
# This update is a noop, ignore.
# Technically, this could be a request to trim the list down
# to just this one slot - in that case --clearchassis will be
# required.
if dbslot.chassis == dbchassis and dbslot.slot_number == slot:
return
if len(dbmachine.chassis_slot) > 1 and not multislot:
raise ArgumentError("Use --multislot to support a machine in more "
"than one slot, or --clearchassis to remove "
"current chassis slot information.")
if not multislot:
slots = ", ".join(str(dbslot.slot_number) for dbslot in
dbmachine.chassis_slot)
logger.info("Clearing {0:l} out of {1:l} slot(s) "
"{2}".format(dbmachine, dbchassis, slots))
del dbmachine.chassis_slot[:]
q = session.query(ChassisSlot)
q = q.filter_by(chassis=dbchassis, slot_number=slot)
dbslot = q.first()
if dbslot:
if dbslot.machine:
raise ArgumentError("{0} slot {1} already has machine "
"{2}.".format(dbchassis, slot,
dbslot.machine.label))
else:
dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
dbmachine.chassis_slot.append(dbslot)
return
| guillaume-philippon/aquilon | lib/aquilon/worker/commands/update_machine.py | Python | apache-2.0 | 16,386 | 0.000305 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import array
import math
import os
import random
import sys
import subprocess
def create_graph(nodes, edges, verbose):
if verbose: print('Creating random graph with {} nodes and {} edges...'.format(nodes, edges))
n1 = [ random.randint(0, nodes - 1) for x in xrange(edges) ]
n2 = [ random.randint(0, nodes - 1) for x in xrange(edges) ]
length = [ random.expovariate(1.0) for x in xrange(edges) ]
return { 'nodes': nodes,
'edges': edges,
'n1': n1,
'n2': n2,
'length': length }
def compute_subgraphs(n, p):
return [(x*(n/p) + min(x, n%p), ((x+1)*(n/p)-1) + min(x + 1, n%p)) for x in xrange(0, p)]
def find_subgraph_index(n, subgraphs):
s = [i for i, (start, end) in zip(xrange(len(subgraphs)), subgraphs) if start <= n and n <= end]
assert len(s) == 1
return s[0]
def find_subgraph(n, subgraphs):
return subgraphs[find_subgraph_index(n, subgraphs)]
def create_clustered_DAG_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
if verbose: print('Creating clustered DAG graph with {} nodes and {} edges...'.format(nodes, edges))
subgraphs = compute_subgraphs(nodes, nsubgraphs)
def make_edge():
n1 = random.randint(0, nodes - 1)
if random.randint(1, 100) <= cluster_factor:
s = find_subgraph(n1, subgraphs)
n2 = random.randint(*s)
else:
n2 = random.randint(min(n1, nodes-1), nodes-1)
return (n1, n2)
n1, n2 = zip(*(make_edge() for x in xrange(edges)))
length = [random.expovariate(1.0) for x in xrange(edges)]
return { 'nodes': nodes,
'edges': edges,
'n1': n1,
'n2': n2,
'length': length }
def create_clustered_geometric_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
if verbose: print('Creating clustered geometric graph with {} nodes and {} edges...'.format(nodes, edges))
blocks = int(math.sqrt(nsubgraphs))
assert blocks**2 == nsubgraphs
bounds = [((1.0*(i%blocks)/blocks, 1.0*(i%blocks + 1)/blocks),
(1.0*(i/blocks)/blocks, 1.0*(i/blocks + 1)/blocks))
for i in xrange(nsubgraphs)]
subgraphs = compute_subgraphs(nodes, nsubgraphs)
pos = [(random.uniform(*x), random.uniform(*y))
for (lo, hi), (x, y) in zip(subgraphs, bounds)
for _ in xrange(lo, hi+1)]
def make_edge():
n1 = random.randint(0, nodes - 1)
if random.randint(1, 100) <= cluster_factor:
s = find_subgraph(n1, subgraphs)
n2 = random.randint(*s)
else:
i = find_subgraph_index(n1, subgraphs)
ix, iy = i%blocks, i/blocks
if random.randint(0, 1) == 0:
s2 = subgraphs[((ix+1)%blocks) + iy*blocks]
else:
s2 = subgraphs[ix + ((iy+1)%blocks)*blocks]
n2 = random.randint(*s2)
return (n1, n2)
n1, n2 = zip(*(make_edge() for x in xrange(edges)))
length = [xlen + random.expovariate(1000/xlen if xlen > 0.0001 else 1)
for x in xrange(edges)
for xlen in [math.sqrt(sum((a - b)**2 for a, b in zip(pos[n1[x]], pos[n2[x]])))]]
return { 'nodes': nodes,
'edges': edges,
'n1': n1,
'n2': n2,
'length': length }
def metis_graph(g, metis, subgraphs, outdir, verbose):
if verbose: print('Running METIS...')
with open(os.path.join(outdir, 'graph.metis'), 'wb') as f:
f.write('{:3d} {:3d} 000\n'.format(g['nodes'], g['edges']))
for n in xrange(g['nodes']):
f.write(' '.join('{:3d} 1'.format(n2+1) for n1, n2 in zip(g['n1'], g['n2']) if n1 == n))
f.write('\n')
subprocess.check_call([metis, os.path.join(outdir, 'graph.metis'), str(subgraphs)])
with open(os.path.join(outdir, 'graph.metis.part.{}'.format(subgraphs)), 'rb') as f:
colors = [int(x) for x in f.read().split()]
mapping = dict(zip(sorted(xrange(g['nodes']), key = lambda x: colors[x]), range(g['nodes'])))
g['n1'] = [mapping[g['n1'][x]] for x in xrange(g['edges'])]
g['n2'] = [mapping[g['n2'][x]] for x in xrange(g['edges'])]
def sort_graph(g, verbose):
if verbose: print('Sorting graph...')
mapping = dict(zip(sorted(xrange(g['edges']), key = lambda x: (g['n1'][x], g['n2'][x])), range(g['edges'])))
g['n1'] = [g['n1'][mapping[x]] for x in xrange(g['edges'])]
g['n2'] = [g['n2'][mapping[x]] for x in xrange(g['edges'])]
g['length'] = [g['length'][mapping[x]] for x in xrange(g['edges'])]
def solve_graph(g, source, verbose):
if verbose: print('Solving graph...')
parent = [ -1 for x in xrange(g['nodes']) ]
dist = [ 1e100 for x in xrange(g['nodes']) ]
dist[source] = 0
while True:
count = 0
for n1, n2, length in zip(g['n1'], g['n2'], g['length']):
c2 = length + dist[n1]
if c2 < dist[n2]:
dist[n2] = c2
parent[n2] = n1
count += 1
#print 'count = {:d}'.format(count)
if count == 0:
break
# if verbose:
# for i, e in enumerate(zip(g['n1'], g['n2'], g['length'])):
# print('{:3d} {:3d} {:3d} {:5.3f}'.format(i, e[0], e[1], e[2]))
# for i, n in enumerate(zip(parent, dist)):
# print('{:3d} {:3d} {:5.3f}'.format(i, n[0], n[1]))
return dist
def write_graph(g, problems, outdir, verbose):
if verbose: print('Writing graph...')
with open(os.path.join(outdir, 'edges.dat'), 'wb') as f:
array.array('i', g['n1']).tofile(f)
array.array('i', g['n2']).tofile(f)
array.array('f', g['length']).tofile(f)
with open(os.path.join(outdir, 'graph.dot'), 'wb') as f:
f.write('digraph {\n')
f.write('\n'.join('{} -> {} [ style = "{}"]'.format(e1, e2, 'dotted' if e2 <= e1 else 'solid') for e1, e2 in zip(g['n1'], g['n2'])))
f.write('\n}\n')
with open(os.path.join(outdir, 'graph.txt'), 'w') as f:
f.write('nodes {:d}\n'.format(g['nodes']))
f.write('edges {:d}\n'.format(g['edges']))
f.write('data edges.dat\n')
sources = random.sample(xrange(g['nodes']), problems)
for s in sources:
parents = solve_graph(g, s, verbose)
with open(os.path.join(outdir, 'result_{:d}.dat'.format(s)), 'wb') as f2:
array.array('f', parents).tofile(f2)
f.write('source {:d} result_{:d}.dat\n'.format(s, s))
if __name__ == '__main__':
p = argparse.ArgumentParser(description='graph generator')
p.add_argument('--nodes', '-n', type=int, default=10)
p.add_argument('--edges', '-e', type=int, default=20)
p.add_argument('--type', '-t', default='random', choices=['random', 'clustered_DAG', 'clustered_geometric'])
p.add_argument('--subgraphs', '-s', type=int, default=1)
p.add_argument('--cluster-factor', '-c', type=int, default=95)
p.add_argument('--problems', '-p', type=int, default=1)
p.add_argument('--randseed', '-r', type=int, default=12345)
p.add_argument('--metis-path', default='./metis-install/bin/gpmetis')
p.add_argument('--metis', '-m', action='store_true')
p.add_argument('--outdir', '-o', required=True)
p.add_argument('--verbose', '-v', action='store_true')
args = p.parse_args()
random.seed(args.randseed)
if args.type == 'random':
G = create_graph(args.nodes, args.edges, args.verbose)
elif args.type == 'clustered_DAG':
G = create_clustered_DAG_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
elif args.type == 'clustered_geometric':
G = create_clustered_geometric_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
else:
        assert False
try:
os.mkdir(args.outdir)
except:
pass
assert os.path.isdir(args.outdir)
if args.metis:
assert os.path.isfile(args.metis_path)
metis_graph(G, args.metis_path, args.subgraphs, args.outdir, args.verbose)
sort_graph(G, args.verbose)
write_graph(G, args.problems, args.outdir, args.verbose)
| chuckatkins/legion | language/examples/mssp/gen_graph.py | Python | apache-2.0 | 8,232 | 0.008017 |
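The `solve_graph` routine in the sample above is a Bellman-Ford-style relaxation: it sweeps the edge lists until no distance improves. A self-contained toy run of the same loop, on a hand-made graph in the script's `{'n1', 'n2', 'length'}` layout (the values below are invented purely for illustration):

# Illustrative sketch only, not from the repo: edges 0->1 (1.0), 1->2 (2.0), 0->2 (5.0).
g = {'nodes': 3, 'edges': 3, 'n1': [0, 1, 0], 'n2': [1, 2, 2], 'length': [1.0, 2.0, 5.0]}
dist = [1e100] * g['nodes']
dist[0] = 0  # source node
while True:
    updates = 0
    for n1, n2, length in zip(g['n1'], g['n2'], g['length']):
        if dist[n1] + length < dist[n2]:  # relax the edge n1 -> n2
            dist[n2] = dist[n1] + length
            updates += 1
    if updates == 0:  # a full sweep with no improvement: distances are final
        break
print(dist)  # [0, 1.0, 3.0]: the 0->1->2 path beats the direct 0->2 edge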
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_template_service = client.GetService(
'CreativeTemplateService', version='v201502')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative templates by statement.
while True:
response = creative_template_service.getCreativeTemplatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for template in response['results']:
print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
'was found.' % (template['id'],
template['name'],
template['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| wubr2000/googleads-python-lib | examples/dfp/v201502/creative_template_service/get_all_creative_templates.py | Python | apache-2.0 | 1,984 | 0.008569 |
#!/usr/bin/python
"""
This example demonstrates several features of PyLaTeX.
It includes plain equations, tables, equations using numpy objects, tikz plots,
and figures.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
# begin-doc-include
import numpy as np
from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, \
Plot, Figure, Package, Matrix
from pylatex.utils import italic
import os
if __name__ == '__main__':
image_filename = os.path.join(os.path.dirname(__file__), 'kitten.jpg')
doc = Document()
doc.packages.append(Package('geometry', options=['tmargin=1cm',
'lmargin=10cm']))
with doc.create(Section('The simple stuff')):
doc.append('Some regular text and some')
doc.append(italic('italic text. '))
doc.append('\nAlso some crazy characters: $&#{}')
with doc.create(Subsection('Math that is incorrect')):
doc.append(Math(data=['2*3', '=', 9]))
with doc.create(Subsection('Table of something')):
with doc.create(Tabular('rc|cl')) as table:
table.add_hline()
table.add_row((1, 2, 3, 4))
table.add_hline(1, 2)
table.add_empty_row()
table.add_row((4, 5, 6, 7))
a = np.array([[100, 10, 20]]).T
M = np.matrix([[2, 3, 4],
[0, 0, 1],
[0, 0, 2]])
with doc.create(Section('The fancy stuff')):
with doc.create(Subsection('Correct matrix equations')):
doc.append(Math(data=[Matrix(M), Matrix(a), '=', Matrix(M * a)]))
with doc.create(Subsection('Beautiful graphs')):
with doc.create(TikZ()):
plot_options = 'height=6cm, width=6cm, grid=major'
with doc.create(Axis(options=plot_options)) as plot:
plot.append(Plot(name='model', func='-x^5 - 242'))
coordinates = [
(-4.77778, 2027.60977),
(-3.55556, 347.84069),
(-2.33333, 22.58953),
(-1.11111, -493.50066),
(0.11111, 46.66082),
(1.33333, -205.56286),
(2.55556, -341.40638),
(3.77778, -1169.24780),
(5.00000, -3269.56775),
]
plot.append(Plot(name='estimate', coordinates=coordinates))
with doc.create(Subsection('Cute kitten pictures')):
with doc.create(Figure(position='h!')) as kitten_pic:
kitten_pic.add_image(image_filename, width='120px')
kitten_pic.add_caption('Look it\'s on its back')
doc.generate_pdf('full')
| bjodah/PyLaTeX | examples/full.py | Python | mit | 2,838 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.form_processor.models import CaseTransaction
from corehq.sql_db.operations import RawSQLMigration, HqRunSQL
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {
'TRANSACTION_TYPE_FORM': CaseTransaction.TYPE_FORM
})
class Migration(migrations.Migration):
dependencies = [
('sql_accessors', '0024_update_save_ledger_values'),
]
operations = [
HqRunSQL(
"DROP FUNCTION IF EXISTS get_ledger_values_for_cases(TEXT[])",
"SELECT 1"
),
migrator.get_migration('get_ledger_values_for_cases.sql'),
]
| qedsoftware/commcare-hq | corehq/sql_accessors/migrations/0025_update_get_ledger_values_for_cases.py | Python | bsd-3-clause | 703 | 0 |
#!/usr/bin/python
import os
import psycopg2
import sys
import django_content_type_mapping
file = open("/home/" + os.getlogin() + "/.pgpass", "r")
pgpasses = []
for line in file:
pgpasses.append(line.rstrip("\n").split(":"))
file.close()
for pgpass in pgpasses:
#print str(pgpass)
if pgpass[0] == "54.236.235.110" and pgpass[3] == "geonode":
src_pgpass = pgpass
if pgpass[0] == "54.197.226.56" and pgpass[3] == "geonode":
dst_pgpass = pgpass
src = psycopg2.connect(host=src_pgpass[0], database="geonode2", user=src_pgpass[3], password=src_pgpass[4])
dst = psycopg2.connect(host=dst_pgpass[0], database="geonode", user=dst_pgpass[3], password=dst_pgpass[4])
src_cur = src.cursor()
dst_cur = dst.cursor()
src_cur.execute("select resourcebase_ptr_id, content_type_id, object_id, doc_file, extension, popular_count, share_count from documents_document")
for src_row in src_cur:
assignments = []
#resourcebase_ptr_id
assignments.append(src_row[0])
#title_en
assignments.append(None)
#abstract_en
assignments.append(None)
#purpose_en
assignments.append(None)
#constraints_other_en
assignments.append(None)
#supplemental_information_en
assignments.append(None)
#distribution_description_en
assignments.append(None)
#data_quality_statement_en
assignments.append(None)
#content_type_id
assignments.append(django_content_type_mapping.get_django_content_type_id(src_row[1]))
#object_id
assignments.append(src_row[2])
#doc_file
assignments.append(src_row[3])
#extension
assignments.append(src_row[4])
#doc_type
assignments.append(None)
#doc_url
assignments.append(None)
try:
dst_cur.execute("insert into documents_document(resourcebase_ptr_id, title_en, abstract_en, purpose_en, constraints_other_en, supplemental_information_en, distribution_description_en, data_quality_statement_en, content_type_id, object_id, doc_file, extension, doc_type, doc_url) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", assignments)
dst.commit()
except Exception as error:
print
print type(error)
print str(error) + "select resourcebase_ptr_id, content_type_id, object_id, doc_file, extension, popular_count, share_count from documents_document"
print str(src_row)
dst.rollback()
dst.commit()
src_cur.close()
dst_cur.close()
src.close()
dst.close()
| DOE-NEPA/geonode_2.0_to_2.4_migration | migrate_documents_document_modified.py | Python | gpl-2.0 | 2,363 | 0.025815 |
#coding=utf-8
# Copyright (C) 2014 by Víctor Romero Blanco <info at playcircular dot com>.
# http://playcircular.com/
# It's licensed under the AFFERO GENERAL PUBLIC LICENSE unless stated otherwise.
# You can get copies of the licenses here: http://www.affero.org/oagpl.html
# AFFERO GENERAL PUBLIC LICENSE is also included in the file called "LICENSE".
from django.contrib import admin
from django.conf import settings
from configuracion.models import *
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseNotAllowed
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from grupos.models import *
from grupos.forms import *
from actividades.models import *
from usuarios.models import *
from django.core import serializers
from django.db.models import Q
###################################################################################################
@login_required
def recarga_actividad(request):
if request.is_ajax() and request.POST:
seleccionados = request.POST.get('seleccionados')
str_grupos = seleccionados.split(',')
id_grupos = []
for item in str_grupos:
numero = int(item)
id_grupos.append(numero)
if len(id_grupos) > 0:
n_grupos_administrados = Miembro.objects.filter(usuario=request.user,activo=True,nivel=u'Administrador').count()
try:
categorias = Idiomas_categoria.objects.filter((Q(categoria__grupo__in=id_grupos) | Q(categoria__superadmin=True)) & Q(idioma=request.LANGUAGE_CODE))
except Idiomas_categoria.DoesNotExist:
categorias = Idiomas_categoria.objects.filter(Q(categoria__grupo__in=id_grupos) | Q(categoria__superadmin=True)).order_by('-idioma_default')
if request.user.is_superuser or n_grupos_administrados > 0:
usuarios_qs = Miembro.objects.filter(grupo__in=id_grupos,activo=True).values_list('usuario', flat=True)
if request.user.is_superuser:
                    # The superadmin can publish without belonging to any group, so the group admins cannot control it
usuarios_qs = list(usuarios_qs) + [request.user.pk]
usuarios = User.objects.filter(pk__in=usuarios_qs).distinct()
else:
usuarios = User.objects.filter(pk=request.user.pk)
datos = list(usuarios) + list(categorias)
else:
datos = []
else:
datos = []
        # the years are returned in JSON format; we only need to obtain them as JSON
data = serializers.serialize("json", datos, fields=('pk','username','nombre','categoria'))
return HttpResponse(data, mimetype="application/javascript")
###################################################################################################
| PlayCircular/play_circular | apps/actividades/admin_views.py | Python | agpl-3.0 | 2,927 | 0.02188 |
"""Test the IPython.kernel public API
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.kernel import launcher, connect
from IPython import kernel
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_kms():
for base in ("", "Multi"):
KM = base + "KernelManager"
yield nt.assert_true(KM in dir(kernel), KM)
@dec.parametric
def test_kcs():
for base in ("", "Blocking"):
KM = base + "KernelClient"
yield nt.assert_true(KM in dir(kernel), KM)
@dec.parametric
def test_launcher():
for name in launcher.__all__:
yield nt.assert_true(name in dir(kernel), name)
@dec.parametric
def test_connect():
for name in connect.__all__:
yield nt.assert_true(name in dir(kernel), name)
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/kernel/tests/test_public_api.py | Python | apache-2.0 | 1,308 | 0.006881 |
"""
"""
from traceback import format_exc as debug
from vyapp.stdout import Stdout
from vyapp.tools import exec_quiet, set_status_msg
from vyapp.ask import *
import sys
def redirect_stdout(area):
try:
sys.stdout.remove(area)
except ValueError:
pass
sys.stdout.append(Stdout(area))
set_status_msg('Output redirected to %s' % area.index('insert'))
def install(area):
area.install(('NORMAL', '<Control-W>', lambda event: event.widget.tag_delete_ranges(Stdout.TAG_CODE)),
('NORMAL', '<Control-Tab>', lambda event: sys.stdout.restore()),
('NORMAL', '<Key-W>', lambda event: event.widget.tag_delete(Stdout.TAG_CODE)),
('NORMAL', '<Control-w>', lambda event: exec_quiet(sys.stdout.remove, event.widget)),
('NORMAL', '<Tab>', lambda event: redirect_stdout(event.widget)))
| miku/vy | vyapp/plugins/box.py | Python | mit | 875 | 0.009143 |
import os
import sys
import codecs
from contextlib import contextmanager
from itertools import repeat
from functools import update_wrapper
from .types import convert_type, IntRange, BOOL
from .utils import make_str, make_default_short_help, echo
from .exceptions import ClickException, UsageError, BadParameter, Abort, \
MissingParameter
from .termui import prompt, confirm
from .formatting import HelpFormatter, join_options
from .parser import OptionParser, split_opt
from ._compat import PY2, isidentifier, iteritems
_missing = object()
SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
sys.exit(1)
def batch(iterable, batch_size):
return list(zip(*repeat(iter(iterable), batch_size)))
def invoke_param_callback(callback, ctx, param, value):
code = getattr(callback, '__code__', None)
args = getattr(code, 'co_argcount', 3)
if args < 3:
# This will become a warning in Click 3.0:
from warnings import warn
warn(Warning('Invoked legacy parameter callback "%s". The new '
'signature for such callbacks starting with '
'click 2.0 is (ctx, param, value).'
% callback), stacklevel=3)
return callback(ctx, value)
return callback(ctx, param, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
"""Context manager that attaches extra information to exceptions that
fly.
"""
try:
yield
except BadParameter as e:
if e.ctx is None:
e.ctx = ctx
if param is not None and e.param is None:
e.param = param
raise
except UsageError as e:
if e.ctx is None:
e.ctx = ctx
raise
def iter_params_for_processing(invocation_order, declaration_order):
"""Given a sequence of parameters in the order as should be considered
for processing and an iterable of parameters that exist, this returns
a list in the correct order as they should be processed.
"""
def sort_key(item):
try:
idx = invocation_order.index(item)
except ValueError:
idx = float('inf')
return (not item.is_eager, idx)
return sorted(declaration_order, key=sort_key)
class Context(object):
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
the name of the script.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
"""
def __init__(self, command, parent=None, info_name=None, obj=None,
auto_envvar_prefix=None, default_map=None,
terminal_width=None, max_content_width=None,
resilient_parsing=False, allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None, help_option_names=None,
token_normalize_func=None, color=None):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
#: A dictionary (-like object) with defaults for parameters.
if default_map is None \
and parent is not None \
and parent.default_map is not None:
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
        #: strongly discouraged because it's not possible to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ['--help']
#: The names for the help options.
self.help_option_names = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures.
self.resilient_parsing = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if parent is not None \
and parent.auto_envvar_prefix is not None and \
self.info_name is not None:
auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
self.info_name.upper())
else:
self.auto_envvar_prefix = auto_envvar_prefix.upper()
self.auto_envvar_prefix = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color = color
self._close_callbacks = []
self._depth = 0
def __enter__(self):
self._depth += 1
return self
def __exit__(self, exc_type, exc_value, tb):
self._depth -= 1
if self._depth == 0:
self.close()
def make_formatter(self):
"""Creates the formatter for the help and usage output."""
return HelpFormatter(width=self.terminal_width,
max_width=self.max_content_width)
def call_on_close(self, f):
"""This decorator remembers a function as callback that should be
executed when the context tears down. This is most useful to bind
resource handling to the script execution. For instance, file objects
opened by the :class:`File` type will register their close callbacks
here.
:param f: the function to execute on teardown.
"""
self._close_callbacks.append(f)
return f
def close(self):
"""Invokes all close callbacks."""
for cb in self._close_callbacks:
cb()
self._close_callbacks = []
@property
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ''
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = self.parent.command_path + ' ' + rv
return rv.lstrip()
def find_root(self):
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
def ensure_object(self, object_type):
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv
def fail(self, message):
"""Aborts the execution of the program with a specific error
message.
:param message: the error message to fail with.
"""
raise UsageError(message, self)
def abort(self):
"""Aborts the script."""
raise Abort()
def exit(self, code=0):
"""Exits the application with a given exit code."""
sys.exit(code)
def get_usage(self):
"""Helper method to get formatted usage string for the current
context and command.
"""
return self.command.get_usage(self)
def get_help(self):
"""Helper method to get formatted help page for the current
context and command.
"""
return self.command.get_help(self)
def invoke(*args, **kwargs):
"""Invokes a command callback in exactly the way it expects. There
are two ways to invoke this method:
1. the first argument can be a callback and all other arguments and
keyword arguments are forwarded directly to the function.
2. the first argument is a click command object. In that case all
arguments are forwarded as well but proper click parameters
(options and click arguments) must be keyword arguments and Click
will fill in defaults.
Note that before Click 3.2 keyword arguments were not properly filled
in against the intention of this code and no context was created. For
more information about this change and why it was done in a bugfix
release see :ref:`upgrade-to-3.2`.
"""
self, callback = args[:2]
ctx = self
# It's also possible to invoke another command which might or
# might not have a callback. In that case we also fill
# in defaults and make a new context for this command.
if isinstance(callback, Command):
other_cmd = callback
callback = other_cmd.callback
ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
if callback is None:
raise TypeError('The given command does not have a '
'callback that can be invoked.')
for param in other_cmd.params:
if param.name not in kwargs and param.expose_value:
kwargs[param.name] = param.get_default(ctx)
args = args[2:]
if getattr(callback, '__click_pass_context__', False):
args = (ctx,) + args
with augment_usage_errors(self):
with ctx:
return callback(*args, **kwargs)
def forward(*args, **kwargs):
"""Similar to :meth:`invoke` but fills in default keyword
arguments from the current context if the other command expects
it. This cannot invoke callbacks directly, only other commands.
"""
self, cmd = args[:2]
# It's also possible to invoke another command which might or
# might not have a callback.
if not isinstance(cmd, Command):
raise TypeError('Callback is not a command.')
for param in self.params:
if param not in kwargs:
kwargs[param] = self.params[param]
return self.invoke(cmd, **kwargs)
class BaseCommand(object):
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(self, name, context_settings=None):
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings = context_settings
def get_usage(self, ctx):
raise NotImplementedError('Base commands cannot get usage')
def get_help(self, ctx):
raise NotImplementedError('Base commands cannot get help')
def make_context(self, info_name, args, parent=None, **extra):
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
        :param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
the name of the script.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
"""
for key, value in iteritems(self.context_settings):
if key not in extra:
extra[key] = value
ctx = Context(self, info_name=info_name, parent=parent, **extra)
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx, args):
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError('Base commands do not know how to parse '
'arguments.')
def invoke(self, ctx):
"""Given a context, this invokes the command. The default
implementation is raising a not implemented error.
"""
raise NotImplementedError('Base commands are not invokable by default')
def main(self, args=None, prog_name=None, complete_var=None,
standalone_mode=True, **extra):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# If we are in Python 3, we will verify that the environment is
        # sane at this point or reject further execution to avoid a
# broken script.
if not PY2:
try:
import locale
fs_enc = codecs.lookup(locale.getpreferredencoding()).name
except Exception:
fs_enc = 'ascii'
if fs_enc == 'ascii':
raise RuntimeError('Click will abort further execution '
'because Python 3 was configured to use '
'ASCII as encoding for the environment. '
'Either switch to Python 2 or consult '
'http://click.pocoo.org/python3/ '
'for mitigation steps.')
if args is None:
args = sys.argv[1:]
else:
args = list(args)
if prog_name is None:
prog_name = make_str(os.path.basename(
sys.argv and sys.argv[0] or __file__))
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort()
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except Abort:
if not standalone_mode:
raise
echo('Aborted!', file=sys.stderr)
sys.exit(1)
def __call__(self, *args, **kwargs):
"""Alias for :meth:`main`."""
return self.main(*args, **kwargs)
class Command(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
"""
def __init__(self, name, context_settings=None, callback=None,
params=None, help=None, epilog=None, short_help=None,
options_metavar='[OPTIONS]', add_help_option=True):
BaseCommand.__init__(self, name, context_settings)
#: the callback to execute when the command fires. This might be
#: `None` in which case nothing happens.
self.callback = callback
#: the list of parameters for this command in the order they
#: should show up in the help page and execute. Eager parameters
#: will automatically be handled before non eager ones.
self.params = params or []
self.help = help
self.epilog = epilog
self.options_metavar = options_metavar
if short_help is None and help:
short_help = make_default_short_help(help)
self.short_help = short_help
self.add_help_option = add_help_option
def get_usage(self, ctx):
formatter = ctx.make_formatter()
self.format_usage(ctx, formatter)
return formatter.getvalue().rstrip('\n')
def get_params(self, ctx):
rv = self.params
help_option = self.get_help_option(ctx)
if help_option is not None:
rv = rv + [help_option]
return rv
def format_usage(self, ctx, formatter):
"""Writes the usage line into the formatter."""
pieces = self.collect_usage_pieces(ctx)
formatter.write_usage(ctx.command_path, ' '.join(pieces))
def collect_usage_pieces(self, ctx):
"""Returns all the pieces that go into the usage line and returns
it as a list of strings.
"""
rv = [self.options_metavar]
for param in self.get_params(ctx):
rv.extend(param.get_usage_pieces(ctx))
return rv
def get_help_option_names(self, ctx):
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return all_names
def get_help_option(self, ctx):
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(help_options, is_flag=True,
is_eager=True, expose_value=False,
callback=show_help,
help='Show this message and exit.')
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
parser.allow_interspersed_args = ctx.allow_interspersed_args
parser.ignore_unknown_options = ctx.ignore_unknown_options
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
def get_help(self, ctx):
"""Formats the help into a string and returns it. This creates a
formatter and will call into the following formatting methods:
"""
formatter = ctx.make_formatter()
self.format_help(ctx, formatter)
return formatter.getvalue().rstrip('\n')
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter)
def format_help_text(self, ctx, formatter):
"""Writes the help text to the formatter if it exists."""
if self.help:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.help)
def format_options(self, ctx, formatter):
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section('Options'):
formatter.write_dl(opts)
def format_epilog(self, ctx, formatter):
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.epilog)
def parse_args(self, ctx, args):
parser = self.make_parser(ctx)
opts, args, param_order = parser.parse_args(args=args)
for param in iter_params_for_processing(
param_order, self.get_params(ctx)):
value, args = param.handle_parse_result(ctx, opts, args)
if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
ctx.fail('Got unexpected extra argument%s (%s)'
% (len(args) != 1 and 's' or '',
' '.join(map(make_str, args))))
ctx.args = args
return args
def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.callback is not None:
return ctx.invoke(self.callback, **ctx.params)
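# Hedged illustration (not part of the original source): a Command can be
# assembled programmatically and driven through main(); with
# ``standalone_mode=False`` main() returns the callback's return value
# instead of exiting. The names below are illustrative only.
#
#   def greet(name):
#       return 'Hello %s' % name
#
#   cmd = Command('greet', callback=greet,
#                 params=[Option(['--name'], default='world')])
#   rv = cmd.main(['--name', 'Click'], prog_name='greet',
#                 standalone_mode=False)    # rv == 'Hello Click'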
class MultiCommand(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is enabled by default if
`invoke_without_command` is disabled or disabled
if it's enabled. If enabled this will add
``--help`` as argument if no arguments are
passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: the result callback to attach to this multi
command.
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(self, name=None, invoke_without_command=False,
no_args_is_help=None, subcommand_metavar=None,
chain=False, result_callback=None, **attrs):
Command.__init__(self, name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = SUBCOMMANDS_METAVAR
else:
subcommand_metavar = SUBCOMMAND_METAVAR
self.subcommand_metavar = subcommand_metavar
self.chain = chain
#: The result callback that is stored. This can be set or
#: overridden with the :func:`resultcallback` decorator.
self.result_callback = result_callback
def collect_usage_pieces(self, ctx):
rv = Command.collect_usage_pieces(self, ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx, formatter):
Command.format_options(self, ctx, formatter)
self.format_commands(ctx, formatter)
def resultcallback(self, replace=False):
"""Adds a result callback to the chain command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.resultcallback()
def process_result(result, input):
return result + input
.. versionadded:: 3.0
:param replace: if set to `True` an already existing result
callback will be removed.
"""
def decorator(f):
old_callback = self.result_callback
if old_callback is None or replace:
self.result_callback = f
return f
def function(__value, *args, **kwargs):
return f(old_callback(__value, *args, **kwargs),
*args, **kwargs)
self.result_callback = rv = update_wrapper(function, f)
return rv
return decorator
def format_commands(self, ctx, formatter):
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
rows = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
help = cmd.short_help or ''
rows.append((subcommand, help))
if rows:
with formatter.section('Commands'):
formatter.write_dl(rows)
def parse_args(self, ctx, args):
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Command.parse_args(self, ctx, args)
def invoke(self, ctx):
def _process_result(value):
if self.result_callback is not None:
value = ctx.invoke(self.result_callback, value,
**ctx.params)
return value
if not ctx.args:
# If we are invoked without command the chain flag controls
# how this happens. If we are not in chain mode, the return
# value here is the return value of the command.
# If however we are in chain mode, the return value is the
# return value of the result processor invoked with an empty
# list (which means that no subcommand actually was executed).
if self.invoke_without_command:
if not self.chain:
return Command.invoke(self, ctx)
with ctx:
Command.invoke(self, ctx)
return _process_result([])
ctx.fail('Missing command.')
args = ctx.args
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
ctx.invoked_subcommand = cmd_name
Command.invoke(self, ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = args and '*' or None
Command.invoke(self, ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False)
contexts.append(sub_ctx)
args = sub_ctx.args
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(self, ctx, args):
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail('No such command "%s".' % original_cmd_name)
return cmd_name, cmd, args[1:]
def get_command(self, ctx, cmd_name):
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError()
def list_commands(self, ctx):
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
class Group(MultiCommand):
"""A group allows a command to have subcommands attached. This is the
most common way to implement nesting in Click.
:param commands: a dictionary of commands.
"""
def __init__(self, name=None, commands=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: the registered subcommands by their exported names.
self.commands = commands or {}
def add_command(self, cmd, name=None):
"""Registers another :class:`Command` with this group. If the name
is not provided, the name of the command is used.
"""
name = name or cmd.name
if name is None:
raise TypeError('Command has no name.')
self.commands[name] = cmd
def command(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a command to
the group. This takes the same arguments as :func:`command` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = command(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def group(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a group to
the group. This takes the same arguments as :func:`group` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = group(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def get_command(self, ctx, cmd_name):
return self.commands.get(cmd_name)
def list_commands(self, ctx):
return sorted(self.commands)
class CommandCollection(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
"""
def __init__(self, name=None, sources=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: The list of registered multi commands.
self.sources = sources or []
def add_source(self, multi_cmd):
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx, cmd_name):
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
return rv
def list_commands(self, ctx):
rv = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
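# Hedged usage sketch (assumption: the public `click` package exports
# Group, CommandCollection and the `group` decorator). Merging two groups:
#
#   import click
#
#   @click.group()
#   def tools():
#       pass
#
#   @click.group()
#   def extras():
#       pass
#
#   cli = click.CommandCollection(sources=[tools, extras])
#   # `cli` now lists and dispatches the subcommands of both groups.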
class Parameter(object):
"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
parameter. In Click 2.0, the old callback format will still work,
but it will raise a warning to give you a chance to migrate the
code more easily.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
or a Python type. The latter is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: a callback that should be executed after the parameter
was matched. This is called as ``fn(ctx, param,
value)`` and needs to return the value. Before Click
2.0, the signature was ``(ctx, value)``.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple).
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
:param is_eager: eager values are processed before non eager ones. This
should not be set for arguments or it will inverse the
order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
"""
param_type_name = 'parameter'
def __init__(self, param_decls=None, type=None, required=False,
default=None, callback=None, nargs=None, metavar=None,
expose_value=True, is_eager=False, envvar=None):
self.name, self.opts, self.secondary_opts = \
self._parse_decls(param_decls or (), expose_value)
self.type = convert_type(type, default)
# Default nargs to what the type tells us if we have that
# information available.
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = False
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
@property
def human_readable_name(self):
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name
def make_metavar(self):
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += '...'
return metavar
def get_default(self, ctx):
"""Given a context variable this calculates the default value."""
# Otherwise go with the regular default.
if callable(self.default):
rv = self.default()
else:
rv = self.default
return self.type_cast_value(ctx, rv)
def add_to_parser(self, parser, ctx):
pass
def consume_value(self, ctx, opts):
value = opts.get(self.name)
if value is None:
value = ctx.lookup_default(self.name)
if value is None:
value = self.value_from_envvar(ctx)
return value
def type_cast_value(self, ctx, value):
"""Given a value this runs it properly through the type system.
This automatically handles things like `nargs` and `multiple` as
well as composite types.
"""
if self.type.is_composite:
if self.nargs <= 1:
raise TypeError('Attempted to invoke composite type '
'but nargs has been set to %s. This is '
'not supported; nargs needs to be set to '
'a fixed value > 1.' % self.nargs)
if self.multiple:
return tuple(self.type(x or (), self, ctx) for x in value or ())
return self.type(value or (), self, ctx)
def _convert(value, level):
if level == 0:
return self.type(value, self, ctx)
return tuple(_convert(x, level - 1) for x in value or ())
return _convert(value, (self.nargs != 1) + bool(self.multiple))
def process_value(self, ctx, value):
"""Given a value and context this runs the logic to convert the
value as necessary.
"""
# If the value we were given is None we do nothing. This way
# code that calls this can easily figure out if something was
# not provided. Otherwise it would be converted into an empty
# tuple for multiple invocations which is inconvenient.
if value is not None:
return self.type_cast_value(ctx, value)
def value_is_missing(self, value):
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def full_process_value(self, ctx, value):
value = self.process_value(ctx, value)
if value is None:
value = self.get_default(ctx)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
return value
def resolve_envvar_value(self, ctx):
if self.envvar is None:
return
if isinstance(self.envvar, (tuple, list)):
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv is not None:
return rv
else:
return os.environ.get(self.envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(self, ctx, opts, args):
with augment_usage_errors(ctx, param=self):
value = self.consume_value(ctx, opts)
try:
value = self.full_process_value(ctx, value)
except Exception:
if not ctx.resilient_parsing:
raise
value = None
if self.callback is not None:
try:
value = invoke_param_callback(
self.callback, ctx, self, value)
except Exception:
if not ctx.resilient_parsing:
raise
if self.expose_value:
ctx.params[self.name] = value
return value, args
def get_help_record(self, ctx):
pass
def get_usage_pieces(self, ctx):
return []
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: controls if the default value should be shown on the
help page. Normally, defaults are not shown.
:param prompt: if set to `True` or a non empty string then the user will
be prompted for input if not set. If set to `True` the
prompt will be the option name capitalized.
:param confirmation_prompt: if set then the value will need to be confirmed
if it was prompted for.
:param hide_input: if this is `True` then the input on the prompt will be
hidden from the user. This is useful for password
input.
:param is_flag: forces this option to act as a flag. The default is
auto detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
in how it works but supports an arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
"""
param_type_name = 'option'
def __init__(self, param_decls=None, show_default=False,
prompt=False, confirmation_prompt=False,
hide_input=False, is_flag=None, flag_value=None,
multiple=False, count=False, allow_from_autoenv=True,
type=None, help=None, **attrs):
default_is_missing = attrs.get('default', _missing) is _missing
Parameter.__init__(self, param_decls, type=type, **attrs)
if prompt is True:
prompt_text = self.name.replace('_', ' ').capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.hide_input = hide_input
# Flags
if is_flag is None:
if flag_value is not None:
is_flag = True
else:
is_flag = bool(self.secondary_opts)
if is_flag and default_is_missing:
self.default = False
if flag_value is None:
flag_value = not self.default
self.is_flag = is_flag
self.flag_value = flag_value
if self.is_flag and isinstance(self.flag_value, bool) \
and type is None:
self.type = BOOL
self.is_bool_flag = True
else:
self.is_bool_flag = False
# Counting
self.count = count
if count:
if type is None:
self.type = IntRange(min=0)
if default_is_missing:
self.default = 0
self.multiple = multiple
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
# Sanity check for stuff we don't support
if __debug__:
if self.nargs < 0:
raise TypeError('Options cannot have nargs < 0')
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError('Cannot prompt for flags that are not bools.')
if not self.is_bool_flag and self.secondary_opts:
raise TypeError('Got secondary option for non boolean flag.')
if self.is_bool_flag and self.hide_input \
and self.prompt is not None:
raise TypeError('Hidden input does not work with boolean '
'flag prompts.')
if self.count:
if self.multiple:
raise TypeError('Options cannot be multiple and count '
'at the same time.')
elif self.is_flag:
raise TypeError('Options cannot be count and flags at '
'the same time.')
def _parse_decls(self, decls, expose_value):
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if isidentifier(decl):
if name is not None:
raise TypeError('Name defined twice')
name = decl
else:
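# The split character for paired on/off declarations: if the declaration
# itself starts with '/' (e.g. a Windows-style '/debug;/no-debug' pair),
# split on ';' instead of '/'.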
split_char = decl[:1] == '/' and ';' or '/'
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
possible_names.append(split_opt(first))
opts.append(first)
secondary_opts.append(second.lstrip())
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: len(x[0]))
name = possible_names[-1][1].replace('-', '_').lower()
if not isidentifier(name):
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError('Could not determine name for option')
if not opts and not secondary_opts:
raise TypeError('No options defined but a name was passed (%s). '
'Did you mean to declare an argument instead '
'of an option?' % name)
return name, opts, secondary_opts
def add_to_parser(self, parser, ctx):
kwargs = {
'dest': self.name,
'nargs': self.nargs,
'obj': self,
}
if self.multiple:
action = 'append'
elif self.count:
action = 'count'
else:
action = 'store'
if self.is_flag:
kwargs.pop('nargs', None)
if self.is_bool_flag and self.secondary_opts:
parser.add_option(self.opts, action=action + '_const',
const=True, **kwargs)
parser.add_option(self.secondary_opts, action=action +
'_const', const=False, **kwargs)
else:
parser.add_option(self.opts, action=action + '_const',
const=self.flag_value,
**kwargs)
else:
kwargs['action'] = action
parser.add_option(self.opts, **kwargs)
def get_help_record(self, ctx):
any_prefix_is_slash = []
def _write_opts(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not self.is_flag and not self.count:
rv += ' ' + self.make_metavar()
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ''
extra = []
if self.default is not None and self.show_default:
extra.append('default: %s' % (
', '.join('%s' % d for d in self.default)
if isinstance(self.default, (list, tuple))
else self.default, ))
if self.required:
extra.append('required')
if extra:
help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
def get_default(self, ctx):
# If we're a non boolean flag our default is more complex because
# we need to look at all flags in the same group to figure out
# if we're the default one in which case we return the flag
# value as default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value
return None
return Parameter.get_default(self, ctx)
def prompt_for_value(self, ctx):
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
value as result.
"""
# Calculate the default before prompting anything to be stable.
default = self.get_default(ctx)
# If this is a prompt for a flag we need to handle this
# differently.
if self.is_bool_flag:
return confirm(self.prompt, default)
return prompt(self.prompt, default=default,
hide_input=self.hide_input,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x))
def resolve_envvar_value(self, ctx):
rv = Parameter.resolve_envvar_value(self, ctx)
if rv is not None:
return rv
if self.allow_from_autoenv and \
ctx.auto_envvar_prefix is not None:
envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
return os.environ.get(envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is None:
return None
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0 and rv is not None:
rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
rv = batch(rv, self.nargs)
return rv
def full_process_value(self, ctx, value):
if value is None and self.prompt is not None \
and not ctx.resilient_parsing:
return self.prompt_for_value(ctx)
return Parameter.full_process_value(self, ctx, value)
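# Hedged illustration (not part of the original source) of how option
# declarations map onto the attributes parsed above:
#
#   Option(['--shout/--no-shout'])     # boolean flag 'shout' with the
#                                      # secondary form '--no-shout'
#   Option(['-n', '--name', 'user'])   # the explicit identifier 'user'
#                                      # becomes the parameter name
#   Option(['--verbose', '-v'], count=True)   # integer counter, default 0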
class Argument(Parameter):
"""Arguments are positional parameters to a command. They generally
provide fewer features than options but can have infinite ``nargs``
and are required by default.
All parameters are passed onwards to the parameter constructor.
"""
param_type_name = 'argument'
def __init__(self, param_decls, required=None, **attrs):
if required is None:
if attrs.get('default') is not None:
required = False
else:
required = attrs.get('nargs', 1) > 0
Parameter.__init__(self, param_decls, required=required, **attrs)
@property
def human_readable_name(self):
if self.metavar is not None:
return self.metavar
return self.name.upper()
def make_metavar(self):
if self.metavar is not None:
return self.metavar
var = self.name.upper()
if not self.required:
var = '[%s]' % var
if self.nargs != 1:
var += '...'
return var
def _parse_decls(self, decls, expose_value):
if not decls:
if not expose_value:
return None, [], []
raise TypeError('Could not determine name for argument')
if len(decls) == 1:
name = arg = decls[0]
name = name.replace('-', '_').lower()
elif len(decls) == 2:
name, arg = decls
else:
raise TypeError('Arguments take exactly one or two '
'parameter declarations, got %d' % len(decls))
return name, [arg], []
def get_usage_pieces(self, ctx):
return [self.make_metavar()]
def add_to_parser(self, parser, ctx):
parser.add_argument(dest=self.name, nargs=self.nargs,
obj=self)
# Circular dependency between decorators and core
from .decorators import command, group
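# Hedged usage sketch (assumption: the public `click` package re-exports
# this module's classes together with the decorators imported above). A
# minimal CLI built on top of it might look like:
#
#   import click
#
#   @click.command()
#   @click.option('--count', default=1, help='Number of greetings.')
#   def hello(count):
#       for _ in range(count):
#           click.echo('Hello!')
#
#   if __name__ == '__main__':
#       hello()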
|
sourlows/rating-cruncher
|
src/lib/click/core.py
|
Python
|
apache-2.0
| 65,670
| 0.000091
|
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
import xml2nrn
# module names derived from the namespace. Add new tags in proper namespace
import neuroml
import metadata
import morphml
import biophysics
class FileWrapper:
def __init__(self, source):
self.source = source
self.lineno = 0
def read(self, bytes):
s = self.source.readline()
self.lineno += 1
return s
# for each '{namespace}element' call the corresponding module.func
def handle(x2n, fw, event, node):
tag = node.tag.split('}')
# hopefully a namespace token corresponding to an imported module name
ns = tag[0].split('/')[-2]
tag = ns+'.'+tag[1] #namespace.element should correspond to module.func
f = None
try:
if event == 'start':
f = eval(tag)
elif event == 'end':
f = eval(tag + '_end')
except:
pass
if f:
x2n.locator.lineno = fw.lineno
try:
f(x2n, node) # handle the element when it opens
except:
print tag,' failed at ', x2n.locator.getLineNumber()
elif event == 'start':
print 'ignore', node.tag # no function to handle the element
return 0
return 1
def rdxml(fname, ho = None):
f = FileWrapper(open(fname))
x2n = xml2nrn.XML2Nrn()
ig = None
for event, elem in etree.iterparse(f, events=("start", "end")):
if ig != elem:
if handle(x2n, f, event, elem) == 0:
ig = elem
if (ho):
ho.parsed(x2n)
if __name__ == '__main__':
rdxml('temp.xml')
|
neurodebian/pkg-neuron
|
share/lib/python/neuron/neuroml/rdxml.py
|
Python
|
gpl-2.0
| 1,545
| 0.029126
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Aggregate top level objects from all Scapy modules.
"""
from scapy.base_classes import *
from scapy.config import *
from scapy.dadict import *
from scapy.data import *
from scapy.error import *
from scapy.themes import *
from scapy.arch import *
from scapy.interfaces import *
from scapy.plist import *
from scapy.fields import *
from scapy.packet import *
from scapy.asn1fields import *
from scapy.asn1packet import *
from scapy.utils import *
from scapy.route import *
from scapy.sendrecv import *
from scapy.sessions import *
from scapy.supersocket import *
from scapy.volatile import *
from scapy.as_resolvers import *
from scapy.automaton import *
from scapy.autorun import *
from scapy.main import *
from scapy.consts import *
from scapy.compat import raw # noqa: F401
from scapy.layers.all import *
from scapy.asn1.asn1 import *
from scapy.asn1.ber import *
from scapy.asn1.mib import *
from scapy.pipetool import *
from scapy.scapypipes import *
if conf.ipv6_enabled: # noqa: F405
from scapy.utils6 import * # noqa: F401
from scapy.route6 import * # noqa: F401
from scapy.ansmachine import *
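# Hedged usage sketch (standard Scapy usage, not part of this module): the
# point of this aggregator is that one import brings in the whole API, e.g.
#
#   from scapy.all import IP, ICMP, sr1
#   reply = sr1(IP(dst='192.0.2.1') / ICMP(), timeout=2)
#   if reply is not None:
#       reply.show()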
|
gpotter2/scapy
|
scapy/all.py
|
Python
|
gpl-2.0
| 1,320
| 0
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.4.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xf1\
\x00\
\x00\x09\x00\x78\x9c\xdd\x96\x51\x6f\x9b\x30\x10\xc7\xdf\xfb\x29\
\x3c\x1e\x9a\x4d\x15\xd0\x49\x7b\x98\x52\x48\x34\x92\x4c\xea\xd4\
\xaa\x54\x69\x55\xf5\xd1\x98\x0b\x71\x01\xdb\x35\x26\x09\xdf\x7e\
\x86\xb0\x96\xa4\x2c\xa4\x1d\x4f\xe3\xc5\xd8\x77\xbe\xdf\x9d\x8d\
\xff\xc6\x19\x6f\xd2\x04\xad\x40\x66\x94\x33\xd7\xf8\x6a\x9d\x1b\
\x08\x18\xe1\x21\x65\x91\x6b\xe4\x6a\x61\x7e\x37\xc6\xa3\x13\xe7\
\xd3\xf4\x66\x72\xf7\xe8\xcf\xd0\x26\x80\x44\xf7\xcb\x66\x77\xda\
\xe8\x04\xe9\xc7\x59\xf0\x24\x04\x89\xaa\x26\x74\x0d\xc6\x6b\x43\
\x65\x54\x54\x25\x30\xf2\x38\x8f\x53\x2c\xe3\x0c\x79\x58\x3a\xf6\
\x76\xf0\xd5\x29\xa8\xcd\x68\x29\x61\xe1\x1a\x4b\xa5\xc4\xd0\xb6\
\x41\x52\x62\xd2\x10\x2c\x51\xa8\x25\x67\xa6\x90\xfc\x09\x88\xca\
\x2c\x2e\x23\xbb\xc1\x68\x70\x66\x7a\x0a\x7a\x80\x00\xcd\xa9\x82\
\xb7\x1c\xfb\x0f\xa8\x93\xbd\x5e\xaf\x2d\x49\x75\xb5\x01\x66\x31\
\xe1\xa9\xc8\x95\x5e\x1e\x4b\xbf\xfd\x85\xec\x17\xb7\xea\x9d\xe4\
\x43\xeb\xd6\x88\xdc\x88\x9b\xbd\x09\xdc\x51\xc2\xb3\xb2\x28\xb7\
\xf7\x53\x6e\x0f\xde\x1e\xbb\x25\xf1\xa3\x98\x21\xac\x20\xe1\x42\
\x7f\x2e\x87\xe9\xd3\x17\xbf\x3e\xf8\x21\x27\x35\xff\x30\x94\x93\
\x3c\x05\xa6\xb0\xd2\xdf\x72\x1f\xdc\x20\xe1\xd1\x31\x60\x4f\xfb\
\xf5\xc1\x5b\x70\x99\xa7\xc7\x00\x7f\x96\x8e\x7d\x10\x45\x82\x19\
\xa8\x4e\xa4\x5f\xb9\xa1\x5b\xd5\x07\xf3\x59\x11\xbd\x49\x12\xda\
\x0e\xfc\x6e\x99\x93\xca\xaf\x1f\xa6\x89\x85\x68\xd5\x98\x1d\xa4\
\xf9\xa3\xf6\x3a\x1a\xea\xd8\xdb\x03\xff\x7e\x05\xf0\x2b\xfd\xfb\
\xb8\x0a\x6c\xf5\xb3\xa3\xa4\x1a\x72\x85\x59\x94\xe3\x08\x4a\x5a\
\xd6\x93\x2a\x88\x42\xd0\x66\x12\x65\xbf\x33\x11\x1f\x93\xb8\xcc\
\xe3\x92\x85\xb0\x19\x22\xbf\xf0\x2f\x3f\xb8\xd4\x7b\xbd\xbd\x45\
\x2f\x20\x3b\x74\x5f\x5d\x03\xcb\xff\xdb\x0b\xeb\xdb\xbf\xa1\x9f\
\xf0\x0a\x67\x44\x52\xa1\x86\x09\x27\x95\x98\x5a\x95\x65\x90\x62\
\x9a\x28\x3e\x1c\xcf\xef\xbd\x5f\xb3\xc9\x9d\x3b\x40\x67\x28\xac\
\x45\xd7\xaa\x48\x7a\x60\x70\x8a\x53\x71\xe1\xdd\x4c\x1f\x2b\x3b\
\x64\x04\x0b\xf8\xbc\x13\xe9\xcb\x45\x7b\xf2\x73\x60\x21\xba\xa2\
\x2c\xee\xcc\xfb\x75\xf3\x1d\x7b\xfb\x23\xf3\x1b\xc5\xa5\x8d\x58\
\
"
qt_resource_name = b"\
\x00\x15\
\x0c\xd3\x2e\x3c\
\x00\x44\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x42\x00\x6f\x00\x6f\x00\x6b\x00\x6d\x00\x61\x00\x72\x00\x6b\x00\x73\x00\x2e\
\x00\x78\x00\x62\x00\x65\x00\x6c\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
testmana2/test
|
Helpviewer/Bookmarks/DefaultBookmarks_rc.py
|
Python
|
gpl-3.0
| 2,920
| 0.001712
|
from __future__ import unicode_literals
import datetime
from boto.ec2.elb.attributes import (
LbAttributes,
ConnectionSettingAttribute,
ConnectionDrainingAttribute,
AccessLogAttribute,
CrossZoneLoadBalancingAttribute,
)
from boto.ec2.elb.policies import (
Policies,
OtherPolicy,
)
from moto.core import BaseBackend, BaseModel
from moto.ec2.models import ec2_backends
from .exceptions import (
LoadBalancerNotFoundError,
TooManyTagsError,
BadHealthCheckDefinition,
DuplicateLoadBalancerName,
)
class FakeHealthCheck(BaseModel):
def __init__(self, timeout, healthy_threshold, unhealthy_threshold,
interval, target):
self.timeout = timeout
self.healthy_threshold = healthy_threshold
self.unhealthy_threshold = unhealthy_threshold
self.interval = interval
self.target = target
if not target.startswith(('HTTP', 'TCP', 'HTTPS', 'SSL')):
raise BadHealthCheckDefinition
class FakeListener(BaseModel):
def __init__(self, load_balancer_port, instance_port, protocol, ssl_certificate_id):
self.load_balancer_port = load_balancer_port
self.instance_port = instance_port
self.protocol = protocol.upper()
self.ssl_certificate_id = ssl_certificate_id
self.policy_names = []
def __repr__(self):
return "FakeListener(lbp: %s, inp: %s, pro: %s, cid: %s, policies: %s)" % (self.load_balancer_port, self.instance_port, self.protocol, self.ssl_certificate_id, self.policy_names)
class FakeBackend(BaseModel):
def __init__(self, instance_port):
self.instance_port = instance_port
self.policy_names = []
def __repr__(self):
return "FakeBackend(inp: %s, policies: %s)" % (self.instance_port, self.policy_names)
class FakeLoadBalancer(BaseModel):
def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None):
self.name = name
self.health_check = None
self.instance_ids = []
self.zones = zones
self.listeners = []
self.backends = []
self.created_time = datetime.datetime.now()
self.scheme = scheme
self.attributes = FakeLoadBalancer.get_default_attributes()
self.policies = Policies()
self.policies.other_policies = []
self.policies.app_cookie_stickiness_policies = []
self.policies.lb_cookie_stickiness_policies = []
self.subnets = subnets or []
self.vpc_id = vpc_id or 'vpc-56e10e3d'
self.tags = {}
self.dns_name = "%s.us-east-1.elb.amazonaws.com" % (name)
for port in ports:
listener = FakeListener(
protocol=(port.get('protocol') or port['Protocol']),
load_balancer_port=(
port.get('load_balancer_port') or port['LoadBalancerPort']),
instance_port=(
port.get('instance_port') or port['InstancePort']),
ssl_certificate_id=port.get(
'ssl_certificate_id', port.get('SSLCertificateId')),
)
self.listeners.append(listener)
# it is unclear per the AWS documentation as to when or how backend
# information gets set, so let's guess and set it here *shrug*
backend = FakeBackend(
instance_port=(
port.get('instance_port') or port['InstancePort']),
)
self.backends.append(backend)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
elb_backend = elb_backends[region_name]
new_elb = elb_backend.create_load_balancer(
name=properties.get('LoadBalancerName', resource_name),
zones=properties.get('AvailabilityZones', []),
ports=properties['Listeners'],
scheme=properties.get('Scheme', 'internet-facing'),
)
instance_ids = properties.get('Instances', [])
for instance_id in instance_ids:
elb_backend.register_instances(new_elb.name, [instance_id])
policies = properties.get('Policies', [])
port_policies = {}
for policy in policies:
policy_name = policy["PolicyName"]
other_policy = OtherPolicy()
other_policy.policy_name = policy_name
elb_backend.create_lb_other_policy(new_elb.name, other_policy)
for port in policy.get("InstancePorts", []):
policies_for_port = port_policies.get(port, set())
policies_for_port.add(policy_name)
port_policies[port] = policies_for_port
for port, policies in port_policies.items():
elb_backend.set_load_balancer_policies_of_backend_server(
new_elb.name, port, list(policies))
health_check = properties.get('HealthCheck')
if health_check:
elb_backend.configure_health_check(
load_balancer_name=new_elb.name,
timeout=health_check['Timeout'],
healthy_threshold=health_check['HealthyThreshold'],
unhealthy_threshold=health_check['UnhealthyThreshold'],
interval=health_check['Interval'],
target=health_check['Target'],
)
return new_elb
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name)
return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
elb_backend = elb_backends[region_name]
try:
elb_backend.delete_load_balancer(resource_name)
except KeyError:
pass
@property
def physical_resource_id(self):
return self.name
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'CanonicalHostedZoneName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"')
elif attribute_name == 'CanonicalHostedZoneNameID':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"')
elif attribute_name == 'DNSName':
return self.dns_name
elif attribute_name == 'SourceSecurityGroup.GroupName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"')
elif attribute_name == 'SourceSecurityGroup.OwnerAlias':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"')
raise UnformattedGetAttTemplateException()
@classmethod
def get_default_attributes(cls):
attributes = LbAttributes()
cross_zone_load_balancing = CrossZoneLoadBalancingAttribute()
cross_zone_load_balancing.enabled = False
attributes.cross_zone_load_balancing = cross_zone_load_balancing
connection_draining = ConnectionDrainingAttribute()
connection_draining.enabled = False
attributes.connection_draining = connection_draining
access_log = AccessLogAttribute()
access_log.enabled = False
attributes.access_log = access_log
connection_settings = ConnectionSettingAttribute()
connection_settings.idle_timeout = 60
attributes.connecting_settings = connection_settings
return attributes
def add_tag(self, key, value):
if len(self.tags) >= 10 and key not in self.tags:
raise TooManyTagsError()
self.tags[key] = value
def list_tags(self):
return self.tags
def remove_tag(self, key):
if key in self.tags:
del self.tags[key]
def delete(self, region):
''' Not exposed as part of the ELB API - used for CloudFormation. '''
elb_backends[region].delete_load_balancer(self.name)
class ELBBackend(BaseBackend):
def __init__(self, region_name=None):
self.region_name = region_name
self.load_balancers = {}
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_load_balancer(self, name, zones, ports, scheme='internet-facing', subnets=None):
vpc_id = None
ec2_backend = ec2_backends[self.region_name]
if subnets:
subnet = ec2_backend.get_subnet(subnets[0])
vpc_id = subnet.vpc_id
if name in self.load_balancers:
raise DuplicateLoadBalancerName(name)
new_load_balancer = FakeLoadBalancer(
name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id)
self.load_balancers[name] = new_load_balancer
return new_load_balancer
def create_load_balancer_listeners(self, name, ports):
balancer = self.load_balancers.get(name, None)
if balancer:
for port in ports:
protocol = port['protocol']
instance_port = port['instance_port']
lb_port = port['load_balancer_port']
ssl_certificate_id = port.get('sslcertificate_id')
for listener in balancer.listeners:
if lb_port == listener.load_balancer_port:
break
else:
balancer.listeners.append(FakeListener(
lb_port, instance_port, protocol, ssl_certificate_id))
return balancer
def describe_load_balancers(self, names):
balancers = self.load_balancers.values()
if names:
matched_balancers = [
balancer for balancer in balancers if balancer.name in names]
if len(names) != len(matched_balancers):
missing_elb = list(set(names) - set(balancer.name for balancer in matched_balancers))[0]
raise LoadBalancerNotFoundError(missing_elb)
return matched_balancers
else:
return balancers
def delete_load_balancer_listeners(self, name, ports):
balancer = self.load_balancers.get(name, None)
listeners = []
if balancer:
for lb_port in ports:
for listener in balancer.listeners:
if int(lb_port) == int(listener.load_balancer_port):
continue
else:
listeners.append(listener)
balancer.listeners = listeners
return balancer
def delete_load_balancer(self, load_balancer_name):
self.load_balancers.pop(load_balancer_name, None)
def get_load_balancer(self, load_balancer_name):
return self.load_balancers.get(load_balancer_name)
def configure_health_check(self, load_balancer_name, timeout,
healthy_threshold, unhealthy_threshold, interval,
target):
check = FakeHealthCheck(timeout, healthy_threshold, unhealthy_threshold,
interval, target)
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.health_check = check
return check
def set_load_balancer_listener_sslcertificate(self, name, lb_port, ssl_certificate_id):
balancer = self.load_balancers.get(name, None)
if balancer:
for idx, listener in enumerate(balancer.listeners):
if lb_port == listener.load_balancer_port:
balancer.listeners[
idx].ssl_certificate_id = ssl_certificate_id
return balancer
def register_instances(self, load_balancer_name, instance_ids):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.instance_ids.extend(instance_ids)
return load_balancer
def deregister_instances(self, load_balancer_name, instance_ids):
load_balancer = self.get_load_balancer(load_balancer_name)
new_instance_ids = [
instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids]
load_balancer.instance_ids = new_instance_ids
return load_balancer
def set_cross_zone_load_balancing_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.cross_zone_load_balancing = attribute
return load_balancer
def set_access_log_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.access_log = attribute
return load_balancer
def set_connection_draining_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.connection_draining = attribute
return load_balancer
def set_connection_settings_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.connecting_settings = attribute
return load_balancer
def create_lb_other_policy(self, load_balancer_name, other_policy):
load_balancer = self.get_load_balancer(load_balancer_name)
if other_policy.policy_name not in [p.policy_name for p in load_balancer.policies.other_policies]:
load_balancer.policies.other_policies.append(other_policy)
return load_balancer
def create_app_cookie_stickiness_policy(self, load_balancer_name, policy):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.policies.app_cookie_stickiness_policies.append(policy)
return load_balancer
def create_lb_cookie_stickiness_policy(self, load_balancer_name, policy):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.policies.lb_cookie_stickiness_policies.append(policy)
return load_balancer
def set_load_balancer_policies_of_backend_server(self, load_balancer_name, instance_port, policies):
load_balancer = self.get_load_balancer(load_balancer_name)
backend = [b for b in load_balancer.backends if int(
b.instance_port) == instance_port][0]
backend_idx = load_balancer.backends.index(backend)
backend.policy_names = policies
load_balancer.backends[backend_idx] = backend
return load_balancer
def set_load_balancer_policies_of_listener(self, load_balancer_name, load_balancer_port, policies):
load_balancer = self.get_load_balancer(load_balancer_name)
listener = [l for l in load_balancer.listeners if int(
l.load_balancer_port) == load_balancer_port][0]
listener_idx = load_balancer.listeners.index(listener)
listener.policy_names = policies
load_balancer.listeners[listener_idx] = listener
return load_balancer
elb_backends = {}
for region in ec2_backends.keys():
elb_backends[region] = ELBBackend(region)
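# Hedged usage sketch (assumption: moto's public @mock_elb decorator patches
# boto to talk to the ELBBackend defined above). Illustrative only:
#
#   import boto.ec2.elb
#   from moto import mock_elb
#
#   @mock_elb
#   def test_create_load_balancer():
#       conn = boto.ec2.elb.connect_to_region('us-east-1')
#       conn.create_load_balancer('my-lb', zones=['us-east-1a'],
#                                 listeners=[(80, 8080, 'http')])
#       assert conn.get_all_load_balancers()[0].name == 'my-lb'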
|
heddle317/moto
|
moto/elb/models.py
|
Python
|
apache-2.0
| 15,471
| 0.001357
|
from fbchat import GroupData, User
def test_group_from_graphql(session):
data = {
"name": "Group ABC",
"thread_key": {"thread_fbid": "11223344"},
"image": None,
"is_group_thread": True,
"all_participants": {
"nodes": [
{"messaging_actor": {"__typename": "User", "id": "1234"}},
{"messaging_actor": {"__typename": "User", "id": "2345"}},
{"messaging_actor": {"__typename": "User", "id": "3456"}},
]
},
"customization_info": {
"participant_customizations": [],
"outgoing_bubble_color": None,
"emoji": "😀",
},
"thread_admins": [{"id": "1234"}],
"group_approval_queue": {"nodes": []},
"approval_mode": 0,
"joinable_mode": {"mode": "0", "link": ""},
"event_reminders": {"nodes": []},
}
assert GroupData(
session=session,
id="11223344",
photo=None,
name="Group ABC",
last_active=None,
message_count=None,
plan=None,
participants=[
User(session=session, id="1234"),
User(session=session, id="2345"),
User(session=session, id="3456"),
],
nicknames={},
color="#0084ff",
emoji="😀",
admins={"1234"},
approval_mode=False,
approval_requests=set(),
join_link="",
) == GroupData._from_graphql(session, data)
|
carpedm20/fbchat
|
tests/threads/test_group.py
|
Python
|
bsd-3-clause
| 1,493
| 0
|
/// <reference path="./testBlocks/enums.ts" />
enum EnumOfFlags {
W = 1,
X = 1 << 1,
Z = 1 << 3
}
let userDefinedTest7 = EnumOfFlags.W
|
switch-education/pxt
|
tests/pydecompile-test/baselines/enum_user_defined_bit_mask_bad_sequence.py
|
Python
|
mit
| 148
| 0.027027
|
import itertools
import os
class TreeHasher():
"""uses BlockHasher recursively on a directory tree
Input and output generators are in the format: ( relative-filepath, chunk_nr, hexdigest)
"""
def __init__(self, block_hasher):
"""
:type block_hasher: BlockHasher
"""
self.block_hasher=block_hasher
def generate(self, start_path):
"""Use BlockHasher on every file in a tree, yielding the results
note that it only checks the contents of actual files. It ignores metadata like permissions and mtimes.
It also ignores empty directories, symlinks and special files.
"""
def walkerror(e):
raise e
for (dirpath, dirnames, filenames) in os.walk(start_path, onerror=walkerror):
for f in filenames:
file_path=os.path.join(dirpath, f)
if (not os.path.islink(file_path)) and os.path.isfile(file_path):
for (chunk_nr, hash) in self.block_hasher.generate(file_path):
yield ( os.path.relpath(file_path,start_path), chunk_nr, hash )
def compare(self, start_path, generator):
"""reads from generator and compares blocks
yields mismatches in the form: ( relative_filename, chunk_nr, compare_hexdigest, actual_hexdigest )
yields errors in the form: ( relative_filename, chunk_nr, compare_hexdigest, "message" )
"""
count=0
def filter_file_name( file_name, chunk_nr, hexdigest):
return ( chunk_nr, hexdigest )
for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
count=count+1
block_generator=itertools.starmap(filter_file_name, group_generator)
for ( chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(os.path.join(start_path,file_name), block_generator):
yield ( file_name, chunk_nr, compare_hexdigest, actual_hexdigest )
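# Hedged usage sketch (not part of the original module): TreeHasher only
# needs an object exposing BlockHasher's generate()/compare() interface; the
# default BlockHasher construction below is an assumption, so consult its own
# module for the real arguments.
#
#   from zfs_autobackup.BlockHasher import BlockHasher
#
#   tree_hasher = TreeHasher(BlockHasher())
#   hashes = list(tree_hasher.generate('/source/tree'))
#   for mismatch in tree_hasher.compare('/backup/tree', iter(hashes)):
#       print(mismatch)   # (rel_path, chunk_nr, expected, actual_or_error)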
|
psy0rz/zfs_autobackup
|
zfs_autobackup/TreeHasher.py
|
Python
|
gpl-3.0
| 2,011
| 0.015415
|
# Microcosmos: an antsy game
# Copyright (C) 2010 Cyril ADRIAN <cyril.adrian@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 exclusively.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
The Bugs model package provides bugs and their specific behaviour.
"""
from net.cadrian.microcosmos.model.bugs.antFemales import AntFemale, Target as AntFemaleTarget
from net.cadrian.microcosmos.model.bugs.antQueens import AntQueen
from net.cadrian.microcosmos.model.bugs.antSoldiers import AntSoldier
from net.cadrian.microcosmos.model.bugs.antWorkers import AntWorker
from net.cadrian.microcosmos.model.bugs.lice import Louse
|
cadrian/microcosmos
|
src/net/cadrian/microcosmos/model/bugs/__init__.py
|
Python
|
gpl-3.0
| 1,120
| 0.000893
|
import wx
import wiggler.ui.dialogs as dialogs
class ResourceManager(wx.Control):
def __init__(self, parent, resources, events):
wx.Control.__init__(self, parent)
self.parent = parent
self.events = events
self.resources = resources
self.events.subscribe(self, ['add_costume', 'del_costume',
'add_character', 'del_character',
'add_sheet', 'del_sheet',
'add_image', 'del_image',
'add_sprite', 'del_sprite',
'add_animation', 'del_animation',
'change_background'])
self.Bind(self.events.EVT_NOTICE, self.notice_handler)
def notice_handler(self, event):
if event.notice == 'change_background':
self.change_background()
elif event.notice == 'add_costume':
self.add_costume()
elif event.notice == 'del_costume':
self.del_costume()
elif event.notice == 'add_sheet':
self.add_sheet()
elif event.notice == 'del_sheet':
self.del_sheet()
elif event.notice == 'add_image':
pass
elif event.notice == 'del_image':
pass
elif event.notice == 'add_character':
self.add_character()
elif event.notice == 'del_character':
self.del_character()
elif event.notice == 'add_animation':
pass
elif event.notice == 'del_animation':
pass
elif event.notice == 'add_sprite':
self.add_sprite()
elif event.notice == 'del_sprite':
self.del_sprite()
event.Skip()
def change_background(self):
dlg = dialogs.ChangeBackgroundDialog(self.parent)
res = dlg.ShowModal()
if res == wx.ID_OK:
back_type = dlg.back_type.GetValue()
back_spec = dlg.back_spec.GetValue()
self.resources.change_default_background(back_type, back_spec)
dlg.Destroy()
def add_sheet(self):
# definition_fields = Factory_sheet.definition_fields
# dialog with definition fields, source file with browse button
# resource with same name , overwrite ?
filename = dialogs.open_sheet(self.parent)
if filename is not None:
dia = dialogs.AddSheetDialog(None, -1, "Insert sheet details",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
try:
self.resources.add_resource(
'sheets', self.settings['name'],
{'colorkey': self.settings['colorkey'],
'abs_path': filename})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_sheet(self):
# LISTCTRL with very large icons ?
# use resources.find_deps
# print self.resources.find_deps('sheets', 'master')
# name = 'testsheet'
# self.resources.remove_resource('sheets', name)
# and everything associated to IT!!!
dia = dialogs.DelSheetDialog(None, -1, "Delete sheet",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('sheets',
self.settings['sheet']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK |
wx.ICON_INFORMATION)
try:
self.resources.remove_resource('sheets',
self.settings['sheet'])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def add_costume(self):
# dialog with definitions and a area selection on the sheet
dia = dialogs.AddCostumeDialog(None, -1, "Add a new costume",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
# print self.settings['name'], self.settings['rect'], \
# self.settings['sheet']
try:
self.resources.add_resource(
'costumes', self.settings['name'],
{'name': self.settings['name'],
'sheet': self.settings['sheet'],
'rect': self.settings['rect']})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_costume(self):
# LISTCTRL with large icons
dia = dialogs.DelCostumeDialog(None, -1, "Delete costume",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('costumes',
self.settings['costume']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK |
wx.ICON_INFORMATION)
try:
self.resources.remove_resource('costumes',
self.settings['costume'])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def add_sprite(self):
# dialog with definition, select from existing costumes,
# animations, sounds...
# or add empty
dia = dialogs.AddSpriteDialog(None, -1, "Add a new sprite",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
try:
self.resources.add_resource('sprites', self.settings['name'],
{'name': self.settings['name'],
'base_class': self.settings
['base_class'],
'costumes': self.settings
['costumes'],
'animations': [],
'sounds': [],
'self_sufficiency': 0,
'user_code': {'__init__': ''}})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_sprite(self):
        # LISTCTRL with name + sprite definition
dia = dialogs.DelSpriteDialog(None, -1, "Delete a sprite",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('sprites',
self.settings['sprite']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK |
wx.ICON_INFORMATION)
try:
self.resources.remove_resource('sprites',
self.settings['sprite'])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def add_character(self):
# dialog with definition, select from existing sprites or add empty
dia = dialogs.AddCharacterDialog(None, -1, "Add a new character",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
try:
self.resources.add_resource('characters',
self.settings['name'],
{'sprites': []})
except ValueError as e:
wx.MessageBox(str(e), "Error",
wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def del_character(self):
        # LISTCTRL with name + sprite definition
dia = dialogs.DelCharacterDialog(None, -1, "Delete character",
self.resources)
result = dia.ShowModal()
if result == wx.ID_OK:
self.settings = dia.GetSettings()
for x in self.resources.find_deps('characters',
self.settings['character']):
for elem in x:
try:
self.resources.remove_resource(elem[0], elem[1])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK |
wx.ICON_INFORMATION)
try:
self.resources.remove_resource('characters',
self.settings['character'])
except Exception as e:
wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
dia.Destroy()
return True
def add_animation(self):
# dialog similar to add_costume but for every frame
pass
def del_animation(self):
# listctrl with animated gifs ?
pass
|
ProgrammaBol/wiggler
|
wiggler/ui/resources.py
|
Python
|
gpl-3.0
| 10,546
| 0
|
#!/usr/bin/env python
# Thu, 13 Mar 14 (PDT)
# bpf-filter.py: Create a packet filter,
# use it to print udp records from a trace
# Copyright (C) 2015, Nevil Brownlee, U Auckland | WAND
from plt_testing import *
t = get_example_trace('anon-v4.pcap')
filter = plt.filter('udp port 53') # Only want DNS packets
t.conf_filter(filter)
t.conf_snaplen(500)
#t.conf_promisc(True)
# Remember: on a live interface, must sudo to capture
# on a trace file, can't set promiscuous mode
nfp = 0; offset = 12
for pkt in t:
nfp += 1
udp = pkt.udp
test_println("%4d:" % (nfp), get_tag())
print_udp(pkt.udp, offset, get_tag("nfp:"+str(nfp)))
test_println('')
if nfp == 4:
break
test_println("%d filtered packets" % nfp, get_tag())
|
nevil-brownlee/pypy-libtrace
|
test/pypy-test-cases/test-bpf-filter.py
|
Python
|
gpl-3.0
| 780
| 0.007692
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This file implements the CLAClassifier."""
import array
from collections import deque
import itertools
import numpy
# This determines how large one of the duty cycles must get before each of the
# duty cycles are updated to the current iteration.
# This must be less than the maximum float32 value, since the duty cycles are
# stored as float32.
DUTY_CYCLE_UPDATE_INTERVAL = numpy.finfo(numpy.float32).max / (2 ** 20)
g_debugPrefix = "CLAClassifier"
def _pFormatArray(array_, fmt="%.2f"):
"""Return a string with pretty-print of a numpy array using the given format
for each element"""
return "[ " + " ".join(fmt % x for x in array_) + " ]"
class BitHistory(object):
"""Class to store an activationPattern bit history."""
__slots__ = ("_classifier", "_id", "_stats", "_lastTotalUpdate",
"_learnIteration", "_version")
__VERSION__ = 2
def __init__(self, classifier, bitNum, nSteps):
"""Constructor for bit history.
Parameters:
---------------------------------------------------------------------
classifier: instance of the CLAClassifier that owns us
bitNum: activation pattern bit number this history is for,
used only for debug messages
nSteps: number of steps of prediction this history is for, used
only for debug messages
"""
# Store reference to the classifier
self._classifier = classifier
# Form our "id"
self._id = "{0:d}[{1:d}]".format(bitNum, nSteps)
# Dictionary of bucket entries. The key is the bucket index, the
# value is the dutyCycle, which is the rolling average of the duty cycle
self._stats = array.array("f")
# lastUpdate is the iteration number of the last time it was updated.
self._lastTotalUpdate = None
# The bit's learning iteration. This is updated each time store() gets
# called on this bit.
self._learnIteration = 0
# Set the version to the latest version.
# This is used for serialization/deserialization
self._version = BitHistory.__VERSION__
def store(self, iteration, bucketIdx):
"""Store a new item in our history.
This gets called for a bit whenever it is active and learning is enabled
Parameters:
--------------------------------------------------------------------
iteration: the learning iteration number, which is only incremented
when learning is enabled
bucketIdx: the bucket index to store
Save duty cycle by normalizing it to the same iteration as
the rest of the duty cycles which is lastTotalUpdate.
This is done to speed up computation in inference since all of the duty
cycles can now be scaled by a single number.
The duty cycle is brought up to the current iteration only at inference and
only when one of the duty cycles gets too large (to avoid overflow to
larger data type) since the ratios between the duty cycles are what is
important. As long as all of the duty cycles are at the same iteration
their ratio is the same as it would be for any other iteration, because the
update is simply a multiplication by a scalar that depends on the number of
steps between the last update of the duty cycle and the current iteration.
"""
# If lastTotalUpdate has not been set, set it to the current iteration.
if self._lastTotalUpdate is None:
self._lastTotalUpdate = iteration
# Get the duty cycle stored for this bucket.
statsLen = len(self._stats) - 1
if bucketIdx > statsLen:
self._stats.extend(itertools.repeat(0.0, bucketIdx - statsLen))
# Update it now.
# duty cycle n steps ago is dc{-n}
# duty cycle for current iteration is (1-alpha)*dc{-n}*(1-alpha)**(n)+alpha
dc = self._stats[bucketIdx]
# To get the duty cycle from n iterations ago that when updated to the
# current iteration would equal the dc of the current iteration we simply
# divide the duty cycle by (1-alpha)**(n). This results in the formula
# dc'{-n} = dc{-n} + alpha/(1-alpha)**n where the apostrophe symbol is used
# to denote that this is the new duty cycle at that iteration. This is
# equivalent to the duty cycle dc{-n}
denom = ((1.0 - self._classifier.alpha) **
(iteration - self._lastTotalUpdate))
if denom > 0:
dcNew = dc + (self._classifier.alpha / denom)
    # Rescale every duty cycle to the current iteration when this one would
    # grow too large (or denom underflowed to zero), to avoid overflow errors.
if denom == 0 or dcNew > DUTY_CYCLE_UPDATE_INTERVAL:
exp = ((1.0 - self._classifier.alpha) **
(iteration - self._lastTotalUpdate))
for (bucketIdxT, dcT) in enumerate(self._stats):
dcT *= exp
self._stats[bucketIdxT] = dcT
# Reset time since last update
self._lastTotalUpdate = iteration
# Add alpha since now exponent is 0
dc = self._stats[bucketIdx] + self._classifier.alpha
else:
dc = dcNew
self._stats[bucketIdx] = dc
if self._classifier.verbosity >= 2:
print "updated DC for {0!s}, bucket {1:d} to {2:f}".format(self._id, bucketIdx, dc)
def infer(self, votes):
"""Look up and return the votes for each bucketIdx for this bit.
Parameters:
--------------------------------------------------------------------
votes: a numpy array, initialized to all 0's, that should be filled
in with the votes for each bucket. The vote for bucket index N
should go into votes[N].
"""
# Place the duty cycle into the votes and update the running total for
# normalization
total = 0
for (bucketIdx, dc) in enumerate(self._stats):
# Not updating to current iteration since we are normalizing anyway
if dc > 0.0:
votes[bucketIdx] = dc
total += dc
# Experiment... try normalizing the votes from each bit
if total > 0:
votes /= total
if self._classifier.verbosity >= 2:
print "bucket votes for {0!s}:".format((self._id)), _pFormatArray(votes)
def __getstate__(self):
return dict((elem, getattr(self, elem)) for elem in self.__slots__)
def __setstate__(self, state):
version = 0
if "_version" in state:
version = state["_version"]
# Migrate from version 0 to version 1
if version == 0:
stats = state.pop("_stats")
assert isinstance(stats, dict)
maxBucket = max(stats.iterkeys())
self._stats = array.array("f", itertools.repeat(0.0, maxBucket + 1))
for (index, value) in stats.iteritems():
self._stats[index] = value
elif version == 1:
state.pop("_updateDutyCycles", None)
elif version == 2:
pass
else:
raise Exception("Error while deserializing {0!s}: Invalid version {1!s}".format(self.__class__, version))
for (attr, value) in state.iteritems():
setattr(self, attr, value)
self._version = BitHistory.__VERSION__
def write(self, proto):
proto.id = self._id
statsProto = proto.init("stats", len(self._stats))
for (bucketIdx, dutyCycle) in enumerate(self._stats):
statsProto[bucketIdx].index = bucketIdx
statsProto[bucketIdx].dutyCycle = dutyCycle
proto.lastTotalUpdate = self._lastTotalUpdate
proto.learnIteration = self._learnIteration
@classmethod
def read(cls, proto):
bitHistory = object.__new__(cls)
    bitHistory._id = proto.id
    # BitHistory uses __slots__, so _stats must be created before it is grown.
    bitHistory._stats = array.array("f")
    for statProto in proto.stats:
statsLen = len(bitHistory._stats) - 1
if statProto.index > statsLen:
bitHistory._stats.extend(
itertools.repeat(0.0, statProto.index - statsLen))
bitHistory._stats[statProto.index] = statProto.dutyCycle
bitHistory._lastTotalUpdate = proto.lastTotalUpdate
bitHistory._learnIteration = proto.learnIteration
return bitHistory
class CLAClassifier(object):
"""
A CLA classifier accepts a binary input from the level below (the
"activationPattern") and information from the sensor and encoders (the
"classification") describing the input to the system at that time step.
When learning, for every bit in activation pattern, it records a history of
the classification each time that bit was active. The history is weighted so
that more recent activity has a bigger impact than older activity. The alpha
parameter controls this weighting.
For inference, it takes an ensemble approach. For every active bit in the
activationPattern, it looks up the most likely classification(s) from the
history stored for that bit and then votes across these to get the resulting
classification(s).
This classifier can learn and infer a number of simultaneous classifications
at once, each representing a shift of a different number of time steps. For
example, say you are doing multi-step prediction and want the predictions for
1 and 3 time steps in advance. The CLAClassifier would learn the associations
between the activation pattern for time step T and the classifications for
time step T+1, as well as the associations between activation pattern T and
the classifications for T+3. The 'steps' constructor argument specifies the
list of time-steps you want.
"""
__VERSION__ = 2
def __init__(self, steps=(1,), alpha=0.001, actValueAlpha=0.3, verbosity=0):
"""Constructor for the CLA classifier.
Parameters:
---------------------------------------------------------------------
steps: Sequence of the different steps of multi-step predictions to learn
alpha: The alpha used to compute running averages of the bucket duty
cycles for each activation pattern bit. A lower alpha results
in longer term memory.
    actValueAlpha: The alpha used to compute the running average of the
               actual value associated with each bucket (see compute())
    verbosity: verbosity level, can be 0, 1, or 2
"""
# Save constructor args
self.steps = steps
self.alpha = alpha
self.actValueAlpha = actValueAlpha
self.verbosity = verbosity
# Init learn iteration index
self._learnIteration = 0
# This contains the offset between the recordNum (provided by caller) and
# learnIteration (internal only, always starts at 0).
self._recordNumMinusLearnIteration = None
# Max # of steps of prediction we need to support
maxSteps = max(self.steps) + 1
# History of the last _maxSteps activation patterns. We need to keep
# these so that we can associate the current iteration's classification
# with the activationPattern from N steps ago
self._patternNZHistory = deque(maxlen=maxSteps)
# These are the bit histories. Each one is a BitHistory instance, stored in
# this dict, where the key is (bit, nSteps). The 'bit' is the index of the
# bit in the activation pattern and nSteps is the number of steps of
# prediction desired for that bit.
self._activeBitHistory = dict()
# This contains the value of the highest bucket index we've ever seen
# It is used to pre-allocate fixed size arrays that hold the weights of
# each bucket index during inference
self._maxBucketIdx = 0
# This keeps track of the actual value to use for each bucket index. We
# start with 1 bucket, no actual value so that the first infer has something
# to return
self._actualValues = [None]
# Set the version to the latest version.
# This is used for serialization/deserialization
self._version = CLAClassifier.__VERSION__
def compute(self, recordNum, patternNZ, classification, learn, infer):
"""
Process one input sample.
This method is called by outer loop code outside the nupic-engine. We
use this instead of the nupic engine compute() because our inputs and
outputs aren't fixed size vectors of reals.
Parameters:
--------------------------------------------------------------------
recordNum: Record number of this input pattern. Record numbers should
normally increase sequentially by 1 each time unless there
are missing records in the dataset. Knowing this information
insures that we don't get confused by missing records.
patternNZ: List of the active indices from the output below.
- When the input is from TemporalMemory, this list should be the
indices of the active cells.
classification: dict of the classification information:
bucketIdx: index of the encoder bucket
actValue: actual value going into the encoder
learn: if true, learn this sample
infer: if true, perform inference
retval: dict containing inference results, there is one entry for each
step in self.steps, where the key is the number of steps, and
the value is an array containing the relative likelihood for
each bucketIdx starting from bucketIdx 0.
There is also an entry containing the average actual value to
use for each bucket. The key is 'actualValues'.
for example:
{1 : [0.1, 0.3, 0.2, 0.7],
4 : [0.2, 0.4, 0.3, 0.5],
              'actualValues': [1.5, 3.5, 5.5, 7.6],
}
"""
# Save the offset between recordNum and learnIteration if this is the first
# compute
if self._recordNumMinusLearnIteration is None:
self._recordNumMinusLearnIteration = recordNum - self._learnIteration
# Update the learn iteration
self._learnIteration = recordNum - self._recordNumMinusLearnIteration
if self.verbosity >= 1:
print "\n{0!s}: compute".format(g_debugPrefix)
print " recordNum:", recordNum
print " learnIteration:", self._learnIteration
print " patternNZ ({0:d}):".format(len(patternNZ)), patternNZ
print " classificationIn:", classification
# Store pattern in our history
self._patternNZHistory.append((self._learnIteration, patternNZ))
# To allow multi-class classification, we need to be able to run learning
# without inference being on. So initialize retval outside
# of the inference block.
retval = None
# ------------------------------------------------------------------------
# Inference:
# For each active bit in the activationPattern, get the classification
# votes
if infer:
retval = self.infer(patternNZ, classification)
# ------------------------------------------------------------------------
# Learning:
# For each active bit in the activationPattern, store the classification
# info. If the bucketIdx is None, we can't learn. This can happen when the
# field is missing in a specific record.
if learn and classification["bucketIdx"] is not None:
# Get classification info
bucketIdx = classification["bucketIdx"]
actValue = classification["actValue"]
# Update maxBucketIndex
self._maxBucketIdx = max(self._maxBucketIdx, bucketIdx)
# Update rolling average of actual values if it's a scalar. If it's
# not, it must be a category, in which case each bucket only ever
# sees one category so we don't need a running average.
while self._maxBucketIdx > len(self._actualValues) - 1:
self._actualValues.append(None)
if self._actualValues[bucketIdx] is None:
self._actualValues[bucketIdx] = actValue
else:
if isinstance(actValue, int) or isinstance(actValue, float):
self._actualValues[bucketIdx] = ((1.0 - self.actValueAlpha)
* self._actualValues[bucketIdx]
+ self.actValueAlpha * actValue)
else:
self._actualValues[bucketIdx] = actValue
# Train each pattern that we have in our history that aligns with the
# steps we have in self.steps
for nSteps in self.steps:
# Do we have the pattern that should be assigned to this classification
# in our pattern history? If not, skip it
found = False
for (iteration, learnPatternNZ) in self._patternNZHistory:
if iteration == self._learnIteration - nSteps:
          found = True
break
if not found:
continue
# Store classification info for each active bit from the pattern
# that we got nSteps time steps ago.
for bit in learnPatternNZ:
# Get the history structure for this bit and step #
key = (bit, nSteps)
history = self._activeBitHistory.get(key, None)
if history is None:
history = self._activeBitHistory[key] = BitHistory(self,
bitNum=bit,
nSteps=nSteps)
# Store new sample
history.store(iteration=self._learnIteration,
bucketIdx=bucketIdx)
# ------------------------------------------------------------------------
# Verbose print
if infer and self.verbosity >= 1:
print " inference: combined bucket likelihoods:"
print " actual bucket values:", retval["actualValues"]
for (nSteps, votes) in retval.items():
if nSteps == "actualValues":
continue
print " {0:d} steps: ".format((nSteps)), _pFormatArray(votes)
bestBucketIdx = votes.argmax()
print (" most likely bucket idx: "
"%d, value: %s" % (bestBucketIdx,
retval["actualValues"][bestBucketIdx]))
print
return retval
def infer(self, patternNZ, classification):
"""
Return the inference value from one input sample. The actual
learning happens in compute(). The method customCompute() is here to
maintain backward compatibility.
Parameters:
--------------------------------------------------------------------
patternNZ: list of the active indices from the output below
classification: dict of the classification information:
bucketIdx: index of the encoder bucket
actValue: actual value going into the encoder
retval: dict containing inference results, one entry for each step in
self.steps. The key is the number of steps, the value is an
array containing the relative likelihood for each bucketIdx
starting from bucketIdx 0.
for example:
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
"""
# Return value dict. For buckets which we don't have an actual value
# for yet, just plug in any valid actual value. It doesn't matter what
# we use because that bucket won't have non-zero likelihood anyways.
# NOTE: If doing 0-step prediction, we shouldn't use any knowledge
# of the classification input during inference.
if self.steps[0] == 0:
defaultValue = 0
else:
defaultValue = classification["actValue"]
actValues = [x if x is not None else defaultValue
for x in self._actualValues]
retval = {"actualValues": actValues}
# For each n-step prediction...
for nSteps in self.steps:
# Accumulate bucket index votes and actValues into these arrays
sumVotes = numpy.zeros(self._maxBucketIdx + 1)
bitVotes = numpy.zeros(self._maxBucketIdx + 1)
# For each active bit, get the votes
for bit in patternNZ:
key = (bit, nSteps)
history = self._activeBitHistory.get(key, None)
if history is None:
continue
bitVotes.fill(0)
history.infer(votes=bitVotes)
sumVotes += bitVotes
# Return the votes for each bucket, normalized
total = sumVotes.sum()
if total > 0:
sumVotes /= total
else:
# If all buckets have zero probability then simply make all of the
# buckets equally likely. There is no actual prediction for this
# timestep so any of the possible predictions are just as good.
if sumVotes.size > 0:
sumVotes = numpy.ones(sumVotes.shape)
sumVotes /= sumVotes.size
retval[nSteps] = sumVotes
return retval
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
if "_profileMemory" in state:
state.pop("_profileMemory")
# Set our state
self.__dict__.update(state)
# Handle version 0 case (i.e. before versioning code)
if "_version" not in state or state["_version"] < 2:
self._recordNumMinusLearnIteration = None
# Plug in the iteration number in the old patternNZHistory to make it
# compatible with the new format
historyLen = len(self._patternNZHistory)
for (i, pattern) in enumerate(self._patternNZHistory):
self._patternNZHistory[i] = (self._learnIteration - (historyLen - i),
pattern)
elif state["_version"] == 2:
# Version 2 introduced _recordNumMinusLearnIteration
pass
else:
pass
self._version = CLAClassifier.__VERSION__
@classmethod
def read(cls, proto):
classifier = object.__new__(cls)
classifier.steps = []
for step in proto.steps:
classifier.steps.append(step)
classifier.alpha = proto.alpha
classifier.actValueAlpha = proto.actValueAlpha
classifier._learnIteration = proto.learnIteration
classifier._recordNumMinusLearnIteration = (
proto.recordNumMinusLearnIteration)
classifier._patternNZHistory = deque(maxlen=max(classifier.steps) + 1)
patternNZHistoryProto = proto.patternNZHistory
learnIteration = classifier._learnIteration - len(patternNZHistoryProto) + 1
for i in xrange(len(patternNZHistoryProto)):
classifier._patternNZHistory.append((learnIteration,
list(patternNZHistoryProto[i])))
learnIteration += 1
classifier._activeBitHistory = dict()
activeBitHistoryProto = proto.activeBitHistory
for i in xrange(len(activeBitHistoryProto)):
stepBitHistories = activeBitHistoryProto[i]
nSteps = stepBitHistories.steps
for indexBitHistoryProto in stepBitHistories.bitHistories:
bit = indexBitHistoryProto.index
bitHistory = BitHistory.read(indexBitHistoryProto.history)
classifier._activeBitHistory[(bit, nSteps)] = bitHistory
classifier._maxBucketIdx = proto.maxBucketIdx
classifier._actualValues = []
for actValue in proto.actualValues:
if actValue == 0:
classifier._actualValues.append(None)
else:
classifier._actualValues.append(actValue)
classifier._version = proto.version
classifier.verbosity = proto.verbosity
return classifier
def write(self, proto):
stepsProto = proto.init("steps", len(self.steps))
for i in xrange(len(self.steps)):
stepsProto[i] = self.steps[i]
proto.alpha = self.alpha
proto.actValueAlpha = self.actValueAlpha
proto.learnIteration = self._learnIteration
proto.recordNumMinusLearnIteration = self._recordNumMinusLearnIteration
patternNZHistory = []
for (iteration, learnPatternNZ) in self._patternNZHistory:
patternNZHistory.append(learnPatternNZ)
proto.patternNZHistory = patternNZHistory
i = 0
activeBitHistoryProtos = proto.init("activeBitHistory",
len(self._activeBitHistory))
if len(self._activeBitHistory) > 0:
for nSteps in self.steps:
stepBitHistory = {bit: self._activeBitHistory[(bit, step)]
for (bit, step) in self._activeBitHistory.keys()
if step == nSteps}
stepBitHistoryProto = activeBitHistoryProtos[i]
stepBitHistoryProto.steps = nSteps
indexBitHistoryListProto = stepBitHistoryProto.init("bitHistories",
len(stepBitHistory))
j = 0
for indexBitHistory in stepBitHistory:
indexBitHistoryProto = indexBitHistoryListProto[j]
indexBitHistoryProto.index = indexBitHistory
bitHistoryProto = indexBitHistoryProto.history
stepBitHistory[indexBitHistory].write(bitHistoryProto)
j += 1
i += 1
proto.maxBucketIdx = self._maxBucketIdx
actualValuesProto = proto.init("actualValues", len(self._actualValues))
for i in xrange(len(self._actualValues)):
if self._actualValues[i] is not None:
actualValuesProto[i] = self._actualValues[i]
else:
actualValuesProto[i] = 0
proto.version = self._version
proto.verbosity = self.verbosity
|
runt18/nupic
|
src/nupic/algorithms/CLAClassifier.py
|
Python
|
agpl-3.0
| 25,671
| 0.006544
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, as more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
__all__ = [
'TimestampCombiner',
'WindowFn',
'BoundedWindow',
'IntervalWindow',
'TimestampedValue',
'GlobalWindow',
'NonMergingWindowFn',
'GlobalWindows',
'FixedWindows',
'SlidingWindows',
'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
"""Determines how output timestamps of grouping operations are assigned."""
OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
# TODO(robertwb): Add this to the runner API or remove it.
OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'
@staticmethod
def get_impl(timestamp_combiner, window_fn):
if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
return timeutil.OutputAtEndOfWindowImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
return timeutil.OutputAtEarliestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
return timeutil.OutputAtLatestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
else:
raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):
"""An abstract windowing function defining a basic assign and merge."""
class AssignContext(object):
"""Context passed to WindowFn.assign()."""
def __init__(self, timestamp, element=None, window=None):
self.timestamp = Timestamp.of(timestamp)
self.element = element
self.window = window
@abc.abstractmethod
def assign(self, assign_context):
"""Associates windows to an element.
Arguments:
assign_context: Instance of AssignContext.
Returns:
An iterable of BoundedWindow.
"""
raise NotImplementedError
class MergeContext(object):
"""Context passed to WindowFn.merge() to perform merging, if any."""
def __init__(self, windows):
self.windows = list(windows)
def merge(self, to_be_merged, merge_result):
raise NotImplementedError
@abc.abstractmethod
def merge(self, merge_context):
"""Returns a window that is the result of merging a set of windows."""
raise NotImplementedError
def is_merging(self):
"""Returns whether this WindowFn merges windows."""
return True
@abc.abstractmethod
def get_window_coder(self):
raise NotImplementedError
def get_transformed_output_time(self, window, input_timestamp): # pylint: disable=unused-argument
"""Given input time and output window, returns output time for window.
If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
Windowing, the output timestamp for the given window will be the earliest
of the timestamps returned by get_transformed_output_time() for elements
of the window.
Arguments:
window: Output window of element.
input_timestamp: Input timestamp of element as a timeutil.Timestamp
object.
Returns:
Transformed timestamp.
"""
# By default, just return the input timestamp.
return input_timestamp
urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
"""A window for timestamps in range (-infinity, end).
Attributes:
end: End of window.
"""
def __init__(self, end):
self.end = Timestamp.of(end)
def max_timestamp(self):
return self.end.predecessor()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
# Order first by endpoint, then arbitrarily
return self.end != other.end or hash(self) != hash(other)
def __lt__(self, other):
if self.end != other.end:
return self.end < other.end
return hash(self) < hash(other)
def __le__(self, other):
if self.end != other.end:
return self.end <= other.end
return hash(self) <= hash(other)
def __gt__(self, other):
if self.end != other.end:
return self.end > other.end
return hash(self) > hash(other)
def __ge__(self, other):
if self.end != other.end:
return self.end >= other.end
return hash(self) >= hash(other)
def __hash__(self):
raise NotImplementedError
def __repr__(self):
return '[?, %s)' % float(self.end)
class IntervalWindow(BoundedWindow):
"""A window for timestamps in range [start, end).
Attributes:
start: Start of window as seconds since Unix epoch.
end: End of window as seconds since Unix epoch.
"""
def __init__(self, start, end):
super(IntervalWindow, self).__init__(end)
self.start = Timestamp.of(start)
def __hash__(self):
return hash((self.start, self.end))
def __eq__(self, other):
return (self.start == other.start
and self.end == other.end
and type(self) == type(other))
def __ne__(self, other):
return not self == other
def __repr__(self):
return '[%s, %s)' % (float(self.start), float(self.end))
def intersects(self, other):
return other.start < self.end or self.start < other.end
def union(self, other):
return IntervalWindow(
min(self.start, other.start), max(self.end, other.end))
@total_ordering
class TimestampedValue(object):
"""A timestamped value having a value and a timestamp.
Attributes:
value: The underlying value.
timestamp: Timestamp associated with the value as seconds since Unix epoch.
"""
def __init__(self, value, timestamp):
self.value = value
self.timestamp = Timestamp.of(timestamp)
def __eq__(self, other):
return (type(self) == type(other)
and self.value == other.value
and self.timestamp == other.timestamp)
def __hash__(self):
return hash((self.value, self.timestamp))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if type(self) != type(other):
return type(self).__name__ < type(other).__name__
if self.value != other.value:
return self.value < other.value
return self.timestamp < other.timestamp
class GlobalWindow(BoundedWindow):
"""The default window into which all data is placed (via GlobalWindows)."""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(GlobalWindow, cls).__new__(cls)
return cls._instance
def __init__(self):
super(GlobalWindow, self).__init__(GlobalWindow._getTimestampFromProto())
self.start = MIN_TIMESTAMP
def __repr__(self):
return 'GlobalWindow'
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
# Global windows are always and only equal to each other.
return self is other or type(self) is type(other)
def __ne__(self, other):
return not self == other
@staticmethod
def _getTimestampFromProto():
ts_millis = int(
common_urns.constants.GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS.constant)
return Timestamp(micros=ts_millis*1000)
class NonMergingWindowFn(WindowFn):
def is_merging(self):
return False
def merge(self, merge_context):
pass # No merging.
class GlobalWindows(NonMergingWindowFn):
"""A windowing function that assigns everything to one global window."""
@classmethod
def windowed_value(cls, value, timestamp=MIN_TIMESTAMP):
return WindowedValue(value, timestamp, (GlobalWindow(),))
def assign(self, assign_context):
return [GlobalWindow()]
def get_window_coder(self):
return coders.GlobalWindowCoder()
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
    # GlobalWindows instances are always and only equal to each other.
return self is other or type(self) is type(other)
def __ne__(self, other):
return not self == other
def to_runner_api_parameter(self, context):
return common_urns.global_windows.urn, None
@urns.RunnerApiFn.register_urn(common_urns.global_windows.urn, None)
def from_runner_api_parameter(unused_fn_parameter, unused_context):
return GlobalWindows()
class FixedWindows(NonMergingWindowFn):
"""A windowing function that assigns each element to one time interval.
The attributes size and offset determine in what time interval a timestamp
will be slotted. The time intervals have the following formula:
[N * size + offset, (N + 1) * size + offset)
Attributes:
size: Size of the window as seconds.
offset: Offset of this window as seconds since Unix epoch. Windows start at
t=N * size + offset where t=0 is the epoch. The offset must be a value
in range [0, size). If it is not it will be normalized to this range.
"""
def __init__(self, size, offset=0):
if size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.size = Duration.of(size)
self.offset = Timestamp.of(offset) % self.size
def assign(self, context):
timestamp = context.timestamp
start = timestamp - (timestamp - self.offset) % self.size
return [IntervalWindow(start, start + self.size)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def __eq__(self, other):
if type(self) == type(other) == FixedWindows:
return self.size == other.size and self.offset == other.offset
def __hash__(self):
return hash((self.size, self.offset))
def __ne__(self, other):
return not self == other
def to_runner_api_parameter(self, context):
return (common_urns.fixed_windows.urn,
standard_window_fns_pb2.FixedWindowsPayload(
size=proto_utils.from_micros(
duration_pb2.Duration, self.size.micros),
offset=proto_utils.from_micros(
timestamp_pb2.Timestamp, self.offset.micros)))
@urns.RunnerApiFn.register_urn(
common_urns.fixed_windows.urn,
standard_window_fns_pb2.FixedWindowsPayload)
def from_runner_api_parameter(fn_parameter, unused_context):
return FixedWindows(
size=Duration(micros=fn_parameter.size.ToMicroseconds()),
offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()))
class SlidingWindows(NonMergingWindowFn):
"""A windowing function that assigns each element to a set of sliding windows.
The attributes size and offset determine in what time interval a timestamp
will be slotted. The time intervals have the following formula:
[N * period + offset, N * period + offset + size)
Attributes:
size: Size of the window as seconds.
period: Period of the windows as seconds.
offset: Offset of this window as seconds since Unix epoch. Windows start at
t=N * period + offset where t=0 is the epoch. The offset must be a value
in range [0, period). If it is not it will be normalized to this range.
"""
def __init__(self, size, period, offset=0):
if size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.size = Duration.of(size)
self.period = Duration.of(period)
self.offset = Timestamp.of(offset) % period
def assign(self, context):
timestamp = context.timestamp
start = timestamp - ((timestamp - self.offset) % self.period)
return [
IntervalWindow(Timestamp(micros=s), Timestamp(micros=s) + self.size)
for s in range(start.micros, timestamp.micros - self.size.micros,
-self.period.micros)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def __eq__(self, other):
if type(self) == type(other) == SlidingWindows:
return (self.size == other.size
and self.offset == other.offset
and self.period == other.period)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.offset, self.period))
def to_runner_api_parameter(self, context):
return (common_urns.sliding_windows.urn,
standard_window_fns_pb2.SlidingWindowsPayload(
size=proto_utils.from_micros(
duration_pb2.Duration, self.size.micros),
offset=proto_utils.from_micros(
timestamp_pb2.Timestamp, self.offset.micros),
period=proto_utils.from_micros(
duration_pb2.Duration, self.period.micros)))
@urns.RunnerApiFn.register_urn(
common_urns.sliding_windows.urn,
standard_window_fns_pb2.SlidingWindowsPayload)
def from_runner_api_parameter(fn_parameter, unused_context):
return SlidingWindows(
size=Duration(micros=fn_parameter.size.ToMicroseconds()),
offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()),
period=Duration(micros=fn_parameter.period.ToMicroseconds()))
class Sessions(WindowFn):
"""A windowing function that groups elements into sessions.
A session is defined as a series of consecutive events
separated by a specified gap size.
Attributes:
gap_size: Size of the gap between windows as floating-point seconds.
"""
def __init__(self, gap_size):
if gap_size <= 0:
      raise ValueError('The gap_size parameter must be strictly positive.')
self.gap_size = Duration.of(gap_size)
def assign(self, context):
timestamp = context.timestamp
return [IntervalWindow(timestamp, timestamp + self.gap_size)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def merge(self, merge_context):
to_merge = []
end = MIN_TIMESTAMP
for w in sorted(merge_context.windows, key=lambda w: w.start):
if to_merge:
if end > w.start:
to_merge.append(w)
if w.end > end:
end = w.end
else:
if len(to_merge) > 1:
merge_context.merge(to_merge,
IntervalWindow(to_merge[0].start, end))
to_merge = [w]
end = w.end
else:
to_merge = [w]
end = w.end
if len(to_merge) > 1:
merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))
def __eq__(self, other):
if type(self) == type(other) == Sessions:
return self.gap_size == other.gap_size
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.gap_size)
def to_runner_api_parameter(self, context):
return (common_urns.session_windows.urn,
standard_window_fns_pb2.SessionsPayload(
gap_size=proto_utils.from_micros(
duration_pb2.Duration, self.gap_size.micros)))
@urns.RunnerApiFn.register_urn(
common_urns.session_windows.urn,
standard_window_fns_pb2.SessionsPayload)
def from_runner_api_parameter(fn_parameter, unused_context):
return Sessions(
gap_size=Duration(micros=fn_parameter.gap_size.ToMicroseconds()))
|
charlesccychen/beam
|
sdks/python/apache_beam/transforms/window.py
|
Python
|
apache-2.0
| 18,027
| 0.007544
|
# Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
from gettext import ngettext
import locale
import logging
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import Icon, CellRendererIcon
from jarabe.controlpanel.sectionview import SectionView
from jarabe.model.update import updater
from jarabe.model import bundleregistry
_DEBUG_VIEW_ALL = True
class ActivityUpdater(SectionView):
def __init__(self, model, alerts):
SectionView.__init__(self)
self._model = updater.get_instance()
self._id_progresss = self._model.connect('progress',
self.__progress_cb)
self._id_updates = self._model.connect('updates-available',
self.__updates_available_cb)
self._id_error = self._model.connect('error',
self.__error_cb)
self._id_finished = self._model.connect('finished',
self.__finished_cb)
self.set_spacing(style.DEFAULT_SPACING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._top_label = Gtk.Label()
self._top_label.set_line_wrap(True)
self._top_label.set_justify(Gtk.Justification.LEFT)
self._top_label.props.xalign = 0
self.pack_start(self._top_label, False, True, 0)
self._top_label.show()
separator = Gtk.HSeparator()
self.pack_start(separator, False, True, 0)
separator.show()
self._bottom_label = Gtk.Label()
self._bottom_label.set_line_wrap(True)
self._bottom_label.set_justify(Gtk.Justification.LEFT)
self._bottom_label.props.xalign = 0
self._bottom_label.set_markup(
_('Software updates correct errors, eliminate security '
'vulnerabilities, and provide new features.'))
self.pack_start(self._bottom_label, False, True, 0)
self._bottom_label.show()
self._update_box = None
self._progress_pane = None
state = self._model.get_state()
if state in (updater.STATE_IDLE, updater.STATE_CHECKED):
self._refresh()
elif state in (updater.STATE_CHECKING, updater.STATE_DOWNLOADING,
updater.STATE_UPDATING):
self._switch_to_progress_pane()
self._progress_pane.set_message(_('Update in progress...'))
self.connect('destroy', self.__destroy_cb)
def __destroy_cb(self, widget):
self._model.disconnect(self._id_progresss)
self._model.disconnect(self._id_updates)
self._model.disconnect(self._id_error)
self._model.disconnect(self._id_finished)
self._model.clean()
def _switch_to_update_box(self, updates):
if self._update_box in self.get_children():
return
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box is None:
self._update_box = UpdateBox(updates)
self._update_box.refresh_button.connect(
'clicked',
self.__refresh_button_clicked_cb)
self._update_box.install_button.connect(
'clicked',
self.__install_button_clicked_cb)
self.pack_start(self._update_box, expand=True, fill=True, padding=0)
self._update_box.show()
def _switch_to_progress_pane(self):
if self._progress_pane in self.get_children():
return
if self._model.get_state() == updater.STATE_CHECKING:
top_message = _('Checking for updates...')
else:
top_message = _('Installing updates...')
self._top_label.set_markup('<big>%s</big>' % top_message)
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
if self._progress_pane is None:
self._progress_pane = ProgressPane()
self._progress_pane.cancel_button.connect(
'clicked',
self.__cancel_button_clicked_cb)
self.pack_start(
self._progress_pane, expand=True, fill=False, padding=0)
self._progress_pane.show()
def _clear_center(self):
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
def __progress_cb(self, model, state, bundle_name, progress):
if state == updater.STATE_CHECKING:
if bundle_name:
message = _('Checking %s...') % bundle_name
else:
message = _('Looking for updates...')
elif state == updater.STATE_DOWNLOADING:
message = _('Downloading %s...') % bundle_name
elif state == updater.STATE_UPDATING:
message = _('Updating %s...') % bundle_name
self._switch_to_progress_pane()
self._progress_pane.set_message(message)
self._progress_pane.set_progress(progress)
def __updates_available_cb(self, model, updates):
logging.debug('ActivityUpdater.__updates_available_cb')
available_updates = len(updates)
if not available_updates:
top_message = _('Your software is up-to-date')
else:
top_message = ngettext('You can install %s update',
'You can install %s updates',
available_updates)
top_message = top_message % available_updates
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
if not available_updates:
self._clear_center()
else:
self._switch_to_update_box(updates)
def __error_cb(self, model, updates):
logging.debug('ActivityUpdater.__error_cb')
top_message = _('Can\'t connect to the activity server')
self._top_label.set_markup('<big>%s</big>' % top_message)
self._bottom_label.set_markup(
_('Verify your connection to internet and try again, '
'or try again later'))
self._clear_center()
def __refresh_button_clicked_cb(self, button):
self._refresh()
def _refresh(self):
self._model.check_updates()
def __install_button_clicked_cb(self, button):
self._model.update(self._update_box.get_bundles_to_update())
def __cancel_button_clicked_cb(self, button):
self._model.cancel()
def __finished_cb(self, model, installed_updates, failed_updates,
cancelled):
num_installed = len(installed_updates)
logging.debug('ActivityUpdater.__finished_cb')
top_message = ngettext('%s update was installed',
'%s updates were installed', num_installed)
top_message = top_message % num_installed
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
self._clear_center()
def undo(self):
self._model.cancel()
class ProgressPane(Gtk.VBox):
"""Container which replaces the `ActivityPane` during refresh or
install."""
def __init__(self):
Gtk.VBox.__init__(self)
self.set_spacing(style.DEFAULT_PADDING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._progress = Gtk.ProgressBar()
self.pack_start(self._progress, True, True, 0)
self._progress.show()
self._label = Gtk.Label()
self._label.set_line_wrap(True)
self._label.set_property('xalign', 0.5)
self._label.modify_fg(Gtk.StateType.NORMAL,
style.COLOR_BUTTON_GREY.get_gdk_color())
self.pack_start(self._label, True, True, 0)
self._label.show()
alignment_box = Gtk.Alignment.new(xalign=0.5, yalign=0.5,
xscale=0, yscale=0)
self.pack_start(alignment_box, True, True, 0)
alignment_box.show()
self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CANCEL)
alignment_box.add(self.cancel_button)
self.cancel_button.show()
def set_message(self, message):
self._label.set_text(message)
def set_progress(self, fraction):
self._progress.props.fraction = fraction
class UpdateBox(Gtk.VBox):
def __init__(self, updates):
Gtk.VBox.__init__(self)
self.set_spacing(style.DEFAULT_PADDING)
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(
Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.pack_start(scrolled_window, True, True, 0)
scrolled_window.show()
self._update_list = UpdateList(updates)
self._update_list.props.model.connect('row-changed',
self.__row_changed_cb)
scrolled_window.add(self._update_list)
self._update_list.show()
bottom_box = Gtk.HBox()
bottom_box.set_spacing(style.DEFAULT_SPACING)
self.pack_start(bottom_box, False, True, 0)
bottom_box.show()
self._size_label = Gtk.Label()
self._size_label.props.xalign = 0
self._size_label.set_justify(Gtk.Justification.LEFT)
bottom_box.pack_start(self._size_label, True, True, 0)
self._size_label.show()
self.refresh_button = Gtk.Button(stock=Gtk.STOCK_REFRESH)
bottom_box.pack_start(self.refresh_button, False, True, 0)
self.refresh_button.show()
self.install_button = Gtk.Button(_('Install selected'))
self.install_button.props.image = Icon(
icon_name='emblem-downloads',
pixel_size=style.SMALL_ICON_SIZE)
bottom_box.pack_start(self.install_button, False, True, 0)
self.install_button.show()
self._update_total_size_label()
def __row_changed_cb(self, list_model, path, iterator):
self._update_total_size_label()
self._update_install_button()
def _update_total_size_label(self):
total_size = 0
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
total_size += row[UpdateListModel.SIZE]
markup = _('Download size: %s') % _format_size(total_size)
self._size_label.set_markup(markup)
def _update_install_button(self):
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
self.install_button.props.sensitive = True
return
self.install_button.props.sensitive = False
def get_bundles_to_update(self):
bundles_to_update = []
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
bundles_to_update.append(row[UpdateListModel.BUNDLE_ID])
return bundles_to_update
class UpdateList(Gtk.TreeView):
def __init__(self, updates):
list_model = UpdateListModel(updates)
Gtk.TreeView.__init__(self, list_model)
self.set_reorderable(False)
self.set_enable_search(False)
self.set_headers_visible(False)
toggle_renderer = Gtk.CellRendererToggle()
toggle_renderer.props.activatable = True
toggle_renderer.props.xpad = style.DEFAULT_PADDING
toggle_renderer.props.indicator_size = style.zoom(26)
toggle_renderer.connect('toggled', self.__toggled_cb)
toggle_column = Gtk.TreeViewColumn()
toggle_column.pack_start(toggle_renderer, True)
toggle_column.add_attribute(toggle_renderer, 'active',
UpdateListModel.SELECTED)
self.append_column(toggle_column)
icon_renderer = CellRendererIcon(self)
icon_renderer.props.width = style.STANDARD_ICON_SIZE
icon_renderer.props.height = style.STANDARD_ICON_SIZE
icon_renderer.props.size = style.STANDARD_ICON_SIZE
icon_renderer.props.xpad = style.DEFAULT_PADDING
icon_renderer.props.ypad = style.DEFAULT_PADDING
icon_renderer.props.stroke_color = style.COLOR_TOOLBAR_GREY.get_svg()
icon_renderer.props.fill_color = style.COLOR_TRANSPARENT.get_svg()
icon_column = Gtk.TreeViewColumn()
icon_column.pack_start(icon_renderer, True)
icon_column.add_attribute(icon_renderer, 'file-name',
UpdateListModel.ICON_FILE_NAME)
self.append_column(icon_column)
text_renderer = Gtk.CellRendererText()
description_column = Gtk.TreeViewColumn()
description_column.pack_start(text_renderer, True)
description_column.add_attribute(text_renderer, 'markup',
UpdateListModel.DESCRIPTION)
self.append_column(description_column)
def __toggled_cb(self, cell_renderer, path):
row = self.props.model[path]
row[UpdateListModel.SELECTED] = not row[UpdateListModel.SELECTED]
class UpdateListModel(Gtk.ListStore):
BUNDLE_ID = 0
SELECTED = 1
ICON_FILE_NAME = 2
DESCRIPTION = 3
SIZE = 4
def __init__(self, updates):
Gtk.ListStore.__init__(self, str, bool, str, str, int)
registry = bundleregistry.get_registry()
for bundle_update in updates:
installed = registry.get_bundle(bundle_update.bundle_id)
row = [None] * 5
row[self.BUNDLE_ID] = bundle_update.bundle_id
row[self.SELECTED] = True
if installed:
row[self.ICON_FILE_NAME] = installed.get_icon()
else:
if bundle_update.icon_file_name is not None:
row[self.ICON_FILE_NAME] = bundle_update.icon_file_name
if installed:
details = _('From version %(current)s to %(new)s (Size: '
'%(size)s)')
details = details % \
{'current': installed.get_activity_version(),
'new': bundle_update.version,
'size': _format_size(bundle_update.size)}
else:
details = _('Version %(version)s (Size: %(size)s)')
details = details % \
{'version': bundle_update.version,
'size': _format_size(bundle_update.size)}
row[self.DESCRIPTION] = '<b>%s</b>\n%s' % \
(bundle_update.name, details)
row[self.SIZE] = bundle_update.size
self.append(row)
def _format_size(size):
"""Convert a given size in bytes to a nicer better readable unit"""
if size == 0:
# TRANS: download size is 0
return _('None')
elif size < 1024:
# TRANS: download size of very small updates
return _('1 KB')
elif size < 1024 * 1024:
# TRANS: download size of small updates, e.g. '250 KB'
return locale.format_string(_('%.0f KB'), size / 1024.0)
else:
# TRANS: download size of updates, e.g. '2.3 MB'
return locale.format_string(_('%.1f MB'), size / 1024.0 / 1024)
|
rparrapy/sugar
|
extensions/cpsection/updater/view.py
|
Python
|
gpl-2.0
| 16,181
| 0
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","armor_mandalorian_belt")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.py
|
Python
|
mit
| 509
| 0.043222
|
#
# Authors:
# Pavel Brezina <pbrezina@redhat.com>
#
# Copyright (C) 2017 Red Hat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from collections import OrderedDict
import xml.etree.ElementTree as etree
class Introspectable:
class Element(object):
""" This is a basic introspectable object. This class will make
sure that the given xml element is of correct type and provide
some helper functions to simplify work of the children.
Children objects must implement TagName attribute, which contains
the name of the expected xml tag.
All introspectable objects contain the following properties:
- name : str -- name of the object
- annotations : OrderedDict -- available annotations
"""
def __init__(self, element):
self.check(element, self.TagName)
self.element = element
self.name = element.attrib["name"]
self.annotations = self.find(SBus.Annotation)
def find(self, object_class):
return Introspectable.FindElements(self.element, object_class)
def check(self, element, tagname):
if element.tag != tagname:
raise ValueError('Unexpected tag name "%s" (%s expected)!'
% (element.tag, tagname))
if "name" not in element.attrib:
raise ValueError('Missing attribute name!')
def getAttr(self, name, default_value):
return self.element.attrib.get(name, default_value)
def getExistingAttr(self, name):
if name not in self.element.attrib:
raise ValueError('Element %s name="%s" is missing attribute %s'
% (self.TagName, self.name, name))
return self.element.attrib[name]
class Invokable(Element):
""" This is a base class for invokable objects -- methods and signals.
        Invokable objects have the following additional attributes:
        - input : OrderedDict -- input signature and arguments
- output : OrderedDict -- output signature and arguments
"""
def __init__(self, element):
super(Introspectable.Invokable, self).__init__(element)
self.key = self.getAttr("key", None)
self.arguments = self.find(SBus.Argument)
input = self.getInputArguments()
output = self.getOutputArguments()
self.input = SBus.Signature(input, self.annotations)
self.output = SBus.Signature(output, self.annotations)
return
def getInputArguments(self):
return self.getArguments("in")
def getOutputArguments(self):
return self.getArguments("out")
def getArguments(self, type):
args = OrderedDict()
for name, arg in self.arguments.items():
if type == "in" and arg.isInput():
args[name] = arg
continue
if type == "out" and arg.isOutput():
args[name] = arg
continue
return args
@staticmethod
def Introspect(path):
root = etree.parse(path).getroot()
return Introspectable.FindElements(root, SBus.Interface)
@staticmethod
def FindElements(parent, object_class):
dict = OrderedDict()
for child in parent:
if child.tag != object_class.TagName:
continue
object = object_class(child)
if object.name in dict:
raise ValueError('%s name="%s" is already present '
'in the same parent element\n'
% (object_class.TagName, object.name))
dict[object.name] = object
"""
        Arguments can't be sorted and annotation order should be left to
        the author of the introspection. Otherwise we want to sort the dictionary
alphabetically based on keys.
"""
if object_class in [SBus.Argument, SBus.Annotation]:
return dict
return OrderedDict(sorted(dict.items()))
class SBus:
class Interface(Introspectable.Element):
TagName = "interface"
def __init__(self, element):
super(SBus.Interface, self).__init__(element)
self.methods = self.find(SBus.Method)
self.signals = self.find(SBus.Signal)
self.properties = self.find(SBus.Property)
return
class Method(Introspectable.Invokable):
TagName = "method"
def __init__(self, element):
super(SBus.Method, self).__init__(element)
class Signal(Introspectable.Invokable):
TagName = "signal"
def __init__(self, element):
super(SBus.Signal, self).__init__(element)
class Property(Introspectable.Invokable):
TagName = "property"
def __init__(self, element):
self.name = element.attrib["name"]
self.element = element
self.access = self.getExistingAttr("access")
self.type = self.getExistingAttr("type")
super(SBus.Property, self).__init__(element)
if self.key is not None:
raise ValueError('Keying is not supported on properties: %s '
% self.name)
def getInputArguments(self):
if not self.isWritable():
return {}
return {"value": SBus.Argument.Create("value", self.type, "in")}
def getOutputArguments(self):
if not self.isReadable():
return {}
return {"value": SBus.Argument.Create("value", self.type, "out")}
def isReadable(self):
return self.access == "read" or self.access == "readwrite"
def isWritable(self):
return self.access == "write" or self.access == "readwrite"
class Annotation(Introspectable.Element):
TagName = "annotation"
def __init__(self, element):
super(SBus.Annotation, self).__init__(element)
self.value = self.getAttr("value", None)
return
@staticmethod
def Find(annotations, name, default_value):
if name in annotations:
annotation = annotations[name]
if annotation.value is None:
return default_value
return annotation.value
return default_value
@staticmethod
def FindBool(annotations, name, Assume=False):
assume = "true" if Assume else "false"
value = SBus.Annotation.Find(annotations, name, assume)
if value.lower() == "true":
return True
else:
return False
@staticmethod
def CheckIfTrue(names, annotations):
for name in names:
if SBus.Annotation.FindBool(annotations, name, False):
return True
return False
@staticmethod
def CheckIfFalse(names, annotations):
for name in names:
if not SBus.Annotation.FindBool(annotations, name, True):
return False
return True
@staticmethod
def AtleastOneIsSet(names, annotations):
for name in names:
value = SBus.Annotation.Find(annotations, name, None)
if value is not None:
return True
return False
class Argument(Introspectable.Element):
TagName = "arg"
def __init__(self, element, Name=None, Type=None, Direction=None,
Key=None):
if element is None:
self.element = None
self.name = Name
self.signature = Type
self.direction = Direction
self.key = Key
return
super(SBus.Argument, self).__init__(element)
self.signature = self.getExistingAttr("type")
self.direction = self.getAttr("direction", "in")
self.key = self.getAttr("key", None)
def isInput(self):
return self.direction == "in"
def isOutput(self):
return not self.isInput()
@staticmethod
def Create(name, type, direction):
return SBus.Argument(element=None,
Name=name,
Type=type,
Direction=direction)
class Signature:
def __init__(self, args, annotations):
self.annotations = annotations
self.signature = self.getSignature(args)
self.arguments = args
def getSignature(self, args):
signature = ""
for arg in args.values():
signature += arg.signature
return signature
|
fidencio/sssd
|
src/sbus/codegen/sbus_Introspection.py
|
Python
|
gpl-3.0
| 9,617
| 0
|
from src.platform.jboss.interfaces import JMXInterface
class FPrint(JMXInterface):
def __init__(self):
super(FPrint, self).__init__()
self.version = "5.0"
|
GHubgenius/clusterd
|
src/platform/jboss/fingerprints/JBoss5JMX.py
|
Python
|
mit
| 182
| 0.005495
|
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^profiles/', include('easy_profiles.urls')),
(r'^admin/', include(admin.site.urls)),
)
|
pydanny/django-easy-profiles
|
test_project/urls.py
|
Python
|
mit
| 223
| 0.004484
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
import os
import sys
import urllib2
import re
import shutil
from PyQt4 import QtCore, QtGui
from util import strtodate, datetostr, now, PREFSFILENAME
import util
import logging
from vault import luaparser
import warnings
import cStringIO
import zipfile
logger = logging.getLogger(__name__)
MODFOLDER = os.path.join(util.PERSONAL_DIR, "My Games", "Gas Powered Games", "Supreme Commander Forged Alliance", "Mods")
MODVAULT_DOWNLOAD_ROOT = "http://content.faforever.com/faf/vault/"
installedMods = [] # This is a global list that should be kept intact. So it should be cleared using installedMods[:] = []
class ModInfo(object):
def __init__(self, **kwargs):
self.name = "Not filled in"
self.version = 0
self.folder = ""
self.__dict__.update(kwargs)
def setFolder(self, localfolder):
self.localfolder = localfolder
self.absfolder = os.path.join(MODFOLDER, localfolder)
self.mod_info = os.path.join(self.absfolder, "mod_info.lua")
def update(self):
self.setFolder(self.localfolder)
if isinstance(self.version, int):
self.totalname = "%s v%d" % (self.name, self.version)
elif isinstance(self.version, float):
s = str(self.version).rstrip("0")
self.totalname = "%s v%s" % (self.name, s)
else:
raise TypeError, "version is not an int or float"
def to_dict(self):
out = {}
for k,v in self.__dict__.items():
if isinstance(v, (unicode, str, int, float)) and not k[0] == '_':
out[k] = v
return out
def __str__(self):
return '%s in "%s"' % (self.totalname, self.localfolder)
def getAllModFolders(): #returns a list of names of installed mods
mods = []
if os.path.isdir(MODFOLDER):
mods = os.listdir(MODFOLDER)
return mods
def getInstalledMods():
installedMods[:] = []
for f in getAllModFolders():
m = None
if os.path.isdir(os.path.join(MODFOLDER,f)):
try:
m = getModInfoFromFolder(f)
except:
continue
else:
try:
m = getModInfoFromZip(f)
except:
continue
if m:
installedMods.append(m)
logger.debug("getting installed mods. Count: %d" % len(installedMods))
return installedMods
def modToFilename(mod):
return mod.absfolder
def isModFolderValid(folder):
return os.path.exists(os.path.join(folder,"mod_info.lua"))
def iconPathToFull(path):
"""
    Converts a path supplied in the icon field of mod_info into an absolute path to that file.
So "/mods/modname/data/icons/icon.dds" becomes
"C:\Users\user\Documents\My Games\Gas Powered Games\Supreme Commander Forged Alliance\Mods\modname\data\icons\icon.dds"
"""
if not (path.startswith("/mods") or path.startswith("mods")):
logger.info("Something went wrong parsing the path %s" % path)
return ""
return os.path.join(MODFOLDER, os.path.normpath(path[5+int(path[0]=="/"):])) #yay for dirty hacks
def fullPathToIcon(path):
p = os.path.normpath(os.path.abspath(path))
return p[len(MODFOLDER)-5:].replace('\\','/')
def getIcon(name):
img = os.path.join(util.CACHE_DIR, name)
if os.path.isfile(img):
logger.debug("Using cached preview image for: " + name)
return img
return None
def getModInfo(modinfofile):
modinfo = modinfofile.parse({"name":"name","uid":"uid","version":"version","author":"author",
"description":"description","ui_only":"ui_only",
"icon":"icon"},
{"version":"1","ui_only":"false","description":"","icon":"","author":""})
modinfo["ui_only"] = (modinfo["ui_only"] == 'true')
if not "uid" in modinfo:
logger.warn("Couldn't find uid for mod %s" % modinfo["name"])
return None
#modinfo["uid"] = modinfo["uid"].lower()
try:
modinfo["version"] = int(modinfo["version"])
except:
try:
modinfo["version"] = float(modinfo["version"])
except:
modinfo["version"] = 0
logger.warn("Couldn't find version for mod %s" % modinfo["name"])
return (modinfofile, modinfo)
def parseModInfo(folder):
if not isModFolderValid(folder):
return None
modinfofile = luaparser.luaParser(os.path.join(folder,"mod_info.lua"))
return getModInfo(modinfofile)
modCache = {}
def getModInfoFromZip(zfile):
'''get the mod info from a zip file'''
if zfile in modCache:
return modCache[zfile]
r = None
if zipfile.is_zipfile(os.path.join(MODFOLDER,zfile)) :
zip = zipfile.ZipFile(os.path.join(MODFOLDER,zfile), "r", zipfile.ZIP_DEFLATED)
if zip.testzip() == None :
for member in zip.namelist() :
filename = os.path.basename(member)
if not filename:
continue
if filename == "mod_info.lua":
modinfofile = luaparser.luaParser("mod_info.lua")
modinfofile.iszip = True
modinfofile.zip = zip
r = getModInfo(modinfofile)
if r == None:
logger.debug("mod_info.lua not found in zip file %s" % zfile)
return None
f, info = r
if f.error:
logger.debug("Error in parsing mod_info.lua in %s" % zfile)
return None
m = ModInfo(**info)
print zfile
m.setFolder(zfile)
m.update()
modCache[zfile] = m
return m
def getModInfoFromFolder(modfolder): # modfolder must be local to MODFOLDER
if modfolder in modCache:
return modCache[modfolder]
r = parseModInfo(os.path.join(MODFOLDER,modfolder))
if r == None:
logger.debug("mod_info.lua not found in %s folder" % modfolder)
return None
f, info = r
if f.error:
logger.debug("Error in parsing %s/mod_info.lua" % modfolder)
return None
m = ModInfo(**info)
m.setFolder(modfolder)
m.update()
modCache[modfolder] = m
return m
def getActiveMods(uimods=None): # returns a list of ModInfo's containing information of the mods
"""uimods:
None - return all active mods
True - only return active UI Mods
False - only return active non-UI Mods
"""
active_mods = []
try:
if not os.path.exists(PREFSFILENAME):
logger.info("No game.prefs file found")
return []
l = luaparser.luaParser(PREFSFILENAME)
l.loweringKeys = False
modlist = l.parse({"active_mods":"active_mods"},{"active_mods":{}})["active_mods"]
if l.error:
logger.info("Error in reading the game.prefs file")
return []
uids = [uid for uid,b in modlist.items() if b == 'true']
#logger.debug("Active mods detected: %s" % str(uids))
allmods = []
for m in installedMods:
if ((uimods == True and m.ui_only) or (uimods == False and not m.ui_only) or uimods == None):
allmods.append(m)
active_mods = [m for m in allmods if m.uid in uids]
#logger.debug("Allmods uids: %s\n\nActive mods uids: %s\n" % (", ".join([mod.uid for mod in allmods]), ", ".join([mod.uid for mod in allmods])))
return active_mods
except:
return []
def setActiveMods(mods, keepuimods=True): #uimods works the same as in getActiveMods
"""
keepuimods:
None: Replace all active mods with 'mods'
    True: Keep the UI mods that are already activated
    False: Keep only the non-UI mods that are already activated
So set it True if you want to set gameplay mods, and False if you want to set UI mods.
"""
if keepuimods != None:
keepTheseMods = getActiveMods(keepuimods) # returns the active UI mods if True, the active non-ui mods if False
else:
keepTheseMods = []
allmods = keepTheseMods + mods
s = "active_mods = {\n"
for mod in allmods:
s += "['%s'] = true,\n" % str(mod.uid)
s += "}"
try:
f = open(PREFSFILENAME, 'r')
data = f.read()
except:
logger.info("Couldn't read the game.prefs file")
return False
else:
f.close()
if re.search("active_mods\s*=\s*{.*?}", data, re.S):
data = re.sub("active_mods\s*=\s*{.*?}",s,data,1,re.S)
else:
data += "\n" + s
try:
f = open(PREFSFILENAME, 'w')
f.write(data)
except:
logger.info("Cound't write to the game.prefs file")
return False
else:
f.close()
return True
def updateModInfo(mod, info): #should probably not be used.
"""
Updates a mod_info.lua file with new data.
    Because those files can contain arbitrary Lua, this function can fail if the file is complicated enough.
    If every value is on a separate line, however, this should work.
"""
logger.warn("updateModInfo called. Probably not a good idea")
fname = mod.mod_info
try:
f = open(fname, 'r')
data = f.read()
except:
logger.info("Something went wrong reading %s" % fname)
return False
else:
f.close()
for k,v in info.items():
if type(v) in (bool,int): val = str(v).lower()
if type(v) in (unicode, str): val = '"' + v.replace('"', '\\"') + '"'
if re.search(r'^\s*'+k, data , re.M):
data = re.sub(r'^\s*' + k + r'\s*=.*$',"%s = %s" % (k,val), data, 1, re.M)
else:
if data[-1] != '\n': data += '\n'
data += "%s = %s" % (k, val)
try:
f = open(fname, 'w')
f.write(data)
except:
logger.info("Something went wrong writing to %s" % fname)
return False
else:
f.close()
return True
def generateThumbnail(sourcename, destname):
"""Given a dds file, generates a png file (or whatever the extension of dest is"""
logger.debug("Creating png thumnail for %s to %s" % (sourcename, destname))
try:
img = bytearray()
buf = bytearray(16)
file = open(sourcename,"rb")
file.seek(128) # skip header
while file.readinto(buf):
img += buf[:3] + buf[4:7] + buf[8:11] + buf[12:15]
file.close()
size = int((len(img)/3) ** (1.0/2))
imageFile = QtGui.QImage(img,size,size,QtGui.QImage.Format_RGB888).rgbSwapped().scaled(100,100,transformMode = QtCore.Qt.SmoothTransformation)
imageFile.save(destname)
except IOError:
return False
if os.path.isfile(destname):
return True
else:
return False
def downloadMod(item): #most of this function is stolen from fa.maps.downloadMap
if isinstance(item,basestring):
link = MODVAULT_DOWNLOAD_ROOT + urllib2.quote(item)
logger.debug("Getting mod from: " + link)
else:
link = item.link
logger.debug("Getting mod from: " + link)
link = urllib2.quote(link, "http://")
progress = QtGui.QProgressDialog()
progress.setCancelButtonText("Cancel")
progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
progress.setAutoClose(False)
progress.setAutoReset(False)
try:
req = urllib2.Request(link, headers={'User-Agent' : "FAF Client"})
zipwebfile = urllib2.urlopen(req)
meta = zipwebfile.info()
file_size = int(meta.getheaders("Content-Length")[0])
progress.setMinimum(0)
progress.setMaximum(file_size)
progress.setModal(1)
progress.setWindowTitle("Downloading Mod")
progress.setLabelText(link)
progress.show()
#Download the file as a series of 8 KiB chunks, then uncompress it.
output = cStringIO.StringIO()
file_size_dl = 0
block_sz = 8192
while progress.isVisible():
read_buffer = zipwebfile.read(block_sz)
if not read_buffer:
break
file_size_dl += len(read_buffer)
output.write(read_buffer)
progress.setValue(file_size_dl)
progress.close()
if file_size_dl == file_size:
zfile = zipfile.ZipFile(output)
dirname = zfile.namelist()[0].split('/',1)[0]
if os.path.exists(os.path.join(MODFOLDER, dirname)):
oldmod = getModInfoFromFolder(dirname)
result = QtGui.QMessageBox.question(None, "Modfolder already exists",
"The mod is to be downloaded to the folder '%s'. This folder already exists and contains <b>%s</b>. Do you want to overwrite this mod?" % (dirname,oldmod.totalname), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if result == QtGui.QMessageBox.No:
return False
removeMod(oldmod)
zfile.extractall(MODFOLDER)
logger.debug("Successfully downloaded and extracted mod from: " + link)
return True
else:
logger.warn("Mod download cancelled for: " + link)
return False
except:
logger.warn("Mod download or extraction failed for: " + link)
if sys.exc_type is urllib2.HTTPError:
logger.warning("ModVault download failed with HTTPError, mod probably not in vault (or broken).")
QtGui.QMessageBox.information(None, "Mod not downloadable", "<b>This mod was not found in the vault (or is broken).</b><br/>You need to get it from somewhere else in order to use it." )
else:
logger.error("Download Exception", exc_info=sys.exc_info())
QtGui.QMessageBox.information(None, "Mod installation failed", "<b>This mod could not be installed (please report this map or bug).</b>")
return False
return True
def removeMod(mod):
logger.debug("removing mod %s" % mod.name)
real = None
for m in getInstalledMods():
if m.uid == mod.uid:
real = m
break
else:
logger.debug("Can't remove mod. Mod not found.")
return False
shutil.rmtree(real.absfolder)
if real.localfolder in modCache:
del modCache[real.localfolder]
installedMods.remove(real)
return True
#we don't update the installed mods, because the operating system takes
#some time registering the deleted folder.
|
HaraldWeber/client
|
src/modvault/utils.py
|
Python
|
gpl-3.0
| 15,457
| 0.012292
|
"""
series.py
:copyright: (c) 2014-2015 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
This shows how to use **SeriesReader** to get the data in various ways
But you can use them with **Reader** class as well
"""
import os
from pyexcel.ext import ods3
from pyexcel import SeriesReader
from pyexcel.utils import to_dict, to_array
from pyexcel.filters import OddRowFilter, EvenColumnFilter
from pyexcel import Writer
import json
def main(base_dir):
# print all in json
#
# Column 1 Column 2 Column 3
# 1 4 7
# 2 5 8
# 3 6 9
reader = SeriesReader(os.path.join(base_dir,"example_series.ods"))
data = to_dict(reader)
print(json.dumps(data))
# output:
# {"Column 2": [4.0, 5.0, 6.0], "Column 3": [7.0, 8.0, 9.0], "Column 1": [1.0, 2.0, 3.0]}
# get the column headers
print(reader.colnames)
# [u'Column 1', u'Column 2', u'Column 3']
# get the content in one dimensional array
data = to_array(reader.enumerate())
print(data)
# [1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0]
# get the content in one dimensional array
# in reverse order
data = to_array(reader.reverse())
print(data)
# get the content in one dimensional array
# but iterate it vertically
data = to_array(reader.vertical())
print(data)
# [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
# get the content in one dimensional array
# but iterate it vertically in revserse
# order
data = to_array(reader.rvertical())
print(data)
#[9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]
# get a two dimensional array
data = to_array(reader.rows())
print(data)
#[[1.0, 4.0, 7.0], [2.0, 5.0, 8.0], [3.0, 6.0, 9.0]]
# get a two dimensional array in reverse
# order
data = to_array(reader.rrows())
print(data)
# [[3.0, 6.0, 9.0], [2.0, 5.0, 8.0], [1.0, 4.0, 7.0]]
# get a two dimensional array but stack columns
data = to_array(reader.columns())
print(data)
# [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
# get a two dimensional array but stack columns
# in reverse order
data = to_array(reader.rcolumns())
print(data)
#[[7.0, 8.0, 9.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]]
# filter out odd rows and even columns
reader.filter(OddRowFilter())
reader.filter(EvenColumnFilter())
data = to_dict(reader)
print(data)
# {u'Column 3': [8.0], u'Column 1': [2.0]}
# and you can write the filtered results
# into a file
w = Writer("example_series_filter.xls")
w.write_reader(reader)
w.close()
if __name__ == '__main__':
main(os.getcwd())
|
lordakshaya/pyexcel
|
examples/example_usage_of_internal_apis/simple_usage/series.py
|
Python
|
bsd-3-clause
| 2,753
| 0.006902
|
#!/usr/bin/env python
print("Hello world!")
|
geography-munich/sciprog
|
material/sub/jrjohansson/scripts/hello-world.py
|
Python
|
apache-2.0
| 45
| 0
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import LinuxDevice, Parameter
class OdroidXU3LinuxDevice(LinuxDevice):
name = "odroidxu3_linux"
description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
core_modules = [
'odroidxu3-fan',
]
parameters = [
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
]
abi = 'armeabi'
|
ep1cman/workload-automation
|
wlauto/devices/linux/odroidxu3_linux/__init__.py
|
Python
|
apache-2.0
| 1,073
| 0.001864
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customers', '0009_recipient_type'),
]
operations = [
migrations.AlterModelOptions(
name='recipient',
options={'ordering': ['last_name'], 'verbose_name_plural': 'Recipients'},
),
]
|
davogler/POSTv3
|
customers/migrations/0010_auto_20170124_2322.py
|
Python
|
mit
| 412
| 0.002427
|
'''
Given: A positive integer N≤100000, a number x between 0 and 1, and a DNA string s of length at most 10 bp.
Return: The probability that if N random DNA strings having the same length as s are constructed with GC-content x
(see “Introduction to Random Strings”), then at least one of the strings equals s.
We allow for the same random string to be created more than once.
'''
# P(at least one of the strings equals to s) = 1 - P(none of the strings equals s)
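# Worked example (hypothetical values): for s = "AA", x = 0.4 and N = 1,
# P(A) = P(T) = (1 - 0.4) / 2 = 0.3, so P(a random string equals s) = 0.09
# and the answer is 1 - (1 - 0.09)**1 = 0.09.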
def random_motif_match(N, x, s):
s_construct = {"A": (1 - x) / 2,
"T": (1 - x) / 2,
"C": x / 2,
"G": x / 2}
prob = 1
# probability of exactly equals to s
for b in s:
prob *= s_construct[b]
return 1 - (1 - prob) ** N
if __name__ == "__main__":
with open("data/rosalind_rstr.txt", "r") as f:
lines = f.readlines()
N = int(lines[0].rstrip().split(" ")[0])
x = float(lines[0].rstrip().split(" ")[1])
s = lines[1].rstrip()
with open("data/output_rstr.txt", "w") as o:
o.write(str(random_motif_match(N, x, s)))
print(random_motif_match(N, x, s))
|
jr55662003/My_Rosalind_solution
|
RSTR.py
|
Python
|
gpl-3.0
| 1,176
| 0.008547
|
#! /usr/bin/env python
"""Show file statistics by extension."""
import os
import sys
class Stats:
def __init__(self):
self.stats = {}
def statargs(self, args):
for arg in args:
if os.path.isdir(arg):
self.statdir(arg)
elif os.path.isfile(arg):
self.statfile(arg)
else:
sys.stderr.write("Can't find %s\n" % file)
self.addstats("<???>", "unknown", 1)
def statdir(self, dir):
self.addstats("<dir>", "dirs", 1)
try:
names = os.listdir(dir)
except os.error, err:
sys.stderr.write("Can't list %s: %s\n" % (file, err))
self.addstats(ext, "unlistable", 1)
return
names.sort()
for name in names:
if name.startswith(".#"):
continue # Skip CVS temp files
if name.endswith("~"):
                continue  # Skip Emacs backup files
full = os.path.join(dir, name)
if os.path.islink(full):
self.addstats("<lnk>", "links", 1)
elif os.path.isdir(full):
self.statdir(full)
else:
self.statfile(full)
def statfile(self, file):
head, ext = os.path.splitext(file)
head, base = os.path.split(file)
if ext == base:
ext = "" # E.g. .cvsignore is deemed not to have an extension
ext = os.path.normcase(ext)
if not ext:
ext = "<none>"
self.addstats(ext, "files", 1)
try:
f = open(file, "rb")
except IOError, err:
sys.stderr.write("Can't open %s: %s\n" % (file, err))
self.addstats(ext, "unopenable", 1)
return
data = f.read()
f.close()
self.addstats(ext, "bytes", len(data))
if '\0' in data:
self.addstats(ext, "binary", 1)
return
if not data:
self.addstats(ext, "empty", 1)
#self.addstats(ext, "chars", len(data))
lines = data.splitlines()
self.addstats(ext, "lines", len(lines))
del lines
words = data.split()
self.addstats(ext, "words", len(words))
def addstats(self, ext, key, n):
d = self.stats.setdefault(ext, {})
d[key] = d.get(key, 0) + n
def report(self):
exts = self.stats.keys()
exts.sort()
# Get the column keys
columns = {}
for ext in exts:
columns.update(self.stats[ext])
cols = columns.keys()
cols.sort()
colwidth = {}
colwidth["ext"] = max([len(ext) for ext in exts])
minwidth = 6
self.stats["TOTAL"] = {}
for col in cols:
total = 0
cw = max(minwidth, len(col))
for ext in exts:
value = self.stats[ext].get(col)
if value is None:
w = 0
else:
w = len("%d" % value)
total += value
cw = max(cw, w)
cw = max(cw, len(str(total)))
colwidth[col] = cw
self.stats["TOTAL"][col] = total
exts.append("TOTAL")
for ext in exts:
self.stats[ext]["ext"] = ext
cols.insert(0, "ext")
def printheader():
for col in cols:
print "%*s" % (colwidth[col], col),
print
printheader()
for ext in exts:
for col in cols:
value = self.stats[ext].get(col, "")
print "%*s" % (colwidth[col], value),
print
printheader() # Another header at the bottom
def main():
args = sys.argv[1:]
if not args:
args = [os.curdir]
s = Stats()
s.statargs(args)
s.report()
if __name__ == "__main__":
main()
|
OS2World/APP-INTERNET-torpak_2
|
Tools/scripts/byext.py
|
Python
|
mit
| 3,894
| 0.002311
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
    :param strict: When True, keys other than the expected ones are not allowed.
    :return: None if the keys match the specification, otherwise an error message.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed."
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
return msg
def is_attr_set(attribute):
return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
msg = _validate_string(data, max_len=max_len)
if msg:
return msg
if not data.strip():
return _("'%s' Blank strings are not permitted") % data
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
if not isinstance(data, basestring):
msg = _("'%s' is not a valid string") % data
LOG.debug(msg)
return msg
if max_len is not None and len(data) > max_len:
msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
{'data': data, 'max_len': max_len})
LOG.debug(msg)
return msg
def _validate_boolean(data, valid_values=None):
try:
convert_to_boolean(data)
except n_exc.InvalidInput:
msg = _("'%s' is not a valid boolean value") % data
LOG.debug(msg)
return msg
def _validate_range(data, valid_values=None):
"""Check that integer value is within a range provided.
Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matters.
It is expected that the limits provided are valid integers or
the value None.
"""
min_value = valid_values[0]
max_value = valid_values[1]
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if min_value is not UNLIMITED and data < min_value:
msg = _("'%(data)s' is too small - must be at least "
"'%(limit)d'") % {'data': data, 'limit': min_value}
LOG.debug(msg)
return msg
if max_value is not UNLIMITED and data > max_value:
msg = _("'%(data)s' is too large - must be no larger than "
"'%(limit)d'") % {'data': data, 'limit': max_value}
LOG.debug(msg)
return msg
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if re.search('\s', data):
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
try:
valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
except Exception:
valid_mac = False
# TODO(arosen): The code in this file should be refactored
# so it catches the correct exceptions. _validate_no_whitespace
# raises AttributeError if data is None.
if not valid_mac:
msg = _("'%s' is not a valid MAC address") % data
LOG.debug(msg)
return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is None:
return
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
try:
netaddr.IPAddress(_validate_no_whitespace(data))
except Exception:
msg = _("'%s' is not a valid IP address") % data
LOG.debug(msg)
return msg
def _validate_ip_pools(data, valid_values=None):
"""Validate that start and end IP addresses are present.
In addition to this the IP addresses will also be validated
"""
if not isinstance(data, list):
msg = _("Invalid data format for IP pool: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['start', 'end']
for ip_pool in data:
msg = _verify_dict_keys(expected_keys, ip_pool)
if msg:
LOG.debug(msg)
return msg
for k in expected_keys:
msg = _validate_ip_address(ip_pool[k])
if msg:
LOG.debug(msg)
return msg
def _validate_fixed_ips(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for fixed IP: '%s'") % data
LOG.debug(msg)
return msg
ips = []
for fixed_ip in data:
if not isinstance(fixed_ip, dict):
msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
LOG.debug(msg)
return msg
if 'ip_address' in fixed_ip:
# Ensure that duplicate entries are not set - just checking IP
# suffices. Duplicate subnet_id's are legitimate.
fixed_ip_address = fixed_ip['ip_address']
if fixed_ip_address in ips:
msg = _("Duplicate IP address '%s'") % fixed_ip_address
else:
msg = _validate_ip_address(fixed_ip_address)
if msg:
LOG.debug(msg)
return msg
ips.append(fixed_ip_address)
if 'subnet_id' in fixed_ip:
msg = _validate_uuid(fixed_ip['subnet_id'])
if msg:
LOG.debug(msg)
return msg
def _validate_ip_or_hostname(host):
ip_err = _validate_ip_address(host)
if not ip_err:
return
name_err = _validate_hostname(host)
if not name_err:
return
msg = _("%(host)s is not a valid IP or hostname. Details: "
"%(ip_err)s, %(name_err)s") % {'ip_err': ip_err, 'host': host,
'name_err': name_err}
return msg
def _validate_nameservers(data, valid_values=None):
if not hasattr(data, '__iter__'):
msg = _("Invalid data format for nameserver: '%s'") % data
LOG.debug(msg)
return msg
hosts = []
for host in data:
# This may be an IP or a hostname
msg = _validate_ip_or_hostname(host)
if msg:
msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
'host': host, 'msg': msg}
return msg
if host in hosts:
msg = _("Duplicate nameserver '%s'") % host
LOG.debug(msg)
return msg
hosts.append(host)
def _validate_hostroutes(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for hostroute: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['destination', 'nexthop']
hostroutes = []
for hostroute in data:
msg = _verify_dict_keys(expected_keys, hostroute)
if msg:
LOG.debug(msg)
return msg
msg = _validate_subnet(hostroute['destination'])
if msg:
LOG.debug(msg)
return msg
msg = _validate_ip_address(hostroute['nexthop'])
if msg:
LOG.debug(msg)
return msg
if hostroute in hostroutes:
msg = _("Duplicate hostroute '%s'") % hostroute
LOG.debug(msg)
return msg
hostroutes.append(hostroute)
def _validate_ip_address_or_none(data, valid_values=None):
if data is None:
return None
return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
msg = None
try:
net = netaddr.IPNetwork(_validate_no_whitespace(data))
if '/' not in data:
msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": data,
"cidr": net.cidr}
else:
return
except Exception:
msg = _("'%s' is not a valid IP subnet") % data
if msg:
LOG.debug(msg)
return msg
def _validate_subnet_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
for item in data:
msg = _validate_subnet(item)
if msg:
return msg
def _validate_subnet_or_none(data, valid_values=None):
if data is None:
return
return _validate_subnet(data, valid_values)
def _validate_hostname(data):
# NOTE: An individual name regex instead of an entire FQDN was used
    # because it's easier to make correct. Feel free to replace with a
# full regex solution. The logic should validate that the hostname
# matches RFC 1123 (section 2.1) and RFC 952.
hostname_pattern = "[a-zA-Z0-9-]{1,63}$"
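    # For illustration (hypothetical input): 'host-1.example.org.' passes, while
    # '-leading-hyphen' or a single label longer than 63 characters fails.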
try:
# Trailing periods are allowed to indicate that a name is fully
# qualified per RFC 1034 (page 7).
trimmed = data if data[-1] != '.' else data[:-1]
if len(trimmed) > 255:
raise TypeError(
_("'%s' exceeds the 255 character hostname limit") % trimmed)
names = trimmed.split('.')
for name in names:
if not name:
raise TypeError(_("Encountered an empty component."))
if name[-1] == '-' or name[0] == '-':
raise TypeError(
_("Name '%s' must not start or end with a hyphen.") % name)
if not re.match(hostname_pattern, name):
raise TypeError(
_("Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen.") % name)
# RFC 1123 hints that a TLD can't be all numeric. last is a TLD if
# it's an FQDN.
if len(names) > 1 and re.match("^[0-9]+$", names[-1]):
raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
except TypeError as e:
msg = _("'%(data)s' is not a valid hostname. Reason: %(reason)s") % {
'data': data, 'reason': e.message}
LOG.debug(msg)
return msg
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is None:
return
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
if not uuidutils.is_uuid_like(data):
msg = _("'%s' is not a valid UUID") % data
LOG.debug(msg)
return msg
def _validate_uuid_or_none(data, valid_values=None):
if data is not None:
return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = _validate_uuid(item)
if msg:
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
def _validate_dict_item(key, key_validator, data):
# Find conversion function, if any, and apply it
conv_func = key_validator.get('convert_to')
if conv_func:
data[key] = conv_func(data.get(key))
# Find validator function
# TODO(salv-orlando): Structure of dict attributes should be improved
# to avoid iterating over items
val_func = val_params = None
for (k, v) in key_validator.iteritems():
if k.startswith('type:'):
# ask forgiveness, not permission
try:
val_func = validators[k]
except KeyError:
return _("Validator '%s' does not exist.") % k
val_params = v
break
# Process validation
if val_func:
return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
if not isinstance(data, dict):
msg = _("'%s' is not a dictionary") % data
LOG.debug(msg)
return msg
# Do not perform any further validation, if no constraints are supplied
if not key_specs:
return
# Check whether all required keys are present
required_keys = [key for key, spec in key_specs.iteritems()
if spec.get('required')]
if required_keys:
msg = _verify_dict_keys(required_keys, data, False)
if msg:
LOG.debug(msg)
return msg
# Perform validation and conversion of all values
# according to the specifications.
for key, key_validator in [(k, v) for k, v in key_specs.iteritems()
if k in data]:
msg = _validate_dict_item(key, key_validator, data)
if msg:
LOG.debug(msg)
return msg
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
if isinstance(data, basestring):
val = data.lower()
if val == "true" or val == "1":
return True
if val == "false" or val == "0":
return False
elif isinstance(data, bool):
return data
elif isinstance(data, int):
if data == 0:
return False
elif data == 1:
return True
msg = _("'%s' cannot be converted to boolean") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_to_boolean_if_not_none(data):
if data is not None:
return convert_to_boolean(data)
def convert_to_int(data):
try:
return int(data)
except (ValueError, TypeError):
msg = _("'%s' is not a integer") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_str_to_list(data):
"""Convert a value of the form 'key=value' to ['key', 'value'].
:raises: n_exc.InvalidInput if any of the strings are malformed
(e.g. do not contain a key).
"""
kvp = [x.strip() for x in data.split('=', 1)]
if len(kvp) == 2 and kvp[0]:
return kvp
msg = _("'%s' is not of the form <key>=[value]") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_list_to_dict(kvp_list):
"""Convert a list of 'key=value' strings to a dict.
:raises: n_exc.InvalidInput if any of the strings are malformed
    (e.g. do not contain a key). Values supplied for a duplicate
    key are merged into a single list for that key.
"""
if kvp_list == ['True']:
# No values were provided (i.e. '--flag-name')
return {}
kvp_map = {}
for kvp_str in kvp_list:
key, value = convert_kvp_str_to_list(kvp_str)
kvp_map.setdefault(key, set())
kvp_map[key].add(value)
return dict((x, list(y)) for x, y in kvp_map.iteritems())
def convert_none_to_empty_list(value):
return [] if value is None else value
def convert_none_to_empty_dict(value):
return {} if value is None else value
def convert_to_list(data):
if data is None:
return []
elif hasattr(data, '__iter__'):
return list(data)
else:
return [data]
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
validators = {'type:dict': _validate_dict,
'type:dict_or_none': _validate_dict_or_none,
'type:dict_or_empty': _validate_dict_or_empty,
'type:dict_or_nodata': _validate_dict_or_nodata,
'type:fixed_ips': _validate_fixed_ips,
'type:hostroutes': _validate_hostroutes,
'type:ip_address': _validate_ip_address,
'type:ip_address_or_none': _validate_ip_address_or_none,
'type:ip_pools': _validate_ip_pools,
'type:mac_address': _validate_mac_address,
'type:mac_address_or_none': _validate_mac_address_or_none,
'type:nameservers': _validate_nameservers,
'type:non_negative': _validate_non_negative,
'type:range': _validate_range,
'type:regex': _validate_regex,
'type:regex_or_none': _validate_regex_or_none,
'type:string': _validate_string,
'type:string_or_none': _validate_string_or_none,
'type:not_empty_string': _validate_not_empty_string,
'type:not_empty_string_or_none':
_validate_not_empty_string_or_none,
'type:subnet': _validate_subnet,
'type:subnet_list': _validate_subnet_list,
'type:subnet_or_none': _validate_subnet_or_none,
'type:uuid': _validate_uuid,
'type:uuid_or_none': _validate_uuid_or_none,
'type:uuid_list': _validate_uuid_list,
'type:values': _validate_values,
'type:boolean': _validate_boolean}
# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
#    becomes mandatory).
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
RESOURCE_ATTRIBUTE_MAP = {
NETWORKS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'subnets': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
},
PORTS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': None},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'mac_address': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address': None},
'enforce_policy': True,
'is_visible': True},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'convert_list_to': convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
'device_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SUBNETS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': None},
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'cidr': {'allow_post': True, 'allow_put': False,
'validate': {'type:subnet': None},
'is_visible': True},
'gateway_ip': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'allocation_pools': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_pools': None},
'is_visible': True},
'dns_nameservers': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:nameservers': None},
'is_visible': True},
'host_routes': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:hostroutes': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'enable_dhcp': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values': constants.IPV6_MODES},
'is_visible': True},
'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values':
constants.IPV6_MODES},
'is_visible': True},
SHARED: {'allow_post': False,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': False,
'required_by_policy': True,
'enforce_policy': True},
}
}
# Identify the attribute used by a resource to reference another resource
RESOURCE_FOREIGN_KEYS = {
NETWORKS: 'network_id'
}
PLURALS = {NETWORKS: NETWORK,
PORTS: PORT,
SUBNETS: SUBNET,
'dns_nameservers': 'dns_nameserver',
'host_routes': 'host_route',
'allocation_pools': 'allocation_pool',
'fixed_ips': 'fixed_ip',
'extensions': 'extension'}
|
leeseuljeong/leeseulstack_neutron
|
neutron/api/v2/attributes.py
|
Python
|
apache-2.0
| 28,875
| 0.000035
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/food/shared_dish_cho_nor_hoola.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/food/shared_dish_cho_nor_hoola.py
|
Python
|
mit
| 452
| 0.04646
|
"""
A reader for corpora whose documents are in MTE format.
"""
import os
from functools import reduce
from nltk import compat
from nltk.corpus.reader import concat, TaggedCorpusReader
lxmlAvailable = False
try:
from lxml import etree
lxmlAvailable = True
except ImportError:
#first try c version of ElementTree
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
import re
def xpath(root, path, ns):
if lxmlAvailable:
return root.xpath(path, namespaces=ns)
else:
return root.findall(path, ns)
class MTEFileReader:
"""
Class for loading the content of the multext-east corpus. It
parses the xml files and does some tag-filtering depending on the
given method parameters.
"""
ns = {'tei': 'http://www.tei-c.org/ns/1.0', 'xml': 'http://www.w3.org/XML/1998/namespace'}
tag_ns = '{http://www.tei-c.org/ns/1.0}'
xml_ns = '{http://www.w3.org/XML/1998/namespace}'
def __init__(self, file_path):
tree = etree.parse(file_path)
self.__root = xpath(tree.getroot(), './tei:text/tei:body', self.ns)[0]
@classmethod
def _words(self, text_root):
return [w.text for w in xpath(text_root, './/*', self.ns) if
w.tag == self.tag_ns + "w" or w.tag == self.tag_ns + "c"]
@classmethod
def _sents(self, text_root):
return [MTEFileReader._words(s) for s in xpath(text_root, './/tei:s', self.ns)]
@classmethod
def _paras(self, text_root):
return [MTEFileReader._sents(p) for p in xpath(text_root, './/tei:p', self.ns)]
@classmethod
def _lemma_words(self, text_root):
return [(w.text, w.attrib['lemma']) for w in xpath(text_root, './/tei:w', self.ns)]
@classmethod
def _tagged_words(self, text_root, tags=""):
if tags is None or tags == "":
return [(w.text, w.attrib['ana']) for w in xpath(text_root, './/tei:w', self.ns)]
else:
tags = re.compile('^' + re.sub("-",".",tags) + '.*$')
return [(w.text, w.attrib['ana']) for w in xpath(text_root, './/tei:w', self.ns)
if tags.match(w.attrib['ana'])]
@classmethod
def _lemma_sents(self, text_root):
return [MTEFileReader._lemma_words(s) for s in xpath(text_root, './/tei:s', self.ns)]
@classmethod
def _tagged_sents(self, text_root, tags=""):
# double list comprehension to remove empty sentences in case there is a sentence only containing punctuation marks
return [t for t in [MTEFileReader._tagged_words(s, tags) for s in xpath(text_root, './/tei:s', self.ns)] if len(t) > 0]
@classmethod
def _lemma_paras(self, text_root):
return [MTEFileReader._lemma_sents(p) for p in xpath(text_root, './/tei:p', self.ns)]
@classmethod
def _tagged_paras(self, text_root, tags=""):
return [t for t in [MTEFileReader._tagged_sents(p, tags) for p in xpath(text_root, './/tei:p', self.ns)] if len(t) > 0]
def words(self):
return MTEFileReader._words(self.__root)
def sents(self):
return MTEFileReader._sents(self.__root)
def paras(self):
return MTEFileReader._paras(self.__root)
def lemma_words(self):
return MTEFileReader._lemma_words(self.__root)
def tagged_words(self, tags=""):
return MTEFileReader._tagged_words(self.__root, tags)
def lemma_sents(self):
return MTEFileReader._lemma_sents(self.__root)
def tagged_sents(self, tags=""):
        return MTEFileReader._tagged_sents(self.__root, tags)
def lemma_paras(self):
return MTEFileReader._lemma_paras(self.__root)
def tagged_paras(self, tags=""):
        return MTEFileReader._tagged_paras(self.__root, tags)
class MTETagConverter:
"""
Class for converting msd tags to universal tags, more conversion
options are currently not implemented.
"""
mapping_msd_universal = {
'A': 'ADJ', 'S': 'ADP', 'R': 'ADV', 'C': 'CONJ',
'D': 'DET', 'N': 'NOUN', 'M': 'NUM', 'Q': 'PRT',
'P': 'PRON', 'V': 'VERB', '.': '.', '-': 'X'}
@staticmethod
def msd_to_universal(tag):
"""
        This function converts the annotation from MULTEXT-East to the universal tagset
        as described in Chapter 5 of the NLTK book.
        Unknown tags will be mapped to X. Punctuation marks are not part of the MSD
        tagset, so the '.' tag is passed through unchanged.
"""
indicator = tag[0] if not tag[0] == "#" else tag[1]
if not indicator in MTETagConverter.mapping_msd_universal:
indicator = '-'
return MTETagConverter.mapping_msd_universal[indicator]
class MTECorpusReader(TaggedCorpusReader):
"""
Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East.
MULTEXT-East contains part-of-speech-tagged words with a quite precise tagging
scheme. These tags can be converted to the Universal tagset
"""
def __init__(self, root=None, fileids=None, encoding='utf8'):
"""
        Construct a new MTECorpusReader for a set of documents
located at the given root directory. Example usage:
>>> root = '/...path to corpus.../'
>>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP
:param root: The root directory for this corpus. (default points to location in multext config file)
:param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml)
        :param encoding: The encoding of the given files (default is utf8)
"""
TaggedCorpusReader.__init__(self, root, fileids, encoding)
def __fileids(self, fileids):
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
        # filter out invalid user input
fileids = filter(lambda x : x in self._fileids, fileids)
        # filter out MULTEXT-East source files that are not compatible with the TEI-P5 specification
fileids = filter(lambda x : x not in ["oana-bg.xml", "oana-mk.xml"], fileids)
if not fileids:
print("No valid multext-east file specified")
return fileids
def readme(self):
"""
Prints some information about this corpus.
:return: the content of the attached README file
:rtype: str
"""
return self.open("00README.txt").read()
def raw(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a single string.
:rtype: str
"""
return concat([self.open(f).read() for f in self.__fileids(fileids)])
def words(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of words and punctuation symbols.
:rtype: list(str)
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).words() for f in self.__fileids(fileids)], [])
def sents(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of sentences or utterances,
each encoded as a list of word strings
:rtype: list(list(str))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).sents() for f in self.__fileids(fileids)], [])
def paras(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of paragraphs, each encoded as a list
            of sentences, which are in turn encoded as lists of word strings
:rtype: list(list(list(str)))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).paras() for f in self.__fileids(fileids)], [])
def lemma_words(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of words, the corresponding lemmas
and punctuation symbols, encoded as tuples (word, lemma)
:rtype: list(tuple(str,str))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).lemma_words() for f in self.__fileids(fileids)], [])
def tagged_words(self, fileids=None, tagset="msd", tags=None):
"""
:param fileids: A list specifying the fileids that should be used.
:param tagset: The tagset that should be used in the returned object,
either "universal" or "msd", "msd" is the default
:param tags: An MSD Tag that is used to filter all parts of the used corpus
that are not more precise or at least equal to the given tag
:return: the given file(s) as a list of tagged words and punctuation symbols
encoded as tuples (word, tag)
:rtype: list(tuple(str, str))
"""
words = reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).tagged_words(tags=tags) for f in self.__fileids(fileids)], [])
if tagset == "universal":
return map(lambda wt : (wt[0], MTETagConverter.msd_to_universal(wt[1])), words)
elif tagset == "msd":
return words
else:
print("Unknown tagset specified.")
def lemma_sents(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of sentences or utterances, each
encoded as a list of tuples of the word and the corresponding
lemma (word, lemma)
:rtype: list(list(tuple(str, str)))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).lemma_sents() for f in self.__fileids(fileids)], [])
def tagged_sents(self, fileids=None, tagset="msd", tags=None):
"""
:param fileids: A list specifying the fileids that should be used.
:param tagset: The tagset that should be used in the returned object,
either "universal" or "msd", "msd" is the default
:param tags: An MSD Tag that is used to filter all parts of the used corpus
that are not more precise or at least equal to the given tag
:return: the given file(s) as a list of sentences or utterances, each
                 encoded as a list of (word,tag) tuples
:rtype: list(list(tuple(str, str)))
"""
sents = reduce(lambda a, b : a + b, [MTEFileReader(os.path.join(self._root, f)).tagged_sents(tags=tags) for f in self.__fileids(fileids)], [])
if tagset == "universal":
return map(lambda s : map (lambda wt : (wt[0], MTETagConverter.msd_to_universal(wt[1])), s), sents)
elif tagset == "msd":
return sents
else:
print("Unknown tagset specified.")
def lemma_paras(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of paragraphs, each encoded as a
list of sentences, which are in turn encoded as a list of
tuples of the word and the corresponding lemma (word, lemma)
        :rtype: list(list(list(tuple(str, str))))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).lemma_paras() for f in self.__fileids(fileids)], [])
def tagged_paras(self, fileids=None, tagset="msd", tags=None):
"""
:param fileids: A list specifying the fileids that should be used.
:param tagset: The tagset that should be used in the returned object,
either "universal" or "msd", "msd" is the default
:param tags: An MSD Tag that is used to filter all parts of the used corpus
that are not more precise or at least equal to the given tag
:return: the given file(s) as a list of paragraphs, each encoded as a
list of sentences, which are in turn encoded as a list
of (word,tag) tuples
:rtype: list(list(list(tuple(str, str))))
"""
paras = reduce(lambda a, b : a + b, [MTEFileReader(os.path.join(self._root, f)).tagged_paras(tags=tags) for f in self.__fileids(fileids)], [])
if tagset == "universal":
            return map(lambda p : map(lambda s : map (lambda wt : (wt[0], MTETagConverter.msd_to_universal(wt[1])), s), p), paras)
elif tagset == "msd":
return paras
else:
print("Unknown tagset specified.")
|
jwacalex/MULTEX-EAST-PoS-Tagger
|
mte.py
|
Python
|
lgpl-3.0
| 12,810
| 0.009446
|
#!/usr/bin/env python
from setuptools import setup
NAME = 'coinshot'
DESCRIPTION = 'simple python module for pushover.net'
VERSION = open('VERSION').read().strip()
LONG_DESC = open('README.rst').read()
LICENSE = "MIT License"
setup(
name=NAME,
version=VERSION,
author='Charles Thomas',
author_email='ch@rlesthom.as',
packages=['coinshot'],
url='https://github.com/charlesthomas/%s' % NAME,
license=LICENSE,
description=DESCRIPTION,
long_description=LONG_DESC,
long_description_content_type='text/x-rst',
install_requires=["simplejson >= 3.3.0"],
scripts=['bin/shoot'],
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Communications',
'Topic :: Software Development :: Libraries :: Python Modules']
)
|
charlesthomas/coinshot
|
setup.py
|
Python
|
mit
| 1,206
| 0.000829
|
import os
import tweepy
from query_db import query_db, send_user_queries_to_db, read_in_bb_file
from our_keys.twitter_keys import my_keys
from itertools import chain, repeat
u"""Reads in a file of cities and their bounding boxes. Queries the
database to get a list of all unique users who have tweeted from that
city. Queries Twitter api to get 200 tweets from each user, then inserts
200 tweets for up to 100 users per city into a separate database table
called "Tweet200."""
ROOT_DIR = os.path.abspath(os.getcwd())
def get_twitter_api():
u"""Gets twitter keys from key file."""
for our_set, our_keys in my_keys.items():
auth = tweepy.OAuthHandler(
our_keys['consumer_key'],
our_keys['consumer_secret']
)
auth.set_access_token(
our_keys['access_key'],
our_keys['access_secret']
)
print "Hi, I'm the key generator: ", our_keys['access_key']
yield tweepy.API(auth)
def get_unique_handles(vals):
u"""Takes in a list of tweets from a given city. Returns a dict of
unique user handles for each location."""
users = {}
for tweet in vals:
name = tweet[1]
if name in users:
users[name] += 1
else:
users[name] = 1
heavy_users = []
for user in users:
if users[user] > 2:
heavy_users.append(user)
return heavy_users
def format_tweet_history(history, user, city):
u"""Formats tweets pieces to be fed to sql query.
History is a list-like set of tweets. User is the screen name
as a string. City is the string name of the city we querried for."""
tweet_history = []
for tweet in history:
screen_name = user
text = tweet.text
if len(text) > 150:
print text
created_at = tweet.created_at.strftime('%m/%d/%Y, %H:%M')
location = tweet.geo
location_lat = None
location_lng = None
if location:
location_lat = location['coordinates'][0]
location_lng = location['coordinates'][1]
hashtags = []
# if location:
tweet = (
screen_name, text, location_lat, location_lng,
created_at, hashtags, city
)
tweet_history.append(tweet)
return tweet_history
def check_list_low_tweeters():
with open(ROOT_DIR + "text/stop_names.txt", 'r') as a_file:
names = a_file.read().split("\n")
return names
def query_twitter_for_histories(users, city=None, cap=100, data_collection=True):
u"""Calls function to return a dict of cities and the unique users for each
city. Iterates over the dict to extract the tweet text/locations/timestamps
for each tweet, bundles results into DB-friendly tuples. Returns a list of
lists of tuples."""
api_generator = get_twitter_api()
api_generator = chain.from_iterable(repeat(tuple(api_generator), 1000))
api = api_generator.next()
city_tweets = []
user_count = 0
too_low_count = 0
for user in users:
if user_count > cap:
break
if user in check_list_low_tweeters() and data_collection is True:
continue
history = []
# tweet_history = []
try:
history = api.user_timeline(screen_name=user, count=200)
except tweepy.error.TweepError as err:
print "Tweepy Error: ", err.message
api = api_generator.next()
continue
if len(history) >= 200 or not data_collection:
user_count += 1
tweet_history = format_tweet_history(history, user, city)
# if len(tweet_history):
city_tweets.append(tweet_history)
print user_count
else:
print "Too few tweets in this user's history."
with open(ROOT_DIR + "text/stop_names.txt", 'a') as a_file:
a_file.write(user)
a_file.write("\n")
too_low_count += 1
total = user_count + too_low_count
print "total requests: ", total
return city_tweets
def process_each_city():
u"""Calls functions to insert user data into Tweet200 table."""
bb_dict = read_in_bb_file()
for city, values in bb_dict.items():
with open(ROOT_DIR + "text/stop_cities.txt", "r") as ffff:
stop_cities = ffff.read()
if city not in stop_cities:
vals = query_db(city, values)
print "Now checking ", city
handles = get_unique_handles(vals)
print city, len(handles)
if len(handles) >= 200:
print "Now querying twitter for histories"
tweets = query_twitter_for_histories(handles, city)
if len(tweets) >= 100:
send_user_queries_to_db(tweets, city)
else:
print "Not enough users with twitter histories in ", city
if __name__ == "__main__":
while True:
try:
process_each_city()
except Exception:
print "I got an exception"
continue
|
lnhubbell/tweetTrack
|
streamScript/domain/get_tweets_by_user.py
|
Python
|
mit
| 5,107
| 0.000196
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Agent that gets the local keyboard input in the act() function.
Applies safety classifier(s) to process user and partner messages.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.message import Message
from parlai.utils.misc import display_messages
from parlai.utils.strings import colorize
from parlai.agents.local_human.local_human import LocalHumanAgent
from parlai.utils.safety import OffensiveStringMatcher, OffensiveLanguageClassifier
OFFENSIVE_USER_REPLY = '[ Sorry, could not process that message. Please try again. ]'
OFFENSIVE_BOT_REPLY = (
'[ Unsafe model reply detected. Clearing agent history. Please try again. ]'
)
class SafeLocalHumanAgent(LocalHumanAgent):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
agent = parser.add_argument_group('Safe Local Human Arguments')
agent.add_argument(
'--safety',
type=str,
default='all',
choices={'none', 'string_matcher', 'classifier', 'all'},
help='Apply safety filtering to messages',
)
super().add_cmdline_args(parser, partial_opt=partial_opt)
return parser
def __init__(self, opt, shared=None):
super().__init__(opt)
self.id = 'safeLocalHuman'
self._init_safety(opt)
def _init_safety(self, opt):
"""
Initialize safety modules.
"""
if opt['safety'] == 'string_matcher' or opt['safety'] == 'all':
self.offensive_string_matcher = OffensiveStringMatcher()
if opt['safety'] == 'classifier' or opt['safety'] == 'all':
self.offensive_classifier = OffensiveLanguageClassifier()
self.self_offensive = False
def check_offensive(self, text):
"""
Check if text is offensive using string matcher and classifier.
"""
if text == '':
return False
if (
hasattr(self, 'offensive_string_matcher')
and text in self.offensive_string_matcher
):
return True
if hasattr(self, 'offensive_classifier') and text in self.offensive_classifier:
return True
return False
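    # Note on the check above: both safety helpers are queried through the `in`
    # operator, so a message is treated as offensive as soon as
    # `text in self.offensive_string_matcher` or `text in self.offensive_classifier`
    # evaluates to True.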
def observe(self, msg):
"""
Observe bot reply if and only if it passes.
"""
if self.self_offensive:
# User was offensive, they must try again
return
# Now check if bot was offensive
bot_offensive = self.check_offensive(msg.get('text', ''))
if not bot_offensive:
# View bot message
print(
display_messages(
[msg],
add_fields=self.opt.get('display_add_fields', ''),
prettify=self.opt.get('display_prettify', False),
verbose=self.opt.get('verbose', False),
)
)
msg.force_set('bot_offensive', False)
else:
msg.force_set('bot_offensive', True)
print(OFFENSIVE_BOT_REPLY)
def get_reply(self):
reply_text = input(colorize('Enter Your Message:', 'field') + ' ')
reply_text = reply_text.replace('\\n', '\n')
return reply_text
def act(self):
# get human reply
reply = Message(
{
'id': self.getID(),
'label_candidates': self.fixedCands_txt,
'episode_done': False,
}
)
reply_text = self.get_reply()
# check if human reply is offensive
self.self_offensive = self.check_offensive(reply_text)
while self.self_offensive:
print(OFFENSIVE_USER_REPLY)
reply_text = self.get_reply()
# check if human reply is offensive
self.self_offensive = self.check_offensive(reply_text)
# check for episode done
if '[DONE]' in reply_text or self.opt.get('single_turn', False):
raise StopIteration
# set reply text
reply['text'] = reply_text
# check if finished
if '[EXIT]' in reply_text:
self.finished = True
raise StopIteration
return reply
|
facebookresearch/ParlAI
|
parlai/agents/safe_local_human/safe_local_human.py
|
Python
|
mit
| 4,597
| 0.00087
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('human_model')
import rospy
import json
import tf
import numpy
from abc import ABCMeta,abstractmethod
from tf.transformations import quaternion_multiply as quatMult,quaternion_conjugate
from collections import deque,defaultdict,OrderedDict
"""Module for converting tf data to construct a human model"""
def Vec(*args):
"""returns a vector (numpy float array) with the length of number of given arguments"""
return numpy.array(args,dtype=float)
def normalize(v):
"""returns unit vector or quaternion"""
return v/numpy.linalg.norm(v)
def quatRotatePoint(q,p,o=Vec(0,0,0)):
"""returns point p rotated around quaternion q with the origin o (default (0,0,0)"""
return quatMult(
quatMult(q,numpy.append(p-o,(0,))),
quaternion_conjugate(q)
)[:3]+o
def calculateQuaternion(v1,v2):
"""calculates the quaternion for rotating v1 to v2. Note that both v1 and v2 must be unit vector"""
cross=numpy.cross(v1,v2)
return normalize(numpy.append(cross,(1+numpy.dot(v1,v2),)))
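# Worked example following from the two helpers above (exact up to floating point):
# calculateQuaternion(Vec(1,0,0), Vec(0,1,0)) normalizes (0, 0, 1, 1) to roughly
# (0, 0, 0.7071, 0.7071), i.e. a 90 degree rotation about z, and quatRotatePoint of
# that quaternion applied to Vec(1,0,0) returns approximately Vec(0,1,0).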
class AveragePosition(object):
"""Example Position Class
Calculates the average of the last n positions lazily
Calculated value can be accessed or changed via pos attribute:
p=AveragePosition(10)
p.pos+=Vec(1,2,3)
print(p.pos)
If an alternative position class is needed to be defined these functions,
must be defined in class:
@property
def pos(self):
...
@pos.setter
def pos(self,p):
...
def append(self,p):
...
"""
def __init__(self,n=100):
self.transformations=deque((Vec(0,0,0),),n)
self.calculated=None
@property
def pos(self):
if self.calculated is None:
self.calculated=numpy.average(self.transformations,0)
return self.calculated
@pos.setter
def pos(self,p):
self.calculated=p
"""appends the given position p to the deque, and resets the calculated
average value"""
def append(self,p):
self.calculated=None
self.transformations.append(p)
class JointTree(object):
"""Recursive data structure to define joint tree.It have following attributes:
length:distance to the parent (if not fixed frame)
fixedFrame:fixates the point to the fixedFrame+the displacement of the tree
invert:inverts the rotating axis for connected limb
displacement:used to preserve the position of the node with respect to its parent(resets on new position)
limbPos:position of the limb(resets on new position)
limbRot:orientation of the limb(resets on new position)
"""
def toDict(self,ordered=False):
"""Converts tree to dictionary which can be exported as JSON,if ordered is true
it returns an OrderedDict instead of dictionary and preserves the order of attributes"""
d=OrderedDict if ordered else dict
return d(
((
self.name,
d((
('length',self.length),
('invert',self.invert),
('fixedFrame',None if self.fixedFrame is None else tuple(self.fixedFrame)),
('children',tuple(i.toDict(ordered) for i in self.children)),
))
),))
@staticmethod
def fromDict(dictionary,pos):
"""converts a dictionary to JointTree"""
(k,v)=next(iter(dictionary.items()))
return JointTree(k,pos,**v)
def __init__(self,name,posFunc,**kwargs):
"""gets the name of the node and a function takes no argument,returns a Position class
(e.g. lambda : AveragePosition(10). It takes these optional arguments with the following default values:
length=0
invert=False
fixedFrame=None
children=[] (it can contain either dictionary or JointTree)
"""
self.name=name
self.currentPos=posFunc()
self.length=kwargs.get("length",0)
self.invert=kwargs.get("invert",False)
fixedFrame=kwargs.get("fixedFrame",None)
self.fixedFrame=None if fixedFrame is None else Vec(*fixedFrame)
self.children=[]
children=kwargs.get("children",[])
try:
if isinstance(children[0],dict):
for i in children:
(k,v)=next(iter(i.items()))
self.addChild(JointTree(k,posFunc,**v))
else:
for i in children:
self.addChild(i)
except IndexError:
pass
self.parent=None
self.__uncalculate()
def __uncalculate(self):
self.displacement=Vec(0,0,0)
self.limbPos=Vec(0,0,0)
self.limbRot=Vec(0,0,0,1)
def __iter__(self):
"""iterates over tree depth-first order"""
yield self
for i in self.children:
for j in iter(i):
yield j
def __getitem__(self,name):
"""returns the node with the given name, it raises a KeyError if there is no match"""
for c in self:
if c.name==name:
return c
raise KeyError("There is no node in tree with '{}' name".format(name))
def addChild(self,child):
"""adds new node to the tree"""
child.parent=self
self.children.append(child)
def collectPosition(self,ls):
"""gets the position of the joints from tf.TransformListener ls. It does nothing if there is no sent pose"""
try:
(trans,_)=ls.lookupTransform('/world',self.name,rospy.Time(0))
except tf.Exception as e:
return
self.currentPos.append(Vec(*trans))
self.__uncalculate()
def setPosition(self):
"""calculates the position of the joint"""
if self.fixedFrame is not None:
self.displacement+=self.fixedFrame-self.currentPos.pos
self.currentPos.pos+=self.displacement
elif self.parent is not None:
n=self.currentPos.pos+self.displacement
p=self.parent.currentPos.pos
n=normalize(n-p)*self.length+p
self.displacement=n-self.currentPos.pos
self.currentPos.pos=n
for i in self.children:
i.displacement+=self.displacement
self.displacement=Vec(0,0,0)
def connectLimbs(self):
"""calculates the pose of the limbs"""
p=self.currentPos.pos
for i in self.children:
c=i.currentPos.pos
i.limbPos=(p+c)/2
v2=normalize((p-c) if not i.invert else (c-p))
i.limbRot=calculateQuaternion(Vec(0,0,1),v2)
def sendPoses(self,br):
"""sends the pose of joints and limbs to given tf.TransformBroadcaster"""
br.sendTransform(self.currentPos.pos,(0,0,0,1),rospy.Time.now(),self.name+'_link','/world')
for i in self.children:
br.sendTransform(i.limbPos,i.limbRot,rospy.Time.now(),"{}_{}".format(self.name,i.name),'/world')
def applyDisplacement(self,displacement):
"""applies the given displacement to the parent and all of its children"""
for i in self:
i.currentPos.pos+=displacement
i.limbPos+=displacement
if __name__ == '__main__':
rospy.init_node('animator')
treeDict=json.loads(rospy.get_param("/tree"))
tree=JointTree.fromDict(treeDict,lambda : AveragePosition(10))
br = tf.TransformBroadcaster()
ls = tf.TransformListener()
rate = rospy.Rate(50.0)
while not rospy.is_shutdown():
for i in tree:
i.collectPosition(ls)
for i in tree:
i.setPosition()
(o,r,l) = ("SpineShoulder","ShoulderRight","ShoulderLeft")
            # these three are a special case: they are aligned on a straight line
            # Also note that the z value of ShoulderRight and ShoulderLeft equals that of SpineShoulder
if i.name==o:
r=i[r]
l=i[l]
cr=r.currentPos.pos+r.displacement
cl=l.currentPos.pos+l.displacement
cr[2]=i.currentPos.pos[2]
cl[2]=i.currentPos.pos[2]
k=i.currentPos.pos-(cr+cl)/2
cr+=k
cl+=k
r.displacement=cr-r.currentPos.pos
l.displacement=cl-l.currentPos.pos
for i in tree:
i.connectLimbs()
#calculates the Orientation of Torso (Upper and Lower) and connected joints
q1=tree["SpineShoulder"].limbRot
q2=calculateQuaternion(Vec(0,1,0),normalize(tree["ShoulderRight"].currentPos.pos-tree["ShoulderLeft"].currentPos.pos))
tree["SpineShoulder"].limbRot=quatMult(q2,q1)
tree["ShoulderRight"].applyDisplacement(quatRotatePoint(q1,tree["ShoulderRight"].currentPos.pos,tree["SpineShoulder"].currentPos.pos)-tree["ShoulderRight"].currentPos.pos)
tree["ShoulderLeft"].applyDisplacement(quatRotatePoint(q1,tree["ShoulderLeft"].currentPos.pos,tree["SpineShoulder"].currentPos.pos)-tree["ShoulderLeft"].currentPos.pos)
v=tree["HipRight"].currentPos.pos-tree["HipLeft"].currentPos.pos
q2=calculateQuaternion(Vec(0,1,0),normalize(v))
q=quatMult(q2,q1)
tree["SpineBase"].limbRot=q
tree["HipRight"].applyDisplacement(quatRotatePoint(q,tree["SpineBase"].currentPos.pos+Vec(0.01,tree["HipRight"].length,-0.05),tree["SpineBase"].currentPos.pos)-tree["HipRight"].currentPos.pos)
tree["HipLeft"].applyDisplacement(quatRotatePoint(q,tree["SpineBase"].currentPos.pos+Vec(0.01,-tree["HipLeft"].length,-0.05),tree["SpineBase"].currentPos.pos)-tree["HipLeft"].currentPos.pos)
for i in tree:
i.sendPoses(br)
rate.sleep()
|
metu-kovan/human_model
|
src/animator.py
|
Python
|
gpl-3.0
| 8,441
| 0.050468
|
#!/usr/bin/env python3
#
# This file generates an estimation of window size for the
# two queues for _each_ sample. It will not be exact, and
# it's correctness will vary with the variation of queue delay
# in the queue.
#
# The results are saved to:
# - derived/window
# each line formatted as: <sample id> <window ecn in bits> <window nonecn in bits>
#
# Dependency:
# - calc_queuedelay.py (for per sample queue stats)
import os
import sys
def get_rates(rate_file):
rates = []
with open(rate_file, 'r') as f:
for line in f:
# skip comments
if line[0] == '#':
continue
# format of rate file:
# <sample id> <sample time> <rate in b/s>
rates.append(int(line.split()[2]))
return rates
def get_rtts_with_queue(queue_file, base_rtt):
rtts = []
with open(queue_file, 'r') as f:
for line in f:
# skip comments
if line[0] == '#':
continue
# format of queue file:
# <sample time> <average_in_us> ...
# the average might be '-' if it is unknown
queue_avg = line.split()[1]
queue_avg = 0 if queue_avg == '-' else float(queue_avg)
# add rtt and normalize to seconds
# base rtt is in ms
rtts.append((queue_avg / 1000 + base_rtt) / 1000)
return rtts
def calc_window(rates, rtts_s):
windows = []
# all data should have same amount of samples
for i, rate in enumerate(rates):
rtt = rtts_s[i] # rtt in seconds
windows.append(rate * rtt)
return windows
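# Illustrative numbers (made up, not from any test run): a sample rate of
# 10 Mbit/s (10,000,000 b/s) and an rtt of 0.1 s (100 ms base rtt with an empty
# queue) give an estimated window of 10,000,000 * 0.1 = 1,000,000 bits.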
def write_window(file, window_ecn_list, window_nonecn_list):
with open(file, 'w') as f:
f.write('#sample_id window_ecn_in_bits window_nonecn_in_bits\n')
for i, window_ecn in enumerate(window_ecn_list):
window_nonecn = window_nonecn_list[i]
f.write('%d %d %d\n' % (i, window_ecn, window_nonecn))
def process_test(folder, base_rtt_ecn_ms, base_rtt_nonecn_ms):
write_window(
folder + '/derived/window',
calc_window(
get_rates(folder + '/ta/rate_ecn'),
get_rtts_with_queue(folder + '/derived/queue_ecn_samplestats', base_rtt_ecn_ms),
),
calc_window(
get_rates(folder + '/ta/rate_nonecn'),
get_rtts_with_queue(folder + '/derived/queue_nonecn_samplestats', base_rtt_nonecn_ms),
),
)
if __name__ == '__main__':
if len(sys.argv) < 4:
print('Usage: %s <test_folder> <rtt_ecn_ms> <rtt_nonecn_ms>' % sys.argv[0])
sys.exit(1)
process_test(
sys.argv[1],
float(sys.argv[2]),
float(sys.argv[3]),
)
print('Generated win')
|
henrist/aqmt
|
aqmt/calc_window.py
|
Python
|
mit
| 2,753
| 0.001453
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Vimeo(Service, OpenGraphThumbMixin):
supported_domains = ["vimeo.com", "player.vimeo.com"]
def get(self):
data = self.get_urldata()
match_cfg_url = re.search('data-config-url="([^"]+)" data-fallback-url', data)
match_clip_page_cfg = re.search(r"vimeo\.clip_page_config\s*=\s*({.+?});", data)
if match_cfg_url:
player_url = match_cfg_url.group(1).replace("&", "&")
elif match_clip_page_cfg:
page_config = json.loads(match_clip_page_cfg.group(1))
player_url = page_config["player"]["config_url"]
else:
yield ServiceError(f"Can't find video file for: {self.url}")
return
player_data = self.http.request("get", player_url).text
if player_data:
jsondata = json.loads(player_data)
if ("hls" in jsondata["request"]["files"]) and ("fastly_skyfire" in jsondata["request"]["files"]["hls"]["cdns"]):
hls_elem = jsondata["request"]["files"]["hls"]["cdns"]["fastly_skyfire"]
yield from hlsparse(self.config, self.http.request("get", hls_elem["url"]), hls_elem["url"], output=self.output)
avail_quality = jsondata["request"]["files"]["progressive"]
for i in avail_quality:
yield HTTP(copy.copy(self.config), i["url"], i["height"], output=self.output)
else:
yield ServiceError("Can't find any streams.")
return
|
spaam/svtplay-dl
|
lib/svtplay_dl/service/vimeo.py
|
Python
|
mit
| 1,817
| 0.003302
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weighers that weigh hosts by their capacity, including following two
weighers:
1. Capacity Weigher. Weigh hosts by their available capacity.
The default is to spread volumes across all hosts evenly. If you prefer
stacking, you can set the 'capacity_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
2. Allocated Capacity Weigher. Weigh hosts by their allocated capacity.
The default behavior is to place new volume to the host allocated the least
space. This weigher is intended to simulate the behavior of SimpleScheduler.
If you prefer to place volumes to host allocated the most space, you can
set the 'allocated_capacity_weight_multiplier' option to a positive number
and the weighing has the opposite effect of the default.
"""
import math
from oslo.config import cfg
from cinder.openstack.common.scheduler import weights
capacity_weight_opts = [
cfg.FloatOpt('capacity_weight_multiplier',
default=1.0,
help='Multiplier used for weighing volume capacity. '
'Negative numbers mean to stack vs spread.'),
cfg.FloatOpt('allocated_capacity_weight_multiplier',
default=-1.0,
help='Multiplier used for weighing volume capacity. '
'Negative numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(capacity_weight_opts)
class CapacityWeigher(weights.BaseHostWeigher):
def _weight_multiplier(self):
"""Override the weight multiplier."""
return CONF.capacity_weight_multiplier
def _weigh_object(self, host_state, weight_properties):
"""Higher weights win. We want spreading to be the default."""
reserved = float(host_state.reserved_percentage) / 100
free_space = host_state.free_capacity_gb
if free_space == 'infinite' or free_space == 'unknown':
#(zhiteng) 'infinite' and 'unknown' are treated the same
# here, for sorting purpose.
free = float('inf')
else:
free = math.floor(host_state.free_capacity_gb * (1 - reserved))
return free
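# Illustrative example (numbers are made up): with reserved_percentage = 10 and
# free_capacity_gb = 500, CapacityWeigher._weigh_object returns
# math.floor(500 * (1 - 0.10)) = 450, so under the default positive multiplier the
# host with the most usable free space wins.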
class AllocatedCapacityWeigher(weights.BaseHostWeigher):
def _weight_multiplier(self):
"""Override the weight multiplier."""
return CONF.allocated_capacity_weight_multiplier
def _weigh_object(self, host_state, weight_properties):
# Higher weights win. We want spreading (choose host with lowest
# allocated_capacity first) to be the default.
allocated_space = host_state.allocated_capacity_gb
return allocated_space
|
Thingee/cinder
|
cinder/scheduler/weights/capacity.py
|
Python
|
apache-2.0
| 3,311
| 0.000302
|
from PyQt4 import QtCore
import acq4.Manager
import acq4.util.imageAnalysis as imageAnalysis
run = True
man = acq4.Manager.getManager()
cam = man.getDevice('Camera')
frames = []
def collect(frame):
global frames
frames.append(frame)
cam.sigNewFrame.connect(collect)
def measure():
if len(frames) == 0:
QtCore.QTimer.singleShot(100, measure)
return
global run
if run:
global frames
frame = frames[-1]
frames = []
img = frame.data()
w,h = img.shape
img = img[2*w/5:3*w/5, 2*h/5:3*h/5]
w,h = img.shape
fit = imageAnalysis.fitGaussian2D(img, [100, w/2., h/2., w/4., 0])
# convert sigma to full width at 1/e
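        # (a Gaussian exp(-x**2 / (2*sigma**2)) drops to 1/e of its peak at
        # x = sigma*sqrt(2), so the full width at 1/e is 2*sqrt(2)*sigma, hence
        # the factor below)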
fit[0][3] *= 2 * 2**0.5
print "WIDTH:", fit[0][3] * frame.info()['pixelSize'][0] * 1e6, "um"
print " fit:", fit
else:
global frames
frames = []
QtCore.QTimer.singleShot(2000, measure)
measure()
|
tropp/acq4
|
acq4/analysis/scripts/beamProfiler.py
|
Python
|
mit
| 976
| 0.009221
|
"""
This version of julian is currently in development and is not considered stable.
"""
|
ithinksw/philo
|
philo/contrib/julian/__init__.py
|
Python
|
isc
| 89
| 0.033708
|
import timeit
import pyximport; pyximport.install()
from mod2 import cysum, cysum2
def pysum(start, step, count):
ret = start
for i in range(count):
ret += step
return ret
print('Python',
timeit.timeit('pysum(0, 1, 100)', 'from __main__ import pysum'))
print('Cython', timeit.timeit('cysum(0, 1, 100)', 'from __main__ import cysum'))
print('Cython with types',
timeit.timeit('cysum2(0, 1, 100)', 'from __main__ import cysum2'))
|
asvetlov/optimization-kaunas-2017
|
2.py
|
Python
|
apache-2.0
| 463
| 0.006479
|
# Copyright (C) 2008 LibreSoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Carlos Garcia Campos <carlosgc@gsyc.escet.urjc.es>
from pycvsanaly2.Database import (SqliteDatabase, MysqlDatabase,
TableAlreadyExists, statement)
from pycvsanaly2.extensions import (Extension, register_extension,
ExtensionRunError)
from pycvsanaly2.extensions.file_types import guess_file_type
from pycvsanaly2.utils import to_utf8, uri_to_filename
class DBFileType(object):
id_counter = 1
__insert__ = """INSERT INTO file_types (id, file_id, type)
values (?, ?, ?)"""
def __init__(self, id, type, file_id):
if id is None:
self.id = DBFileType.id_counter
DBFileType.id_counter += 1
else:
self.id = id
self.type = to_utf8(type)
self.file_id = file_id
class FileTypes(Extension):
def __init__(self):
self.db = None
def __create_table(self, cnn):
cursor = cnn.cursor()
if isinstance(self.db, SqliteDatabase):
import sqlite3.dbapi2
try:
cursor.execute("CREATE TABLE file_types (" +
"id integer primary key," +
"file_id integer," +
"type varchar" +
")")
except sqlite3.dbapi2.OperationalError:
cursor.close()
raise TableAlreadyExists
except:
raise
elif isinstance(self.db, MysqlDatabase):
import MySQLdb
try:
cursor.execute("CREATE TABLE file_types (" +
"id INT primary key," +
"file_id integer REFERENCES files(id)," +
"type mediumtext" +
") CHARACTER SET=utf8")
except MySQLdb.OperationalError, e:
if e.args[0] == 1050:
cursor.close()
raise TableAlreadyExists
raise
except:
raise
cnn.commit()
cursor.close()
def __create_indices(self, cnn):
cursor = cnn.cursor()
if isinstance(self.db, MysqlDatabase):
import MySQLdb
try:
cursor.execute("create index parent_id on file_links(parent_id)")
except MySQLdb.OperationalError, e:
if e.args[0] != 1061:
cursor.close()
raise
try:
cursor.execute("create index repository_id on files(repository_id)")
except MySQLdb.OperationalError, e:
if e.args[0] != 1061:
cursor.close()
raise
cursor.close()
def __get_files_for_repository(self, repo_id, cursor):
query = "SELECT ft.file_id from file_types ft, files f " + \
"WHERE f.id = ft.file_id and f.repository_id = ?"
cursor.execute(statement(query, self.db.place_holder), (repo_id,))
files = [res[0] for res in cursor.fetchall()]
return files
def run(self, repo, uri, db):
self.db = db
path = uri_to_filename(uri)
if path is not None:
repo_uri = repo.get_uri_for_path(path)
else:
repo_uri = uri
cnn = self.db.connect()
cursor = cnn.cursor()
cursor.execute(statement("SELECT id from repositories where uri = ?",
db.place_holder), (repo_uri,))
repo_id = cursor.fetchone()[0]
files = []
try:
self.__create_table(cnn)
except TableAlreadyExists:
cursor.execute(statement("SELECT max(id) from file_types",
db.place_holder))
id = cursor.fetchone()[0]
if id is not None:
DBFileType.id_counter = id + 1
files = self.__get_files_for_repository(repo_id, cursor)
except Exception, e:
raise ExtensionRunError(str(e))
self.__create_indices(cnn)
query = """select distinct f.id fid, f.file_name fname
from files f
where f.repository_id = ?
and not exists (select id from file_links where parent_id = f.id)"""
cursor.execute(statement(query, db.place_holder), (repo_id,))
write_cursor = cnn.cursor()
rs = cursor.fetchmany()
while rs:
types = []
for file_id, file_name in rs:
if file_id in files:
continue
type = guess_file_type(file_name)
types.append(DBFileType(None, type, file_id))
if types:
file_types = [(type.id, type.file_id, type.type) \
for type in types]
write_cursor.executemany(statement(DBFileType.__insert__,
self.db.place_holder),
file_types)
rs = cursor.fetchmany()
cnn.commit()
write_cursor.close()
cursor.close()
cnn.close()
def backout(self, repo, uri, db):
update_statement = """delete from file_types where
file_id in (select id from files f
where f.repository_id = ?)"""
self._do_backout(repo, uri, db, update_statement)
register_extension("FileTypes", FileTypes)
|
SoftwareIntrospectionLab/MininGit
|
pycvsanaly2/extensions/FileTypes.py
|
Python
|
gpl-2.0
| 6,480
| 0.00571
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestFakeQuantizeOp(OpTest):
def setUp(self):
self.op_type = "fake_quantize"
self.attrs = {
'bit_length': 8,
'quantize_type': 'abs_max',
'window_size': 10000
}
self.inputs = {
'X': np.random.random((10, 10)).astype("float32"),
'InScales': np.zeros(self.attrs['window_size']).astype("float32"),
'InCurrentIter': np.zeros(1).astype("float32"),
'InMovingScale': np.zeros(1).astype("float32")
}
self.scale = {
'abs_max': np.max(np.abs(self.inputs['X'])).astype("float32")
}
self.outputs = {
'Out': np.round(self.inputs['X'] / self.scale['abs_max'] * (
(1 << (self.attrs['bit_length'] - 1)) - 1)),
'OutScales': np.zeros(self.attrs['window_size']).astype("float32"),
'OutMovingScale':
np.array([self.scale['abs_max']]).astype("float32"),
'OutCurrentIter': np.zeros(1).astype("float32")
}
def test_check_output(self):
self.check_output()
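    # Sanity check of the abs_max formula used in setUp (numbers are illustrative,
    # not taken from a real run): with bit_length = 8 the scale factor is
    # (1 << 7) - 1 = 127, so an element equal to max(|X|) quantizes to +/-127 and an
    # element at half that magnitude quantizes to np.round(63.5) = 64.0
    # (numpy rounds halves to the nearest even value).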
if __name__ == "__main__":
unittest.main()
|
jacquesqiao/Paddle
|
python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
|
Python
|
apache-2.0
| 1,823
| 0
|
# -*- coding: utf-8 -*-
import re
class StringParser(object):
@staticmethod
def removeCFU(stringToParse):
        updatedString = re.sub(r'\s?[0-9] CFU.*', '', stringToParse)
return updatedString
@staticmethod
def startsWithUpper(stringToParse):
stringToParse = stringToParse[0].upper()+stringToParse[1:]
return stringToParse
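    # Illustrative calls based on the two helpers above (the strings are made up):
    #   StringParser.removeCFU("Analisi Matematica 9 CFU (6 ore)")  ->  "Analisi Matematica"
    #   StringParser.startsWithUpper("analisi")                     ->  "Analisi"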
|
Veeenz/Telegram-DMI-Bot
|
classes/StringParser.py
|
Python
|
gpl-3.0
| 328
| 0.036585
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-22 14:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0002_event'),
]
operations = [
migrations.AddField(
model_name='event',
name='publish_date',
field=models.DateTimeField(default='2017-09-22 16:45', verbose_name='publish_date'),
preserve_default=False,
),
]
|
luisfer85/newspaper2
|
newspaper2/newspaper2/news/migrations/0003_event_publish_date.py
|
Python
|
apache-2.0
| 520
| 0.001923
|
"""
This module provides classes to run and analyze boltztrap on pymatgen band
structure objects. Boltztrap is a software interpolating band structures and
computing materials properties from this band structure using Boltzmann
semi-classical transport theory.
Boltztrap has been developed by Georg Madsen.
http://www.icams.de/content/research/software-development/boltztrap/
You need version 1.2.3 or higher
References are::
Madsen, G. K. H., and Singh, D. J. (2006).
BoltzTraP. A code for calculating band-structure dependent quantities.
Computer Physics Communications, 175, 67-71
"""
import logging
import math
import os
import subprocess
import tempfile
import time
import numpy as np
from monty.dev import requires
from monty.json import MSONable, jsanitize
from monty.os import cd
from monty.os.path import which
from scipy import constants
from scipy.spatial import distance
from pymatgen.core.lattice import Lattice
from pymatgen.core.units import Energy, Length
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine, Kpoint
from pymatgen.electronic_structure.core import Orbital
from pymatgen.electronic_structure.dos import CompleteDos, Dos, Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "August 23, 2013"
class BoltztrapRunner(MSONable):
"""
This class is used to run Boltztrap on a band structure object.
"""
@requires(
which("x_trans"),
"BoltztrapRunner requires the executables 'x_trans' to be in "
"the path. Please download the Boltztrap at http://"
"www.icams.de/content/research/software-development/boltztrap/ "
"and follow the instructions in the README to compile "
"Bolztrap accordingly. Then add x_trans to your path",
)
def __init__(
self,
bs,
nelec,
dos_type="HISTO",
energy_grid=0.005,
lpfac=10,
run_type="BOLTZ",
band_nb=None,
tauref=0,
tauexp=0,
tauen=0,
soc=False,
doping=None,
energy_span_around_fermi=1.5,
scissor=0.0,
kpt_line=None,
spin=None,
cond_band=False,
tmax=1300,
tgrid=50,
symprec=1e-3,
cb_cut=10,
timeout=7200,
):
"""
Args:
bs:
A band structure object
nelec:
the number of electrons
dos_type:
two options for the band structure integration: "HISTO"
(histogram) or "TETRA" using the tetrahedon method. TETRA
typically gives better results (especially for DOSes)
but takes more time
energy_grid:
the energy steps used for the integration (eV)
lpfac:
the number of interpolation points in the real space. By
default 10 gives 10 time more points in the real space than
the number of kpoints given in reciprocal space
run_type:
                type of boltztrap usage. Options are:
- BOLTZ: (default) compute transport coefficients
- BANDS: interpolate all bands contained in the energy range
specified in energy_span_around_fermi variable, along specified
k-points
- DOS: compute total and partial dos (custom BoltzTraP code
needed!)
- FERMI: compute fermi surface or more correctly to
get certain bands interpolated
band_nb:
indicates a band number. Used for Fermi Surface interpolation
(run_type="FERMI")
spin:
specific spin component (1: up, -1: down) of the band selected
in FERMI mode (mandatory).
cond_band:
if a conduction band is specified in FERMI mode,
set this variable as True
tauref:
reference relaxation time. Only set to a value different than
zero if we want to model beyond the constant relaxation time.
tauexp:
exponent for the energy in the non-constant relaxation time
approach
tauen:
reference energy for the non-constant relaxation time approach
soc:
results from spin-orbit coupling (soc) computations give
typically non-polarized (no spin up or down) results but single
electron occupations. If the band structure comes from a soc
computation, you should set soc to True (default False)
doping:
the fixed doping levels you want to compute. Boltztrap provides
both transport values depending on electron chemical potential
(fermi energy) and for a series of fixed carrier
concentrations. By default, this is set to 1e16 to 1e22 in
increments of factors of 10.
energy_span_around_fermi:
usually the interpolation is not needed on the entire energy
range but on a specific range around the fermi level.
This energy gives this range in eV. by default it is 1.5 eV.
If DOS or BANDS type are selected, this range is automatically
set to cover the entire energy range.
scissor:
scissor to apply to the band gap (eV). This applies a scissor
operation moving the band edges without changing the band
shape. This is useful to correct the often underestimated band
gap in DFT. Default is 0.0 (no scissor)
kpt_line:
list of fractional coordinates of kpoints as arrays or list of
Kpoint objects for BANDS mode calculation (standard path of
high symmetry k-points is automatically set as default)
tmax:
Maximum temperature (K) for calculation (default=1300)
tgrid:
Temperature interval for calculation (default=50)
symprec: 1e-3 is the default in pymatgen. If the kmesh has been
generated using a different symprec, it has to be specified
to avoid a "factorization error" in BoltzTraP calculation.
If a kmesh that spans the whole Brillouin zone has been used,
or to disable all the symmetries, set symprec to None.
cb_cut: by default 10% of the highest conduction bands are
removed because they are often not accurate.
Tune cb_cut to change the percentage (0-100) of bands
that are removed.
timeout: overall time limit (in seconds): mainly to avoid infinite
loop when trying to find Fermi levels.
"""
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
self.error = []
self.run_type = run_type
self.band_nb = band_nb
self.spin = spin
self.cond_band = cond_band
self.tauref = tauref
self.tauexp = tauexp
self.tauen = tauen
self.soc = soc
self.kpt_line = kpt_line
self.cb_cut = cb_cut / 100.0
if isinstance(doping, list) and len(doping) > 0:
self.doping = doping
else:
self.doping = []
for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:
self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])
self.doping.append(1e22)
self.energy_span_around_fermi = energy_span_around_fermi
self.scissor = scissor
self.tmax = tmax
self.tgrid = tgrid
self._symprec = symprec
if self.run_type in ("DOS", "BANDS"):
self._auto_set_energy_range()
self.timeout = timeout
self.start_time = time.time()
def _auto_set_energy_range(self):
"""
automatically determine the energy range as min/max eigenvalue
minus/plus the buffer_in_ev
"""
emins = [min(e_k[0] for e_k in self._bs.bands[Spin.up])]
emaxs = [max(e_k[0] for e_k in self._bs.bands[Spin.up])]
if self._bs.is_spin_polarized:
emins.append(min(e_k[0] for e_k in self._bs.bands[Spin.down]))
emaxs.append(max(e_k[0] for e_k in self._bs.bands[Spin.down]))
min_eigenval = Energy(min(emins) - self._bs.efermi, "eV").to("Ry")
max_eigenval = Energy(max(emaxs) - self._bs.efermi, "eV").to("Ry")
# set energy range to buffer around min/max EV
# buffer does not increase CPU time but will help get equal
# energies for spin up/down for band structure
const = Energy(2, "eV").to("Ry")
self._ll = min_eigenval - const
self._hl = max_eigenval + const
en_range = Energy(max((abs(self._ll), abs(self._hl))), "Ry").to("eV")
self.energy_span_around_fermi = en_range * 1.01
print("energy_span_around_fermi = ", self.energy_span_around_fermi)
@property
def bs(self):
"""
:return: The BandStructure
"""
return self._bs
@property
def nelec(self):
"""
:return: Number of electrons
"""
return self._nelec
def write_energy(self, output_file):
"""
Writes the energy to an output file.
:param output_file: Filename
"""
with open(output_file, "w") as f:
f.write("test\n")
f.write(f"{len(self._bs.kpoints)}\n")
if self.run_type == "FERMI":
sign = -1.0 if self.cond_band else 1.0
for i, kpt in enumerate(self._bs.kpoints):
eigs = []
eigs.append(
Energy(
self._bs.bands[Spin(self.spin)][self.band_nb][i] - self._bs.efermi,
"eV",
).to("Ry")
)
f.write(
"%12.8f %12.8f %12.8f %d\n"
% (
kpt.frac_coords[0],
kpt.frac_coords[1],
kpt.frac_coords[2],
len(eigs),
)
)
for e in eigs:
f.write("%18.8f\n" % (sign * float(e)))
else:
for i, kpt in enumerate(self._bs.kpoints):
eigs = []
if self.run_type == "DOS":
spin_lst = [self.spin]
else:
spin_lst = self._bs.bands
for spin in spin_lst:
# use 90% of bottom bands since highest eigenvalues
# are usually incorrect
# ask Geoffroy Hautier for more details
nb_bands = int(math.floor(self._bs.nb_bands * (1 - self.cb_cut)))
for j in range(nb_bands):
eigs.append(
Energy(
self._bs.bands[Spin(spin)][j][i] - self._bs.efermi,
"eV",
).to("Ry")
)
eigs.sort()
if self.run_type == "DOS" and self._bs.is_spin_polarized:
eigs.insert(0, self._ll)
eigs.append(self._hl)
f.write(
"%12.8f %12.8f %12.8f %d\n"
% (
kpt.frac_coords[0],
kpt.frac_coords[1],
kpt.frac_coords[2],
len(eigs),
)
)
for e in eigs:
f.write("%18.8f\n" % (float(e)))
def write_struct(self, output_file):
"""
Writes the structure to an output file.
:param output_file: Filename
"""
if self._symprec is not None:
sym = SpacegroupAnalyzer(self._bs.structure, symprec=self._symprec)
elif self._symprec is None:
pass
with open(output_file, "w") as f:
if self._symprec is not None:
f.write(
"{} {}\n".format(
self._bs.structure.composition.formula,
sym.get_space_group_symbol(),
)
)
elif self._symprec is None:
f.write("{} {}\n".format(self._bs.structure.composition.formula, "symmetries disabled"))
f.write(
"{}\n".format(
"\n".join(
[
" ".join(["%.5f" % Length(i, "ang").to("bohr") for i in row])
for row in self._bs.structure.lattice.matrix
]
)
)
)
if self._symprec is not None:
ops = sym.get_symmetry_dataset()["rotations"]
elif self._symprec is None:
ops = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]
f.write(f"{len(ops)}\n")
for c in ops:
for row in c:
f.write("{}\n".format(" ".join(str(i) for i in row)))
def write_def(self, output_file):
"""
Writes the def to an output file.
:param output_file: Filename
"""
# This function is useless in std version of BoltzTraP code
# because x_trans script overwrite BoltzTraP.def
with open(output_file, "w") as f:
so = ""
if self._bs.is_spin_polarized or self.soc:
so = "so"
f.write(
"5, 'boltztrap.intrans', 'old', 'formatted',0\n"
+ "6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n"
+ "20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy"
+ so
+ "', 'old', "
"'formatted',0\n" + "48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" + "49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" + "50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" + "51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" + "21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" + "22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" + "24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" + "30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n"
)
def write_proj(self, output_file_proj, output_file_def):
"""
        Writes the projections to output files.
        :param output_file_proj: Base filename for the projection files
        :param output_file_def: Filename for the BoltzTraP.def file
"""
# This function is useless in std version of BoltzTraP code
# because x_trans script overwrite BoltzTraP.def
for oi, o in enumerate(Orbital):
for site_nb in range(0, len(self._bs.structure.sites)):
if oi < len(self._bs.projections[Spin.up][0][0]):
with open(output_file_proj + "_" + str(site_nb) + "_" + str(o), "w") as f:
f.write(self._bs.structure.composition.formula + "\n")
f.write(str(len(self._bs.kpoints)) + "\n")
for i, kpt in enumerate(self._bs.kpoints):
tmp_proj = []
for j in range(int(math.floor(self._bs.nb_bands * (1 - self.cb_cut)))):
tmp_proj.append(self._bs.projections[Spin(self.spin)][j][i][oi][site_nb])
# TODO deal with the sorting going on at
# the energy level!!!
# tmp_proj.sort()
if self.run_type == "DOS" and self._bs.is_spin_polarized:
tmp_proj.insert(0, self._ll)
tmp_proj.append(self._hl)
f.write(
"%12.8f %12.8f %12.8f %d\n"
% (
kpt.frac_coords[0],
kpt.frac_coords[1],
kpt.frac_coords[2],
len(tmp_proj),
)
)
for t in tmp_proj:
f.write("%18.8f\n" % float(t))
with open(output_file_def, "w") as f:
so = ""
if self._bs.is_spin_polarized:
so = "so"
f.write(
"5, 'boltztrap.intrans', 'old', 'formatted',0\n"
+ "6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n"
+ "20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy"
+ so
+ "', 'old', "
"'formatted',0\n" + "48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" + "49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" + "50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" + "51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" + "21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" + "22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" + "24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" + "30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n"
)
i = 1000
for oi, o in enumerate(Orbital):
for site_nb in range(0, len(self._bs.structure.sites)):
if oi < len(self._bs.projections[Spin.up][0][0]):
f.write(
str(i)
+ ",'"
+ "boltztrap.proj_"
+ str(site_nb)
+ "_"
+ str(o.name)
+ "' 'old', 'formatted',0\n"
)
i += 1
def write_intrans(self, output_file):
"""
Writes the intrans to an output file.
:param output_file: Filename
"""
setgap = 1 if self.scissor > 0.0001 else 0
if self.run_type in ("BOLTZ", "DOS"):
with open(output_file, "w") as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 %d %f # iskip (not presently used) idebug "
"setgap shiftgap \n" % (setgap, Energy(self.scissor, "eV").to("Ry"))
)
fout.write(
"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
"span around Fermilevel, number of electrons\n"
% (
Energy(self.energy_grid, "eV").to("Ry"),
Energy(self.energy_span_around_fermi, "eV").to("Ry"),
self._nelec,
)
)
fout.write("CALC # CALC (calculate expansion coeff), NOCALC read from file\n")
fout.write("%d # lpfac, number of latt-points per k-point\n" % self.lpfac)
fout.write("%s # run mode (only BOLTZ is supported)\n" % self.run_type)
fout.write(".15 # (efcut) energy range of chemical potential\n")
fout.write(f"{self.tmax} {self.tgrid} # Tmax, temperature grid\n")
fout.write("-1. # energyrange of bands given DOS output sig_xxx and dos_xxx (xxx is band number)\n")
fout.write(self.dos_type + "\n") # e.g., HISTO or TETRA
fout.write(f"{self.tauref} {self.tauexp} {self.tauen} 0 0 0\n")
fout.write(f"{2 * len(self.doping)}\n")
for d in self.doping:
fout.write(str(d) + "\n")
for d in self.doping:
fout.write(str(-d) + "\n")
elif self.run_type == "FERMI":
with open(output_file, "w") as fout:
fout.write("GENE # use generic interface\n")
fout.write("1 0 0 0.0 # iskip (not presently used) idebug setgap shiftgap \n")
fout.write(
"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,"
"energy span around Fermilevel, "
"number of electrons\n" % (Energy(self.energy_grid, "eV").to("Ry"), self._nelec)
)
fout.write("CALC # CALC (calculate expansion coeff), NOCALC read from file\n")
fout.write("%d # lpfac, number of latt-points per k-point\n" % self.lpfac)
fout.write("FERMI # run mode (only BOLTZ is supported)\n")
fout.write(
str(1)
+ " # actual band selected: "
+ str(self.band_nb + 1)
+ " spin: "
+ str(self.spin)
)
elif self.run_type == "BANDS":
if self.kpt_line is None:
kpath = HighSymmKpath(self._bs.structure)
self.kpt_line = [
Kpoint(k, self._bs.structure.lattice) for k in kpath.get_kpoints(coords_are_cartesian=False)[0]
]
self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
elif isinstance(self.kpt_line[0], Kpoint):
self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
with open(output_file, "w") as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 %d %f # iskip (not presently used) idebug "
"setgap shiftgap \n" % (setgap, Energy(self.scissor, "eV").to("Ry"))
)
fout.write(
"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
"span around Fermilevel, "
"number of electrons\n"
% (
Energy(self.energy_grid, "eV").to("Ry"),
Energy(self.energy_span_around_fermi, "eV").to("Ry"),
self._nelec,
)
)
fout.write("CALC # CALC (calculate expansion coeff), NOCALC read from file\n")
fout.write("%d # lpfac, number of latt-points per k-point\n" % self.lpfac)
fout.write("BANDS # run mode (only BOLTZ is supported)\n")
fout.write("P " + str(len(self.kpt_line)) + "\n")
for kp in self.kpt_line:
fout.writelines([str(k) + " " for k in kp])
fout.write("\n")
def write_input(self, output_dir):
"""
Writes the input files.
:param output_dir: Directory to write the input files.
"""
if self._bs.is_spin_polarized or self.soc:
self.write_energy(os.path.join(output_dir, "boltztrap.energyso"))
else:
self.write_energy(os.path.join(output_dir, "boltztrap.energy"))
self.write_struct(os.path.join(output_dir, "boltztrap.struct"))
self.write_intrans(os.path.join(output_dir, "boltztrap.intrans"))
self.write_def(os.path.join(output_dir, "BoltzTraP.def"))
if len(self.bs.projections) != 0 and self.run_type == "DOS":
self.write_proj(
os.path.join(output_dir, "boltztrap.proj"),
os.path.join(output_dir, "BoltzTraP.def"),
)
def run(
self,
path_dir=None,
convergence=True,
write_input=True,
clear_dir=False,
max_lpfac=150,
min_egrid=0.00005,
):
"""
Write inputs (optional), run BoltzTraP, and ensure
convergence (optional)
Args:
path_dir (str): directory in which to run BoltzTraP
convergence (bool): whether to check convergence and make
corrections if needed
write_input: (bool) whether to write input files before the run
(required for convergence mode)
clear_dir: (bool) whether to remove all files in the path_dir
before starting
max_lpfac: (float) maximum lpfac value to try before reducing egrid
in convergence mode
min_egrid: (float) minimum egrid value to try before giving up in
convergence mode
Returns:
"""
# TODO: consider making this a part of custodian rather than pymatgen
# A lot of this functionality (scratch dirs, handlers, monitors)
# is built into custodian framework
if convergence and not write_input:
raise ValueError("Convergence mode requires write_input to be true")
if self.run_type in ("BANDS", "DOS", "FERMI"):
convergence = False
if self.lpfac > max_lpfac:
max_lpfac = self.lpfac
if self.run_type == "BANDS" and self.bs.is_spin_polarized:
print(
"Reminder: for run_type " + str(self.run_type) + ", spin component are not separated! "
"(you have a spin polarized band structure)"
)
if self.run_type in ("FERMI", "DOS") and self.spin is None:
if self.bs.is_spin_polarized:
raise BoltztrapError("Spin parameter must be specified for spin polarized band structures!")
self.spin = 1
dir_bz_name = "boltztrap"
if path_dir is None:
temp_dir = tempfile.mkdtemp()
path_dir = os.path.join(temp_dir, dir_bz_name)
else:
path_dir = os.path.abspath(os.path.join(path_dir, dir_bz_name))
if not os.path.exists(path_dir):
os.mkdir(path_dir)
elif clear_dir:
for c in os.listdir(path_dir):
os.remove(os.path.join(path_dir, c))
FORMAT = "%(message)s"
logging.basicConfig(
level=logging.INFO,
format=FORMAT,
filename=os.path.join(path_dir, "../boltztrap.out"),
)
with cd(path_dir):
lpfac_start = self.lpfac
converged = False
while self.energy_grid >= min_egrid and not converged:
self.lpfac = lpfac_start
if time.time() - self.start_time > self.timeout:
raise BoltztrapError(f"no doping convergence after timeout of {self.timeout} s")
logging.info(f"lpfac, energy_grid: {self.lpfac} {self.energy_grid}")
while self.lpfac <= max_lpfac and not converged:
if time.time() - self.start_time > self.timeout:
raise BoltztrapError(f"no doping convergence after timeout of {self.timeout} s")
if write_input:
self.write_input(path_dir)
bt_exe = ["x_trans", "BoltzTraP"]
if self._bs.is_spin_polarized or self.soc:
bt_exe.append("-so")
with subprocess.Popen(
bt_exe,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
) as p:
p.wait()
for c in p.communicate():
logging.info(c.decode())
if "error in factorization" in c.decode():
raise BoltztrapError("error in factorization")
warning = ""
with open(os.path.join(path_dir, dir_bz_name + ".outputtrans")) as f:
for l in f:
if "Option unknown" in l:
raise BoltztrapError("DOS mode needs a custom version of BoltzTraP code is needed")
if "WARNING" in l:
warning = l
break
if "Error - Fermi level was not found" in l:
warning = l
break
if not warning and convergence:
# check convergence for warning
analyzer = BoltztrapAnalyzer.from_files(path_dir)
for doping in ["n", "p"]:
for c in analyzer.mu_doping[doping]:
if len(analyzer.mu_doping[doping][c]) != len(analyzer.doping[doping]):
warning = "length of mu_doping array is incorrect"
break
if (
doping == "p"
and sorted(analyzer.mu_doping[doping][c], reverse=True)
!= analyzer.mu_doping[doping][c]
):
warning = "sorting of mu_doping array incorrect for p-type"
break
# ensure n-type doping sorted correctly
if (
doping == "n"
and sorted(analyzer.mu_doping[doping][c]) != analyzer.mu_doping[doping][c]
):
warning = "sorting of mu_doping array incorrect for n-type"
break
if warning:
self.lpfac += 10
logging.warn(f"Warning detected: {warning}! Increase lpfac to {self.lpfac}")
else:
converged = True
if not converged:
self.energy_grid /= 10
logging.info(f"Could not converge with max lpfac; Decrease egrid to {self.energy_grid}")
if not converged:
raise BoltztrapError(
"Doping convergence not reached with lpfac="
+ str(self.lpfac)
+ ", energy_grid="
+ str(self.energy_grid)
)
return path_dir
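    # Illustrative usage sketch (a minimal, commented example; it assumes a
    # converged VASP calculation parsed into `vr` and the x_trans/BoltzTraP
    # executables on the PATH; the file names below are hypothetical):
    #
    #   from pymatgen.io.vasp import Vasprun
    #   vr = Vasprun("vasprun.xml")
    #   bs = vr.get_band_structure()
    #   runner = BoltztrapRunner(bs=bs, nelec=vr.parameters["NELECT"])
    #   out_dir = runner.run(path_dir=".")          # write inputs, run, check convergence
    #   an = BoltztrapAnalyzer.from_files(out_dir)  # parse the results afterwards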
def as_dict(self):
"""
:return: MSONable dict
"""
results = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lpfac": self.lpfac,
"bs": self.bs.as_dict(),
"nelec": self._nelec,
"dos_type": self.dos_type,
"run_type": self.run_type,
"band_nb": self.band_nb,
"spin": self.spin,
"cond_band": self.cond_band,
"tauref": self.tauref,
"tauexp": self.tauexp,
"tauen": self.tauen,
"soc": self.soc,
"kpt_line": self.kpt_line,
"doping": self.doping,
"energy_span_around_fermi": self.energy_span_around_fermi,
"scissor": self.scissor,
"tmax": self.tmax,
"tgrid": self.tgrid,
"symprec": self._symprec,
}
return jsanitize(results)
class BoltztrapError(Exception):
"""
    Exception class for BoltzTraP.
    Raised when BoltzTraP gives an error.
"""
pass
class BoltztrapAnalyzer:
"""
Class used to store all the data from a boltztrap run
"""
def __init__(
self,
gap=None,
mu_steps=None,
cond=None,
seebeck=None,
kappa=None,
hall=None,
doping=None,
mu_doping=None,
seebeck_doping=None,
cond_doping=None,
kappa_doping=None,
hall_doping=None,
intrans=None,
dos=None,
dos_partial=None,
carrier_conc=None,
vol=None,
warning=None,
bz_bands=None,
bz_kpoints=None,
fermi_surface_data=None,
):
"""
        Constructor taking directly all the data generated by BoltzTraP. You
        will probably not use it directly; use the from_files and
        from_dict methods instead.
Args:
gap: The gap after interpolation in eV
mu_steps: The steps of electron chemical potential (or Fermi
level) in eV.
cond: The electronic conductivity tensor divided by a constant
relaxation time (sigma/tau) at different temperature and
fermi levels.
The format is {temperature: [array of 3x3 tensors at each
fermi level in mu_steps]}. The units are 1/(Ohm*m*s).
seebeck: The Seebeck tensor at different temperatures and fermi
levels. The format is {temperature: [array of 3x3 tensors at
each fermi level in mu_steps]}. The units are V/K
kappa: The electronic thermal conductivity tensor divided by a
constant relaxation time (kappa/tau) at different temperature
and fermi levels. The format is {temperature: [array of 3x3
tensors at each fermi level in mu_steps]}
The units are W/(m*K*s)
hall: The hall tensor at different temperature and fermi levels
The format is {temperature: [array of 27 coefficients list at
each fermi level in mu_steps]}
The units are m^3/C
doping: The different doping levels that have been given to
Boltztrap. The format is {'p':[],'n':[]} with an array of
doping levels. The units are cm^-3
            mu_doping: Gives the electron chemical potential (or Fermi level)
                for a given set of doping levels.
                The format is {'p':{temperature: [fermi levels]},
                'n':{temperature: [fermi levels]}}.
                The Fermi level array is ordered according to the doping
                levels in doping. Units are cm^-3 for the doping and eV
                for the Fermi level.
seebeck_doping: The Seebeck tensor at different temperatures and
doping levels. The format is {'p': {temperature: [Seebeck
tensors]}, 'n':{temperature: [Seebeck tensors]}}
                The [Seebeck tensors] array is ordered according to the
                doping levels in doping. Units are cm^-3 for the doping
                and V/K for the Seebeck coefficient.
cond_doping: The electronic conductivity tensor divided by a
constant relaxation time (sigma/tau) at different
temperatures and doping levels
The format is {'p':{temperature: [conductivity tensors]},
'n':{temperature: [conductivity tensors]}}
                The [conductivity tensors] array is ordered according to the
                doping levels in doping. Units are cm^-3 for the doping and
                1/(Ohm*m*s) for the conductivity.
kappa_doping: The thermal conductivity tensor divided by a constant
relaxation time (kappa/tau) at different temperatures and
doping levels.
The format is {'p':{temperature: [thermal conductivity
tensors]},'n':{temperature: [thermal conductivity tensors]}}
                The [thermal conductivity tensors] array is ordered according
                to the doping levels in doping. Units are cm^-3 for the doping
                and W/(m*K*s) for the thermal conductivity.
hall_doping: The Hall tensor at different temperatures and doping
levels.
The format is {'p':{temperature: [Hall tensors]},
'n':{temperature: [Hall tensors]}}
The [Hall tensors] array is ordered according to the doping
levels in doping and each Hall tensor is represented by a 27
coefficients list.
The units are m^3/C
intrans: a dictionary of inputs e.g. {"scissor": 0.0}
carrier_conc: The concentration of carriers in electron (or hole)
per unit cell
dos: The dos computed by Boltztrap given as a pymatgen Dos object
dos_partial: Data for the partial DOS projected on sites and
orbitals
vol: Volume of the unit cell in angstrom cube (A^3)
warning: string if BoltzTraP outputted a warning, else None
bz_bands: Data for interpolated bands on a k-point line
(run_type=BANDS)
bz_kpoints: k-point in reciprocal coordinates for interpolated
bands (run_type=BANDS)
fermi_surface_data: energy values in a 3D grid imported from the
output .cube file.
"""
self.gap = gap
self.mu_steps = mu_steps
self._cond = cond
self._seebeck = seebeck
self._kappa = kappa
self._hall = hall
self.warning = warning
self.doping = doping
self.mu_doping = mu_doping
self._seebeck_doping = seebeck_doping
self._cond_doping = cond_doping
self._kappa_doping = kappa_doping
self._hall_doping = hall_doping
self.intrans = intrans
self._carrier_conc = carrier_conc
self.dos = dos
self.vol = vol
self._dos_partial = dos_partial
self._bz_bands = bz_bands
self._bz_kpoints = bz_kpoints
self.fermi_surface_data = fermi_surface_data
def get_symm_bands(self, structure, efermi, kpt_line=None, labels_dict=None):
"""
Function useful to read bands from Boltztrap output and get a
BandStructureSymmLine object comparable with that one from a DFT
calculation (if the same kpt_line is provided). Default kpt_line
and labels_dict is the standard path of high symmetry k-point for
the specified structure. They could be extracted from the
BandStructureSymmLine object that you want to compare with. efermi
variable must be specified to create the BandStructureSymmLine
object (usually it comes from DFT or Boltztrap calc)
"""
try:
if kpt_line is None:
kpath = HighSymmKpath(structure)
kpt_line = [
Kpoint(k, structure.lattice.reciprocal_lattice)
for k in kpath.get_kpoints(coords_are_cartesian=False)[0]
]
labels_dict = {l: k for k, l in zip(*kpath.get_kpoints(coords_are_cartesian=False)) if l}
kpt_line = [kp.frac_coords for kp in kpt_line]
elif isinstance(kpt_line[0], Kpoint):
kpt_line = [kp.frac_coords for kp in kpt_line]
labels_dict = {k: labels_dict[k].frac_coords for k in labels_dict}
idx_list = []
# kpt_dense=np.array([kp for kp in self._bz_kpoints])
for i, kp in enumerate(kpt_line):
w = []
prec = 1e-05
while len(w) == 0:
w = np.where(np.all(np.abs(kp - self._bz_kpoints) < [prec] * 3, axis=1))[0]
prec *= 10
# print( prec )
idx_list.append([i, w[0]])
# if len(w)>0:
# idx_list.append([i,w[0]])
# else:
# w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints)
# <[1e-04,1e-04,1e-04],axis=1))[0]
# idx_list.append([i,w[0]])
idx_list = np.array(idx_list)
# print( idx_list.shape )
bands_dict = {Spin.up: (self._bz_bands * Energy(1, "Ry").to("eV") + efermi).T[:, idx_list[:, 1]].tolist()}
# bz_kpoints = bz_kpoints[idx_list[:,1]].tolist()
sbs = BandStructureSymmLine(
kpt_line,
bands_dict,
structure.lattice.reciprocal_lattice,
efermi,
labels_dict=labels_dict,
)
return sbs
except Exception:
raise BoltztrapError(
"Bands are not in output of BoltzTraP.\nBolztrapRunner must be run with run_type=BANDS"
)
@staticmethod
def check_acc_bzt_bands(sbs_bz, sbs_ref, warn_thr=(0.03, 0.03)):
"""
Compare sbs_bz BandStructureSymmLine calculated with boltztrap with
the sbs_ref BandStructureSymmLine as reference (from MP for
instance), computing correlation and energy difference for eight bands
around the gap (semiconductors) or fermi level (metals).
        warn_thr is a threshold to get a warning in the accuracy of BoltzTraP
interpolated bands.
Return a dictionary with these keys:
- "N": the index of the band compared; inside each there are:
- "Corr": correlation coefficient for the 8 compared bands
- "Dist": energy distance for the 8 compared bands
- "branch_name": energy distance for that branch
- "avg_corr": average of correlation coefficient over the 8 bands
- "avg_dist": average of energy distance over the 8 bands
- "nb_list": list of indexes of the 8 compared bands
- "acc_thr": list of two float corresponing to the two warning
thresholds in input
- "acc_err": list of two bools:
True if the avg_corr > warn_thr[0], and
True if the avg_dist > warn_thr[1]
See also compare_sym_bands function doc
"""
if not sbs_ref.is_metal() and not sbs_bz.is_metal():
vbm_idx = sbs_bz.get_vbm()["band_index"][Spin.up][-1]
cbm_idx = sbs_bz.get_cbm()["band_index"][Spin.up][0]
nb_list = range(vbm_idx - 3, cbm_idx + 4)
else:
bnd_around_efermi = []
delta = 0
spin = list(sbs_bz.bands.keys())[0]
while len(bnd_around_efermi) < 8 and delta < 100:
delta += 0.1
bnd_around_efermi = []
for nb in range(len(sbs_bz.bands[spin])):
for kp in range(len(sbs_bz.bands[spin][nb])):
if abs(sbs_bz.bands[spin][nb][kp] - sbs_bz.efermi) < delta:
bnd_around_efermi.append(nb)
break
if len(bnd_around_efermi) < 8:
print("Warning! check performed on " + str(len(bnd_around_efermi)))
nb_list = bnd_around_efermi
else:
nb_list = bnd_around_efermi[:8]
# print(nb_list)
bcheck = compare_sym_bands(sbs_bz, sbs_ref, nb_list)
# print(bcheck)
acc_err = [False, False]
avg_corr = sum(item[1]["Corr"] for item in bcheck.items()) / 8
avg_distance = sum(item[1]["Dist"] for item in bcheck.items()) / 8
if avg_corr > warn_thr[0]:
acc_err[0] = True
        if avg_distance > warn_thr[1]:
acc_err[1] = True
bcheck["avg_corr"] = avg_corr
bcheck["avg_distance"] = avg_distance
bcheck["acc_err"] = acc_err
bcheck["acc_thr"] = warn_thr
bcheck["nb_list"] = nb_list
if True in acc_err:
print("Warning! some bands around gap are not accurate")
return bcheck
def get_seebeck(self, output="eigs", doping_levels=True):
"""
Gives the seebeck coefficient (microV/K) in either a
full 3x3 tensor form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
Returns:
            If doping_levels=True, a dictionary {'p':{temp:[]},'n':{temp:[]}}.
            The 'p' links to the Seebeck coefficient at p-type doping
            and 'n' to the Seebeck coefficient at n-type doping. Otherwise,
            returns a {temp:[]} dictionary.
The result contains either the sorted three eigenvalues of
the symmetric
Seebeck tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
units are microV/K
"""
return BoltztrapAnalyzer._format_to_output(self._seebeck, self._seebeck_doping, output, doping_levels, 1e6)
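    # Minimal sketch of how the getter above is typically indexed (assuming
    # `an` is a BoltztrapAnalyzer from a BOLTZ run and that 300 K is among the
    # calculated temperatures):
    #
    #   seeb = an.get_seebeck(output="eigs", doping_levels=True)
    #   seeb["p"][300][0]  # three sorted eigenvalues (microV/K) at the first
    #                      # p-type doping level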
def get_conductivity(self, output="eigs", doping_levels=True, relaxation_time=1e-14):
"""
Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor
form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
            If doping_levels=True, a dictionary {'p':{temp:[]},'n':{temp:[]}}.
            The 'p' links to the conductivity
            at p-type doping and 'n' to the conductivity at n-type
            doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either
the sorted three eigenvalues of the symmetric
conductivity tensor (format='eigs') or a full tensor (3x3
array) (output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are 1/Ohm*m
"""
return BoltztrapAnalyzer._format_to_output(
self._cond, self._cond_doping, output, doping_levels, relaxation_time
)
def get_power_factor(self, output="eigs", doping_levels=True, relaxation_time=1e-14):
"""
Gives the power factor (Seebeck^2 * conductivity) in units
microW/(m*K^2) in either a full 3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
            If doping_levels=True, a dictionary {'p':{temp:[]},'n':{temp:[]}}. The
            'p' links to the power factor
            at p-type doping and 'n' to the power factor at n-type doping.
Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
power factor tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are microW/(m K^2)
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in self._seebeck_doping[doping]} for doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
full_tensor = np.dot(
self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][i],
self._seebeck_doping[doping][t][i],
),
)
result_doping[doping][t].append(full_tensor)
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
full_tensor = np.dot(
self._cond[t][i],
np.dot(self._seebeck[t][i], self._seebeck[t][i]),
)
result[t].append(full_tensor)
return BoltztrapAnalyzer._format_to_output(
result, result_doping, output, doping_levels, multi=1e6 * relaxation_time
)
def get_thermal_conductivity(self, output="eigs", doping_levels=True, k_el=True, relaxation_time=1e-14):
"""
Gives the electronic part of the thermal conductivity in either a
full 3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
k_el (boolean): True for k_0-PF*T, False for k_0
relaxation_time (float): constant relaxation time in secs
Returns:
            If doping_levels=True, a dictionary {'p':{temp:[]},'n':{temp:[]}}. The
            'p' links to the thermal conductivity
            at p-type doping and 'n' to the thermal conductivity at n-type
            doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
conductivity tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are W/mK
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in self._seebeck_doping[doping]} for doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
if k_el:
pf_tensor = np.dot(
self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][i],
self._seebeck_doping[doping][t][i],
),
)
result_doping[doping][t].append(self._kappa_doping[doping][t][i] - pf_tensor * t)
else:
result_doping[doping][t].append(self._kappa_doping[doping][t][i])
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
if k_el:
pf_tensor = np.dot(
self._cond[t][i],
np.dot(self._seebeck[t][i], self._seebeck[t][i]),
)
result[t].append(self._kappa[t][i] - pf_tensor * t)
else:
result[t].append(self._kappa[t][i])
return BoltztrapAnalyzer._format_to_output(result, result_doping, output, doping_levels, multi=relaxation_time)
def get_zt(self, output="eigs", doping_levels=True, relaxation_time=1e-14, kl=1.0):
"""
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
            kl (float): lattice thermal conductivity in W/(m*K)
Returns:
            If doping_levels=True, a dictionary {'p':{temp:[]},'n':{temp:[]}}. The
            'p' links to ZT
            at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
ZT tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in self._seebeck_doping[doping]} for doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
pf_tensor = np.dot(
self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][i],
self._seebeck_doping[doping][t][i],
),
)
thermal_conduct = (self._kappa_doping[doping][t][i] - pf_tensor * t) * relaxation_time
result_doping[doping][t].append(
np.dot(
pf_tensor * relaxation_time * t,
np.linalg.inv(thermal_conduct + kl * np.eye(3, 3)),
)
)
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
pf_tensor = np.dot(
self._cond[t][i],
np.dot(self._seebeck[t][i], self._seebeck[t][i]),
)
thermal_conduct = (self._kappa[t][i] - pf_tensor * t) * relaxation_time
result[t].append(
np.dot(
pf_tensor * relaxation_time * t,
np.linalg.inv(thermal_conduct + kl * np.eye(3, 3)),
)
)
return BoltztrapAnalyzer._format_to_output(result, result_doping, output, doping_levels)
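    # Worked form of the expression assembled above, for reference: pf_tensor
    # = (sigma/tau) . S . S is the power factor divided by tau,
    # thermal_conduct = (kappa/tau - pf_tensor * T) * tau is the electronic
    # thermal conductivity kappa_e, and each appended value is
    #   ZT = pf_tensor * tau * T . inv(kappa_e + kl * I)
    # i.e. the usual zT = S^2 sigma T / (kappa_e + kappa_lattice) in 3x3
    # tensor form under the constant relaxation time approximation.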
def get_average_eff_mass(self, output="eigs", doping_levels=True):
"""
Gives the average effective mass tensor. We call it average because
it takes into account all the bands
        and regions in the Brillouin zone. This is different from the standard
        textbook effective mass, which often relates
        to only one (parabolic) band.
The average effective mass tensor is defined as the integrated
average of the second derivative of E(k)
This effective mass tensor takes into account:
-non-parabolicity
-multiple extrema
-multiple bands
For more information about it. See:
Hautier, G., Miglio, A., Waroquiers, D., Rignanese, G., & Gonze,
X. (2014).
How Does Chemistry Influence Electron Effective Mass in Oxides?
A High-Throughput Computational Analysis. Chemistry of Materials,
26(19), 5447–5458. doi:10.1021/cm404079a
or
Hautier, G., Miglio, A., Ceder, G., Rignanese, G.-M., & Gonze,
X. (2013).
Identification and design principles of low hole effective mass
p-type transparent conducting oxides.
Nature Communications, 4, 2292. doi:10.1038/ncomms3292
Depending on the value of output, we have either the full 3x3
effective mass tensor,
its 3 eigenvalues or an average
Args:
output (string): 'eigs' for eigenvalues, 'tensor' for the full
tensor and 'average' for an average (trace/3)
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
Returns:
If doping_levels=True,a dictionary {'p':{temp:[]},'n':{temp:[]}}
with an array of effective mass tensor, eigenvalues of average
value (depending on output) for each temperature and for each
doping level.
The 'p' links to hole effective mass tensor and 'n' to electron
effective mass tensor.
"""
result = None
result_doping = None
conc = self.get_carrier_concentration()
if doping_levels:
result_doping = {doping: {t: [] for t in self._cond_doping[doping]} for doping in self.doping}
for doping in result_doping:
for temp in result_doping[doping]:
for i in range(len(self.doping[doping])):
try:
result_doping[doping][temp].append(
np.linalg.inv(np.array(self._cond_doping[doping][temp][i]))
* self.doping[doping][i]
* 10 ** 6
* constants.e ** 2
/ constants.m_e
)
except np.linalg.LinAlgError:
pass
else:
result = {t: [] for t in self._seebeck}
for temp in result:
for i in range(len(self.mu_steps)):
                    try:
                        cond_inv = np.linalg.inv(np.array(self._cond[temp][i]))
                        result[temp].append(cond_inv * conc[temp][i] * 10 ** 6 * constants.e ** 2 / constants.m_e)
                    except np.linalg.LinAlgError:
                        # skip entries whose conductivity tensor cannot be inverted
                        pass
return BoltztrapAnalyzer._format_to_output(result, result_doping, output, doping_levels)
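    # Reference note on the expression above: since self._cond stores
    # sigma/tau, each appended quantity is
    #   inv(sigma/tau) * n[m^-3] * e^2 / m_e = (n e^2 tau / sigma) / m_e,
    # i.e. the conductivity effective mass tensor in units of the electron
    # mass (the 1e6 factor converts the cm^-3 concentration to m^-3).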
def get_seebeck_eff_mass(self, output="average", temp=300, doping_levels=False, Lambda=0.5):
"""
Seebeck effective mass calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the seebeck effective mass calculated using
the average of the three diagonal components of the seebeck tensor.
                'tensor' returns the seebeck effective mass with respect to the three
                diagonal components of the seebeck tensor.
doping_levels: False means that the seebeck effective mass is calculated
for every value of the chemical potential
True means that the seebeck effective mass is calculated
for every value of the doping levels for both n and p types
temp: temperature of calculated seebeck.
Lambda: fitting parameter used to model the scattering (0.5 means constant
relaxation time).
Returns:
a list of values for the seebeck effective mass w.r.t the chemical potential,
if doping_levels is set at False;
        a dict with n and p keys that contain a list of values for the seebeck effective
mass w.r.t the doping levels, if doping_levels is set at True;
if 'tensor' is selected, each element of the lists is a list containing
the three components of the seebeck effective mass.
"""
if doping_levels:
sbk_mass = {}
for dt in ("n", "p"):
conc = self.doping[dt]
seebeck = self.get_seebeck(output=output, doping_levels=True)[dt][temp]
sbk_mass[dt] = []
for i, c in enumerate(conc):
if output == "average":
sbk_mass[dt].append(seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i]), c, temp, Lambda))
elif output == "tensor":
sbk_mass[dt].append([])
for j in range(3):
sbk_mass[dt][-1].append(
seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i][j][j]), c, temp, Lambda)
)
else:
seebeck = self.get_seebeck(output=output, doping_levels=False)[temp]
conc = self.get_carrier_concentration()[temp]
sbk_mass = []
for i, c in enumerate(conc):
if output == "average":
sbk_mass.append(seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i]), c, temp, Lambda))
elif output == "tensor":
sbk_mass.append([])
for j in range(3):
sbk_mass[-1].append(seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i][j][j]), c, temp, Lambda))
return sbk_mass
def get_complexity_factor(self, output="average", temp=300, doping_levels=False, Lambda=0.5):
"""
        Fermi surface complexity factor calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the complexity factor calculated using the average
of the three diagonal components of the seebeck and conductivity tensors.
                'tensor' returns the complexity factor with respect to the three
diagonal components of seebeck and conductivity tensors.
doping_levels: False means that the complexity factor is calculated
for every value of the chemical potential
True means that the complexity factor is calculated
for every value of the doping levels for both n and p types
temp: temperature of calculated seebeck and conductivity.
Lambda: fitting parameter used to model the scattering (0.5 means constant
relaxation time).
Returns:
a list of values for the complexity factor w.r.t the chemical potential,
if doping_levels is set at False;
        a dict with n and p keys that contain a list of values for the complexity factor
w.r.t the doping levels, if doping_levels is set at True;
if 'tensor' is selected, each element of the lists is a list containing
the three components of the complexity factor.
"""
if doping_levels:
cmplx_fact = {}
for dt in ("n", "p"):
sbk_mass = self.get_seebeck_eff_mass(output, temp, True, Lambda)[dt]
cond_mass = self.get_average_eff_mass(output=output, doping_levels=True)[dt][temp]
if output == "average":
cmplx_fact[dt] = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass)]
elif output == "tensor":
cmplx_fact[dt] = []
for i, sm in enumerate(sbk_mass):
cmplx_fact[dt].append([])
for j in range(3):
cmplx_fact[dt][-1].append((sm[j] / abs(cond_mass[i][j][j])) ** 1.5)
else:
sbk_mass = self.get_seebeck_eff_mass(output, temp, False, Lambda)
cond_mass = self.get_average_eff_mass(output=output, doping_levels=False)[temp]
if output == "average":
cmplx_fact = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass)]
elif output == "tensor":
cmplx_fact = []
for i, sm in enumerate(sbk_mass):
cmplx_fact.append([])
for j in range(3):
cmplx_fact[-1].append((sm[j] / abs(cond_mass[i][j][j])) ** 1.5)
return cmplx_fact
def get_extreme(
self,
target_prop,
maximize=True,
min_temp=None,
max_temp=None,
min_doping=None,
max_doping=None,
isotropy_tolerance=0.05,
use_average=True,
):
"""
This method takes in eigenvalues over a range of carriers,
temperatures, and doping levels, and tells you what is the "best"
value that can be achieved for the given target_property. Note that
this method searches the doping dict only, not the full mu dict.
Args:
target_prop: target property, i.e. "seebeck", "power factor",
"conductivity", "kappa", or "zt"
maximize: True to maximize, False to minimize (e.g. kappa)
min_temp: minimum temperature allowed
max_temp: maximum temperature allowed
min_doping: minimum doping allowed (e.g., 1E18)
max_doping: maximum doping allowed (e.g., 1E20)
isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
use_average: True for avg of eigenval, False for max eigenval
Returns:
A dictionary with keys {"p", "n", "best"} with sub-keys:
{"value", "temperature", "doping", "isotropic"}
"""
def is_isotropic(x, isotropy_tolerance):
"""
Internal method to tell you if 3-vector "x" is isotropic
Args:
x: the vector to determine isotropy for
isotropy_tolerance: tolerance, e.g. 0.05 is 5%
"""
if len(x) != 3:
raise ValueError("Invalid input to is_isotropic!")
st = sorted(x)
return bool(
all([st[0], st[1], st[2]])
and (abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance)
and (abs(st[2] - st[0]) / st[2] <= isotropy_tolerance)
and (abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance)
)
if target_prop.lower() == "seebeck":
d = self.get_seebeck(output="eigs", doping_levels=True)
elif target_prop.lower() == "power factor":
d = self.get_power_factor(output="eigs", doping_levels=True)
elif target_prop.lower() == "conductivity":
d = self.get_conductivity(output="eigs", doping_levels=True)
elif target_prop.lower() == "kappa":
d = self.get_thermal_conductivity(output="eigs", doping_levels=True)
elif target_prop.lower() == "zt":
d = self.get_zt(output="eigs", doping_levels=True)
else:
raise ValueError(f"Target property: {target_prop} not recognized!")
absval = True # take the absolute value of properties
x_val = None
x_temp = None
x_doping = None
x_isotropic = None
output = {}
min_temp = min_temp or 0
max_temp = max_temp or float("inf")
min_doping = min_doping or 0
max_doping = max_doping or float("inf")
for pn in ("p", "n"):
for t in d[pn]: # temperatures
if min_temp <= float(t) <= max_temp:
for didx, evs in enumerate(d[pn][t]):
doping_lvl = self.doping[pn][didx]
if min_doping <= doping_lvl <= max_doping:
isotropic = is_isotropic(evs, isotropy_tolerance)
if absval:
evs = [abs(x) for x in evs]
if use_average:
val = float(sum(evs)) / len(evs)
else:
val = max(evs)
if x_val is None or (val > x_val and maximize) or (val < x_val and not maximize):
x_val = val
x_temp = t
x_doping = doping_lvl
x_isotropic = isotropic
output[pn] = {
"value": x_val,
"temperature": x_temp,
"doping": x_doping,
"isotropic": x_isotropic,
}
x_val = None
if maximize:
max_type = "p" if output["p"]["value"] >= output["n"]["value"] else "n"
else:
max_type = "p" if output["p"]["value"] <= output["n"]["value"] else "n"
output["best"] = output[max_type]
output["best"]["carrier_type"] = max_type
return output
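    # Minimal usage sketch (assuming `an` is a BoltztrapAnalyzer from a BOLTZ
    # run; the doping window is hypothetical): best average Seebeck eigenvalue
    # between 1e18 and 1e20 cm^-3.
    #
    #   best = an.get_extreme("seebeck", min_doping=1e18, max_doping=1e20)
    #   best["best"]["value"]         # extreme value found
    #   best["best"]["temperature"]   # temperature at which it occurs
    #   best["best"]["carrier_type"]  # "p" or "n"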
@staticmethod
def _format_to_output(tensor, tensor_doping, output, doping_levels, multi=1.0):
if doping_levels:
full_tensor = tensor_doping
result = {doping: {t: [] for t in tensor_doping[doping]} for doping in tensor_doping}
for doping in full_tensor:
for temp in full_tensor[doping]:
for i in range(len(full_tensor[doping][temp])):
if output in ["eig", "eigs"]:
result[doping][temp].append(sorted(np.linalg.eigh(full_tensor[doping][temp][i])[0] * multi))
elif output == "tensor":
result[doping][temp].append(np.array(full_tensor[doping][temp][i]) * multi)
elif output == "average":
result[doping][temp].append(
(
full_tensor[doping][temp][i][0][0]
+ full_tensor[doping][temp][i][1][1]
+ full_tensor[doping][temp][i][2][2]
)
* multi
/ 3.0
)
else:
raise ValueError(f"Unknown output format: {output}")
else:
full_tensor = tensor
result = {t: [] for t in tensor}
for temp in full_tensor:
for i in range(len(tensor[temp])):
if output in ["eig", "eigs"]:
result[temp].append(sorted(np.linalg.eigh(full_tensor[temp][i])[0] * multi))
elif output == "tensor":
result[temp].append(np.array(full_tensor[temp][i]) * multi)
elif output == "average":
result[temp].append(
(full_tensor[temp][i][0][0] + full_tensor[temp][i][1][1] + full_tensor[temp][i][2][2])
* multi
/ 3.0
)
else:
raise ValueError(f"Unknown output format: {output}")
return result
def get_complete_dos(self, structure, analyzer_for_second_spin=None):
"""
Gives a CompleteDos object with the DOS from the interpolated
projected band structure
Args:
            structure: the structure (necessary to identify sites for projection)
analyzer_for_second_spin must be specified to have a
CompleteDos with both Spin components
Returns:
a CompleteDos object
Example of use in case of spin polarized case:
BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=1).run(path_dir='dos_up/')
an_up=BoltztrapAnalyzer.from_files("dos_up/boltztrap/",dos_spin=1)
BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=-1).run(path_dir='dos_dw/')
an_dw=BoltztrapAnalyzer.from_files("dos_dw/boltztrap/",dos_spin=-1)
cdos=an_up.get_complete_dos(bs.structure,an_dw)
"""
pdoss = {}
spin_1 = list(self.dos.densities.keys())[0]
if analyzer_for_second_spin:
if not np.all(self.dos.energies == analyzer_for_second_spin.dos.energies):
raise BoltztrapError("Dos merging error: energies of the two dos are different")
spin_2 = list(analyzer_for_second_spin.dos.densities.keys())[0]
if spin_1 == spin_2:
raise BoltztrapError("Dos merging error: spin component are the same")
for s in self._dos_partial:
if structure.sites[int(s)] not in pdoss:
pdoss[structure.sites[int(s)]] = {}
for o in self._dos_partial[s]:
if Orbital[o] not in pdoss[structure.sites[int(s)]]:
pdoss[structure.sites[int(s)]][Orbital[o]] = {}
pdoss[structure.sites[int(s)]][Orbital[o]][spin_1] = self._dos_partial[s][o]
if analyzer_for_second_spin:
pdoss[structure.sites[int(s)]][Orbital[o]][spin_2] = analyzer_for_second_spin._dos_partial[s][o]
if analyzer_for_second_spin:
tdos = Dos(
self.dos.efermi,
self.dos.energies,
{
spin_1: self.dos.densities[spin_1],
spin_2: analyzer_for_second_spin.dos.densities[spin_2],
},
)
else:
tdos = self.dos
return CompleteDos(structure, total_dos=tdos, pdoss=pdoss)
def get_mu_bounds(self, temp=300):
"""
:param temp: Temperature.
:return: The chemical potential bounds at that temperature.
"""
return min(self.mu_doping["p"][temp]), max(self.mu_doping["n"][temp])
def get_carrier_concentration(self):
"""
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
"""
return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]] for temp in self._carrier_conc}
def get_hall_carrier_concentration(self):
"""
        gives the Hall carrier concentration (in cm^-3). This is the trace of
        the Hall tensor (see the BoltzTraP source code). The Hall carrier
        concentration is not always exactly the same as the carrier concentration.
Returns
a dictionary {temp:[]} with an array of Hall carrier concentration
(in cm^-3) at each temperature The array relates to each step of
electron chemical potential
"""
result = {temp: [] for temp in self._hall}
for temp in self._hall:
for i in self._hall[temp]:
trace = (i[1][2][0] + i[2][0][1] + i[0][1][2]) / 3.0
if trace != 0.0:
result[temp].append(1e-6 / (trace * constants.e))
else:
result[temp].append(0.0)
return result
@staticmethod
def parse_outputtrans(path_dir):
"""
Parses .outputtrans file
Args:
path_dir: dir containing boltztrap.outputtrans
Returns:
tuple - (run_type, warning, efermi, gap, doping_levels)
"""
run_type = None
warning = None
efermi = None
gap = None
doping_levels = []
with open(os.path.join(path_dir, "boltztrap.outputtrans")) as f:
for line in f:
if "WARNING" in line:
warning = line
elif "Calc type:" in line:
run_type = line.split()[-1]
elif line.startswith("VBM"):
efermi = Energy(line.split()[1], "Ry").to("eV")
elif line.startswith("Egap:"):
gap = Energy(float(line.split()[1]), "Ry").to("eV")
elif line.startswith("Doping level number"):
doping_levels.append(float(line.split()[6]))
return run_type, warning, efermi, gap, doping_levels
@staticmethod
def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):
"""
Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files
Args:
path_dir: (str) dir containing DOS files
efermi: (float) Fermi energy
dos_spin: (int) -1 for spin down, +1 for spin up
trim_dos: (bool) whether to post-process / trim DOS
Returns:
tuple - (DOS, dict of partial DOS)
"""
data_dos = {"total": [], "partial": {}}
# parse the total DOS data
# format is energy, DOS, integrated DOS
with open(os.path.join(path_dir, "boltztrap.transdos")) as f:
count_series = 0 # TODO: why is count_series needed?
for line in f:
if line.lstrip().startswith("#"):
count_series += 1
if count_series > 1:
break
else:
data_dos["total"].append(
[
Energy(float(line.split()[0]), "Ry").to("eV"),
float(line.split()[1]),
]
)
lw_l = 0
hg_l = -len(data_dos["total"])
if trim_dos:
# Francesco knows what this does
# It has something to do with a trick of adding fake energies
# at the endpoints of the DOS, and then re-trimming it. This is
# to get the same energy scale for up and down spin DOS.
tmp_data = np.array(data_dos["total"])
tmp_den = np.trim_zeros(tmp_data[:, 1], "f")[1:]
lw_l = len(tmp_data[:, 1]) - len(tmp_den)
tmp_ene = tmp_data[lw_l:, 0]
tmp_den = np.trim_zeros(tmp_den, "b")[:-1]
hg_l = len(tmp_ene) - len(tmp_den)
tmp_ene = tmp_ene[:-hg_l]
tmp_data = np.vstack((tmp_ene, tmp_den)).T
data_dos["total"] = tmp_data.tolist()
# parse partial DOS data
for file_name in os.listdir(path_dir):
if file_name.endswith("transdos") and file_name != "boltztrap.transdos":
tokens = file_name.split(".")[1].split("_")
site = tokens[1]
orb = "_".join(tokens[2:])
with open(os.path.join(path_dir, file_name)) as f:
for line in f:
if not line.lstrip().startswith(" #"):
if site not in data_dos["partial"]:
data_dos["partial"][site] = {}
if orb not in data_dos["partial"][site]:
data_dos["partial"][site][orb] = []
data_dos["partial"][site][orb].append(float(line.split()[1]))
data_dos["partial"][site][orb] = data_dos["partial"][site][orb][lw_l:-hg_l]
dos_full = {"energy": [], "density": []}
for t in data_dos["total"]:
dos_full["energy"].append(t[0])
dos_full["density"].append(t[1])
dos = Dos(efermi, dos_full["energy"], {Spin(dos_spin): dos_full["density"]})
dos_partial = data_dos["partial"] # TODO: make this real DOS object?
return dos, dos_partial
@staticmethod
def parse_intrans(path_dir):
"""
Parses boltztrap.intrans mainly to extract the value of scissor applied
to the bands or some other inputs
Args:
path_dir: (str) dir containing the boltztrap.intrans file
Returns:
intrans (dict): a dictionary containing various inputs that had
been used in the Boltztrap run.
"""
intrans = {}
with open(os.path.join(path_dir, "boltztrap.intrans")) as f:
for line in f:
if "iskip" in line:
intrans["scissor"] = Energy(float(line.split(" ")[3]), "Ry").to("eV")
if "HISTO" in line or "TETRA" in line:
intrans["dos_type"] = line[:-1]
return intrans
@staticmethod
def parse_struct(path_dir):
"""
Parses boltztrap.struct file (only the volume)
Args:
path_dir: (str) dir containing the boltztrap.struct file
Returns:
(float) volume
"""
with open(os.path.join(path_dir, "boltztrap.struct")) as f:
tokens = f.readlines()
return Lattice(
[[Length(float(tokens[i].split()[j]), "bohr").to("ang") for j in range(3)] for i in range(1, 4)]
).volume
@staticmethod
def parse_cond_and_hall(path_dir, doping_levels=None):
"""
Parses the conductivity and Hall tensors
Args:
path_dir: Path containing .condtens / .halltens files
doping_levels: ([float]) - doping lvls, parse outtrans to get this
Returns:
mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, carrier_conc
"""
# Step 1: parse raw data but do not convert to final format
t_steps = set()
mu_steps = set()
data_full = []
data_hall = []
data_doping_full = []
data_doping_hall = []
doping_levels = doping_levels or []
# parse the full conductivity/Seebeck/kappa0/etc data
# also initialize t_steps and mu_steps
with open(os.path.join(path_dir, "boltztrap.condtens")) as f:
for line in f:
if not line.startswith("#"):
mu_steps.add(float(line.split()[0]))
t_steps.add(int(float(line.split()[1])))
data_full.append([float(c) for c in line.split()])
# parse the full Hall tensor
with open(os.path.join(path_dir, "boltztrap.halltens")) as f:
for line in f:
if not line.startswith("#"):
data_hall.append([float(c) for c in line.split()])
if len(doping_levels) != 0:
# parse doping levels version of full cond. tensor, etc.
with open(os.path.join(path_dir, "boltztrap.condtens_fixdoping")) as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_full.append([float(c) for c in line.split()])
# parse doping levels version of full hall tensor
with open(os.path.join(path_dir, "boltztrap.halltens_fixdoping")) as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_hall.append([float(c) for c in line.split()])
# Step 2: convert raw data to final format
# sort t and mu_steps (b/c they are sets not lists)
# and convert to correct energy
t_steps = sorted(t_steps)
mu_steps = sorted(Energy(m, "Ry").to("eV") for m in mu_steps)
# initialize output variables - could use defaultdict instead
# I am leaving things like this for clarity
cond = {t: [] for t in t_steps}
seebeck = {t: [] for t in t_steps}
kappa = {t: [] for t in t_steps}
hall = {t: [] for t in t_steps}
carrier_conc = {t: [] for t in t_steps}
mu_doping = {"p": {t: [] for t in t_steps}, "n": {t: [] for t in t_steps}}
seebeck_doping = {"p": {t: [] for t in t_steps}, "n": {t: [] for t in t_steps}}
cond_doping = {"p": {t: [] for t in t_steps}, "n": {t: [] for t in t_steps}}
kappa_doping = {"p": {t: [] for t in t_steps}, "n": {t: [] for t in t_steps}}
hall_doping = {"p": {t: [] for t in t_steps}, "n": {t: [] for t in t_steps}}
# process doping levels
pn_doping_levels = {"p": [], "n": []}
for d in doping_levels:
if d > 0:
pn_doping_levels["p"].append(d)
else:
pn_doping_levels["n"].append(-d)
# process raw conductivity data, etc.
for d in data_full:
temp, doping = d[1], d[2]
carrier_conc[temp].append(doping)
cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())
seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())
kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())
# process raw Hall data
for d in data_hall:
temp, doping = d[1], d[2]
hall_tens = [
np.reshape(d[3:12], (3, 3)).tolist(),
np.reshape(d[12:21], (3, 3)).tolist(),
np.reshape(d[21:30], (3, 3)).tolist(),
]
hall[temp].append(hall_tens)
# process doping conductivity data, etc.
for d in data_doping_full:
temp, doping, mu = d[0], d[1], d[-1]
pn = "p" if doping > 0 else "n"
mu_doping[pn][temp].append(Energy(mu, "Ry").to("eV"))
cond_doping[pn][temp].append(np.reshape(d[2:11], (3, 3)).tolist())
seebeck_doping[pn][temp].append(np.reshape(d[11:20], (3, 3)).tolist())
kappa_doping[pn][temp].append(np.reshape(d[20:29], (3, 3)).tolist())
# process doping Hall data
for d in data_doping_hall:
temp, doping, mu = d[0], d[1], d[-1]
pn = "p" if doping > 0 else "n"
hall_tens = [
np.reshape(d[2:11], (3, 3)).tolist(),
np.reshape(d[11:20], (3, 3)).tolist(),
np.reshape(d[20:29], (3, 3)).tolist(),
]
hall_doping[pn][temp].append(hall_tens)
return (
mu_steps,
cond,
seebeck,
kappa,
hall,
pn_doping_levels,
mu_doping,
seebeck_doping,
cond_doping,
kappa_doping,
hall_doping,
carrier_conc,
)
@staticmethod
def from_files(path_dir, dos_spin=1):
"""
get a BoltztrapAnalyzer object from a set of files
Args:
path_dir: directory where the boltztrap files are
dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down
Returns:
a BoltztrapAnalyzer object
"""
(
run_type,
warning,
efermi,
gap,
doping_levels,
) = BoltztrapAnalyzer.parse_outputtrans(path_dir)
vol = BoltztrapAnalyzer.parse_struct(path_dir)
intrans = BoltztrapAnalyzer.parse_intrans(path_dir)
if run_type == "BOLTZ":
dos, pdos = BoltztrapAnalyzer.parse_transdos(path_dir, efermi, dos_spin=dos_spin, trim_dos=False)
(
mu_steps,
cond,
seebeck,
kappa,
hall,
pn_doping_levels,
mu_doping,
seebeck_doping,
cond_doping,
kappa_doping,
hall_doping,
carrier_conc,
) = BoltztrapAnalyzer.parse_cond_and_hall(path_dir, doping_levels)
return BoltztrapAnalyzer(
gap,
mu_steps,
cond,
seebeck,
kappa,
hall,
pn_doping_levels,
mu_doping,
seebeck_doping,
cond_doping,
kappa_doping,
hall_doping,
intrans,
dos,
pdos,
carrier_conc,
vol,
warning,
)
if run_type == "DOS":
trim = intrans["dos_type"] == "HISTO"
dos, pdos = BoltztrapAnalyzer.parse_transdos(path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)
return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos, warning=warning, vol=vol)
if run_type == "BANDS":
bz_kpoints = np.loadtxt(os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:]
bz_bands = np.loadtxt(os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6]
return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints, warning=warning, vol=vol)
if run_type == "FERMI":
""" """
if os.path.exists(os.path.join(path_dir, "boltztrap_BZ.cube")):
fs_data = read_cube_file(os.path.join(path_dir, "boltztrap_BZ.cube"))
elif os.path.exists(os.path.join(path_dir, "fort.30")):
fs_data = read_cube_file(os.path.join(path_dir, "fort.30"))
else:
raise BoltztrapError("No data file found for fermi surface")
return BoltztrapAnalyzer(fermi_surface_data=fs_data)
raise ValueError(f"Run type: {run_type} not recognized!")
def as_dict(self):
"""
:return: MSONable dict.
"""
results = {
"gap": self.gap,
"mu_steps": self.mu_steps,
"intrans": self.intrans,
"cond": self._cond,
"seebeck": self._seebeck,
"kappa": self._kappa,
"hall": self._hall,
"doping": self.doping,
"mu_doping": self.mu_doping,
"seebeck_doping": self._seebeck_doping,
"cond_doping": self._cond_doping,
"kappa_doping": self._kappa_doping,
"hall_doping": self._hall_doping,
"dos": self.dos.as_dict(),
"dos_partial": self._dos_partial,
"carrier_conc": self._carrier_conc,
"vol": self.vol,
"warning": self.warning,
}
return jsanitize(results)
@staticmethod
def from_dict(data):
"""
:param data: Dict representation.
:return: BoltztrapAnalyzer
"""
def _make_float_array(a):
res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
for i in range(3):
for j in range(3):
res[i][j] = float(a[i][j])
return res
def _make_float_hall(a):
return list(a[:27])
gap = data.get("gap")
mu_steps = [float(d) for d in data["mu_steps"]] if "mu_steps" in data else None
cond = (
{int(d): [_make_float_array(v) for v in data["cond"][d]] for d in data["cond"]} if "cond" in data else None
)
seebeck = (
{int(d): [_make_float_array(v) for v in data["seebeck"][d]] for d in data["seebeck"]}
if "seebeck" in data
else None
)
kappa = (
{int(d): [_make_float_array(v) for v in data["kappa"][d]] for d in data["kappa"]}
if "kappa" in data
else None
)
hall = (
{int(d): [_make_float_hall(v) for v in data["hall"][d]] for d in data["hall"]} if "hall" in data else None
)
doping = (
{
"p": [float(d) for d in data["doping"]["p"]],
"n": [float(d) for d in data["doping"]["n"]],
}
if "doping" in data
else None
)
mu_doping = (
{
"p": {int(d): [float(v) for v in data["mu_doping"]["p"][d]] for d in data["mu_doping"]["p"]},
"n": {int(d): [float(v) for v in data["mu_doping"]["n"][d]] for d in data["mu_doping"]["n"]},
}
if "mu_doping" in data
else None
)
seebeck_doping = (
{
"p": {
int(d): [_make_float_array(v) for v in data["seebeck_doping"]["p"][d]]
for d in data["seebeck_doping"]["p"]
},
"n": {
int(d): [_make_float_array(v) for v in data["seebeck_doping"]["n"][d]]
for d in data["seebeck_doping"]["n"]
},
}
if "seebeck_doping" in data
else None
)
cond_doping = (
{
"p": {
int(d): [_make_float_array(v) for v in data["cond_doping"]["p"][d]]
for d in data["cond_doping"]["p"]
},
"n": {
int(d): [_make_float_array(v) for v in data["cond_doping"]["n"][d]]
for d in data["cond_doping"]["n"]
},
}
if "cond_doping" in data
else None
)
kappa_doping = (
{
"p": {
int(d): [_make_float_array(v) for v in data["kappa_doping"]["p"][d]]
for d in data["kappa_doping"]["p"]
},
"n": {
int(d): [_make_float_array(v) for v in data["kappa_doping"]["n"][d]]
for d in data["kappa_doping"]["n"]
},
}
if "kappa_doping" in data
else None
)
hall_doping = (
{
"p": {
int(d): [_make_float_hall(v) for v in data["hall_doping"]["p"][d]] for d in data["hall_doping"]["p"]
},
"n": {
int(d): [_make_float_hall(v) for v in data["hall_doping"]["n"][d]] for d in data["hall_doping"]["n"]
},
}
if "hall_doping" in data
else None
)
dos = Dos.from_dict(data["dos"]) if "dos" in data else None
dos_partial = data.get("dos_partial")
carrier_conc = data.get("carrier_conc")
vol = data.get("vol")
warning = data.get("warning")
return BoltztrapAnalyzer(
gap=gap,
mu_steps=mu_steps,
cond=cond,
seebeck=seebeck,
kappa=kappa,
hall=hall,
doping=doping,
mu_doping=mu_doping,
seebeck_doping=seebeck_doping,
cond_doping=cond_doping,
kappa_doping=kappa_doping,
hall_doping=hall_doping,
dos=dos,
dos_partial=dos_partial,
carrier_conc=carrier_conc,
vol=vol,
warning=warning,
)
def read_cube_file(filename):
"""
:param filename: Cube filename
:return: Energy data.
"""
with open(filename) as f:
natoms = 0
count_line = 0
for line in f:
line = line.rstrip("\n")
if count_line == 0 and "CUBE" not in line:
raise ValueError("CUBE file format not recognized")
if count_line == 2:
tokens = line.split()
natoms = int(tokens[0])
if count_line == 3:
tokens = line.split()
n1 = int(tokens[0])
elif count_line == 4:
tokens = line.split()
n2 = int(tokens[0])
elif count_line == 5:
tokens = line.split()
n3 = int(tokens[0])
elif count_line > 5:
break
count_line += 1
if "fort.30" in filename:
energy_data = np.genfromtxt(filename, skip_header=natoms + 6, skip_footer=1)
nlines_data = len(energy_data)
last_line = np.genfromtxt(filename, skip_header=nlines_data + natoms + 6)
energy_data = np.append(energy_data.flatten(), last_line).reshape(n1, n2, n3)
elif "boltztrap_BZ.cube" in filename:
energy_data = np.loadtxt(filename, skiprows=natoms + 6).reshape(n1, n2, n3)
energy_data /= Energy(1, "eV").to("Ry")
return energy_data
def compare_sym_bands(bands_obj, bands_ref_obj, nb=None):
"""
    Compute the mean of the correlation between the BoltzTraP and VASP band
    structures on the symmetry line, for all bands, and locally (for each
    branch) the energy difference if nb is specified.
"""
if bands_ref_obj.is_spin_polarized:
nbands = min(bands_obj.nb_bands, 2 * bands_ref_obj.nb_bands)
else:
# TODO: why is this needed? Shouldn't pmg take care of nb_bands?
nbands = min(len(bands_obj.bands[Spin.up]), len(bands_ref_obj.bands[Spin.up]))
# print(nbands)
arr_bands = np.array(bands_obj.bands[Spin.up][:nbands])
# arr_bands_lavg = (arr_bands-np.mean(arr_bands,axis=1).reshape(nbands,1))
if bands_ref_obj.is_spin_polarized:
arr_bands_ref_up = np.array(bands_ref_obj.bands[Spin.up])
arr_bands_ref_dw = np.array(bands_ref_obj.bands[Spin.down])
# print(arr_bands_ref_up.shape)
arr_bands_ref = np.vstack((arr_bands_ref_up, arr_bands_ref_dw))
arr_bands_ref = np.sort(arr_bands_ref, axis=0)[:nbands]
# print(arr_bands_ref.shape)
else:
arr_bands_ref = np.array(bands_ref_obj.bands[Spin.up][:nbands])
# arr_bands_ref_lavg =
# (arr_bands_ref-np.mean(arr_bands_ref,axis=1).reshape(nbands,1))
# err = np.sum((arr_bands_lavg-arr_bands_ref_lavg)**2,axis=1)/nkpt
corr = np.array([distance.correlation(arr_bands[idx], arr_bands_ref[idx]) for idx in range(nbands)])
if isinstance(nb, int):
nb = [nb]
bcheck = {}
if max(nb) < nbands:
branches = [[s["start_index"], s["end_index"], s["name"]] for s in bands_ref_obj.branches]
if not bands_obj.is_metal() and not bands_ref_obj.is_metal():
zero_ref = bands_ref_obj.get_vbm()["energy"]
zero = bands_obj.get_vbm()["energy"]
if not zero:
vbm = bands_ref_obj.get_vbm()["band_index"][Spin.up][-1]
zero = max(arr_bands[vbm])
else:
zero_ref = 0 # bands_ref_obj.efermi
zero = 0 # bands_obj.efermi
print(zero, zero_ref)
for nbi in nb:
bcheck[nbi] = {}
bcheck[nbi]["Dist"] = np.mean(abs(arr_bands[nbi] - zero - arr_bands_ref[nbi] + zero_ref))
bcheck[nbi]["Corr"] = corr[nbi]
for start, end, name in branches:
# werr.append((sum((arr_bands_corr[nb][start:end+1] -
# arr_bands_ref_corr[nb][start:end+1])**2)/(end+1-start)*100,name))
bcheck[nbi][name] = np.mean(
abs(arr_bands[nbi][start : end + 1] - zero - arr_bands_ref[nbi][start : end + 1] + zero_ref)
)
else:
bcheck = "No nb given"
return bcheck
def seebeck_spb(eta, Lambda=0.5):
"""
    Seebeck analytic formula in the single parabolic band (SPB) model, in microV/K
"""
from fdint import fdk
return (
constants.k
/ constants.e
* ((2.0 + Lambda) * fdk(1.0 + Lambda, eta) / ((1.0 + Lambda) * fdk(Lambda, eta)) - eta)
* 1e6
)
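# Reference form of the expression above: in the single parabolic band model
# with scattering exponent Lambda and reduced chemical potential eta,
#   S = (k_B / e) * [(2 + Lambda) * F_{1+Lambda}(eta) /
#                    ((1 + Lambda) * F_{Lambda}(eta)) - eta]
# where F_j is the Fermi-Dirac integral (fdint.fdk); the 1e6 factor converts
# the result to microvolts per kelvin.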
def eta_from_seebeck(seeb, Lambda):
"""
    Takes a Seebeck value and adjusts the reduced chemical potential until the
    analytic (SPB) Seebeck matches it.
    Returns: eta (reduced chemical potential) where the two Seebeck
    coefficients are equal
"""
from scipy.optimize import fsolve
out = fsolve(lambda x: (seebeck_spb(x, Lambda) - abs(seeb)) ** 2, 1.0, full_output=True)
return out[0][0]
def seebeck_eff_mass_from_carr(eta, n, T, Lambda):
"""
Calculate seebeck effective mass at a certain carrier concentration
eta in kB*T units, n in cm-3, T in K, returns mass in m0 units
"""
try:
from fdint import fdk
except ImportError:
raise BoltztrapError(
"fdint module not found. Please, install it.\n" + "It is needed to calculate Fermi integral quickly."
)
return (2 * np.pi ** 2 * abs(n) * 10 ** 6 / (fdk(0.5, eta))) ** (2.0 / 3) / (
2 * constants.m_e * constants.k * T / (constants.h / 2 / np.pi) ** 2
)
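# Reference form of the expression above: with n in cm^-3 (hence the 1e6
# conversion to m^-3) and hbar written as h / (2 pi),
#   m_S* / m_e = (2 pi^2 |n| / F_{1/2}(eta))^(2/3) / (2 m_e k_B T / hbar^2).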
def seebeck_eff_mass_from_seebeck_carr(seeb, n, T, Lambda):
"""
Find the chemical potential where analytic and calculated seebeck are identical
and then calculate the seebeck effective mass at that chemical potential and
a certain carrier concentration n
"""
eta = eta_from_seebeck(seeb, Lambda)
mass = seebeck_eff_mass_from_carr(eta, n, T, Lambda)
return mass
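# Minimal usage sketch for the helper above (the numbers are hypothetical and
# the optional `fdint` package must be installed): estimate the Seebeck
# effective mass from |S| = 150 microV/K at n = 1e20 cm^-3 and T = 300 K with
# constant relaxation time scattering (Lambda = 0.5).
#
#   m_s = seebeck_eff_mass_from_seebeck_carr(150.0, 1e20, 300, 0.5)
#   # m_s is expressed in units of the electron mass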
|
vorwerkc/pymatgen
|
pymatgen/electronic_structure/boltztrap.py
|
Python
|
mit
| 103,362
| 0.001887
|
from seth import versioning
from seth.tests import IntegrationTestBase
from seth.classy.rest import generics
class DefaultVersioningResource(generics.GenericApiView):
def get(self, **kwargs):
return {}
class NotShowVersionResource(generics.GenericApiView):
display_version = False
def get(self, **kwargs):
return {}
class BaseVersioningTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
config.register_resource(DefaultVersioningResource, '/test_basic')
config.register_resource(NotShowVersionResource, '/test_do_not_display_version')
def test_default_setup(self):
r = self.app.get('/test_basic')
self.assertEqual(r.status_int, 200)
self.assertIn('API-Version', r.headers.keys())
self.assertEqual(r.headers['API-Version'], '1.0')
def test_do_not_display_version(self):
r = self.app.get('/test_do_not_display_version')
self.assertEqual(r.status_int, 200)
self.assertNotIn('API-Version', r.headers.keys())
class CustomVersioningPoliciesTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class NoGetVersionInfoPolicy(versioning.BaseVersioningPolicy):
default_version = '2.0'
class NoGetVersionInfonResource(generics.GenericApiView):
versioning_policy = NoGetVersionInfoPolicy
def get(self, **kwargs):
return {}
config.register_resource(NoGetVersionInfonResource, '/test_no_get_version_info')
class AnotherVersionPolicy(versioning.BaseVersioningPolicy):
default_version = '2.0'
def get_version_info(self, request, *args, **kwargs):
return '2.0'
class AnotherVersionResource(generics.GenericApiView):
versioning_policy = AnotherVersionPolicy
def get(self, **kwargs):
return {}
config.register_resource(AnotherVersionResource, '/test_another_version')
class PredefineVersionPolicy(versioning.BaseVersioningPolicy):
default_version = None
def get_default_version(self, request):
return '666'
def get_version_info(self, request, *args, **kwargs):
return '666'
class PredefineVersionResource(generics.GenericApiView):
versioning_policy = PredefineVersionPolicy
def get(self, **kwargs):
return {}
config.register_resource(PredefineVersionResource, '/test_predefine')
def test_raises_NotImplementedError_if_get_version_info_is_not_provided(self):
self.assertRaises(NotImplementedError, lambda: self.app.get('/test_no_get_version_info'))
def test_another_version_set(self):
r = self.app.get('/test_another_version')
self.assertEqual(r.status_int, 200)
self.assertIn('API-Version', r.headers.keys())
self.assertEqual(r.headers['API-Version'], '2.0')
def test_predefine_version(self):
r = self.app.get('/test_predefine')
self.assertEqual(r.status_int, 200)
self.assertIn('API-Version', r.headers.keys())
self.assertEqual(r.headers['API-Version'], '666')
class CheckParamsVersionPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class CheckQueryParamsResource(generics.GenericApiView):
versioning_policy = versioning.CheckQueryParamsVersioningPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResource, '/test_query_params')
class AllowVersionOnePolicy(versioning.CheckQueryParamsVersioningPolicy):
default_version = '22.0'
def get_allowed_version(self):
return ['5.0']
class CheckQueryParamsResourceSecond(generics.GenericApiView):
versioning_policy = AllowVersionOnePolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_allow_version')
def test_no_version_in_query_params_all_versions_allowed(self):
r = self.app.get('/test_query_params')
self.assertEqual(r.status_int, 200)
def test_wrong_version_in_query_params_all_versions_allowed(self):
r = self.app.get('/test_query_params?version=2.0')
self.assertEqual(r.status_int, 200)
def test_correct_version_in_query_params_all_versions_allowed(self):
r = self.app.get('/test_query_params?version=1.0')
self.assertEqual(r.status_int, 200)
def test_allow_default_version(self):
r = self.app.get('/test_allow_version?version=22.0')
self.assertEqual(r.status_int, 200)
def test_allowed_versions(self):
r = self.app.get('/test_allow_version?version=5.0')
self.assertEqual(r.status_int, 200)
def test_wrong_version_in_query_params_allowed_are_set(self):
r = self.app.get('/test_allow_version?version=1.0', expect_errors=True)
self.assertEqual(r.status_int, 404)
def test_no_version_in_query_params_allowed_are_set(self):
r = self.app.get('/test_allow_version', expect_errors=True)
self.assertEqual(r.status_int, 404)
class CheckHeaderVersionPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class AllowVersionOnePolicy(versioning.CheckHeaderVersioningPolicy):
default_version = '22.0'
def get_allowed_version(self):
return ['5.0']
class CheckQueryParamsResourceSecond(generics.GenericApiView):
versioning_policy = AllowVersionOnePolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_allow_header')
def test_allow_default_version(self):
r = self.app.get('/test_allow_header', headers={'Api-Version': '22.0'})
self.assertEqual(r.status_int, 200)
def test_allowed_versions(self):
r = self.app.get('/test_allow_header', headers={'Api-Version': '5.0'})
self.assertEqual(r.status_int, 200)
def test_wrong_version_in_headers(self):
r = self.app.get('/test_allow_header', headers={'Api-Version': '666.0'}, expect_errors=True)
self.assertEqual(r.status_int, 404)
def test_no_header_in_request(self):
r = self.app.get('/test_allow_header', expect_errors=True)
self.assertEqual(r.status_int, 404)
def test_wrong_header_set(self):
r = self.app.get('/test_allow_header', headers={'Api-WRONG': '22.0'}, expect_errors=True)
self.assertEqual(r.status_int, 404)
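# Illustrative sketch (hedged): based only on the seth behaviour exercised in the tests
# above; the class names, resource path and version strings below are invented for the example.
class ExampleVersionPolicy(versioning.CheckQueryParamsVersioningPolicy):
    default_version = '3.0'            # the tests above show the default is also accepted
    def get_allowed_version(self):
        return ['3.1', '3.2']          # any other ?version=... yields a 404 in the tests above
class ExampleVersionedResource(generics.GenericApiView):
    versioning_policy = ExampleVersionPolicy
    def get(self, **kwargs):
        return {}
# Registration would typically mirror the tests above, inside extend_app_configuration:
#     config.register_resource(ExampleVersionedResource, '/example_versioned')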
|
jnosal/seth
|
seth/tests/test_versioning.py
|
Python
|
mit
| 6,814
| 0.001761
|
"""CSS selector structure items."""
import copyreg
from collections.abc import Hashable, Mapping
__all__ = (
'Selector',
'SelectorNull',
'SelectorTag',
'SelectorAttribute',
'SelectorContains',
'SelectorNth',
'SelectorLang',
'SelectorList',
'Namespaces',
'CustomSelectors'
)
SEL_EMPTY = 0x1
SEL_ROOT = 0x2
SEL_DEFAULT = 0x4
SEL_INDETERMINATE = 0x8
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
SEL_PLACEHOLDER_SHOWN = 0x400
class Immutable(object):
"""Immutable."""
__slots__ = ('_hash',)
def __init__(self, **kwargs):
"""Initialize."""
temp = []
for k, v in kwargs.items():
temp.append(type(v))
temp.append(v)
super(Immutable, self).__setattr__(k, v)
super(Immutable, self).__setattr__('_hash', hash(tuple(temp)))
@classmethod
def __base__(cls):
"""Get base class."""
return cls
def __eq__(self, other):
"""Equal."""
return (
isinstance(other, self.__base__()) and
all([getattr(other, key) == getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __ne__(self, other):
"""Equal."""
return (
not isinstance(other, self.__base__()) or
any([getattr(other, key) != getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __hash__(self):
"""Hash."""
return self._hash
def __setattr__(self, name, value):
"""Prevent mutability."""
raise AttributeError("'{}' is immutable".format(self.__class__.__name__))
def __repr__(self): # pragma: no cover
"""Representation."""
return "{}({})".format(
self.__base__(), ', '.join(["{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1]])
)
__str__ = __repr__
class ImmutableDict(Mapping):
"""Hashable, immutable dictionary."""
def __init__(self, *args, **kwargs):
"""Initialize."""
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if (
is_dict and not all([isinstance(v, Hashable) for v in arg.values()]) or
not is_dict and not all([isinstance(k, Hashable) and isinstance(v, Hashable) for k, v in arg])
):
raise TypeError('All values must be hashable')
self._d = dict(*args, **kwargs)
self._hash = hash(tuple([(type(x), x, type(y), y) for x, y in sorted(self._d.items())]))
def __iter__(self):
"""Iterator."""
return iter(self._d)
def __len__(self):
"""Length."""
return len(self._d)
def __getitem__(self, key):
"""Get item: `namespace['key']`."""
return self._d[key]
def __hash__(self):
"""Hash."""
return self._hash
def __repr__(self): # pragma: no cover
"""Representation."""
return "{!r}".format(self._d)
__str__ = __repr__
class Namespaces(ImmutableDict):
"""Namespaces."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('Namespace keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('Namespace keys and values must be Unicode strings')
super(Namespaces, self).__init__(*args, **kwargs)
class CustomSelectors(ImmutableDict):
"""Custom selectors."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
super(CustomSelectors, self).__init__(*args, **kwargs)
class Selector(Immutable):
"""Selector."""
__slots__ = (
'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
)
def __init__(
self, tag, ids, classes, attributes, nth, selectors,
relation, rel_type, contains, lang, flags
):
"""Initialize."""
super(Selector, self).__init__(
tag=tag,
ids=ids,
classes=classes,
attributes=attributes,
nth=nth,
selectors=selectors,
relation=relation,
rel_type=rel_type,
contains=contains,
lang=lang,
flags=flags
)
class SelectorNull(Immutable):
"""Null Selector."""
def __init__(self):
"""Initialize."""
super(SelectorNull, self).__init__()
class SelectorTag(Immutable):
"""Selector tag."""
__slots__ = ("name", "prefix", "_hash")
def __init__(self, name, prefix):
"""Initialize."""
super(SelectorTag, self).__init__(
name=name,
prefix=prefix
)
class SelectorAttribute(Immutable):
"""Selector attribute rule."""
__slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")
def __init__(self, attribute, prefix, pattern, xml_type_pattern):
"""Initialize."""
super(SelectorAttribute, self).__init__(
attribute=attribute,
prefix=prefix,
pattern=pattern,
xml_type_pattern=xml_type_pattern
)
class SelectorContains(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "_hash")
def __init__(self, text):
"""Initialize."""
super(SelectorContains, self).__init__(
text=text
)
class SelectorNth(Immutable):
"""Selector nth type."""
__slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")
def __init__(self, a, n, b, of_type, last, selectors):
"""Initialize."""
super(SelectorNth, self).__init__(
a=a,
n=n,
b=b,
of_type=of_type,
last=last,
selectors=selectors
)
class SelectorLang(Immutable):
"""Selector language rules."""
__slots__ = ("languages", "_hash",)
def __init__(self, languages):
"""Initialize."""
super(SelectorLang, self).__init__(
languages=tuple(languages)
)
def __iter__(self):
"""Iterator."""
return iter(self.languages)
def __len__(self): # pragma: no cover
"""Length."""
return len(self.languages)
def __getitem__(self, index): # pragma: no cover
"""Get item."""
return self.languages[index]
class SelectorList(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
def __init__(self, selectors=tuple(), is_not=False, is_html=False):
"""Initialize."""
super(SelectorList, self).__init__(
selectors=tuple(selectors),
is_not=is_not,
is_html=is_html
)
def __iter__(self):
"""Iterator."""
return iter(self.selectors)
def __len__(self):
"""Length."""
return len(self.selectors)
def __getitem__(self, index):
"""Get item."""
return self.selectors[index]
def _pickle(p):
return p.__base__(), tuple([getattr(p, s) for s in p.__slots__[:-1]])
def pickle_register(obj):
"""Allow object to be pickled."""
copyreg.pickle(obj, _pickle)
pickle_register(Selector)
pickle_register(SelectorNull)
pickle_register(SelectorTag)
pickle_register(SelectorAttribute)
pickle_register(SelectorContains)
pickle_register(SelectorNth)
pickle_register(SelectorLang)
pickle_register(SelectorList)
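# Illustrative sketch (hedged): a small demonstration of the value semantics the classes
# above provide -- Immutable subclasses compare and hash by field values, and
# pickle_register() lets them round-trip through pickle; the tag name is arbitrary.
def _example_selector_roundtrip():
    import pickle
    a = SelectorTag('div', None)
    b = SelectorTag('div', None)
    assert a == b and hash(a) == hash(b)        # equal by value, not by identity
    assert pickle.loads(pickle.dumps(a)) == a   # round-trips via the _pickle helper
    return a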
|
SickGear/SickGear
|
lib/soupsieve_py3/css_types.py
|
Python
|
gpl-3.0
| 8,916
| 0.001682
|
"""Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal, assert_warns
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from gplearn.skutils.testing import assert_raises_regexp
from gplearn.skutils import as_float_array, check_array, check_symmetric
from gplearn.skutils import check_X_y
from gplearn.skutils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from gplearn.skutils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length)
from gplearn.skutils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
for copy in (True, False):
Y = check_array(X, accept_sparse='csr', copy=copy, order='C')
assert_true(Y.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
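# Illustrative sketch (hedged): a compact recap of the check_array behaviours exercised
# above; the input values are arbitrary.
def _example_check_array_recap():
    X2d = check_array([0, 1, 2])                     # promoted to a 2-D array, shape (1, 3)
    X1d = check_array([0, 1, 2], ensure_2d=False)    # left 1-D
    XF = check_array(np.ones((2, 3)), order='F')     # Fortran-contiguous output
    return X2d.shape, X1d.shape, XF.flags['F_CONTIGUOUS']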
|
danbob123/gplearn
|
gplearn/skutils/tests/test_validation.py
|
Python
|
bsd-3-clause
| 14,136
| 0.000283
|
#!/usr/bin/env python
# Copyright (c) 2016 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import os
import unittest
import mock
from click.testing import CliRunner
with mock.patch('functest.cli.commands.cli_testcase.CliTestcase.__init__',
mock.Mock(return_value=None)), \
mock.patch('functest.cli.commands.cli_tier.CliTier.__init__',
mock.Mock(return_value=None)):
os.environ['OS_AUTH_URL'] = ''
from functest.cli import cli_base
class CliBaseTesting(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
self._openstack = cli_base.OPENSTACK
self._env = cli_base.ENV
self._testcase = cli_base.TESTCASE
self._tier = cli_base.TIER
def test_os_check(self):
with mock.patch.object(self._openstack, 'check') as mock_method:
result = self.runner.invoke(cli_base.os_check)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_os_show_credentials(self):
with mock.patch.object(self._openstack, 'show_credentials') \
as mock_method:
result = self.runner.invoke(cli_base.os_show_credentials)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_env_show(self):
with mock.patch.object(self._env, 'show') as mock_method:
result = self.runner.invoke(cli_base.env_show)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_testcase_list(self):
with mock.patch.object(self._testcase, 'list') as mock_method:
result = self.runner.invoke(cli_base.testcase_list)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_testcase_show(self):
with mock.patch.object(self._testcase, 'show') as mock_method:
result = self.runner.invoke(cli_base.testcase_show, ['testname'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_testcase_run(self):
with mock.patch.object(self._testcase, 'run') as mock_method:
result = self.runner.invoke(cli_base.testcase_run,
['testname', '--noclean'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_list(self):
with mock.patch.object(self._tier, 'list') as mock_method:
result = self.runner.invoke(cli_base.tier_list)
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_show(self):
with mock.patch.object(self._tier, 'show') as mock_method:
result = self.runner.invoke(cli_base.tier_show, ['tiername'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_gettests(self):
with mock.patch.object(self._tier, 'gettests') as mock_method:
result = self.runner.invoke(cli_base.tier_gettests, ['tiername'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
def test_tier_run(self):
with mock.patch.object(self._tier, 'run') as mock_method:
result = self.runner.invoke(cli_base.tier_run,
['tiername', '--noclean'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(mock_method.called)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
|
mywulin/functest
|
functest/tests/unit/cli/test_cli_base.py
|
Python
|
apache-2.0
| 3,933
| 0
|
from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluate the antenna vsh coefficient with a downsampling factor of 2
4 : evaluates the relative error of reconstruction (vsh3) for various values of order l
5 : display the results
"""
filename = 'S1R1.mat'
A = Antenna(filename,'ant/UWBAN/Matfile')
B = Antenna(filename,'ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
B.Ftheta = B.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
B.Fphi = B.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
A = vsh(A,dsf)
B = vsh(B,dsf)
tn = []
tet = []
tep = []
te = []
tmse = []
l = 20
A.C.s1tos2(l)
B.C.s1tos2(l)
u = np.shape(A.C.Br.s2)
Nf = u[0]
Nk = u[1]
tr = np.arange(2,Nk)
A.C.s2tos3_new(Nk)
B.C.s2tos3(1e-6)
UA = np.sum(A.C.Cr.s3*np.conj(A.C.Cr.s3),axis=0)
UB = np.sum(B.C.Cr.s3*np.conj(B.C.Cr.s3),axis=0)
ua = A.C.Cr.ind3
ub = B.C.Cr.ind3
da ={}
db ={}
for k in range(Nk):
da[str(ua[k])]=UA[k]
db[str(ub[k])]=UB[k]
tu = []
for t in sort(da.keys()):
tu.append(da[t] - db[t])
errelTha,errelPha,errela = A.errel(l,20,dsf,typ='s3')
errelThb,errelPhb,errelb = B.errel(l,20,dsf,typ='s3')
print "a: nok",errela,errelPha,errelTha
print "b: ok ",errelb,errelPhb,errelThb
for r in tr:
E = A.C.s2tos3_new(r)
errelTh,errelPh,errel = A.errel(l,20,dsf,typ='s3')
print 'r : ',r,errel,E
tet.append(errelTh)
tep.append(errelPh)
te.append(errel)
#
line1 = plt.plot(array(tr),10*log10(array(tep)),'b')
line2 = plt.plot(array(tr),10*log10(array(tet)),'r')
line3 = plt.plot(array(tr),10*log10(array(te)),'g')
#
plt.xlabel('order l')
plt.ylabel(u'$\epsilon_{rel}$ (dB)',fontsize=18)
plt.title('Evolution of reconstruction relative error wrt order')
plt.legend((u'$\epsilon_{rel}^{\phi}$',u'$\epsilon_{rel}^{\\theta}$',u'$\epsilon_{rel}^{total}$'))
plt.legend((line1,line2,line3),('a','b','c'))
plt.show()
plt.legend(('errel_phi','errel_theta','errel'))
|
buguen/pylayers
|
pylayers/antprop/examples/ex_antenna5.py
|
Python
|
lgpl-3.0
| 2,472
| 0.029126
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities related to QWebHistory."""
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QUrl
from qutebrowser.utils import utils, qtutils
HISTORY_STREAM_VERSION = 2
BACK_FORWARD_TREE_VERSION = 2
class TabHistoryItem:
"""A single item in the tab history.
Attributes:
url: The QUrl of this item.
original_url: The QUrl of this item which was originally requested.
title: The title as string of this item.
active: Whether this item is the item currently navigated to.
user_data: The user data for this item.
"""
def __init__(self, url, title, *, original_url=None, active=False,
user_data=None):
self.url = url
if original_url is None:
self.original_url = url
else:
self.original_url = original_url
self.title = title
self.active = active
self.user_data = user_data
def __repr__(self):
return utils.get_repr(self, constructor=True, url=self.url,
original_url=self.original_url, title=self.title,
active=self.active, user_data=self.user_data)
def _encode_url(url):
"""Encode an QUrl suitable to pass to QWebHistory."""
data = bytes(QUrl.toPercentEncoding(url.toString(), b':/#?&+=@%*'))
return data.decode('ascii')
def _serialize_item(i, item, stream):
"""Serialize a single WebHistoryItem into a QDataStream.
Args:
i: The index of the current item.
item: The WebHistoryItem to write.
stream: The QDataStream to write to.
"""
### Source/WebCore/history/qt/HistoryItemQt.cpp restoreState
## urlString
stream.writeQString(_encode_url(item.url))
## title
stream.writeQString(item.title)
## originalURLString
stream.writeQString(_encode_url(item.original_url))
### Source/WebCore/history/HistoryItem.cpp decodeBackForwardTree
## backForwardTreeEncodingVersion
stream.writeUInt32(BACK_FORWARD_TREE_VERSION)
## size (recursion stack)
stream.writeUInt64(0)
## node->m_documentSequenceNumber
# If two HistoryItems have the same document sequence number, then they
# refer to the same instance of a document. Traversing history from one
# such HistoryItem to another preserves the document.
stream.writeInt64(i + 1)
## size (node->m_documentState)
stream.writeUInt64(0)
## node->m_formContentType
# info used to repost form data
stream.writeQString(None)
## hasFormData
stream.writeBool(False)
## node->m_itemSequenceNumber
# If two HistoryItems have the same item sequence number, then they are
# clones of one another. Traversing history from one such HistoryItem to
# another is a no-op. HistoryItem clones are created for parent and
# sibling frames when only a subframe navigates.
stream.writeInt64(i + 1)
## node->m_referrer
stream.writeQString(None)
## node->m_scrollPoint (x)
try:
stream.writeInt32(item.user_data['scroll-pos'].x())
except (KeyError, TypeError):
stream.writeInt32(0)
## node->m_scrollPoint (y)
try:
stream.writeInt32(item.user_data['scroll-pos'].y())
except (KeyError, TypeError):
stream.writeInt32(0)
## node->m_pageScaleFactor
stream.writeFloat(1)
## hasStateObject
# Support for HTML5 History
stream.writeBool(False)
## node->m_target
stream.writeQString(None)
### Source/WebCore/history/qt/HistoryItemQt.cpp restoreState
## validUserData
# We could restore the user data here, but we prefer to use the
# QWebHistoryItem API for that.
stream.writeBool(False)
def serialize(items):
"""Serialize a list of QWebHistoryItems to a data stream.
Args:
items: An iterable of WebHistoryItems.
Return:
A (stream, data, user_data) tuple.
            stream: The reset QDataStream.
data: The QByteArray with the raw data.
user_data: A list with each item's user data.
Warning:
If 'data' goes out of scope, reading from 'stream' will result in a
segfault!
"""
data = QByteArray()
stream = QDataStream(data, QIODevice.ReadWrite)
user_data = []
current_idx = None
for i, item in enumerate(items):
if item.active:
if current_idx is not None:
raise ValueError("Multiple active items ({} and {}) "
"found!".format(current_idx, i))
else:
current_idx = i
if items:
if current_idx is None:
raise ValueError("No active item found!")
else:
current_idx = 0
### Source/WebKit/qt/Api/qwebhistory.cpp operator<<
stream.writeInt(HISTORY_STREAM_VERSION)
stream.writeInt(len(items))
stream.writeInt(current_idx)
for i, item in enumerate(items):
_serialize_item(i, item, stream)
user_data.append(item.user_data)
stream.device().reset()
qtutils.check_qdatastream(stream)
return stream, data, user_data
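# Illustrative sketch (hedged): assumes a working PyQt5 environment; URLs and titles are
# placeholders. Per the warning in serialize() above, ``data`` must stay alive while
# ``stream`` is being read.
def _example_serialize_history():
    items = [
        TabHistoryItem(QUrl('http://example.com/'), 'Example'),
        TabHistoryItem(QUrl('http://example.com/page'), 'Page', active=True),
    ]
    stream, data, user_data = serialize(items)
    return stream, data, user_data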
|
Kingdread/qutebrowser
|
qutebrowser/browser/tabhistory.py
|
Python
|
gpl-3.0
| 5,900
| 0.003559
|
"""
@brief test tree node (time=50s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from pyquickhelper.pycode.venv_helper import create_virtual_env
class TestVenvHelper(ExtTestCase):
def test_venv_empty(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if __name__ != "__main__":
# does not accept virtual environment
return
temp = get_temp_folder(__file__, "temp_venv_empty")
out = create_virtual_env(temp, fLOG=fLOG)
fLOG("-----")
fLOG(out)
fLOG("-----")
pyt = os.path.join(temp, "Scripts")
self.assertExists(pyt)
lo = os.listdir(pyt)
self.assertNotEmpty(lo)
if __name__ == "__main__":
unittest.main()
|
sdpython/pyquickhelper
|
_unittests/ut_pycode/test_venv_helper.py
|
Python
|
mit
| 902
| 0
|
# coding: utf-8
#
# This file is part of mpdav.
#
# mpdav is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpdav is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpdav. If not, see <http://www.gnu.org/licenses/>.
import md5
import mimetypes
import os.path
import shutil
import time
import multi_status
import response
import status
BLOCK_SIZE = 8192 # just an assumption
def epoch2iso8601(ts):
t = time.localtime(ts)
tz = (time.altzone if t.tm_isdst else time.timezone) / 3600 * -1
return time.strftime("%Y-%m-%dT%H:%M:%S", t) + "%+.02d:00" % tz
def epoch2iso1123(ts):
return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(ts))
class FileIterator(object):
def __init__(self, filename):
self.filename = filename
def __iter__(self):
remaining = os.stat(self.filename).st_size
f = open(self.filename, "rb")
while remaining > 0:
r = min(remaining, BLOCK_SIZE)
yield f.read(r)
remaining -= r
f.close()
class FileBackend(object):
def __init__(self, root, show_hidden=False, base_path="/"):
self.root = os.path.abspath(root)
self.show_hidden = show_hidden
self.base_path = base_path.rstrip("/")
def propfind(self, path, depth, request_xml):
# TODO implement support for allprop
paths = self._build_paths(path, depth)
return multi_status.MultiStatus(self._get_properties(paths, request_xml))
def _build_paths(self, path, depth):
path = path.strip("/")
path = os.path.abspath(os.path.join(self.root, path))
if path.startswith(self.root) and os.path.exists(path):
paths = [path]
if os.path.isdir(path) and depth == 1:
for p in os.listdir(path):
if self._show(p):
paths.append(os.path.join(path, p))
for i, p in enumerate(paths):
if os.path.isdir(p) and p[:-1] != "/":
paths[i] = p + "/"
return paths
raise IOError
def _show(self, filename):
return self.show_hidden or not filename.startswith(".")
def _get_properties(self, paths, request_xml):
result = []
for p in paths:
prop_stat = multi_status.PropStat(status.OK)
try:
st = os.stat(p)
fs_st = os.statvfs(p.encode("utf-8"))
except:
continue
name = self._build_displayname(p)
is_dir = os.path.isdir(p)
for property_ in request_xml.find("{DAV:}propfind", "{DAV:}prop"):
if property_ == "{DAV:}resourcetype":
prop_stat.add_resourcetype(is_dir)
elif property_ == "{DAV:}creationdate":
prop_stat.add_creationdate(epoch2iso8601(st.st_ctime))
elif property_ == "{DAV:}displayname":
prop_stat.add_displayname(name)
elif property_ == "{DAV:}getcontentlength":
if not is_dir:
prop_stat.add_getcontentlength(st.st_size)
elif property_ == "{DAV:}getcontenttype":
if not is_dir:
ct = mimetypes.guess_type(p)[0] or "application/octet-stream"
prop_stat.add_getcontenttype(ct)
elif property_ == "{DAV:}getetag":
prop_stat.add_getetag(md5.new("%s%s" % (name.encode("utf-8"), st.st_mtime)).hexdigest())
elif property_ == "{DAV:}getlastmodified":
prop_stat.add_getlastmodified(epoch2iso1123(st.st_mtime))
elif property_ == "{DAV:}quota-available-bytes":
prop_stat.add_quota_available_bytes(fs_st.f_bavail * fs_st.f_frsize)
elif property_ == "{DAV:}quota-used-bytes":
prop_stat.add_quota_used_bytes((fs_st.f_blocks - fs_st.f_bavail) * fs_st.f_frsize)
else:
print "Request for not supported property %s" % property_
href = self.base_path + p[len(self.root):]
result.append(multi_status.Response(href, prop_stat))
return result
def _build_displayname(self, path):
cut = len(self.root)
return os.path.basename(os.path.normpath(path[cut:]))
def head(self, path):
return self.get(path, False)
def get(self, path, with_body=True):
filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
if not filename.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif not os.path.exists(filename):
return response.Response(status.NOT_FOUND)
if os.path.isdir(filename):
body = None
content_length = "0"
if with_body:
body = self._get_collection(filename)
content_length = str(len(body))
return response.Response(status.OK,
{"Content-Type": "text/html",
"Content-Length": content_length},
[body] if with_body else None)
else:
st = os.stat(filename)
headers = {"Content-Type": mimetypes.guess_type(filename)[0] or "application/octet-stream",
"Content-Length": str(st.st_size)}
return response.Response(status.OK,
headers,
FileIterator(filename) if with_body else None)
def _get_collection(self, path):
filenames = os.listdir(path)
directories = [f for f in filenames if self._show(f) and os.path.isdir(os.path.join(path, f))]
files = [f for f in filenames if self._show(f) and os.path.isfile(os.path.join(path, f))]
directories.sort(key=lambda d: d.lower())
files.sort(key=lambda f: f.lower())
filenames = directories + files
result = u"""\
<html>
<head>
<title>Content of %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head>
<body>
<ul style="padding:0;margin:0;list-style-type:none;">
""" % os.path.basename(path)
tplDirectory = """<li><a href="%s">[%s]</a></li>\n"""
tplFile = """<li><a href="%s">%s</a></li>\n"""
for f in filenames:
p = os.path.join(path, f)
href = self.base_path + p[len(self.root):]
if os.path.isdir(p):
result += tplDirectory % (href, f)
else:
result += tplFile % (href, f)
result += """\
</ul>
</body>
</html>
"""
return result.encode("utf-8")
def put(self, path, content_length, body):
filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
if not filename.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif os.path.isdir(filename):
return response.Response(status.NOT_ALLOWED)
elif not os.path.isdir(os.path.dirname(filename)):
return response.Response(status.CONFLICT)
created = not os.path.exists(filename)
f = open(filename, "wb")
if content_length:
remaining = content_length
while remaining > 0:
buf = body.read(min(remaining, BLOCK_SIZE))
if len(buf):
f.write(buf)
remaining -= len(buf)
else:
break
f.close()
if created:
return response.Response(status.CREATED)
else:
return response.Response(status.NO_CONTENT)
def mkcol(self, path):
dirname = os.path.abspath(os.path.join(self.root, path.strip("/")))
if not dirname.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif os.path.exists(dirname):
return response.Response(status.NOT_ALLOWED)
elif not os.path.isdir(os.path.dirname(dirname)):
return response.Response(status.CONFLICT)
os.mkdir(dirname)
return response.Response(status.CREATED, {}, None)
def delete(self, path):
filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
if not filename.startswith(self.root):
return response.Response(status.FORBIDDEN)
if os.path.isfile(filename):
os.remove(filename)
elif os.path.isdir(filename):
shutil.rmtree(filename)
elif not os.path.exists(filename):
return response.Response(status.NOT_FOUND)
return response.Response(status.NO_CONTENT)
def move(self, src, dst, overwrite):
if not dst.startswith(self.base_path):
return response.Response(status.FORBIDDEN)
source = os.path.join(self.root, src.strip("/"))
source = os.path.abspath(source)
destination = dst[len(self.base_path):]
destination = os.path.join(self.root, destination.strip("/"))
destination = os.path.abspath(destination)
if not source.startswith(self.root) or not destination.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif source == destination:
return response.Response(status.FORBIDDEN)
elif not os.path.isdir(os.path.dirname(destination)):
return response.Response(status.CONFLICT)
elif not overwrite and os.path.exists(destination):
return response.Response(status.PRECONDITION_FAILED)
created = not os.path.exists(destination)
if os.path.isdir(destination):
shutil.rmtree(destination)
elif os.path.isfile(destination):
os.remove(destination)
if os.path.isdir(source):
shutil.move(source, destination)
elif os.path.isfile(source):
os.rename(source, destination) # TODO will this work between partitions?
if created:
return response.Response(status.CREATED)
else:
return response.Response(status.NO_CONTENT)
def copy(self, src, dst, overwrite):
if not dst.startswith(self.base_path):
return response.Response(status.BAD_REQUEST)
source = os.path.join(self.root, src.strip("/"))
source = os.path.abspath(source)
destination = dst[len(self.base_path):]
destination = os.path.join(self.root, destination.strip("/"))
destination = os.path.abspath(destination)
if not source.startswith(self.root) or not destination.startswith(self.root):
return response.Response(status.FORBIDDEN)
elif source == destination:
return response.Response(status.FORBIDDEN)
elif not os.path.isdir(os.path.dirname(destination)):
return response.Response(status.CONFLICT)
elif not overwrite and os.path.exists(destination):
return response.Response(status.PRECONDITION_FAILED)
created = not os.path.exists(destination)
if os.path.isdir(destination):
shutil.rmtree(destination)
elif os.path.isfile(destination):
os.remove(destination)
if os.path.isdir(source):
shutil.copytree(source, destination)
elif os.path.isfile(source):
shutil.copyfile(source, destination)
if created:
return response.Response(status.CREATED)
else:
return response.Response(status.NO_CONTENT)
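# Illustrative sketch (hedged): ``root`` is a placeholder directory; this only exercises
# the GET path of the backend defined above.
def _example_backend_get(root):
    backend = FileBackend(root, show_hidden=False, base_path="/dav")
    return backend.get("/")   # directory listing of ``root`` rendered as HTML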
|
mprochnow/mpdav
|
mpdav/file_backend.py
|
Python
|
gpl-3.0
| 12,064
| 0.001078
|
# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
class GPTransformation(object):
"""
Link function class for doing non-Gaussian likelihoods approximation
:param Y: observed output (Nx1 numpy.darray)
.. note:: Y values allowed depend on the likelihood_function used
"""
def __init__(self):
pass
def transf(self,f):
"""
Gaussian process tranformation function, latent space -> output space
"""
raise NotImplementedError
def dtransf_df(self,f):
"""
derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d2transf_df2(self,f):
"""
second derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d3transf_df3(self,f):
"""
third derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
def _to_dict(self):
return {}
@staticmethod
def from_dict(input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
link_class = input_dict.pop('class')
import GPy
link_class = eval(link_class)
return link_class._from_dict(link_class, input_dict)
@staticmethod
def _from_dict(link_class, input_dict):
return link_class(**input_dict)
class Identity(GPTransformation):
"""
.. math::
g(f) = f
"""
def transf(self,f):
return f
def dtransf_df(self,f):
return np.ones_like(f)
def d2transf_df2(self,f):
return np.zeros_like(f)
def d3transf_df3(self,f):
return np.zeros_like(f)
def to_dict(self):
input_dict = super(Identity, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Identity"
return input_dict
class Probit(GPTransformation):
"""
.. math::
g(f) = \\Phi^{-1} (mu)
"""
def transf(self,f):
return std_norm_cdf(f)
def dtransf_df(self,f):
return std_norm_pdf(f)
def d2transf_df2(self,f):
return -f * std_norm_pdf(f)
def d3transf_df3(self,f):
return (safe_square(f)-1.)*std_norm_pdf(f)
def to_dict(self):
input_dict = super(Probit, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Probit"
return input_dict
class Cloglog(GPTransformation):
"""
Complementary log-log link
.. math::
p(f) = 1 - e^{-e^f}
or
f = \log (-\log(1-p))
"""
def transf(self,f):
ef = safe_exp(f)
return 1-np.exp(-ef)
def dtransf_df(self,f):
ef = safe_exp(f)
return np.exp(f-ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
return -np.exp(f-ef)*(ef-1.)
def d3transf_df3(self,f):
ef = safe_exp(f)
ef2 = safe_square(ef)
three_times_ef = safe_three_times(ef)
r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
return r_val
class Log(GPTransformation):
"""
.. math::
g(f) = \\log(\\mu)
"""
def transf(self,f):
return safe_exp(f)
def dtransf_df(self,f):
return safe_exp(f)
def d2transf_df2(self,f):
return safe_exp(f)
def d3transf_df3(self,f):
return safe_exp(f)
class Log_ex_1(GPTransformation):
"""
.. math::
g(f) = \\log(\\exp(\\mu) - 1)
"""
def transf(self,f):
return scipy.special.log1p(safe_exp(f))
def dtransf_df(self,f):
ef = safe_exp(f)
return ef/(1.+ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
return aux*(1.-aux)
def d3transf_df3(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
daux_df = aux*(1.-aux)
return daux_df - (2.*aux*daux_df)
class Reciprocal(GPTransformation):
def transf(self,f):
return 1./f
def dtransf_df(self, f):
f2 = safe_square(f)
return -1./f2
def d2transf_df2(self, f):
f3 = safe_cube(f)
return 2./f3
def d3transf_df3(self,f):
f4 = safe_quad(f)
return -6./f4
class Heaviside(GPTransformation):
"""
.. math::
g(f) = I_{x \\geq 0}
"""
def transf(self,f):
#transformation goes here
return np.where(f>0, 1, 0)
def dtransf_df(self,f):
raise NotImplementedError("This function is not differentiable!")
def d2transf_df2(self,f):
raise NotImplementedError("This function is not differentiable!")
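# Illustrative sketch (hedged): a finite-difference check of the Probit link defined
# above; the grid, step size and tolerance are arbitrary choices for the example.
def _example_check_probit_gradient():
    f = np.linspace(-3.0, 3.0, 7)
    link, h = Probit(), 1e-6
    numeric = (link.transf(f + h) - link.transf(f - h)) / (2.0 * h)
    assert np.allclose(numeric, link.dtransf_df(f), atol=1e-5)
    return numeric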
|
befelix/GPy
|
GPy/likelihoods/link_functions.py
|
Python
|
bsd-3-clause
| 4,850
| 0.008454
|
def dev_nav(active=None):
from uliweb import settings
out = "<span>"
for i in settings.MENUS_DEVELOP.nav:
if active!=i["name"]:
out += "<a href='%s'>%s<a> "%(i["link"],i["title"])
else:
out += "<strong>%s</strong> "%(i["title"])
out += "</span>"
return out
|
limodou/uliweb
|
uliweb/contrib/develop/__init__.py
|
Python
|
bsd-2-clause
| 318
| 0.012579
|
import os
from traits.api import HasTraits
from traitsui.api import View, Item
from enable.savage.trait_defs.ui.svg_button import SVGButton
pause_icon = os.path.join(os.path.dirname(__file__), 'player_pause.svg')
resume_icon = os.path.join(os.path.dirname(__file__), 'player_play.svg')
class SVGDemo(HasTraits):
pause = SVGButton('Pause', filename=pause_icon,
toggle_filename=resume_icon,
toggle_state=True,
toggle_label='Resume',
toggle_tooltip='Resume',
tooltip='Pause', toggle=True)
trait_view = View(Item('pause'))
SVGDemo().configure_traits()
|
tommy-u/enable
|
examples/savage/toggle_demo.py
|
Python
|
bsd-3-clause
| 673
| 0.002972
|
"""RPC Implementation, originally written for the Python Idle IDE
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| socketserver.BaseRequestHandler | | SocketIO |
+---------------------------------+ +-------------+
^ | register() |
| | unregister()|
| +-------------+
| ^ ^
| | |
| + -------------------+ |
| | |
+-------------------------+ +-----------------+
| RPCHandler | | RPCClient |
| [attribute of RPCServer]| | |
+-------------------------+ +-----------------+
The RPCServer handler class is expected to provide register/unregister methods.
RPCHandler inherits the mix-in class SocketIO, which provides these methods.
See the Idle run.main() docstring for further information on how this was
accomplished in Idle.
"""
import builtins
import copyreg
import io
import marshal
import os
import pickle
import queue
import select
import socket
import socketserver
import struct
import sys
import threading
import traceback
import types
def unpickle_code(ms):
"Return code object from marshal string ms."
co = marshal.loads(ms)
assert isinstance(co, types.CodeType)
return co
def pickle_code(co):
"Return unpickle function and tuple with marshalled co code object."
assert isinstance(co, types.CodeType)
ms = marshal.dumps(co)
return unpickle_code, (ms,)
def dumps(obj, protocol=None):
"Return pickled (or marshalled) string for obj."
# IDLE passes 'None' to select pickle.DEFAULT_PROTOCOL.
f = io.BytesIO()
p = CodePickler(f, protocol)
p.dump(obj)
return f.getvalue()
class CodePickler(pickle.Pickler):
dispatch_table = {types.CodeType: pickle_code}
dispatch_table.update(copyreg.dispatch_table)
BUFSIZE = 8*1024
LOCALHOST = '127.0.0.1'
class RPCServer(socketserver.TCPServer):
def __init__(self, addr, handlerclass=None):
if handlerclass is None:
handlerclass = RPCHandler
socketserver.TCPServer.__init__(self, addr, handlerclass)
def server_bind(self):
"Override TCPServer method, no bind() phase for connecting entity"
pass
def server_activate(self):
"""Override TCPServer method, connect() instead of listen()
Due to the reversed connection, self.server_address is actually the
address of the Idle Client to which we are connecting.
"""
self.socket.connect(self.server_address)
def get_request(self):
"Override TCPServer method, return already connected socket"
return self.socket, self.server_address
def handle_error(self, request, client_address):
"""Override TCPServer method
Error message goes to __stderr__. No error message if exiting
normally or socket raised EOF. Other exceptions not handled in
server code will cause os._exit.
"""
try:
raise
except SystemExit:
raise
except:
erf = sys.__stderr__
print('\n' + '-'*40, file=erf)
print('Unhandled server exception!', file=erf)
print('Thread: %s' % threading.current_thread().name, file=erf)
print('Client Address: ', client_address, file=erf)
print('Request: ', repr(request), file=erf)
traceback.print_exc(file=erf)
print('\n*** Unrecoverable, server exiting!', file=erf)
print('-'*40, file=erf)
os._exit(0)
#----------------- end class RPCServer --------------------
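# Illustrative sketch (hedged): the reversed-connection pattern described in the module
# docstring -- this process connects out to an address the IDE client is already
# listening on. The port is a placeholder; the handler defaults to RPCHandler.
def _example_start_reversed_server(port):
    server = RPCServer((LOCALHOST, port))
    server.handle_request()   # service the single connected client
    return server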
objecttable = {}
request_queue = queue.Queue(0)
response_queue = queue.Queue(0)
class SocketIO(object):
nextseq = 0
def __init__(self, sock, objtable=None, debugging=None):
self.sockthread = threading.current_thread()
if debugging is not None:
self.debugging = debugging
self.sock = sock
if objtable is None:
objtable = objecttable
self.objtable = objtable
self.responses = {}
self.cvars = {}
def close(self):
sock = self.sock
self.sock = None
if sock is not None:
sock.close()
def exithook(self):
"override for specific exit action"
os._exit(0)
def debug(self, *args):
if not self.debugging:
return
s = self.location + " " + str(threading.current_thread().name)
for a in args:
s = s + " " + str(a)
print(s, file=sys.__stderr__)
def register(self, oid, object):
self.objtable[oid] = object
def unregister(self, oid):
try:
del self.objtable[oid]
except KeyError:
pass
def localcall(self, seq, request):
self.debug("localcall:", request)
try:
how, (oid, methodname, args, kwargs) = request
except TypeError:
return ("ERROR", "Bad request format")
if oid not in self.objtable:
return ("ERROR", "Unknown object id: %r" % (oid,))
obj = self.objtable[oid]
if methodname == "__methods__":
methods = {}
_getmethods(obj, methods)
return ("OK", methods)
if methodname == "__attributes__":
attributes = {}
_getattributes(obj, attributes)
return ("OK", attributes)
if not hasattr(obj, methodname):
return ("ERROR", "Unsupported method name: %r" % (methodname,))
method = getattr(obj, methodname)
try:
if how == 'CALL':
ret = method(*args, **kwargs)
if isinstance(ret, RemoteObject):
ret = remoteref(ret)
return ("OK", ret)
elif how == 'QUEUE':
request_queue.put((seq, (method, args, kwargs)))
return("QUEUED", None)
else:
return ("ERROR", "Unsupported message type: %s" % how)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except OSError:
raise
except Exception as ex:
return ("CALLEXC", ex)
except:
msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
" Object: %s \n Method: %s \n Args: %s\n"
print(msg % (oid, method, args), file=sys.__stderr__)
traceback.print_exc(file=sys.__stderr__)
return ("EXCEPTION", None)
def remotecall(self, oid, methodname, args, kwargs):
self.debug("remotecall:asynccall: ", oid, methodname)
seq = self.asynccall(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def remotequeue(self, oid, methodname, args, kwargs):
self.debug("remotequeue:asyncqueue: ", oid, methodname)
seq = self.asyncqueue(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def asynccall(self, oid, methodname, args, kwargs):
request = ("CALL", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.current_thread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncqueue(self, oid, methodname, args, kwargs):
request = ("QUEUE", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.current_thread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncreturn(self, seq):
self.debug("asyncreturn:%d:call getresponse(): " % seq)
response = self.getresponse(seq, wait=0.05)
self.debug(("asyncreturn:%d:response: " % seq), response)
return self.decoderesponse(response)
def decoderesponse(self, response):
how, what = response
if how == "OK":
return what
if how == "QUEUED":
return None
if how == "EXCEPTION":
self.debug("decoderesponse: EXCEPTION")
return None
if how == "EOF":
self.debug("decoderesponse: EOF")
self.decode_interrupthook()
return None
if how == "ERROR":
self.debug("decoderesponse: Internal ERROR:", what)
raise RuntimeError(what)
if how == "CALLEXC":
self.debug("decoderesponse: Call Exception:", what)
raise what
raise SystemError(how, what)
def decode_interrupthook(self):
""
raise EOFError
def mainloop(self):
"""Listen on socket until I/O not ready or EOF
pollresponse() will loop looking for seq number None, which
never comes, and exit on EOFError.
"""
try:
self.getresponse(myseq=None, wait=0.05)
except EOFError:
self.debug("mainloop:return")
return
def getresponse(self, myseq, wait):
response = self._getresponse(myseq, wait)
if response is not None:
how, what = response
if how == "OK":
response = how, self._proxify(what)
return response
def _proxify(self, obj):
if isinstance(obj, RemoteProxy):
return RPCProxy(self, obj.oid)
if isinstance(obj, list):
return list(map(self._proxify, obj))
# XXX Check for other types -- not currently needed
return obj
def _getresponse(self, myseq, wait):
self.debug("_getresponse:myseq:", myseq)
if threading.current_thread() is self.sockthread:
# this thread does all reading of requests or responses
while 1:
response = self.pollresponse(myseq, wait)
if response is not None:
return response
else:
# wait for notification from socket handling thread
cvar = self.cvars[myseq]
cvar.acquire()
while myseq not in self.responses:
cvar.wait()
response = self.responses[myseq]
self.debug("_getresponse:%s: thread woke up: response: %s" %
(myseq, response))
del self.responses[myseq]
del self.cvars[myseq]
cvar.release()
return response
def newseq(self):
self.nextseq = seq = self.nextseq + 2
return seq
def putmessage(self, message):
self.debug("putmessage:%d:" % message[0])
try:
s = dumps(message)
except pickle.PicklingError:
print("Cannot pickle:", repr(message), file=sys.__stderr__)
raise
s = struct.pack("<i", len(s)) + s
while len(s) > 0:
try:
r, w, x = select.select([], [self.sock], [])
n = self.sock.send(s[:BUFSIZE])
except (AttributeError, TypeError):
raise OSError("socket no longer exists")
s = s[n:]
buff = b''
bufneed = 4
bufstate = 0 # meaning: 0 => reading count; 1 => reading data
def pollpacket(self, wait):
self._stage0()
if len(self.buff) < self.bufneed:
r, w, x = select.select([self.sock.fileno()], [], [], wait)
if len(r) == 0:
return None
try:
s = self.sock.recv(BUFSIZE)
except OSError:
raise EOFError
if len(s) == 0:
raise EOFError
self.buff += s
self._stage0()
return self._stage1()
def _stage0(self):
if self.bufstate == 0 and len(self.buff) >= 4:
s = self.buff[:4]
self.buff = self.buff[4:]
self.bufneed = struct.unpack("<i", s)[0]
self.bufstate = 1
def _stage1(self):
if self.bufstate == 1 and len(self.buff) >= self.bufneed:
packet = self.buff[:self.bufneed]
self.buff = self.buff[self.bufneed:]
self.bufneed = 4
self.bufstate = 0
return packet
def pollmessage(self, wait):
packet = self.pollpacket(wait)
if packet is None:
return None
try:
message = pickle.loads(packet)
except pickle.UnpicklingError:
print("-----------------------", file=sys.__stderr__)
print("cannot unpickle packet:", repr(packet), file=sys.__stderr__)
traceback.print_stack(file=sys.__stderr__)
print("-----------------------", file=sys.__stderr__)
raise
return message
def pollresponse(self, myseq, wait):
"""Handle messages received on the socket.
Some messages received may be asynchronous 'call' or 'queue' requests,
and some may be responses for other threads.
'call' requests are passed to self.localcall() with the expectation of
immediate execution, during which time the socket is not serviced.
'queue' requests are used for tasks (which may block or hang) to be
processed in a different thread. These requests are fed into
request_queue by self.localcall(). Responses to queued requests are
taken from response_queue and sent across the link with the associated
sequence numbers. Messages in the queues are (sequence_number,
request/response) tuples and code using this module removing messages
from the request_queue is responsible for returning the correct
sequence number in the response_queue.
pollresponse() will loop until a response message with the myseq
sequence number is received, and will save other responses in
self.responses and notify the owning thread.
"""
while 1:
# send queued response if there is one available
try:
qmsg = response_queue.get(0)
except queue.Empty:
pass
else:
seq, response = qmsg
message = (seq, ('OK', response))
self.putmessage(message)
# poll for message on link
try:
message = self.pollmessage(wait)
if message is None: # socket not ready
return None
except EOFError:
self.handle_EOF()
return None
except AttributeError:
return None
seq, resq = message
how = resq[0]
self.debug("pollresponse:%d:myseq:%s" % (seq, myseq))
# process or queue a request
if how in ("CALL", "QUEUE"):
self.debug("pollresponse:%d:localcall:call:" % seq)
response = self.localcall(seq, resq)
self.debug("pollresponse:%d:localcall:response:%s"
% (seq, response))
if how == "CALL":
self.putmessage((seq, response))
elif how == "QUEUE":
# don't acknowledge the 'queue' request!
pass
continue
# return if completed message transaction
elif seq == myseq:
return resq
# must be a response for a different thread:
else:
cv = self.cvars.get(seq, None)
# response involving unknown sequence number is discarded,
# probably intended for prior incarnation of server
if cv is not None:
cv.acquire()
self.responses[seq] = resq
cv.notify()
cv.release()
continue
def handle_EOF(self):
"action taken upon link being closed by peer"
self.EOFhook()
self.debug("handle_EOF")
for key in self.cvars:
cv = self.cvars[key]
cv.acquire()
self.responses[key] = ('EOF', None)
cv.notify()
cv.release()
# call our (possibly overridden) exit function
self.exithook()
def EOFhook(self):
"Classes using rpc client/server can override to augment EOF action"
pass
#----------------- end class SocketIO --------------------
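# --- Hedged note (editor addition, not part of idlelib) ---
# The tuples exchanged by SocketIO have the shape (seq, (how, what)). The
# values below are invented for illustration (the oid/method names are not
# real registered objects); the possible response kinds are reconstructed
# from localcall() and decoderesponse() above.
_EXAMPLE_REQUEST = (3, ("CALL", ("some_oid", "some_method", ("arg",), {})))
_EXAMPLE_RESPONSES = [
    (3, ("OK", "return value")),           # normal return
    (3, ("QUEUED", None)),                 # handled later via response_queue
    (3, ("EXCEPTION", None)),              # remote side printed the traceback
    (3, ("CALLEXC", OSError("example"))),  # exception re-raised in the caller
    (3, ("ERROR", "bad request")),         # internal error string
]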
class RemoteObject(object):
# Token mix-in class
pass
def remoteref(obj):
oid = id(obj)
objecttable[oid] = obj
return RemoteProxy(oid)
class RemoteProxy(object):
def __init__(self, oid):
self.oid = oid
class RPCHandler(socketserver.BaseRequestHandler, SocketIO):
debugging = False
location = "#S" # Server
def __init__(self, sock, addr, svr):
svr.current_handler = self ## cgt xxx
SocketIO.__init__(self, sock)
socketserver.BaseRequestHandler.__init__(self, sock, addr, svr)
def handle(self):
"handle() method required by socketserver"
self.mainloop()
def get_remote_proxy(self, oid):
return RPCProxy(self, oid)
class RPCClient(SocketIO):
debugging = False
location = "#C" # Client
nextseq = 1 # Requests coming from the client are odd numbered
def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM):
self.listening_sock = socket.socket(family, type)
self.listening_sock.bind(address)
self.listening_sock.listen(1)
def accept(self):
working_sock, address = self.listening_sock.accept()
if self.debugging:
print("****** Connection request from ", address, file=sys.__stderr__)
if address[0] == LOCALHOST:
SocketIO.__init__(self, working_sock)
else:
print("** Invalid host: ", address, file=sys.__stderr__)
raise OSError
def get_remote_proxy(self, oid):
return RPCProxy(self, oid)
class RPCProxy(object):
__methods = None
__attributes = None
def __init__(self, sockio, oid):
self.sockio = sockio
self.oid = oid
def __getattr__(self, name):
if self.__methods is None:
self.__getmethods()
if self.__methods.get(name):
return MethodProxy(self.sockio, self.oid, name)
if self.__attributes is None:
self.__getattributes()
if name in self.__attributes:
value = self.sockio.remotecall(self.oid, '__getattribute__',
(name,), {})
return value
else:
raise AttributeError(name)
def __getattributes(self):
self.__attributes = self.sockio.remotecall(self.oid,
"__attributes__", (), {})
def __getmethods(self):
self.__methods = self.sockio.remotecall(self.oid,
"__methods__", (), {})
def _getmethods(obj, methods):
# Helper to get a list of methods from an object
# Adds names to dictionary argument 'methods'
for name in dir(obj):
attr = getattr(obj, name)
if callable(attr):
methods[name] = 1
if isinstance(obj, type):
for super in obj.__bases__:
_getmethods(super, methods)
def _getattributes(obj, attributes):
for name in dir(obj):
attr = getattr(obj, name)
if not callable(attr):
attributes[name] = 1
class MethodProxy(object):
def __init__(self, sockio, oid, name):
self.sockio = sockio
self.oid = oid
self.name = name
def __call__(self, *args, **kwargs):
value = self.sockio.remotecall(self.oid, self.name, args, kwargs)
return value
# XXX KBK 09Sep03 We need a proper unit test for this module. Previously
# existing test code was removed at Rev 1.27 (r34098).
def displayhook(value):
"""Override standard display hook to use non-locale encoding"""
if value is None:
return
# Set '_' to None to avoid recursion
builtins._ = None
text = repr(value)
try:
sys.stdout.write(text)
except UnicodeEncodeError:
        # fall back to ascii while the utf8-bmp codec isn't present
encoding = 'ascii'
bytes = text.encode(encoding, 'backslashreplace')
text = bytes.decode(encoding, 'strict')
sys.stdout.write(text)
sys.stdout.write("\n")
builtins._ = value
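# --- Hedged illustration (editor addition, not part of idlelib) ---
# The two helpers below restate the wire framing implemented by putmessage()
# and pollpacket()/_stage0()/_stage1() above: each message is pickled and
# prefixed with a 4-byte little-endian length. The names frame_message and
# unframe_message are invented for illustration only.
def frame_message(message):
    """Return the length-prefixed bytes putmessage() would send."""
    body = dumps(message)  # same pickling helper used by putmessage()
    return struct.pack("<i", len(body)) + body
def unframe_message(data):
    """Split one framed message off the front of *data*.
    Returns (message, remaining_bytes), or (None, data) if *data* does not
    yet contain a complete frame -- mirroring the bufneed/bufstate logic.
    """
    if len(data) < 4:
        return None, data
    (length,) = struct.unpack("<i", data[:4])
    if len(data) < 4 + length:
        return None, data
    return pickle.loads(data[4:4 + length]), data[4 + length:]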
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_rpc', verbosity=2,)
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/idlelib/rpc.py
|
Python
|
gpl-2.0
| 21,137
| 0.000899
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import easyiceconfig as EasyIce
import jderobotComm as comm
import sys, signal
sys.path.append('/usr/local/share/jderobot/python/visualHFSM_py')
import traceback, threading, time
from automatagui import AutomataGui, QtGui, GuiSubautomata
from jderobot import MotorsPrx
from jderobot import LaserPrx
class Automata():
def __init__(self):
self.lock = threading.Lock()
self.displayGui = False
self.StatesSub1 = [
"GoForward",
"GoBack",
]
self.sub1 = "GoForward"
self.run1 = True
def calculate_obstacle(self):
self.laserData = self.KobukiLaser.getLaserData()
min_dist = 1000
for i in range(len(self.laserData.values)):
if self.laserData.values[i] < min_dist:
min_dist = self.laserData.values[i]
if min_dist < 1.0:
return True
else:
return False
def startThreads(self):
self.t1 = threading.Thread(target=self.subautomata1)
self.t1.start()
def createAutomata(self):
guiSubautomataList = []
# Creating subAutomata1
guiSubautomata1 = GuiSubautomata(1,0, self.automataGui)
guiSubautomata1.newGuiNode(1, 0, 69, 163, 1, 'GoForward')
guiSubautomata1.newGuiNode(2, 0, 255, 117, 0, 'GoBack')
guiSubautomata1.newGuiTransition((69, 163), (255, 117), (139, 78), 1, 1, 2)
guiSubautomata1.newGuiTransition((255, 117), (69, 163), (189, 196), 2, 2, 1)
guiSubautomataList.append(guiSubautomata1)
return guiSubautomataList
def shutDown(self):
self.run1 = False
def runGui(self):
app = QtGui.QApplication(sys.argv)
self.automataGui = AutomataGui()
self.automataGui.setAutomata(self.createAutomata())
self.automataGui.loadAutomata()
self.startThreads()
self.automataGui.show()
app.exec_()
def subautomata1(self):
self.run1 = True
cycle = 100
t_activated = False
t_fin = 0
while(self.run1):
totala = time.time() * 1000000
# Evaluation if
if(self.sub1 == "GoForward"):
if(self.calculate_obstacle()):
self.sub1 = "GoBack"
if self.displayGui:
self.automataGui.notifySetNodeAsActive('GoBack')
elif(self.sub1 == "GoBack"):
if(not self.calculate_obstacle()):
self.sub1 = "GoForward"
if self.displayGui:
self.automataGui.notifySetNodeAsActive('GoForward')
# Actuation if
if(self.sub1 == "GoForward"):
self.KobukiMotors.sendV(0.5)
self.KobukiMotors.sendW(0.0)
elif(self.sub1 == "GoBack"):
self.KobukiMotors.sendV(-0.3)
self.KobukiMotors.sendW(0.2)
totalb = time.time() * 1000000
            msecs = (totalb - totala) / 1000
if(msecs < 0 or msecs > cycle):
msecs = cycle
else:
msecs = cycle - msecs
time.sleep(msecs / 1000)
            if(msecs < 33):
                time.sleep(33 / 1000.0)  # float division: 33 / 1000 is 0 under Python 2
def connectToProxys(self):
self.ic = EasyIce.initialize(sys.argv)
self.ic,self.node = comm.init(self.ic)
# Contact to KobukiMotors
self.KobukiMotors = comm.getMotorsClient(self.ic, 'automata.KobukiMotors')
if(not self.KobukiMotors):
raise Exception('could not create client with KobukiMotors')
print('KobukiMotors connected')
# Contact to KobukiLaser
self.KobukiLaser = comm.getLaserClient(self.ic, 'automata.KobukiLaser')
if(not self.KobukiLaser):
raise Exception('could not create client with KobukiLaser')
print('KobukiLaser connected')
def destroyIc(self):
self.KobukiMotors.stop()
self.KobukiLaser.stop()
comm.destroy(self.ic, self.node)
def start(self):
if self.displayGui:
self.guiThread = threading.Thread(target=self.runGui)
self.guiThread.start()
else:
self.startThreads()
def join(self):
if self.displayGui:
self.guiThread.join()
self.t1.join()
def readArgs(self):
for arg in sys.argv:
splitedArg = arg.split('=')
if splitedArg[0] == '--displaygui':
if splitedArg[1] == 'True' or splitedArg[1] == 'true':
self.displayGui = True
print('runtime gui enabled')
else:
self.displayGui = False
print('runtime gui disabled')
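# --- Hedged illustration (editor addition, not generated by visualHFSM) ---
# Minimal restatement of the fixed-cycle timing used inside subautomata1():
# measure how long one iteration took and sleep for the remainder of the
# cycle (in milliseconds). run_at_fixed_cycle and its arguments are invented
# names for illustration only.
def run_at_fixed_cycle(step, cycle_ms=100, iterations=10):
    for _ in range(iterations):
        start = time.time() * 1000.0
        step()
        elapsed = (time.time() * 1000.0) - start
        if elapsed < 0 or elapsed > cycle_ms:
            remaining = cycle_ms
        else:
            remaining = cycle_ms - elapsed
        time.sleep(remaining / 1000.0)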
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_DFL)
automata = Automata()
try:
automata.connectToProxys()
automata.readArgs()
automata.start()
automata.join()
sys.exit(0)
except:
traceback.print_exc()
automata.destroyIc()
sys.exit(-1)
|
okanasik/JdeRobot
|
src/tools/visualStates/samples/goforward/goforward.py
|
Python
|
gpl-3.0
| 4,201
| 0.035468
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS IoT Analytics"
prefix = "iotanalytics"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
BatchPutMessage = Action("BatchPutMessage")
CancelPipelineReprocessing = Action("CancelPipelineReprocessing")
CreateChannel = Action("CreateChannel")
CreateDataset = Action("CreateDataset")
CreateDatasetContent = Action("CreateDatasetContent")
CreateDatastore = Action("CreateDatastore")
CreatePipeline = Action("CreatePipeline")
DeleteChannel = Action("DeleteChannel")
DeleteDataset = Action("DeleteDataset")
DeleteDatasetContent = Action("DeleteDatasetContent")
DeleteDatastore = Action("DeleteDatastore")
DeletePipeline = Action("DeletePipeline")
DescribeChannel = Action("DescribeChannel")
DescribeDataset = Action("DescribeDataset")
DescribeDatastore = Action("DescribeDatastore")
DescribeLoggingOptions = Action("DescribeLoggingOptions")
DescribePipeline = Action("DescribePipeline")
GetDatasetContent = Action("GetDatasetContent")
ListChannels = Action("ListChannels")
ListDatasetContents = Action("ListDatasetContents")
ListDatasets = Action("ListDatasets")
ListDatastores = Action("ListDatastores")
ListPipelines = Action("ListPipelines")
ListTagsForResource = Action("ListTagsForResource")
PutLoggingOptions = Action("PutLoggingOptions")
RunPipelineActivity = Action("RunPipelineActivity")
SampleChannelData = Action("SampleChannelData")
StartPipelineReprocessing = Action("StartPipelineReprocessing")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateChannel = Action("UpdateChannel")
UpdateDataset = Action("UpdateDataset")
UpdateDatastore = Action("UpdateDatastore")
UpdatePipeline = Action("UpdatePipeline")
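# --- Hedged usage sketch (editor addition, not part of awacs itself) ---
# Assuming awacs.aws exposes PolicyDocument, Statement and Allow (the usual
# awacs pattern), the actions above can be combined into an IAM policy as
# sketched below. build_read_only_policy is an invented helper name, and the
# wildcard resource is only an example.
def build_read_only_policy():
    from .aws import Allow, PolicyDocument, Statement
    return PolicyDocument(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Action=[DescribeChannel, DescribeDataset, ListChannels],
                Resource=["*"],
            )
        ],
    )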
|
cloudtools/awacs
|
awacs/iotanalytics.py
|
Python
|
bsd-2-clause
| 2,141
| 0.000467
|
INSTALLED_APPS = ["django_nose"]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-xunit',
'--xunit-file=jenkins/nosetests.xml',
]
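# Hedged usage note (editor addition): with django-nose wired up as above, a
# Jenkins build step would typically run something along the lines of
#   python manage.py test --settings=jenkins.janeway_settings
# so that nose writes the xUnit report to jenkins/nosetests.xml for the
# plugin to pick up. The exact manage.py invocation and settings path are
# assumptions, not taken from this repository.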
|
BirkbeckCTP/janeway
|
jenkins/janeway_settings.py
|
Python
|
agpl-3.0
| 160
| 0.00625
|
import os
from anchore_engine.analyzers.syft.handlers.common import save_entry_to_findings
from anchore_engine.analyzers.utils import dig
def save_entry(findings, engine_entry, pkg_key=None):
if not pkg_key:
pkg_name = engine_entry.get("name", "")
pkg_version = engine_entry.get(
"version", engine_entry.get("latest", "")
) # rethink this... ensure it's right
pkg_key = engine_entry.get(
"location",
"/virtual/pypkg/site-packages/{}-{}".format(pkg_name, pkg_version),
)
save_entry_to_findings(findings, engine_entry, "pkgs.python", pkg_key)
def translate_and_save_entry(findings, artifact):
"""
Handler function to map syft results for the python package type into the engine "raw" document format.
"""
if "python-package-cataloger" not in artifact["foundBy"]:
# engine only includes python findings for egg and wheel installations (with rich metadata)
return
site_pkg_root = artifact["metadata"]["sitePackagesRootPath"]
name = artifact["name"]
# anchore engine always uses the name, however, the name may not be a top-level package
# instead default to the first top-level package unless the name is listed among the
# top level packages explicitly defined in the metadata. Note that the top-level package
# is optional!
pkg_key_names = dig(artifact, "metadata", "topLevelPackages", force_default=[])
pkg_key_name = None
for key_name in pkg_key_names:
if name in key_name:
pkg_key_name = name
else:
pkg_key_name = key_name
if not pkg_key_name:
pkg_key_name = name
pkg_key = os.path.join(site_pkg_root, pkg_key_name)
origin = dig(artifact, "metadata", "author", force_default="")
email = dig(artifact, "metadata", "authorEmail", default=None)
if email:
origin += " <%s>" % email
files = []
for file in dig(artifact, "metadata", "files", force_default=[]):
files.append(os.path.join(site_pkg_root, file["path"]))
# craft the artifact document
pkg_value = {
"name": name,
"version": artifact["version"],
"latest": artifact["version"],
"files": files,
"origin": origin,
"license": dig(artifact, "metadata", "license", force_default=""),
"location": site_pkg_root,
"type": "python",
"cpes": artifact.get("cpes", []),
}
# inject the artifact document into the "raw" analyzer document
save_entry(findings, pkg_value, pkg_key)
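# --- Hedged illustration (editor addition, not part of anchore-engine) ---
# Sketch of how this handler might be exercised directly: a pared-down syft
# artifact containing only the fields read above. All concrete values are
# invented, and `findings` is assumed to be the plain dict the analyzer
# pipeline passes around.
def _example_usage():
    findings = {}
    artifact = {
        "foundBy": "python-package-cataloger",
        "name": "requests",
        "version": "2.25.1",
        "cpes": [],
        "metadata": {
            "sitePackagesRootPath": "/usr/lib/python3.8/site-packages",
            "topLevelPackages": ["requests"],
            "author": "Example Author",
            "authorEmail": "author@example.com",
            "license": "Apache-2.0",
            "files": [{"path": "requests/__init__.py"}],
        },
    }
    translate_and_save_entry(findings, artifact)
    return findings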
|
anchore/anchore-engine
|
anchore_engine/analyzers/syft/handlers/python.py
|
Python
|
apache-2.0
| 2,570
| 0.002724
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
import curses.wrapper
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ""
self.stdscr = stdscr
self.logLine = ""
self.callback = callback
self._setup()
def _setup(self):
self.stdscr.nodelay(1) # Make non blocking
self.rows, self.cols = self.stdscr.getmaxyx()
self.lines = []
curses.use_default_colors()
self.paintStatus(self.statusText)
self.stdscr.refresh()
def set_callback(self, callback):
self.callback = callback
def fileno(self):
"""We want to select on FD 0"""
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
"""add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
def print_log(self, text):
self.logLine = text
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
i = i + 1
index = index - 1
self.printLogLine(self.logLine)
self.stdscr.refresh()
def paintStatus(self, text):
if len(text) > self.cols:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def doRead(self):
"""Input is ready!"""
curses.noecho()
c = self.stdscr.getch() # read a character
if c == curses.KEY_BACKSPACE:
self.searchText = self.searchText[:-1]
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ""
self.print_line(">> %s" % text)
try:
if self.callback:
self.callback.on_line(text)
except Exception as e:
self.print_line(str(e))
self.stdscr.refresh()
elif isprint(c):
if len(self.searchText) == self.cols - 2:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(
self.rows - 1,
0,
self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
)
self.paintStatus(self.statusText + " %d" % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
def logPrefix(self):
return "CursesStdIO"
def close(self):
"""clean up"""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
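# --- Hedged note (editor addition, not part of the original contrib script) ---
# CursesStdIO works with reactor.addReader() because it provides the small
# reader interface Twisted expects: fileno(), doRead(), connectionLost() and
# logPrefix(). The bare-bones reader below (EchoStdin is an invented name)
# shows that contract without any curses involvement.
class EchoStdin:
    def fileno(self):
        return 0  # watch stdin
    def doRead(self):
        import sys
        line = sys.stdin.readline()
        print("stdin said: %s" % line.rstrip())
    def connectionLost(self, reason):
        pass
    def logPrefix(self):
        return "EchoStdin"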
class Callback:
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
screen.set_callback(callback)
stdscr.refresh()
reactor.addReader(screen)
reactor.run()
screen.close()
if __name__ == "__main__":
curses.wrapper(main)
|
matrix-org/synapse
|
contrib/experiments/cursesio.py
|
Python
|
apache-2.0
| 4,229
| 0.000473
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# A Jira shell (using the Jira XML-RPC API).
#
# <https://confluence.atlassian.com/display/JIRA042/Creating+a+XML-RPC+Client>
# <http://docs.atlassian.com/software/jira/docs/api/rpc-jira-plugin/latest/com/atlassian/jira/rpc/xmlrpc/XmlRpcService.html>
#
__version__ = "1.6.0"
import warnings
warnings.filterwarnings("ignore", module="wstools.XMLSchema", lineno=3107)
# Ignore this:
# /opt/local/lib/python2.6/xmlrpclib.py:612: DeprecationWarning: The xmllib module is obsolete.
warnings.filterwarnings("ignore", module="xmlrpclib", lineno=612)
import getpass
import os
import sys
import logging
from pprint import pprint
import json
import xmlrpclib
import time
import codecs
import operator
import webbrowser
import re
TOP = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, os.path.join(TOP, "deps"))
import cmdln
import requests
# This is a total hack for <https://github.com/trentm/jirash/issues/2>.
# It ensures that utf-8 is used for implicit string conversion deep
# in httplib.py for Python 2.7 (which changed from 2.6 resulting in
# that conversion).
if sys.version_info >= (2, 7):
reload(sys)
sys.setdefaultencoding('utf-8')
#---- globals and config
log = logging.getLogger("jirash")
#---- exceptions
class JiraShellError(Exception):
pass
class JiraShellUsageError(JiraShellError):
pass
#---- monkey-patching
def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search):
# decode non-ascii string (if possible)
if unicode and encoding and is8bit(data):
data = unicode(data, encoding, 'replace')
return data
xmlrpclib._decode = _decode
def _isint(s):
try:
int(s)
except ValueError:
return False
else:
return True
#---- Jira API
class Jira(object):
def __init__(self, jira_url, username, password):
self.jira_url = jira_url
self.username = username
self.password = password
self.server = xmlrpclib.ServerProxy(jira_url + '/rpc/xmlrpc',
verbose=False)
self.auth = self.server.jira1.login(username, password)
# WARNING: if we allow a longer jira shell session, then caching
# might need invalidation.
self.cache = {}
_soap_server = None
_soap_auth = None
def _get_soap_server(self):
try:
import pyexpat
except ImportError:
msg = ("Your Python (%s) doesn't have the 'pyexpat' module "
"needed to call the Jira SOAP API. You must install that "
"and retry." % sys.executable)
how = howto_install_pyexpat()
if how:
msg += " You could try `%s`." % how
raise JiraShellUsageError(msg)
import SOAPpy
from StringIO import StringIO
if not self._soap_server:
soap_url = self.jira_url + '/rpc/soap/jirasoapservice-v2?wsdl'
try:
oldStdout = sys.stdout
sys.stdout = StringIO() # trap log output from WSDL parsing
self._soap_server = SOAPpy.WSDL.Proxy(soap_url)
finally:
sys.stdout = oldStdout
self._soap_auth = self._soap_server.login(
self.username, self.password)
return self._soap_server, self._soap_auth
def _jira_soap_call(self, methodName, args):
server, auth = self._get_soap_server()
authedArgs = [auth] + args
out = getattr(server, methodName)(*authedArgs)
typeName = out._typeName()
if typeName == "struct":
return out._asdict()
elif typeName == "typedArray":
outList = [item._asdict() for item in out._aslist()]
return outList
else:
raise JiraShellError("unknown SOAPpy outparam type: '%s'" % typeName)
def _jira_rest_call(self, method, path, **kwargs):
"""Typical kwargs (from `requests`) are:
- params
- data
- headers
"""
url = self.jira_url + '/rest/api/2' + path
r = requests.request(method, url, auth=(self.username, self.password),
**kwargs)
return r
def filters(self):
if "filters" not in self.cache:
filters = self.server.jira1.getFavouriteFilters(self.auth)
filters.sort(key=operator.itemgetter("name"))
self.cache["filters"] = filters
return self.cache["filters"]
def user(self, username):
return self.server.jira1.getUser(self.auth, username)
def projects(self):
if "projects" not in self.cache:
projects = self.server.jira1.getProjectsNoSchemes(self.auth)
projects = [p for p in projects if "Archived" not in p["name"]]
projects.sort(key=operator.itemgetter("key"))
self.cache["projects"] = projects
return self.cache["projects"]
def project(self, key):
projects = self.projects()
for p in projects:
if p["key"] == key:
return p
else:
raise JiraShellError("unknown project: %r" % key)
def priorities(self):
if "priorities" not in self.cache:
priorities = self.server.jira1.getPriorities(self.auth)
self.cache["priorities"] = priorities
return self.cache["priorities"]
def priority(self, priority_id):
assert isinstance(priority_id, str)
for p in self.priorities():
if p["id"] == priority_id:
return p
else:
raise JiraShellError("unknown priority: %r" % priority_id)
def issue_link_types(self):
if "issue_link_types" not in self.cache:
res = self._jira_rest_call("GET", "/issueLinkType")
if res.status_code != 200:
raise JiraShellError("error getting issue link types: %s"
% res.text)
self.cache["issue_link_types"] = res.json()["issueLinkTypes"]
return self.cache["issue_link_types"]
def link(self, link_type_name, inward_issue_key, outward_issue_key):
"""Link issue.
E.g. making PROJ-123 a dup of PROJ-100 would be:
<jira>.link('Duplicate', 'PROJ-123', 'PROJ-100')
where 'Duplicate' is the link type "name" (as from `.link_types()`).
"""
data = {
"type": {
"name": link_type_name
},
"inwardIssue": {
"key": inward_issue_key
},
"outwardIssue": {
"key": outward_issue_key
}
}
res = self._jira_rest_call('POST', '/issueLink',
headers={'content-type': 'application/json'},
data=json.dumps(data))
if res.status_code != 201:
raise JiraShellError('error linking (%s, %s, %s): %s %s'
% (link_type_name, inward_issue_key, outward_issue_key,
res.status_code, res.text))
def issue(self, key):
#XXX
# It's right under 'issuelinks' in each issue's JSON representation. Example:
#
#https://jira.atlassian.com/rest/api/latest/issue/JRA-9?fields=summary,issuelinks
return self.server.jira1.getIssue(self.auth, key)
def issues_from_filter(self, filter):
"""Return all issues for the given filter.
@param filter {String} Filter (saved search) to use. The given
argument can be the filter id, name, or a unique substring or
multi-term substring (e.g. 'foo bar' would match 'Filter foo
and bar') of the name.
"""
# Find the filter.
filterObj = None
filters = self.filters()
# - if int, then try id match first
if _isint(filter):
filter = int(filter)
for f in filters:
if int(f["id"]) == filter:
filterObj = f
break
else:
raise JiraShellError("no filter with id %r" % filter)
if not filterObj:
# - try full name match
for f in filters:
if f["name"] == filter:
filterObj = f
break
if not filterObj:
# - try full word substring match
for f in filters:
if re.search(r'\b%s\b' % filter, f["name"]):
filterObj = f
break
if not filterObj:
# - try substring match
for f in filters:
if filter in f["name"]:
filterObj = f
break
if not filterObj and len(filter.split()) > 1:
# - try multi-term substring match
terms = filter.strip().split()
for f in filters:
found_terms = [t for t in terms if t in f["name"]]
if len(found_terms) == len(terms):
filterObj = f
break
if not filterObj:
raise JiraShellError("no filter found matching %r" % filter)
log.debug("filter match for %r: %s", filter, json.dumps(filterObj))
return self.server.jira1.getIssuesFromFilter(self.auth, filterObj["id"])
def issues_from_search(self, terms, project_keys=None):
"""Search for issues.
@param terms {str} A single stream of search term(s).
@param project_keys {list} Optional list of project keys to which to
limit the search.
"""
if isinstance(terms, (list, tuple)):
terms = ' '.join(terms)
if not project_keys:
#XXX
# TODO: This errors out against my Jira 4.2:
# jirash: ERROR: <Fault 0: 'java.lang.NoSuchMethodException: com.atlassian.jira.rpc.xmlrpc.JiraXmlRpcService.getIssuesFromTextSearch(java.lang.String, java.util.Vector)'> (/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/xmlrpclib.py:838 in close)
# but this says it should exist:
# http://docs.atlassian.com/software/jira/docs/api/rpc-jira-plugin/4.2/index.html?com/atlassian/jira/rpc/xmlrpc/XmlRpcService.html
issues = self.server.jira1.getIssuesFromTextSearch(self.auth, terms)
else:
# Note: I don't want to bother with `maxNumResults` so we set
# it to a big number.
BIG = 1000000
issues = self.server.jira1.getIssuesFromTextSearchWithProject(
self.auth, project_keys, terms, BIG)
if len(issues) == BIG:
                log.warn("*%s* matches returned for %r (projects %s), "
                    "the result might not include all matches",
                    BIG, terms, ', '.join(project_keys))
return issues
def issue_types(self, project_key=None):
if project_key:
project = self.project(project_key)
issue_types = self.server.jira1.getIssueTypesForProject(
self.auth, project["id"])
else:
if "issue_types" not in self.cache:
self.cache["issue_types"] = self.server.jira1.getIssueTypes(self.auth)
issue_types = self.cache["issue_types"]
return issue_types
def issue_type(self, issue_id):
assert isinstance(issue_id, str)
for t in self.issue_types():
if t["id"] == issue_id:
return t
else:
raise JiraShellError("unknown issue type: %r" % issue_id)
def components(self, project_key):
if "components" not in self.cache:
self.cache["components"] = {}
if project_key not in self.cache["components"]:
components = self.server.jira1.getComponents(self.auth, project_key)
components.sort(key=operator.itemgetter("name"))
self.cache["components"][project_key] = components
return self.cache["components"][project_key]
def component(self, project_key, component_id):
assert isinstance(component_id, str)
for c in self.components(project_key):
if c["id"] == component_id:
return c
else:
raise JiraShellError("unknown component id: %r" % component_id)
def component_id(self, project_key, name):
"""Return the project component id from the given id, name, or unique
substring match on the name.
"""
componentObj = None
components = self.components(project_key)
name_lower = name.lower()
# - if int, then try id match first
if isinstance(name, int):
for r in components:
if int(r["id"]) == name:
componentObj = r
break
else:
raise JiraShellError("no component with id %r" % name)
if not componentObj:
# - try full name match
for r in components:
if r["name"].lower() == name_lower:
componentObj = r
break
if not componentObj:
# - try substring match
matches = [r for r in components
if name_lower in r["name"].lower()]
if len(matches) == 1:
componentObj = matches[0]
elif len(matches) > 1:
raise JiraShellError(
"'%s' is ambiguous: matching components: \"%s\"" % (
name, '", "'.join([r["name"] for r in matches])))
if not componentObj:
raise JiraShellError("no component found matching %r" % name)
return componentObj["id"]
def versions(self, project_key, exclude_archived=None,
exclude_released=None):
versions = self.server.jira1.getVersions(self.auth, project_key)
if exclude_archived:
versions = [v for v in versions if v["archived"] != "true"]
if exclude_released:
versions = [v for v in versions if v["released"] != "true"]
versions.sort(key=lambda v: int(v["sequence"]))
return versions
def version(self, version_id):
assert isinstance(version_id, str)
for v in self.versions():
if v["id"] == version_id:
return v
else:
raise JiraShellError("unknown version: %r" % version_id)
def resolutions(self):
if "resolutions" not in self.cache:
self.cache["resolutions"] = self.server.jira1.getResolutions(self.auth)
return self.cache["resolutions"]
def resolution_id(self, name):
"""Return the resolution id from the given id, name, or unique
substring match on the name.
"""
resolutionObj = None
resolutions = self.resolutions()
name_lower = name.lower()
# - if int, then try id match first
if isinstance(name, int):
for r in resolutions:
if int(r["id"]) == name:
resolutionObj = r
break
else:
raise JiraShellError("no resolution with id %r" % name)
if not resolutionObj:
# - try full name match
for r in resolutions:
if r["name"].lower() == name_lower:
resolutionObj = r
break
if not resolutionObj:
# - try substring match
matches = [r for r in resolutions
if name_lower in r["name"].lower()]
if len(matches) == 1:
resolutionObj = matches[0]
elif len(matches) > 1:
raise JiraShellError(
"'%s' is ambiguous: matching resolutions: \"%s\"" % (
name, '", "'.join([r["name"] for r in matches])))
if not resolutionObj:
raise JiraShellError("no resolution found matching %r" % name)
return resolutionObj["id"]
def resolve(self, key):
"""Resolve the given issue.
TODO: what is the result when the workflow change is illegal?
"""
# 5 === "Resolved". Is that true for all Jiras?
res = self._jira_soap_call("progressWorkflowAction", [key, "5"])
def statuses(self):
if "statuses" not in self.cache:
self.cache["statuses"] = self.server.jira1.getStatuses(self.auth)
return self.cache["statuses"]
def status(self, status_id):
assert isinstance(status_id, str)
for s in self.statuses():
if s["id"] == status_id:
return s
else:
raise JiraShellError("unknown status: %r" % status_id)
def status_id(self, name):
"""Get the id of the status matching the given name.
@param name {str} Case-insensitive status name.
"""
statuses = self.statuses()
name_lower = name.lower()
for s in statuses:
if name_lower == s["name"].lower():
return s["id"]
else:
raise JiraShellError("unknown status name: %r" % name)
def create_issue(self, data):
return self.server.jira1.createIssue(self.auth, data)
def update_issue(self, key, data):
# Actual helpful docs on updateIssue():
# https://jira.atlassian.com/browse/JRA-10588
if log.isEnabledFor(logging.DEBUG):
log.debug("calling updateIssue(%r, %s)", key, json.dumps(data))
return self.server.jira1.updateIssue(self.auth, key, data)
#---- JiraShell
class JiraShell(cmdln.Cmdln):
name = "jirash"
jira_url = None
def get_optparser(self):
parser = cmdln.Cmdln.get_optparser(self)
parser.add_option("--version", action="store_true",
help="show version and exit")
parser.add_option("-d", "--debug", action="store_true",
help="debug logging")
parser.add_option("-J", "--jira-url", dest="jira_url",
help="Jira base URL. Otherwise defaults to 'jira_url' value from config file.")
return parser
def _generate_cfg(self, cfg_path):
url = raw_input("Jira URL: ")
username = raw_input("Username: ")
password = getpass.getpass("Password: ")
# TODO Attempt login to validate before saving
config = {
'jira_url': url,
url: {
'username': username,
'password': password,
},
}
f = codecs.open(cfg_path, 'w', 'utf8')
f.write(json.dumps(config, indent=2))
f.close()
def _load_cfg(self, cfg_path=None):
if not cfg_path:
cfg_path = os.path.expanduser("~/.jirash.json")
if not os.path.exists(cfg_path):
print "This appears to be your first time running jirash, let me generate your config"
if self._generate_cfg(cfg_path):
print "Config file generated! [%s]" % cfg_path
f = codecs.open(cfg_path, 'r', 'utf8')
try:
return json.load(f)
finally:
f.close()
def postoptparse(self):
if self.options.debug:
log.setLevel(logging.DEBUG)
if self.options.version:
print "jirash %s" % __version__
sys.exit(0)
self.cfg = self._load_cfg()
self.jira_url = self.options.jira_url or self.cfg["jira_url"]
if not self.cfg[self.jira_url].has_key("password"):
prompt = "Jira (%s) password: " % self.jira_url
self.cfg[self.jira_url]["password"] = getpass.getpass(prompt)
_jira_cache = None
@property
def jira(self):
if not self._jira_cache:
self._jira_cache = Jira(self.jira_url, self.cfg[self.jira_url]["username"],
self.cfg[self.jira_url]["password"])
return self._jira_cache
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_projects(self, subcmd, opts):
"""List projects (excluding "Archived" projects).
Usage:
${cmd_name}
${cmd_option_list}
"""
projects = self.jira.projects()
if opts.json:
print json.dumps(projects, indent=2)
else:
template = "%-10s %-32s %s"
print template % ("KEY", "NAME", "LEAD")
for p in projects:
print template % (
clip(p["key"], 10),
clip(p["name"], 32),
p["lead"]
)
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_filters(self, subcmd, opts):
"""List "favourite" filters for the current user.
Usage:
${cmd_name}
${cmd_option_list}
"""
filters = self.jira.filters()
if opts.json:
print json.dumps(filters, indent=2)
else:
template = "%-5s %-15s %s"
print template % ("ID", "AUTHOR", "NAME")
for f in filters:
print template % (f["id"], f["author"], f["name"])
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_priorities(self, subcmd, opts):
"""List all issue priorities.
Usage:
${cmd_name}
${cmd_option_list}
"""
priorities = self.jira.priorities()
if opts.json:
print json.dumps(priorities, indent=2)
else:
template = "%-3s %-8s %s"
print template % ("ID", "NAME", "DESCRIPTION")
for p in priorities:
print template % (p["id"], p["name"], p["description"])
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_statuses(self, subcmd, opts):
"""List all possible issue statuses.
Usage:
${cmd_name}
${cmd_option_list}
"""
statuses = self.jira.statuses()
if opts.json:
print json.dumps(statuses, indent=2)
else:
template = "%-5s %-15s %s"
print template % ("ID", "NAME", "DESCRIPTION")
for s in statuses:
print template % (s["id"], s["name"], s["description"])
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_resolutions(self, subcmd, opts):
"""List all possible issue resolutions.
Usage:
${cmd_name}
${cmd_option_list}
"""
resolutions = self.jira.resolutions()
if opts.json:
print json.dumps(resolutions, indent=2)
else:
template = "%-5s %-16s %s"
print template % ("ID", "NAME", "DESCRIPTION")
for r in resolutions:
print template % (r["id"], r["name"], r["description"])
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_user(self, subcmd, opts, username):
"""List a given user's information.
Usage:
${cmd_name}
${cmd_option_list}
"""
user = self.jira.user(username)
if not user:
log.error("no such user: %r", username)
return 1
elif opts.json:
print json.dumps(user, indent=2)
else:
template = "%-20s %-20s %s"
print template % ("NAME", "FULLNAME", "EMAIL")
print template % (
clip(user["name"], 20),
clip(user["fullname"], 20),
user["email"])
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_issue(self, subcmd, opts, key):
"""Get an issue.
Usage:
${cmd_name} KEY
${cmd_option_list}
"""
issue = self.jira.issue(key)
if opts.json:
print json.dumps(issue, indent=2)
else:
print self._issue_repr_flat(issue)
@cmdln.option("-f", "--filter",
help="Filter (saved search) to use. See `jirash filters`. The given "
"argument can be the filter id, name, or a unique substring or "
"multi-term substring (e.g. 'foo bar' would match 'Filter foo "
"and bar') of the name.")
@cmdln.option("-s", "--status", action="append", dest="statuses",
help="Limit to issues with the given status string, e.g. 'open'. "
"Can be specified multiple times.")
@cmdln.option("-o", "--open", action="store_true",
        help="Limit to open issues, where open here is a shortcut for "
            "`-s 'Open' -s 'In Progress' -s 'Reopened'`. Note: Use the "
            "'open_status_names' config var to configure the names of 'open' "
            "statuses.")
@cmdln.option("-p", "--project", action="append", dest="project_keys",
help="Project key(s) to which to limit a text search")
@cmdln.option("-l", "--long", action="store_true", help="Long output")
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_issues(self, subcmd, opts, *terms):
"""List issues from a filter (saved search) or text search.
By default not all data on each ticket is displayed to try to
keep the width of the table small. Use '-l' for more data. Use
'-j' for all data.
Usage:
${cmd_name} TERMS...
${cmd_name} -f FILTER
${cmd_option_list}
"""
if opts.filter:
# Ignore 'terms' with a filter for now. TODO: subsearch
if opts.project_keys:
log.warn("ignoring project scoping for a *filter* search: '%s'",
"', '".join(opts.project_keys))
if terms:
log.warn("ignoring search terms for a *filter* search: '%s'",
"', '".join(terms))
try:
issues = self.jira.issues_from_filter(opts.filter)
except JiraShellError, e:
log.error(e)
return 1
elif not terms:
log.error("no search terms given")
return 1
else:
# TODO: Consider separate search for each term and merge results
# if that is more useful.
term = ' '.join(terms)
issues = self.jira.issues_from_search(terms,
project_keys=opts.project_keys)
status_ids = []
if opts.statuses:
status_ids += [self.jira.status_id(name) for name in opts.statuses]
if opts.open:
open_status_names = self.cfg.get('open_status_names',
["Open", "In Progress", "Reopened"])
status_ids = [] # TODO: cache these
for name in open_status_names:
try:
status_ids.append(self.jira.status_id(name))
except JiraShellError, e:
log.warn(e)
if status_ids:
issues = [i for i in issues if i["status"] in status_ids]
if opts.json:
print json.dumps(issues, indent=2)
else:
self._print_issue_table(issues, long_format=opts.long)
def default(self, argv):
key_re = re.compile(r'^\b[A-Z]+\b-\d+$')
if key_re.search(argv[0]):
return self.onecmd(['issue'] + argv)
return cmdln.Cmdln.default(self, argv)
#TODO
#def completedefault(self, text, line, begidx, endidx):
# # Complete paths in the cwd.
# start = line[begidx:endidx]
# print "XXX %r %r %r" % (test, line, start)
# return []
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_linktypes(self, subcmd, opts):
"""List issue link types.
Usage:
${cmd_name}
${cmd_option_list}
"""
types = self.jira.issue_link_types()
if opts.json:
print json.dumps(types, indent=2)
else:
template = "%-6s %-12s %s"
print template % ("ID", "NAME", "OUTWARD")
for t in types:
print template % (t["id"], t["name"], t["outward"])
#@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_link(self, subcmd, opts, *args):
"""Link a Jira issue to another.
Usage:
${cmd_name} <issue> <relation> <issue>
${cmd_option_list}
`<relation>` is a "outward" field from this Jira's issue link types
(list with `jirash linktypes`). A unique substring is supported as well
Examples:
jirash link MON-123 depends on MON-100
jirash link OS-2000 duplicates OS-1999
jirash link IMGAPI-123 dup IMGAPI-101 # "dup" is a substring
"""
if len(args) < 3:
raise JiraShellError('not enough arguments: %s' % ' '.join(args))
link_types = self.jira.issue_link_types()
first = args[0]
reln = ' '.join(args[1:-1])
second = args[-1]
candidates = [lt for lt in link_types
if reln.lower() in lt["outward"].lower()]
if len(candidates) != 1:
            raise JiraShellError("no unique link type match for '%s': "
                "must match one of '%s'"
                % (reln, "', '".join(lt["outward"] for lt in link_types)))
link_type = candidates[0]
self.jira.link(link_type["name"], first, second)
print "Linked: %s %s %s" % (first, link_type["outward"], second)
@cmdln.option("-p", "--project", dest="project_key",
help="Project for which to get issue types.")
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_issuetypes(self, subcmd, opts):
"""List issue types (e.g. bug, task, ...).
Usage:
${cmd_name}
${cmd_option_list}
"""
types = self.jira.issue_types(opts.project_key)
if opts.json:
print json.dumps(types, indent=2)
else:
template = "%-2s %-20s %s"
print template % ("ID", "NAME", "DESCRIPTION")
for t in types:
print template % (t["id"], t["name"], t["description"])
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
@cmdln.option("-a", dest="exclude_archived", action="store_true",
help="exclude archived versions")
@cmdln.option("-r", dest="exclude_released", action="store_true",
help="exclude released versions")
def do_versions(self, subcmd, opts, project_key):
"""Get available versions for the given project.
Usage:
${cmd_name} PROJECT-KEY
${cmd_option_list}
"""
versions = self.jira.versions(project_key,
exclude_archived=opts.exclude_archived,
exclude_released=opts.exclude_released)
if opts.json:
print json.dumps(versions, indent=2)
else:
template = "%-5s %-30s %8s %8s"
print template % ("ID", "NAME", "RELEASED", "ARCHIVED")
for v in versions:
print template % (
v["id"],
v["name"],
(v["released"] == "true" and "released" or "-"),
(v["archived"] == "true" and "archived" or "-"))
@cmdln.option("-j", "--json", action="store_true", help="JSON output")
def do_components(self, subcmd, opts, project_key):
"""Get available components for the given project.
Usage:
${cmd_name} PROJECT-KEY
${cmd_option_list}
"""
components = self.jira.components(project_key)
if opts.json:
print json.dumps(components, indent=2)
else:
template = "%-5s %s"
print template % ("ID", "NAME")
for c in components:
print template % (c["id"], c["name"])
#TODO: -t, --type option (default to bug)
# createbug, createtask, ... aliases for this
#TODO: --browse to open the ticket
#TODO: attachments?
@cmdln.option("-d", "--description",
help="issue description. If not given, this will prompt.")
@cmdln.option("-t", "--type",
help="Issue type or a case-insensitive substring match against valid "
"issue types for the given project. This defaults to `1` for "
"bwcompat reasons, which "
"in Joyent's Jira is 'Bug'. Use `jirash issuetypes -p "
"PROJECT-NAME` to list valid issue types for a project.")
@cmdln.option("-a", "--assignee",
help="Assignee username. Note that this is the username field, "
"NOT their full name. (XXX Don't have a good way to list "
"available usernames right now.)")
@cmdln.option("-c", "--component", dest="components", action="append",
metavar="COMPONENT",
help="Component id or substring match. Use `jirash components PROJ` "
"to list them. Some Jira projects require a component and don't "
"have a default, but jirash can't detect that so doesn't know "
"when to require a component.")
@cmdln.option("-e", dest="editor",
help="Edit issue summary/description in your editor.")
@cmdln.option("-E", dest="editor_template",
help="Template to use for editing issue summary/description. "
"Implies '-e'.")
@cmdln.option("-B", "--no-browse", action="store_true",
help="Do *not* attempt to open the browser to the created issue.")
def do_createissue(self, subcmd, opts, project_key, *summary):
"""Create a new issue.
Usage:
${cmd_name} PROJECT-KEY [SUMMARY]
${cmd_option_list}
"""
data = {
"project": project_key,
}
if opts.type:
issue_types = self.jira.issue_types(project_key=project_key)
# First try exact match.
for it in issue_types:
if it["name"] == opts.type:
data["type"] = int(it["id"])
break
else:
# Try case-insensitive full match.
for it in issue_types:
if it["name"].lower() == opts.type.lower():
data["type"] = int(it["id"])
break
else:
# Try case-insensitive substring match (require unique).
matches = [it for it in issue_types if
opts.type.lower() in it["name"].lower()]
if len(matches) == 1:
data["type"] = int(matches[0]["id"])
else:
                        raise JiraShellError(
                            "no issue types for project %s match '%s', use "
                            "`jirash issuetypes -p %s` to list valid issue "
                            "types" % (project_key, opts.type, project_key))
else:
# Hardcoded to '1' for bwcompat. This is "Bug" in Joyent's Jira.
data["type"] = 1
use_editor = (opts.editor is not None
or opts.editor_template is not None
or self.cfg.get("createissue_use_editor", False))
if summary:
summary = u' '.join(summary)
print u"Summary: %s" % summary
elif not use_editor:
summary = query("Summary")
else:
summary = None
if opts.assignee:
assignee = opts.assignee
elif use_editor:
assignee = None
else:
assignee = query(
"Assignee (blank for default, 'me' for yourself)")
if assignee:
if assignee == "me":
data["assignee"] = self.cfg[self.jira_url]["username"]
else:
data["assignee"] = assignee
if opts.components:
component_ids = [self.jira.component_id(project_key, s)
for s in opts.components]
data["components"] = [{"id": cid} for cid in component_ids]
print "Components: %s" % ', '.join(
self.jira.component(project_key, cid)["name"]
for cid in component_ids)
if opts.description:
description = opts.description
elif not use_editor:
description = query_multiline("Description")
else:
description = None
if use_editor and (not summary or not description):
text = """# Edit the new issue *summary* and *description*:
#
# My summary on one line at the top
#
# Then some lines
# of description
# here.
#
# Leading lines starting with '#' are dropped.
"""
if opts.editor_template:
text = codecs.open(opts.editor_template, 'r', 'utf8').read()
cursor_line = 10
if summary:
text += summary + '\n\n\n'
cursor_line = 12
elif description:
text += 'SUMMARY\n\n'
if description:
text += description
if not summary and not description:
text += "\n"
while True:
text = edit_in_editor('%s-NNN.jirash' % project_key, text,
cursor_line)
lines = text.splitlines(False)
while lines and lines[0].startswith('#'):
lines.pop(0)
if len(lines) >= 3 and not lines[1].strip():
summary = lines[0]
description = '\n'.join(lines[2:]).strip()
break
sys.stderr.write('error: content is not "SUMMARY\\n\\nDESCRIPTION"\n')
raw_input("Press any key to re-edit...")
data["summary"] = summary.encode('utf-8')
data["description"] = description.encode('utf-8')
try:
issue = self.jira.create_issue(data)
except:
if use_editor:
# Save 'text' out so it isn't all lost data.
save_file = '%s-NNN.%d.jirash' % (project_key, int(time.time()))
fout = codecs.open(save_file, 'w', 'utf8')
fout.write(text)
fout.close()
sys.stderr.write(
'Note: Your edits have been saved to %s, reload with:\n'
' jirash createissue -E %s ...\n'
% (save_file, save_file))
raise
print "created:", self._issue_repr_flat(issue)
no_browse = (opts.no_browse
or self.cfg.get("createissue_no_browse", False))
if not no_browse:
url = "%s/browse/%s" % (self.jira_url, issue["key"])
webbrowser.open(url)
def _do_soap(self, subcmd, opts):
res = self.jira._jira_soap_call("getIssue", ["MON-113"])
#res = self.jira._jira_soap_call("progressWorkflowAction",
# ["TOOLS-158", "5"]) # 5 === "Resolved"
pprint(res)
#@cmdln.option("-r", "--resolution",
# help="Resolution. Default is 'fixed'. See `jira resolutions`. The "
# "given value can be a resolution id, name or unique name "
# "substring.")
def do_resolve(self, subcmd, opts, key):
"""Resolve an issue.
        Limitation: AFAICT there is no way to *set* the resolution (i.e.
"Fixed" vs "Won't Fix" ... `jirash resolutions`) via the Jira API,
so there is no option for that here.
Usage:
${cmd_name} ISSUE-KEY
${cmd_option_list}
"""
self.jira.resolve(key)
issue = self.jira.issue(key)
print "updated:", self._issue_repr_flat(issue)
def _print_issue_table(self, issues, long_format=False):
if long_format:
template = "%-11s %-8s %-8s %-11s %-10s %-10s %s"
columns = ("KEY", "PRIO", "STATUS", "TYPE", "REPORTER",
"ASSIGNEE", "SUMMARY")
print template % columns
for issue in issues:
try:
try:
issue_type = self.jira.issue_type(issue["type"])["name"]
except JiraShellError, e:
# The issue type may have been removed. Just use the id.
issue_type = issue["type"]
priority = self.jira.priority(issue["priority"])
status = self.jira.status(issue["status"])
print template % (
issue["key"],
priority["name"],
clip(status["name"], 8),
clip(issue_type, 11),
clip(issue["reporter"], 10),
clip(issue.get("assignee", "unassigned"), 10),
issue["summary"],
)
except Exception, e:
log.error("error making issue repr: %s (issue=%r)",
e, issue)
raise
else:
if issues:
key_width = max(len(i["key"]) for i in issues)
template = u"%%-%ds %%-13s %%-10s %%s" % key_width
term_width = getTerminalSize()[1]
summary_width = term_width - key_width - 2 - 13 - 2 - 10 - 2
columns = ("KEY", "STATE", "ASSIGNEE", "SUMMARY")
print template % columns
for issue in issues:
try:
try:
issue_type = self.jira.issue_type(issue["type"])["name"]
except JiraShellError, e:
# The issue type may have been removed. Just use the id.
issue_type = issue["type"]
status = self.jira.status(issue["status"])
if "priority" in issue:
priority = self.jira.priority(issue["priority"])["name"]
else:
priority = "-"
state = "%s/%s/%s" % (
clip(priority, 4, False),
clip(status["name"].replace(' ', ''), 4, False),
clip(issue_type, 3, False))
safeprint(template % (
issue["key"],
state,
clip(issue.get("assignee", "unassigned"), 10),
#issue["summary"],
clip(issue["summary"], summary_width),
))
except Exception, e:
log.error("error making issue repr: %s (issue=%r)",
e, issue)
raise
def _issue_repr_flat(self, issue):
try:
try:
issue_type = self.jira.issue_type(issue["type"])["name"]
except JiraShellError, e:
# The issue type may have been removed. Just use the id.
issue_type = "type:" + issue["type"]
if "priority" in issue:
priority = self.jira.priority(issue["priority"])["name"]
else:
priority = "<no priority>"
status = self.jira.status(issue["status"])
return "%s: %s (%s -> %s, %s, %s, %s)" % (
issue["key"],
issue["summary"],
issue["reporter"],
issue.get("assignee", "<unassigned>"),
issue_type,
priority,
status["name"])
except Exception, e:
log.error("error making issue repr: %s (issue=%r)", e, issue)
raise
#---- support stuff
def howto_install_pyexpat():
"""Return a short suggestion string for installing pyexpat on
the current OS. Or None if no suggestion.
"""
pyver = "%d.%d" % tuple(sys.version_info[0:2])
if sys.platform.startswith("sunos"):
if os.path.exists("/opt/local/etc/pkg_install.conf"):
return "pkgin -y py%s-expat" % pyver
# http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
# with a tweak.
def getTerminalSize():
import os
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr[1], cr[0]
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
def safeprint(s, stream=sys.stdout):
if stream.encoding not in ('UTF-8',):
s = s.encode('ascii', 'replace')
print s
def clip(s, length, ellipsis=True):
if len(s) > length:
if ellipsis:
if sys.stdout.encoding in ('UTF-8',):
s = s[:length-1] + u'\u2026'
else:
s = s[:length-3] + '...'
else:
s = s[:length]
return s
## {{{ http://code.activestate.com/recipes/577099/ (r1)
def query(question, default=None):
s = question
if default:
s += " [%s]" % default
s += ": "
answer = raw_input(s)
answer = answer.strip()
if not answer:
return default
return answer
## end of http://code.activestate.com/recipes/577099/ }}}
def query_multiline(question):
print "%s (use '.' on a line by itself to finish):" % question
lines = []
while True:
line = raw_input()
if line.rstrip() == '.':
break
lines.append(line.decode('utf-8'))
answer = u'\n'.join(lines)
return answer
def edit_in_editor(filename, before_text, cursor_line=None):
import tempfile
(fd, tmp_path) = tempfile.mkstemp(filename)
fout = os.fdopen(fd, 'w')
#XXX
#tmp_path = tempfile(None, filename + ".tmp.")
#fout = codecs.open(tmp_path, 'w', 'utf8')
fout.write(before_text)
fout.close()
editor = os.environ['EDITOR']
line_cmd = ""
if editor in ('vi', 'vim') and cursor_line is not None:
line_cmd = "+%d" % cursor_line
os.system('%s %s -f "%s"' % (editor, line_cmd, tmp_path))
fin = codecs.open(tmp_path, 'r', 'utf8')
after_text = fin.read()
fin.close()
return after_text
#---- mainline
def main(argv=sys.argv):
# Support `complete -C 'jirash --bash-completion' jirash` for Bash
# completion.
if len(argv) > 1 and argv[1] == "--bash-completion":
# exec: 'python /path/to/cmdln.py /path/to/script.py CmdlnClass'
_dir = os.path.dirname(os.path.realpath(__file__))
_jirashell_py = os.path.join(_dir, "jirashell.py")
_cmdln_py = os.path.join(_dir, "cmdln.py")
_cmd = '"%s" "%s" "%s" JiraShell %s' % (
sys.executable, _cmdln_py, _jirashell_py, ' '.join(sys.argv[2:]))
#print("calling `%s`" % _cmd)
return os.system(_cmd)
logging.basicConfig(format='%(name)s: %(levelname)s: %(message)s')
log.setLevel(logging.INFO)
shell = JiraShell()
return shell.main(argv, loop=cmdln.LOOP_IF_EMPTY)
if __name__ == "__main__":
try:
retval = main(sys.argv)
except KeyboardInterrupt:
sys.exit(1)
except SystemExit:
raise
except JiraShellUsageError, ex:
print("error: %s" % ex)
sys.exit(1)
except:
import platform
import traceback
print("")
traceback.print_exc()
print("""
Python: %s
OS: %s
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* If this is obviously not user error, please log a bug at *
* https://github.com/trentm/jirash/issues *
* to report this error. Thanks! *
* -- Trent *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *""" % (
sys.version, platform.platform()))
sys.exit(1)
else:
sys.exit(retval)
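# --- Hedged usage sketch (editor addition, not part of jirash) ---
# Typical invocations of the subcommands defined above; the project keys and
# issue keys are made-up examples:
#   jirash projects
#   jirash issues -p TOOLS -o crash on startup
#   jirash issue TOOLS-123          # or just: jirash TOOLS-123
#   jirash createissue TOOLS "Fix the frobnicator"
#   jirash link TOOLS-123 duplicates TOOLS-100
#   jirash resolve TOOLS-123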
|
m42e/jirash
|
lib/jirashell.py
|
Python
|
mit
| 48,170
| 0.003405
|
#!/usr/bin/env python
# runTests.py -- Portage Unit Test Functionality
# Copyright 2006-2017 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os, sys
import os.path as osp
import grp
import platform
import pwd
import signal
def debug_signal(signum, frame):
import pdb
pdb.set_trace()
if platform.python_implementation() == 'Jython':
debug_signum = signal.SIGUSR2 # bug #424259
else:
debug_signum = signal.SIGUSR1
signal.signal(debug_signum, debug_signal)
# Pretend that the current user's uid/gid are the 'portage' uid/gid,
# so things go smoothly regardless of the current user and global
# user/group configuration.
os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
# Insert our parent dir so we can do shiny import "tests"
# This line courtesy of Marienz and Pkgcore ;)
repoman_pym = osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__))))
sys.path.insert(0, repoman_pym)
# Add in the parent portage python modules
portage_pym = osp.dirname(osp.dirname(repoman_pym))+'/pym'
sys.path.insert(0, portage_pym)
# import our centrally initialized portage instance
from repoman._portage import portage
portage._internal_caller = True
# Ensure that we don't instantiate portage.settings, so that tests should
# work the same regardless of global configuration file state/existence.
portage._disable_legacy_globals()
if os.environ.get('NOCOLOR') in ('yes', 'true'):
portage.output.nocolor()
import repoman.tests as tests
from portage.const import PORTAGE_BIN_PATH
path = os.environ.get("PATH", "").split(":")
path = [x for x in path if x]
insert_bin_path = True
try:
insert_bin_path = not path or \
not os.path.samefile(path[0], PORTAGE_BIN_PATH)
except OSError:
pass
if insert_bin_path:
path.insert(0, PORTAGE_BIN_PATH)
os.environ["PATH"] = ":".join(path)
if __name__ == "__main__":
sys.exit(tests.main())
|
dol-sen/portage
|
repoman/pym/repoman/tests/runTests.py
|
Python
|
gpl-2.0
| 1,959
| 0.009188
|
import os
import py
import pytest
import numpy as np
import openpnm as op
from openpnm.models.misc import from_neighbor_pores
@pytest.mark.skip(reason="'netgen' is only available on conda")
class STLTest:
def setup_class(self):
np.random.seed(10)
self.net = op.network.Cubic(shape=[2, 2, 2])
self.net["pore.diameter"] = 0.5 + np.random.rand(self.net.Np) * 0.5
Dt = from_neighbor_pores(target=self.net, prop="pore.diameter") * 0.5
self.net["throat.diameter"] = Dt
self.net["throat.length"] = 1.0
def teardown_class(self):
os.remove(f"{self.net.name}.stl")
os.remove("custom_stl.stl")
def test_export_data_stl(self):
op.io.to_stl(network=self.net)
assert os.path.isfile(f"{self.net.name}.stl")
op.io.to_stl(network=self.net, filename="custom_stl")
assert os.path.isfile("custom_stl.stl")
if __name__ == '__main__':
# All the tests in this file can be run with 'playing' this file
t = STLTest()
self = t # For interacting with the tests at the command line
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print(f'Running test: {item}')
try:
t.__getattribute__(item)()
except TypeError:
t.__getattribute__(item)(tmpdir=py.path.local())
t.teardown_class()
|
PMEAL/OpenPNM
|
tests/unit/io/STLTest.py
|
Python
|
mit
| 1,388
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova` -- Cloud IaaS Platform
===================================
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
from exception import *
|
sorenh/cc
|
nova/__init__.py
|
Python
|
apache-2.0
| 1,336
| 0.000749
|
"""
EtcUdevRules - file ``/etc/udev/rules.d/``
==========================================
This module is similar to :py:mod:`insights.parsers.udev_rules`,
but parses the ``.rules`` files under the ``/etc/udev/rules.d/`` directory instead.
The parsers included in this module are:
UdevRules40Redhat - file ``/etc/udev/rules.d/40-redhat.rules``
--------------------------------------------------------------
"""
from insights import parser
from insights.core import LogFileOutput
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.etc_udev_40_redhat_rules)
class UdevRules40Redhat(LogFileOutput):
"""
.. warning::
This parser is deprecated, please use
:py:class:`insights.parsers.udev_rules.UdevRules40Redhat` instead.
Read the content of ``/etc/udev/rules.d/40-redhat.rules`` file.
.. note::
        The syntax of the `.rules` file is complex, and no rules currently
        require the serialized parsed result. The only existing rule is
        supposed to check the syntax of some specific lines, so
        :class:`insights.core.LogFileOutput` is used as the base class here.
Sample input::
# do not edit this file, it will be overwritten on update
# CPU hotadd request
SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"
# Memory hotadd request
SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end"
PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end"
LABEL="memory_hotplug_end"
Examples:
>>> 'LABEL="memory_hotplug_end"' in udev_rules.lines
True
"""
def __init__(self, *args, **kwargs):
        deprecated(UdevRules40Redhat, "Import UdevRules40Redhat from insights.parsers.udev_rules instead.")
super(UdevRules40Redhat, self).__init__(*args, **kwargs)
|
RedHatInsights/insights-core
|
insights/parsers/etc_udev_rules.py
|
Python
|
apache-2.0
| 1,878
| 0.001065
|
import numpy as np
import theano
from theano import tensor as T
from generateTrainDataonText import createTrain
from neuralmodels.utils import permute
from neuralmodels.loadcheckpoint import *
from neuralmodels.costs import softmax_loss
from neuralmodels.models import *
from neuralmodels.predictions import OutputMaxProb, OutputSampleFromDiscrete
from neuralmodels.layers import *
def text_prediction(class_ids_reverse,p_labels):
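	# Convert a (T x N) matrix of predicted class ids into a list of N strings.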
N = p_labels.shape[1]
T = p_labels.shape[0]
text_output = []
for i in range(N):
t = ''
for j in p_labels[:,i]:
t = t + class_ids_reverse[j]
text_output.append(t)
return text_output
if __name__ == '__main__':
num_samples = 10000
num_validation = 100
num_train = num_samples - num_validation
len_samples = 300
epochs = 30
batch_size = 100
learning_rate_decay = 0.97
decay_after=5
[X,Y,num_classes,class_ids_reverse] = createTrain('shakespeare_input.txt',num_samples,len_samples)
inputD = num_classes
outputD = num_classes
permutation = permute(num_samples)
X = X[:,permutation]
Y = Y[:,permutation]
X_tr = X[:,:num_train]
Y_tr = Y[:,:num_train]
X_valid = X[:,num_train:]
Y_valid = Y[:,num_train:]
# Creating network layers
layers = [OneHot(num_classes),LSTM(),LSTM(),LSTM(),softmax(num_classes)]
trY = T.lmatrix()
# Initializing network
rnn = RNN(layers,softmax_loss,trY,1e-3)
# Fitting model
rnn.fitModel(X_tr,Y_tr,1,'checkpoints/',epochs,batch_size,learning_rate_decay,decay_after)
# Printing a generated sentence
out = rnn.predict_language_model(X_valid[:,:1],1000,OutputSampleFromDiscrete)
# Print the sentence here
text_produced = text_prediction(class_ids_reverse,out)
|
avisingh599/NeuralModels
|
character-rnn/char-rnn.py
|
Python
|
mit
| 1,663
| 0.048707
|
import getpass
import json
import getopt
from genericpath import isfile
from os.path import sep
from pingdumb.main_module import url_type
def read_config():
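    # On first run, write a default config to ./pingdumb.json; otherwise load and return the existing one.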
f_path = "." + sep + "pingdumb.json"
if not isfile(f_path):
f = open(f_path, 'w')
conf = {
"url": "jellyms.kr",
"smtpServer": "smtp.gmail.com:587",
"smtpUser": "",
"toEmail": "",
"interval": 300,
}
f.write(json.dumps(conf))
f.close()
return conf
else:
f = open(f_path, 'r+b')
conf = json.loads(f.read().decode('utf-8'))
f.close()
return conf
def write_config(conf):
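    # Drop the SMTP password from the config before persisting it to disk.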
if 'smtpPw' in conf:
del conf['smtpPw']
f_path = "." + sep + "pingdumb.json"
f = open(f_path, 'w')
f.truncate()
f.write(json.dumps(conf))
f.close()
def input_conf(message, default):
value = input(message)
if not value:
return default
return value
def set_config():
configure = read_config()
url_for_test = input_conf(
"URL to test? (" + configure["url"] + ")", configure["url"]
)
url_for_test = url_type(url_for_test)
recv_mail = input_conf(
"Receive mail? (" + configure["toEmail"] + ")",
configure["toEmail"]
)
s_server = input_conf(
"SMTP server? (" + configure["smtpServer"] + ")",
configure["smtpServer"]
)
s_user = input_conf(
"SMTP Server username? (" + configure["smtpUser"] + ")",
configure["smtpUser"]
)
    s_pw = getpass.getpass("SMTP Server password?")  # getpass has no default-value argument
interval = input_conf(
"interval of seconds? (" + str(configure["interval"]) + ")",
configure["interval"]
)
interval = int(interval)
configure["url"] = url_for_test
configure["toEmail"] = recv_mail
configure["smtpServer"] = s_server
configure["smtpUser"] = s_user
configure["smtpPw"] = s_pw
configure["interval"] = interval
return configure
def configure_to_tuple():
configure = read_config()
return configure["url"], configure["smtpServer"], \
configure["smtpUser"], configure["toEmail"], configure["interval"]
def extract_password_with_argv(argv):
opts, args = getopt.getopt(argv, 'p')
for o, a in opts:
if o == "-p":
            return getpass.getpass("SMTP Server password")
|
kyunooh/pingdumb
|
pingdumb/conf.py
|
Python
|
apache-2.0
| 2,422
| 0.002064
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import difflib
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import easy_template
class EasyTemplateTestCase(unittest.TestCase):
def _RunTest(self, template, expected, template_dict):
src = cStringIO.StringIO(template)
dst = cStringIO.StringIO()
easy_template.RunTemplate(src, dst, template_dict)
if dst.getvalue() != expected:
expected_lines = expected.splitlines(1)
actual_lines = dst.getvalue().splitlines(1)
diff = ''.join(difflib.unified_diff(
expected_lines, actual_lines,
fromfile='expected', tofile='actual'))
self.fail('Unexpected output:\n' + diff)
def testEmpty(self):
self._RunTest('', '', {})
def testNewlines(self):
self._RunTest('\n\n', '\n\n', {})
def testNoInterpolation(self):
template = """I love paris in the
the springtime [don't you?]
{this is not interpolation}.
"""
self._RunTest(template, template, {})
def testSimpleInterpolation(self):
self._RunTest(
'{{foo}} is my favorite number',
'42 is my favorite number',
{'foo': 42})
def testLineContinuations(self):
    template = "Line 1 \\\nLine 2\n"
self._RunTest(template, template, {})
def testIfStatement(self):
template = r"""
[[if foo:]]
foo
[[else:]]
not foo
[[]]"""
self._RunTest(template, "\n foo\n", {'foo': True})
self._RunTest(template, "\n not foo\n", {'foo': False})
def testForStatement(self):
template = r"""[[for beers in [99, 98, 1]:]]
{{beers}} bottle{{(beers != 1) and 's' or ''}} of beer on the wall...
[[]]"""
expected = r"""99 bottles of beer on the wall...
98 bottles of beer on the wall...
1 bottle of beer on the wall...
"""
self._RunTest(template, expected, {})
def testListVariables(self):
template = r"""
[[for i, item in enumerate(my_list):]]
{{i+1}}: {{item}}
[[]]
"""
self._RunTest(template, "\n1: Banana\n2: Grapes\n3: Kumquat\n",
{'my_list': ['Banana', 'Grapes', 'Kumquat']})
def testListInterpolation(self):
template = "{{', '.join(growing[0:-1]) + ' and ' + growing[-1]}} grow..."
self._RunTest(template, "Oats, peas, beans and barley grow...",
{'growing': ['Oats', 'peas', 'beans', 'barley']})
self._RunTest(template, "Love and laughter grow...",
{'growing': ['Love', 'laughter']})
def testComplex(self):
template = r"""
struct {{name}} {
[[for field in fields:]]
[[ if field['type'] == 'array':]]
{{field['basetype']}} {{field['name']}}[{{field['size']}}];
[[ else:]]
{{field['type']}} {{field['name']}};
[[ ]]
[[]]
};"""
expected = r"""
struct Foo {
std::string name;
int problems[99];
};"""
self._RunTest(template, expected, {
'name': 'Foo',
'fields': [
{'name': 'name', 'type': 'std::string'},
{'name': 'problems', 'type': 'array', 'basetype': 'int', 'size': 99}]})
def testModulo(self):
self._RunTest('No expression %', 'No expression %', {})
self._RunTest('% before {{3 + 4}}', '% before 7', {})
self._RunTest('{{2**8}} % after', '256 % after', {})
self._RunTest('inside {{8 % 3}}', 'inside 2', {})
self._RunTest('Everywhere % {{8 % 3}} %', 'Everywhere % 2 %', {})
if __name__ == '__main__':
unittest.main()
|
Jonekee/chromium.src
|
native_client_sdk/src/build_tools/tests/easy_template_test.py
|
Python
|
bsd-3-clause
| 3,559
| 0.006182
|
"""
WSGI config for Tuteria-Application-Test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
Tuteria/Recruitment-test
|
config/wsgi.py
|
Python
|
mit
| 1,461
| 0
|
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
# The original snippet assumed `digits` and `plt` were already defined (e.g. in a
# notebook session); load them here so the script is self-contained.
digits = load_digits()
kmeans = KMeans(n_clusters=10)
clusters = kmeans.fit_predict(digits.data)
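# Note: k-means cluster ids are arbitrary labels and need not match the true digit classes.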
print kmeans.cluster_centers_.shape
#------------------------------------------------------------
# visualize the cluster centers
fig = plt.figure(figsize=(8, 3))
for i in range(10):
ax = fig.add_subplot(2, 5, 1 + i)
ax.imshow(kmeans.cluster_centers_[i].reshape((8, 8)),
cmap=plt.cm.binary)
from sklearn.manifold import Isomap
X_iso = Isomap(n_neighbors=10).fit_transform(digits.data)
#------------------------------------------------------------
# visualize the projected data
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].scatter(X_iso[:, 0], X_iso[:, 1], c=clusters)
ax[1].scatter(X_iso[:, 0], X_iso[:, 1], c=digits.target)
|
ageek/confPyNotebooks
|
sklearn-scipy-2013/solutions/08B_digits_clustering.py
|
Python
|
gpl-2.0
| 767
| 0.003911
|
from typing import List
from collections import defaultdict, Counter
class Solution:
def shortestCompletingWordV1(self, licensePlate: str, words: List[str]) -> str:
# build the signature of licensePlate
sig = defaultdict(int)
for c in licensePlate.upper():
if c.isalpha():
sig[c] += 1
# search for the min length word matching the signature
ans = ''
for word in words:
wsig = sig.copy()
for c in word:
cu = c.upper()
if cu not in wsig:
continue
wsig[cu] -= 1
if wsig[cu] == 0:
del wsig[cu]
if len(wsig) == 0 and (len(word) < len(ans) or ans == ''):
ans = word
break
return ans
def shortestCompletingWordV2(self, licensePlate: str, words: List[str]) -> str:
"""
        In the first line, filter out all non-letter characters from the plate and lower-case the rest.
        In the second line, build a Counter for each word and use the Counter intersection operator (&)
        to extract the count of letters shared between the word and the plate. If all the counts are
        equal, the word completes the plate; then pick the shortest word that satisfies this condition.
        This is slower than V1, though.
"""
pc = Counter(filter(lambda x : x.isalpha(), licensePlate.lower()))
return min([w for w in words if Counter(w) & pc == pc], key=len)
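# --- Editor's illustrative sketch (not part of the original solution) ---
# The Counter intersection operator (&) used in V2 keeps the minimum count per
# key, so (Counter(word) & plate_counter) == plate_counter holds exactly when
# the word contains every required letter at least as many times as the plate.
# The name `_plate` below is introduced only for this illustration.
_plate = Counter(c for c in "1s3 PSt".lower() if c.isalpha())  # {'s': 2, 'p': 1, 't': 1}
assert (Counter("steps") & _plate) == _plate  # "steps" completes the plate
assert (Counter("step") & _plate) != _plate   # "step" has only one 's'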
# TESTS
tests = [
{
'licensePlate': "1s3 PSt",
'words': ["step", "steps", "stripe", "stepple"],
'expected': "steps"
},
{
'licensePlate': "1s3 456",
'words': ["looks", "pest", "stew", "show"],
'expected': "pest"
},
{
'licensePlate': "AN87005",
'words': ["participant","individual","start","exist","above","already","easy","attack","player","important"],
'expected': "important"
}
]
for t in tests:
sol = Solution()
actual = sol.shortestCompletingWordV2(t['licensePlate'], t['words'])
print('Shorted completing word matching', t['licensePlate'], 'in', t['words'], '->', actual)
assert(actual == t['expected'])
assert(t['expected'] == sol.shortestCompletingWordV2(t['licensePlate'], t['words']))
|
l33tdaima/l33tdaima
|
p748e/shortest_completing_word.py
|
Python
|
mit
| 2,378
| 0.009672
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-28 15:02
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0106_auto_20170428_1119'),
]
operations = [
migrations.AddField(
model_name='learningunit',
name='learning_container',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.LearningContainer'),
),
]
|
uclouvain/OSIS-Louvain
|
base/migrations/0107_learningunit_learning_container.py
|
Python
|
agpl-3.0
| 577
| 0.001733
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_DAG_CODE
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
if TYPE_CHECKING:
import pathlib
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""Processes a DAG file. See SchedulerJob.process_file() for more details."""
@abstractmethod
def start(self) -> None:
"""Launch the process to process the file"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""Terminate (and then kill) the process launched to process the file"""
raise NotImplementedError()
@abstractmethod
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
""":return: the PID of the process launched to process the given file"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Optional[Tuple[int, int]]:
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file() if available. Otherwise, none
:rtype: Optional[Tuple[int, int]]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self) -> datetime:
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self) -> str:
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def waitable_handle(self):
"""A "waitable" handle that can be passed to ``multiprocessing.connection.wait()``"""
raise NotImplementedError()
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: ([str, List[CallbackRequest], Optional[List[str]], bool]) -> (
AbstractDagFileProcessorProcess
)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
    :type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[
[str, List[CallbackRequest], Optional[List[str]], bool], AbstractDagFileProcessorProcess
],
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
# getattr prevents error while pickling an instance method.
getattr(self, "_processor_factory"),
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
        Send an agent heartbeat signal to the manager, requesting that it runs one
        processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_callback_to_execute(self, request: CallbackRequest) -> None:
"""
Sends information about the callback to be executed by DagFileProcessor.
:param request: Callback request to be executed.
:type request: CallbackRequest
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
self._parent_signal_conn.send(request)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_sla_callback_request_to_execute(self, full_filepath: str, dag_id: str) -> None:
"""
Sends information about the SLA callback to be executed by DagFileProcessor.
:param full_filepath: DAG File path
:type full_filepath: str
:param dag_id: DAG ID
:type dag_id: str
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
request = SlaCallbackRequest(full_filepath=full_filepath, dag_id=dag_id)
self._parent_signal_conn.send(request)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
pass
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
        # to kill all sub-processes of it at the OS level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instance-attributes
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
        # Set the signal conn into non-blocking mode, so that attempting to
        # send when the buffer is full raises an error rather than hanging
        # forever while trying to send (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode:
os.set_blocking(self._signal_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if conf.get('core', 'sql_alchemy_conn').startswith('sqlite') and self._parallelism > 1:
            self.log.warning(
                "Because we cannot use more than 1 thread (parsing_processes = "
                "%d) when using sqlite, parallelism is set to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
        # How many seconds do we wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# Should store dag file source in a database?
self.store_dag_code = STORE_DAG_CODE
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, AbstractDagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
# pylint: disable=no-else-break
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
                # In "sync" mode we don't want to parse the DAGs until we
                # are told to (as that would open another connection to the
                # SQLite DB, which isn't a good practice).
                # This shouldn't happen, as in sync mode poll should block
                # forever. Let's be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._find_zombies() # pylint: disable=no-value-for-parameter
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
self._signal_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
                # It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (It only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors() # pylint: disable=no-value-for-parameter
except Exception: # noqa pylint: disable=broad-except
self.log.exception("Error removing old import errors")
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
if self.store_dag_code:
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
if runtime:
Stats.timing(f'dag_processing.last_duration.{file_name}', runtime)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=(last_finish_time - processor.start_time).total_seconds(),
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(AbstractDagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
            # Skip creating a duplicate processor, i.e. one with the same file path
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._processor_factory(
file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
files_with_mtime[file_path] = os.path.getmtime(file_path)
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# set of files. Since we set the seed, the sort order will remain same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated for too
        long, and update the current zombie list.
"""
now = timezone.utcnow()
if (
not self._last_zombie_query_time
or (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval
):
# to avoid circular imports
from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
zombies = (
session.query(TI, DM.fileloc)
.join(LJ, TI.job_id == LJ.id)
.join(DM, TI.dag_id == DM.dag_id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
)
.all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti, file_loc in zombies:
request = TaskCallbackRequest(
full_filepath=file_loc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Detected as zombie",
)
self.log.info("Detected zombie job: %s", request)
self._add_callback_to_queue(request)
Stats.incr('zombies_killed')
    def _kill_timed_out_processors(self):
        """Kill any file processors that time out, to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
        Kill all child processes on exit since we don't want to leave
        them orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
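# --- Standalone sketch (not part of Airflow itself) --------------------------
# The file-path queueing logic above filters with a list comprehension rather
# than a set difference so that the ordering produced by
# `[scheduler] file_parsing_sort_mode` is preserved. The small helper below
# demonstrates that order-preserving exclusion in isolation.
def _order_preserving_exclude(paths, excluded):
    """Return `paths` without the entries in `excluded`, keeping their order."""
    excluded = set(excluded)
    return [path for path in paths if path not in excluded]
assert _order_preserving_exclude(["b.py", "a.py", "c.py"], ["a.py"]) == ["b.py", "c.py"]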
|
nathanielvarona/airflow
|
airflow/utils/dag_processing.py
|
Python
|
apache-2.0
| 49,728
| 0.002413
|
import socket
import fcntl
import struct
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
>>> get_ip_address('lo')
'127.0.0.1'
>>> get_ip_address('eth0')
'38.113.228.130'
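# A defensive wrapper (a sketch, not part of the original recipe): the ioctl
# call raises IOError/OSError for interfaces that are down, unknown, or have
# no IPv4 address assigned, so callers may prefer a None fallback over a
# traceback.
def get_ip_address_or_none(ifname):
    try:
        return get_ip_address(ifname)
    except (IOError, OSError):
        return None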
|
ActiveState/code
|
recipes/Python/439094_get_IP_address_associated_network_interface/recipe-439094.py
|
Python
|
mit
| 357
| 0.011204
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Sebastien Blot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.manager
import libqtile.config
from libqtile import layout, bar, widget
from libqtile.config import Screen
LEFT_ALT = 'mod1'
WINDOWS = 'mod4'
FONTSIZE = 13
CHAM1 = '8AE234'
CHAM3 = '4E9A06'
GRAPH_KW = dict(line_width=1,
graph_color=CHAM3,
fill_color=CHAM3 + '.3',
border_width=1,
border_color=CHAM3
)
# screens look like this
# 600 300
# |-------------|-----|
# | 480| |580
# | A | B |
# |----------|--| |
# | 400|--|-----|
# | C | |400
# |----------| D |
# 500 |--------|
# 400
#
# Notice there is a hole in the middle
# also D goes down below the others
class FakeScreenConfig(object):
auto_fullscreen = True
main = None
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Max(),
layout.RatioTile(),
layout.Tile(),
]
floating_layout = libqtile.layout.floating.Floating()
keys = []
mouse = []
fake_screens = [
Screen(
bottom=bar.Bar(
[
widget.GroupBox(this_screen_border=CHAM3,
borderwidth=1,
fontsize=FONTSIZE,
padding=1, margin_x=1, margin_y=1),
widget.AGroupBox(),
widget.Prompt(),
widget.Sep(),
widget.WindowName(fontsize=FONTSIZE, margin_x=6),
widget.Sep(),
widget.CPUGraph(**GRAPH_KW),
widget.MemoryGraph(**GRAPH_KW),
widget.SwapGraph(foreground='20C020', **GRAPH_KW),
widget.Sep(),
widget.Systray(),
widget.Sep(),
widget.Clock(format='%H:%M:%S %d.%m.%Y',
fontsize=FONTSIZE, padding=6),
],
24,
background="#555555"
),
left=bar.Gap(16),
right=bar.Gap(20),
x=0, y=0, width=600, height=480
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
bottom=bar.Gap(24),
left=bar.Gap(12),
x=600, y=0, width=300, height=580
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
bottom=bar.Gap(16),
right=bar.Gap(40),
x=0, y=480, width=500, height=400
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
left=bar.Gap(20),
right=bar.Gap(24),
x=500, y=580, width=400, height=400
),
]
screens = fake_screens
xephyr_config = {
"xinerama": False,
"two_screens": False,
"width": 900,
"height": 980
}
fakescreen_config = pytest.mark.parametrize("xephyr, qtile", [(xephyr_config, FakeScreenConfig)], indirect=True)
@fakescreen_config
def test_basic(qtile):
qtile.testWindow("zero")
assert qtile.c.layout.info()["clients"] == ["zero"]
assert qtile.c.screen.info() == {
'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
qtile.c.to_screen(1)
qtile.testWindow("one")
assert qtile.c.layout.info()["clients"] == ["one"]
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
qtile.c.to_screen(2)
qtile.testXeyes()
assert qtile.c.screen.info() == {
'y': 480, 'x': 0, 'index': 2, 'width': 500, 'height': 400}
qtile.c.to_screen(3)
qtile.testXclock()
assert qtile.c.screen.info() == {
'y': 580, 'x': 500, 'index': 3, 'width': 400, 'height': 400}
@fakescreen_config
def test_gaps(qtile):
g = qtile.c.screens()[0]["gaps"]
assert g["bottom"] == (0, 456, 600, 24)
assert g["left"] == (0, 0, 16, 456)
assert g["right"] == (580, 0, 20, 456)
g = qtile.c.screens()[1]["gaps"]
assert g["top"] == (600, 0, 300, 30)
assert g["bottom"] == (600, 556, 300, 24)
assert g["left"] == (600, 30, 12, 526)
g = qtile.c.screens()[2]["gaps"]
assert g["top"] == (0, 480, 500, 30)
assert g["bottom"] == (0, 864, 500, 16)
assert g["right"] == (460, 510, 40, 354)
g = qtile.c.screens()[3]["gaps"]
assert g["top"] == (500, 580, 400, 30)
assert g["left"] == (500, 610, 20, 370)
assert g["right"] == (876, 610, 24, 370)
@fakescreen_config
def test_maximize_with_move_to_screen(qtile):
"""Ensure that maximize respects bars"""
qtile.testXclock()
qtile.c.window.toggle_maximize()
assert qtile.c.window.info()['width'] == 564
assert qtile.c.window.info()['height'] == 456
assert qtile.c.window.info()['x'] == 16
assert qtile.c.window.info()['y'] == 0
assert qtile.c.window.info()['group'] == 'a'
# go to second screen
qtile.c.to_screen(1)
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
assert qtile.c.group.info()['name'] == 'b'
qtile.c.group['a'].toscreen()
assert qtile.c.window.info()['width'] == 288
assert qtile.c.window.info()['height'] == 526
assert qtile.c.window.info()['x'] == 612
assert qtile.c.window.info()['y'] == 30
assert qtile.c.window.info()['group'] == 'a'
@fakescreen_config
def test_float_first_on_second_screen(qtile):
qtile.c.to_screen(1)
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
qtile.testXclock()
# I don't know where y=30, x=12 comes from...
assert qtile.c.window.info()['float_info'] == {
'y': 30, 'x': 12, 'width': 164, 'height': 164
}
qtile.c.window.toggle_floating()
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == 612
assert qtile.c.window.info()['y'] == 30
assert qtile.c.window.info()['group'] == 'b'
assert qtile.c.window.info()['float_info'] == {
'y': 30, 'x': 12, 'width': 164, 'height': 164
}
@fakescreen_config
def test_float_change_screens(qtile):
# add some eyes, and float clock
qtile.testXeyes()
qtile.testXclock()
qtile.c.window.toggle_floating()
assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
assert qtile.c.group.info()['floating_info']['clients'] == ['xclock']
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
# 16 is given by the left gap width
assert qtile.c.window.info()['x'] == 16
assert qtile.c.window.info()['y'] == 0
assert qtile.c.window.info()['group'] == 'a'
# put on group b
assert qtile.c.screen.info() == {
'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
assert qtile.c.group.info()['name'] == 'a'
qtile.c.to_screen(1)
assert qtile.c.group.info()['name'] == 'b'
assert qtile.c.screen.info() == {
'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
qtile.c.group['a'].toscreen()
assert qtile.c.group.info()['name'] == 'a'
assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
assert qtile.c.window.info()['name'] == 'xclock'
# width/height unchanged
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
# x is shifted by 600, y is shifted by 0
assert qtile.c.window.info()['x'] == 616
assert qtile.c.window.info()['y'] == 0
assert qtile.c.window.info()['group'] == 'a'
assert qtile.c.group.info()['floating_info']['clients'] == ['xclock']
# move to screen 3
qtile.c.to_screen(2)
assert qtile.c.screen.info() == {
'y': 480, 'x': 0, 'index': 2, 'width': 500, 'height': 400}
assert qtile.c.group.info()['name'] == 'c'
qtile.c.group['a'].toscreen()
assert qtile.c.group.info()['name'] == 'a'
assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
assert qtile.c.window.info()['name'] == 'xclock'
# width/height unchanged
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
# x is shifted by 0, y is shifted by 480
assert qtile.c.window.info()['x'] == 16
assert qtile.c.window.info()['y'] == 480
# now screen 4 for fun
qtile.c.to_screen(3)
assert qtile.c.screen.info() == {
'y': 580, 'x': 500, 'index': 3, 'width': 400, 'height': 400}
assert qtile.c.group.info()['name'] == 'd'
qtile.c.group['a'].toscreen()
assert qtile.c.group.info()['name'] == 'a'
assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
assert qtile.c.window.info()['name'] == 'xclock'
# width/height unchanged
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
# x is shifted by 500, y is shifted by 580
assert qtile.c.window.info()['x'] == 516
assert qtile.c.window.info()['y'] == 580
# and back to one
qtile.c.to_screen(0)
assert qtile.c.screen.info() == {
'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
assert qtile.c.group.info()['name'] == 'b'
qtile.c.group['a'].toscreen()
assert qtile.c.group.info()['name'] == 'a'
assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
assert qtile.c.window.info()['name'] == 'xclock'
# back to the original location
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == 16
assert qtile.c.window.info()['y'] == 0
@fakescreen_config
def test_float_outside_edges(qtile):
qtile.testXclock()
qtile.c.window.toggle_floating()
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
# 16 is given by the left gap width
assert qtile.c.window.info()['x'] == 16
assert qtile.c.window.info()['y'] == 0
# empty because window is floating
assert qtile.c.layout.info() == {
'clients': [], 'group': 'a', 'name': 'max'}
# move left, but some still on screen 0
qtile.c.window.move_floating(-30, 20, 42, 42)
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == -14
assert qtile.c.window.info()['y'] == 20
assert qtile.c.window.info()['group'] == 'a'
# move up, but some still on screen 0
qtile.c.window.set_position_floating(-10, -20, 42, 42)
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == -10
assert qtile.c.window.info()['y'] == -20
assert qtile.c.window.info()['group'] == 'a'
# move above a
qtile.c.window.set_position_floating(50, -20, 42, 42)
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == 50
assert qtile.c.window.info()['y'] == -20
assert qtile.c.window.info()['group'] == 'a'
# move down so still left, but next to screen c
qtile.c.window.set_position_floating(-10, 520, 42, 42)
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == -10
assert qtile.c.window.info()['y'] == 520
assert qtile.c.window.info()['group'] == 'c'
# move above b
qtile.c.window.set_position_floating(700, -10, 42, 42)
assert qtile.c.window.info()['width'] == 164
assert qtile.c.window.info()['height'] == 164
assert qtile.c.window.info()['x'] == 700
assert qtile.c.window.info()['y'] == -10
assert qtile.c.window.info()['group'] == 'b'
@fakescreen_config
def test_hammer_tile(qtile):
# change to tile layout
qtile.c.next_layout()
qtile.c.next_layout()
for i in range(7):
qtile.testXclock()
for i in range(30):
old_group = (i + 1) % 4
if old_group == 0:
name = 'a'
elif old_group == 1:
name = 'b'
elif old_group == 2:
name = 'c'
elif old_group == 3:
name = 'd'
qtile.c.to_screen((i + 1) % 4)
qtile.c.group['a'].toscreen()
assert qtile.c.group['a'].info()['windows'] == [
'xclock', 'xclock', 'xclock', 'xclock',
'xclock', 'xclock', 'xclock']
@fakescreen_config
def test_hammer_ratio_tile(qtile):
# change to ratio tile layout
qtile.c.next_layout()
for i in range(7):
qtile.testXclock()
for i in range(30):
old_group = (i + 1) % 4
if old_group == 0:
name = 'a'
elif old_group == 1:
name = 'b'
elif old_group == 2:
name = 'c'
elif old_group == 3:
name = 'd'
qtile.c.to_screen((i + 1) % 4)
qtile.c.group['a'].toscreen()
assert qtile.c.group['a'].info()['windows'] == [
'xclock', 'xclock', 'xclock', 'xclock',
'xclock', 'xclock', 'xclock']
@fakescreen_config
def test_ratio_to_fourth_screen(qtile):
# change to ratio tile layout
qtile.c.next_layout()
for i in range(7):
qtile.testXclock()
qtile.c.to_screen(1)
qtile.c.group['a'].toscreen()
assert qtile.c.group['a'].info()['windows'] == [
'xclock', 'xclock', 'xclock', 'xclock',
'xclock', 'xclock', 'xclock']
# now move to 4th, fails...
qtile.c.to_screen(3)
qtile.c.group['a'].toscreen()
assert qtile.c.group['a'].info()['windows'] == [
'xclock', 'xclock', 'xclock', 'xclock',
'xclock', 'xclock', 'xclock']
|
de-vri-es/qtile
|
test/test_fakescreen.py
|
Python
|
mit
| 15,532
| 0.000064
|
# Copyright (C) 2015 https://github.com/thof
#
# This file is part of decapromolist.
#
# decapromolist is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import urllib2
from lxml import html
from utils import Utils
class GetSubcategories:
def getCategories2(self):
dataCat = []
headers = {'User-agent': 'Mozilla/5.0'}
req = urllib2.Request('https://www.decathlon.pl/pl/menu-load-sub-categories?categoryId=394904', None, headers)
req = urllib2.urlopen(req)
content = req.read().decode('UTF-8')
response = html.fromstring(content)
for cat in response.xpath('//a'):
url = cat.attrib['href']
start = url.find('-')+1
subId = url[start:url.find('-', start)]
# subId = cat.attrib['data-secondary-category-id']
subName = cat.text
data = {'subId': int(subId), 'url': Utils.getConfig()['siteURL'] + url, 'subName': subName}
dataCat.append(data)
return dataCat
def getCategories(self):
categories = []
catUrl = []
content = urllib2.urlopen(Utils.getConfig()['siteURL']).read()
response = html.fromstring(content)
for cat in response.xpath('//li/@primarycategoryid'):
if cat not in categories:
categories.append(cat)
for cat in categories:
url = "{}/pl/getSubNavigationMenu?primaryCategoryId={}".format(Utils.getConfig()['siteURL'], cat)
catUrl.append(url)
return catUrl
def getSubcategories(self, catUrl):
dataCat = []
for url in catUrl:
content = urllib2.urlopen(url).read()
jsonData = json.loads(content)
for cat in jsonData['category']['categories']:
for subcat in cat['categories']:
data = {'id': int(cat['id']), 'name': cat['label'], 'subId': int(subcat['id']),
'subName': subcat['label'], 'url': Utils.getConfig()['siteURL'] + subcat['uri']}
dataCat.append(data)
return dataCat
@staticmethod
def getThirdLevelCat(catUrl):
dataCat = []
for url in catUrl:
content = urllib2.urlopen(url).read()
jsonData = json.loads(content)
for cat in jsonData['category']['categories']:
data = {'id': int(jsonData['category']['id']), 'name': jsonData['category']['label'],
'subId': int(cat['id']), 'subName': cat['label']}
if cat['uri'].find(Utils.getConfig()['siteURL']) == -1:
data['url'] = Utils.getConfig()['siteURL'] + cat['uri']
else:
data['url'] = cat['uri']
data['subId'] = int(cat['uri'][cat['uri'].find("C-")+2:cat['uri'].find("-", cat['uri'].find("C-")+2)])
dataCat.append(data)
return dataCat
def saveSubcategories(self, dataCat):
Utils.renameFile(Utils.getConfig()['subcatFile'])
Utils.saveJsonFile(Utils.getConfig()['subcatFile'], dataCat)
if __name__ == "__main__":
proc = GetSubcategories()
# catUrl = proc.getCategories()
# dataCat = proc.getSubcategories(catUrl)
dataCat = proc.getCategories2()
proc.saveSubcategories(dataCat)
print "Done"
|
thof/decapromolist
|
src/get_subcategories.py
|
Python
|
gpl-3.0
| 3,880
| 0.001804
|
from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("SHOW TABLES")
return [row[0] for row in cursor.fetchall()]
|
ikeikeikeike/django-impala-backend
|
impala/introspection.py
|
Python
|
mit
| 310
| 0
|
"""
2x2 plotting analysis plugin for 2 datasets
| ------------- | ------------- |
| contours from | pcolor from |
| both datasets | dataset 1 |
| ------------- | ------------- |
| pcolor diff | pcolor from |
| both datasets | dataset 2 |
| ------------- | ------------- |
colorbar location = bottom
"""
class SampleException(Exception):
pass
_NDATASETS = 2
_NPANNELS = 4
def run(cases, compares, domain, **kwargs):
"""plugin run function"""
case_names = cases.keys()
compare_names = compares.keys()
dsets = cases.values()+compares.values()
if len(dsets) != _NDATASETS:
raise SampleException('Incorrect number of datasets provided')
# get_monthly_means(*dsets)
# get_seasonal_means()
# get_annual_means()
# get_full_means()
return
def __plot():
return
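# Usage sketch (hedged -- the values below are placeholders, not real rasmlib
# dataset objects): run() currently only validates that exactly two datasets
# are supplied in total, e.g.
#   run({'case': case_ds}, {'control': control_ds}, domain)   # passes the check
#   run({'case': case_ds}, {}, domain)                         # raises SampleException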
|
jhamman/rasmlib
|
rasmlib/analysis/plugins/sample.py
|
Python
|
gpl-3.0
| 865
| 0.001156
|
# import logging
from ast.visit import visit as v
from ast.node import Node
from ast.body.methoddeclaration import MethodDeclaration
from ast.stmt.minrepeatstmt import MinrepeatStmt
class Desugar(object):
def __init__(self):
self._cur_mtd = None
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
@v.when(Node)
def visit(self, node):
for c in node.childrenNodes: c.accept(self)
@v.when(MethodDeclaration)
def visit(self, node):
self._cur_mtd = node
for c in node.childrenNodes: c.accept(self)
@v.when(MinrepeatStmt)
def visit(self, node):
raise NotImplementedError
# Old impl
# @v.when(Statement)
# def visit(self, node):
# if node.kind == C.S.MINREPEAT:
# b = '\n'.join(map(str, node.b))
# body = u""
# for i in xrange(9): # TODO: parameterize
# body += u"""
# if (??) {{ {} }}
# """.format(b)
# logging.debug(
# "desugaring minrepeat @ {}".format(self._cur_mtd.name))
# return to_statements(self._cur_mtd, body)
# return [node]
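# Sketch of the intended rewrite (inferred from the commented-out old
# implementation above, not from a finished MinrepeatStmt visitor): a
# statement such as
#   minrepeat { foo(); }
# would be expanded into nine hole-guarded copies of its body,
#   if (??) { foo(); }
#   if (??) { foo(); }
#   ...
# leaving the synthesizer to decide how many repetitions are actually needed.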
|
plum-umd/java-sketch
|
java_sk/rewrite/desugar.py
|
Python
|
mit
| 1,242
| 0.002415
|
# -*- coding: utf-8 -*-
# Simple script to test sending UTF8 text with the GrowlNotifier class
import logging
logging.basicConfig(level=logging.DEBUG)
from gntp.notifier import GrowlNotifier
import platform
growl = GrowlNotifier(notifications=['Testing'],password='password',hostname='ayu')
growl.subscribe(platform.node(),platform.node(),12345)
|
kfdm/gntp
|
test/subscribe.py
|
Python
|
mit
| 347
| 0.020173
|
from __future__ import absolute_import
from unittest import TestCase, skip
from ..goodman_ccd import get_args, MainApp
class MainAppTest(TestCase):
def setUp(self):
self.main_app = MainApp()
def test___call__(self):
self.assertRaises(SystemExit, self.main_app)
def test___call___show_version(self):
arguments = ['--version']
args = get_args(arguments=arguments)
self.assertRaises(SystemExit, self.main_app, args)
|
soar-telescope/goodman
|
goodman_pipeline/images/tests/test_goodman_ccd.py
|
Python
|
bsd-3-clause
| 471
| 0
|
#!flask/bin/python
from gb import app
app.run(debug=True)
|
mbiokyle29/geno-browser
|
runserver.py
|
Python
|
mit
| 58
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema from a remote host
description:
- Add or remove PostgreSQL schema from a remote host.
version_added: "2.3"
options:
name:
description:
- Name of the schema to add or remove.
required: true
database:
description:
- Name of the database to connect to.
default: postgres
login_user:
description:
- The username used to authenticate with.
login_password:
description:
- The password used to authenticate with.
login_host:
description:
- Host running the database.
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
owner:
description:
- Name of the role to set as owner of the schema.
port:
description:
- Database port to connect to.
default: 5432
session_role:
version_added: "2.8"
description: |
Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
state:
description:
- The schema state.
default: present
choices: [ "present", "absent" ]
cascade_drop:
description:
- Drop schema with CASCADE to remove child objects
type: bool
default: false
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
default: prefer
choices: ["disable", "allow", "prefer", "require", "verify-ca", "verify-full"]
version_added: '2.8'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
version_added: '2.8'
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before
using this module.
requirements: [ psycopg2 ]
author:
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
- Thomas O'Donnell (@andytom)
'''
EXAMPLES = '''
# Create a new schema with name "acme"
- postgresql_schema:
name: acme
# Create a new schema "acme" with a user "bob" who will own it
- postgresql_schema:
name: acme
owner: bob
# Drop schema "acme" with cascade
- postgresql_schema:
name: acme
    state: absent
cascade_drop: yes
'''
RETURN = '''
schema:
description: Name of the schema
returned: success, changed
type: str
sample: "acme"
'''
import traceback
PSYCOPG2_IMP_ERR = None
try:
import psycopg2
import psycopg2.extras
except ImportError:
PSYCOPG2_IMP_ERR = traceback.format_exc()
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
query = "ALTER SCHEMA %s OWNER TO %s" % (
pg_quote_identifier(schema, 'schema'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
return True
def get_schema_info(cursor, schema):
query = """
SELECT schema_owner AS owner
FROM information_schema.schemata
WHERE schema_name = %(schema)s
"""
cursor.execute(query, {'schema': schema})
return cursor.fetchone()
def schema_exists(cursor, schema):
query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s"
cursor.execute(query, {'schema': schema})
return cursor.rowcount == 1
def schema_delete(cursor, schema, cascade):
if schema_exists(cursor, schema):
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
if cascade:
query += " CASCADE"
cursor.execute(query)
return True
else:
return False
def schema_create(cursor, schema, owner):
if not schema_exists(cursor, schema):
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
if owner:
query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
query = ' '.join(query_fragments)
cursor.execute(query)
return True
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return set_owner(cursor, schema, owner)
else:
return False
def schema_matches(cursor, schema, owner):
if not schema_exists(cursor, schema):
return False
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return False
else:
return True
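# Illustration only (assuming pg_quote_identifier simply double-quotes plain
# identifiers): schema_create(cursor, 'acme', 'bob') executes roughly
#   CREATE SCHEMA "acme" AUTHORIZATION "bob"
# and schema_delete(cursor, 'acme', cascade=True) executes roughly
#   DROP SCHEMA "acme" CASCADE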
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"),
schema=dict(required=True, aliases=['name']),
owner=dict(default=""),
database=dict(default="postgres"),
cascade_drop=dict(type="bool", default=False),
state=dict(default="present", choices=["absent", "present"]),
ssl_mode=dict(default='prefer', choices=[
'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']),
ssl_rootcert=dict(default=None),
session_role=dict(),
),
supports_check_mode=True
)
if not postgresqldb_found:
module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
schema = module.params["schema"]
owner = module.params["owner"]
state = module.params["state"]
sslrootcert = module.params["ssl_rootcert"]
cascade_drop = module.params["cascade_drop"]
session_role = module.params["session_role"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"database": "database",
"ssl_mode": "sslmode",
"ssl_rootcert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
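    # For example, with the module defaults (login_host="", login_password="",
    # login_user="postgres", port="5432", database="postgres",
    # ssl_mode="prefer") kw ends up as
    #   {"user": "postgres", "port": "5432", "database": "postgres",
    #    "sslmode": "prefer"}
    # -- empty strings and None are dropped so psycopg2 applies its own
    # defaults for them.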
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
module.fail_json(
msg='psycopg2 must be at least 2.4.3 in order to user the ssl_rootcert parameter')
try:
db_connection = psycopg2.connect(**kw)
        # Enable autocommit so the schema changes take effect immediately
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(
msg='Postgresql server must be at least version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
if session_role:
try:
cursor.execute('SET ROLE %s' % pg_quote_identifier(session_role, 'role'))
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = not schema_exists(cursor, schema)
elif state == "present":
changed = not schema_matches(cursor, schema, owner)
module.exit_json(changed=changed, schema=schema)
if state == "absent":
try:
changed = schema_delete(cursor, schema, cascade_drop)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = schema_create(cursor, schema, owner)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, schema=schema)
if __name__ == '__main__':
main()
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/database/postgresql/postgresql_schema.py
|
Python
|
gpl-3.0
| 10,842
| 0.002583
|
#####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
import json
import os
import sys
from six import PY3
from twisted.internet.selectreactor import SelectReactor
from twisted.internet.task import LoopingCall
from crossbar.controller import cli
from .test_cli import CLITestBase
# Set this to `True` to print the stdout/stderr of the spawned Crossbar nodes
DEBUG = False
def make_lc(self, reactor, func):
if DEBUG:
self.stdout_length = 0
self.stderr_length = 0
def _(lc, reactor):
if DEBUG:
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
if self.stdout.getvalue()[self.stdout_length:]:
print(self.stdout.getvalue()[self.stdout_length:],
file=sys.__stdout__)
if self.stderr.getvalue()[self.stderr_length:]:
print(self.stderr.getvalue()[self.stderr_length:],
file=sys.__stderr__)
self.stdout_length = len(stdout)
self.stderr_length = len(stderr)
return func(lc, reactor)
lc = LoopingCall(_)
lc.a = (lc, reactor)
lc.clock = reactor
lc.start(0.1)
return lc
class ContainerRunningTests(CLITestBase):
def setUp(self):
CLITestBase.setUp(self)
# Set up the configuration directories
self.cbdir = os.path.abspath(self.mktemp())
os.mkdir(self.cbdir)
self.config = os.path.abspath(os.path.join(self.cbdir, "config.json"))
self.code_location = os.path.abspath(self.mktemp())
os.mkdir(self.code_location)
def _start_run(self, config, app, stdout_expected, stderr_expected,
end_on):
with open(self.config, "wb") as f:
f.write(json.dumps(config, ensure_ascii=False).encode('utf8'))
with open(self.code_location + "/myapp.py", "w") as f:
f.write(app)
reactor = SelectReactor()
make_lc(self, reactor, end_on)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
out = self.stdout.getvalue()
err = self.stderr.getvalue()
for i in stdout_expected:
if i not in out:
self.fail(u"Error: '{}' not in:\n{}".format(i, out))
for i in stderr_expected:
if i not in err:
self.fail(u"Error: '{}' not in:\n{}".format(i, err))
def test_start_run(self):
"""
A basic start, that enters the reactor.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_run_guest(self):
"""
A basic start of a guest.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "guest",
"executable": sys.executable,
"arguments": [os.path.join(self.code_location, "myapp.py")]
}
]
}
myapp = """#!/usr/bin/env python
print("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_utf8_logging(self):
"""
Logging things that are UTF8 but not Unicode should work fine.
"""
expected_stdout = [
"Entering reactor event loop", u"\u2603"
]
expected_stderr = []
def _check(lc, reactor):
if u"\u2603" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info(u"\\u2603")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_run_exception_utf8(self):
"""
Raising an ApplicationError with Unicode will raise that error through
to the caller.
"""
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """from __future__ import absolute_import, print_function
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
from twisted.internet.defer import inlineCallbacks
class MySession(ApplicationSession):
log = Logger()
@inlineCallbacks
def onJoin(self, details):
def _err():
raise ApplicationError(u"com.example.error.form_error", u"\\u2603")
e = yield self.register(_err, u'com.example.err')
try:
yield self.call(u'com.example.err')
except ApplicationError as e:
assert e.args[0] == u"\\u2603"
print("Caught error:", e)
except:
print('other err:', e)
self.log.info("Loaded the component")
"""
if PY3:
expected_stdout = ["Loaded the component", "\u2603", "Caught error:"]
else:
expected_stdout = ["Loaded the component", "\\u2603", "Caught error:"]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure1(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
expected_stdout = []
expected_stderr = ["No module named"]
def _check(_1, _2):
pass
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure2(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession2",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
def _check(_1, _2):
pass
expected_stdout = []
if sys.version_info >= (3, 5):
expected_stderr = ["module 'myapp' has no attribute 'MySession2'"]
else:
expected_stderr = ["'module' object has no attribute 'MySession2'"]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure3(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
a = 1 / 0
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = ["Component instantiation failed"]
if PY3:
expected_stderr.append("division by zero")
else:
expected_stderr.append("integer division")
expected_stderr.append("by zero")
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure4(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
a = 1 / 0 # trigger exception
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = ["Fatal error in component", "While firing onJoin"]
if PY3:
expected_stderr.append("division by zero")
else:
expected_stderr.append("integer division")
expected_stderr.append("by zero")
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure5(self):
config = {
"controller": {
},
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.leave()
def onLeave(self, details):
self.log.info("Session ended: {details}", details=details)
self.disconnect()
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = [
"Component 'component1' failed to start; shutting down node."
]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure6(self):
config = {
"controller": {
},
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.util import sleep
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.log.info("Sleeping a couple of secs and then shutting down ..")
yield sleep(2)
self.leave()
def onLeave(self, details):
self.log.info("Session ended: {details}", details=details)
self.disconnect()
"""
def _check(_1, _2):
pass
expected_stdout = [
"Session ended: CloseDetails",
"Sleeping a couple of secs and then shutting down",
"Container is hosting no more components: shutting down"
]
expected_stderr = []
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure7(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8090
},
"url": "ws://127.0.0.1:8090/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.leave()
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = [
("Could not connect container component to router - transport "
"establishment failed")
]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
class InitTests(CLITestBase):
def test_hello(self):
def _check(lc, reactor):
if "published to 'oncounter'" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
appdir = self.mktemp()
cbdir = os.path.join(appdir, ".crossbar")
reactor = SelectReactor()
cli.run("crossbar",
["init",
"--appdir={}".format(appdir),
"--template=hello:python"],
reactor=reactor)
self.assertIn("Application template initialized",
self.stdout.getvalue())
reactor = SelectReactor()
make_lc(self, reactor, _check)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(cbdir.path),
"--logformat=syslogd"],
reactor=reactor)
stdout_expected = ["published to 'oncounter'"]
for i in stdout_expected:
self.assertIn(i, self.stdout.getvalue())
if not os.environ.get("CB_FULLTESTS"):
del ContainerRunningTests
del InitTests
|
NinjaMSP/crossbar
|
crossbar/controller/test/test_run.py
|
Python
|
agpl-3.0
| 42,995
| 0.000442
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import inspect
import pecan
import wsme
from wsme import types as wtypes
from pecan import rest
class APIBase(wtypes.Base):
created_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is created"""
updated_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is updated"""
def as_dict(self):
"""Render this object as a dict of its fields."""
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k) and getattr(self, k) != wsme.Unset)
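    # Example (hypothetical values): a subclass with fields = ['name', 'uuid']
    # whose uuid attribute is still wsme.Unset renders as {'name': 'accel-1'};
    # unset attributes are simply omitted from the resulting dict.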
class CyborgController(rest.RestController):
def _handle_patch(self, method, remainder, request=None):
"""Routes ``PATCH`` _custom_actions."""
# route to a patch_all or get if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('patch_all', 'patch')
if controller:
return controller, []
pecan.abort(404)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# route to custom_action
match = self._handle_custom_action(method, remainder, request)
if match:
return match
# finally, check for the regular patch_one/patch requests
controller = self._find_controller('patch_one', 'patch')
if controller:
return controller, remainder
pecan.abort(405)
|
openstack/nomad
|
cyborg/api/controllers/base.py
|
Python
|
apache-2.0
| 2,260
| 0
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/chassis/shared_hutt_medium_s02.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/space/chassis/shared_hutt_medium_s02.py
|
Python
|
mit
| 458
| 0.048035
|
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
from future.utils import iterkeys
from future import standard_library
standard_library.install_aliases()
import logging
import os
import requests
import threading
import traceback
from subprocess import PIPE
from ycmd import utils, responses
from ycmd.completers.completer import Completer
from ycmd.completers.completer_utils import GetFileContents
_logger = logging.getLogger( __name__ )
PATH_TO_TERN_BINARY = os.path.abspath(
os.path.join(
os.path.dirname( __file__ ),
'..',
'..',
'..',
'third_party',
'tern_runtime',
'node_modules',
'tern',
'bin',
'tern' ) )
PATH_TO_NODE = utils.PathToFirstExistingExecutable( [ 'node' ] )
# host name/address on which the tern server should listen
# note: we use 127.0.0.1 rather than localhost because on some platforms
# localhost might not be correctly configured as an alias for the loopback
# address. (ahem: Windows)
SERVER_HOST = '127.0.0.1'
def ShouldEnableTernCompleter():
"""Returns whether or not the tern completer is 'installed'. That is whether
or not the tern submodule has a 'node_modules' directory. This is pretty much
the only way we can know if the user added '--tern-completer' on
install or manually ran 'npm install' in the tern submodule directory."""
if not PATH_TO_NODE:
_logger.warning( 'Not using Tern completer: unable to find node' )
return False
_logger.info( 'Using node binary from: ' + PATH_TO_NODE )
installed = os.path.exists( PATH_TO_TERN_BINARY )
if not installed:
_logger.info( 'Not using Tern completer: not installed at ' +
PATH_TO_TERN_BINARY )
return False
return True
def GlobalConfigExists( tern_config ):
"""Returns whether or not the global config file with the supplied path
exists. This method primarily exists to allow testability and simply returns
whether the supplied file exists."""
return os.path.exists( tern_config )
def FindTernProjectFile( starting_directory ):
for folder in utils.PathsToAllParentFolders( starting_directory ):
tern_project = os.path.join( folder, '.tern-project' )
if os.path.exists( tern_project ):
return tern_project
# As described here: http://ternjs.net/doc/manual.html#server a global
# .tern-config file is also supported for the Tern server. This can provide
# meaningful defaults (for libs, and possibly also for require paths), so
# don't warn if we find one. The point is that if the user has a .tern-config
# set up, then she has deliberately done so and a ycmd warning is unlikely
# to be anything other than annoying.
tern_config = os.path.expanduser( '~/.tern-config' )
if GlobalConfigExists( tern_config ):
return tern_config
return None
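# Resolution order of FindTernProjectFile, illustrated with hypothetical paths:
# starting from /home/user/project/src the walk checks
#   /home/user/project/src/.tern-project,
#   /home/user/project/.tern-project,
#   ... and so on up to the filesystem root,
# and only then falls back to the global ~/.tern-config; if neither exists the
# function returns None.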
class TernCompleter( Completer ):
"""Completer for JavaScript using tern.js: http://ternjs.net.
The protocol is defined here: http://ternjs.net/doc/manual.html#protocol"""
def __init__( self, user_options ):
super( TernCompleter, self ).__init__( user_options )
self._server_keep_logfiles = user_options[ 'server_keep_logfiles' ]
# Used to ensure that starting/stopping of the server is synchronised
self._server_state_mutex = threading.RLock()
self._do_tern_project_check = False
with self._server_state_mutex:
self._server_stdout = None
self._server_stderr = None
self._Reset()
self._StartServer()
def _WarnIfMissingTernProject( self ):
# The Tern server will operate without a .tern-project file. However, it
# does not operate optimally, and will likely lead to issues reported that
# JavaScript completion is not working properly. So we raise a warning if we
# aren't able to detect some semblance of manual Tern configuration.
# We do this check after the server has started because the server does
# have nonzero use without a project file, however limited. We only do this
    # check once, though, because the server can only handle one project at a
    # time. This doesn't catch opening a file that is not part of the project
# or any of those things, but we can only do so much. We'd like to enhance
# ycmd to handle this better, but that is a FIXME for now.
if self._ServerIsRunning() and self._do_tern_project_check:
self._do_tern_project_check = False
tern_project = FindTernProjectFile( os.getcwd() )
if not tern_project:
_logger.warning( 'No .tern-project file detected: ' + os.getcwd() )
raise RuntimeError( 'Warning: Unable to detect a .tern-project file '
'in the hierarchy before ' + os.getcwd() +
' and no global .tern-config file was found. '
'This is required for accurate JavaScript '
'completion. Please see the User Guide for '
'details.' )
else:
_logger.info( 'Detected .tern-project file at: ' + tern_project )
def _GetServerAddress( self ):
return 'http://' + SERVER_HOST + ':' + str( self._server_port )
def ComputeCandidatesInner( self, request_data ):
query = {
'type': 'completions',
'types': True,
'docs': True,
'filter': False,
'caseInsensitive': True,
'guess': False,
'sort': False,
'includeKeywords': False,
'expandWordForward': False,
'omitObjectPrototype': False
}
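    # Note: the flags above deliberately request raw, unfiltered and unsorted
    # candidates (with type and documentation info attached); ycmd is assumed
    # to do its own filtering and sorting of the completion list, so there is
    # no point having Tern repeat that work.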
completions = self._GetResponse( query,
request_data[ 'start_codepoint' ],
request_data ).get( 'completions', [] )
def BuildDoc( completion ):
doc = completion.get( 'type', 'Unknown type' )
if 'doc' in completion:
doc = doc + '\n' + completion[ 'doc' ]
return doc
return [ responses.BuildCompletionData( completion[ 'name' ],
completion.get( 'type', '?' ),
BuildDoc( completion ) )
for completion in completions ]
def OnFileReadyToParse( self, request_data ):
self._WarnIfMissingTernProject()
# Keep tern server up to date with the file data. We do this by sending an
# empty request just containing the file data
try:
self._PostRequest( {}, request_data )
    except Exception:
      # The server might not be ready yet, or it might not be running at all.
      # In any case, just ignore this; we'll hopefully get another parse
      # request soon.
      pass
def GetSubcommandsMap( self ):
return {
'RestartServer': ( lambda self, request_data, args:
self._RestartServer() ),
'StopServer': ( lambda self, request_data, args:
self._StopServer() ),
'GoToDefinition': ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoTo': ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToReferences': ( lambda self, request_data, args:
self._GoToReferences( request_data ) ),
'GetType': ( lambda self, request_data, args:
                   self._GetType( request_data ) ),
'GetDoc': ( lambda self, request_data, args:
                  self._GetDoc( request_data ) ),
'RefactorRename': ( lambda self, request_data, args:
self._Rename( request_data, args ) ),
}
def SupportedFiletypes( self ):
return [ 'javascript' ]
def DebugInfo( self, request_data ):
with self._server_state_mutex:
if self._ServerIsRunning():
return ( 'JavaScript completer debug information:\n'
' Tern running at: {0}\n'
' Tern process ID: {1}\n'
' Tern executable: {2}\n'
' Tern logfiles:\n'
' {3}\n'
' {4}'.format( self._GetServerAddress(),
self._server_handle.pid,
PATH_TO_TERN_BINARY,
self._server_stdout,
self._server_stderr ) )
if self._server_stdout and self._server_stderr:
return ( 'JavaScript completer debug information:\n'
' Tern no longer running\n'
' Tern executable: {0}\n'
' Tern logfiles:\n'
' {1}\n'
' {2}\n'.format( PATH_TO_TERN_BINARY,
self._server_stdout,
self._server_stderr ) )
return ( 'JavaScript completer debug information:\n'
' Tern is not running\n'
' Tern executable: {0}'.format( PATH_TO_TERN_BINARY ) )
def Shutdown( self ):
_logger.debug( "Shutting down Tern server" )
self._StopServer()
def ServerIsHealthy( self, request_data = {} ):
if not self._ServerIsRunning():
return False
try:
target = self._GetServerAddress() + '/ping'
response = requests.get( target )
return response.status_code == requests.codes.ok
except requests.ConnectionError:
return False
def _Reset( self ):
with self._server_state_mutex:
if not self._server_keep_logfiles:
if self._server_stdout:
utils.RemoveIfExists( self._server_stdout )
self._server_stdout = None
if self._server_stderr:
utils.RemoveIfExists( self._server_stderr )
self._server_stderr = None
self._server_handle = None
self._server_port = 0
def _PostRequest( self, request, request_data ):
"""Send a raw request with the supplied request block, and
    return the server's response. If the server is not running, a ValueError is
    raised.
This method is useful where the query block is not supplied, i.e. where just
the files are being updated.
The request block should contain the optional query block only. The file
data are added automatically."""
if not self._ServerIsRunning():
raise ValueError( 'Not connected to server' )
def MakeIncompleteFile( name, file_data ):
return {
'type': 'full',
'name': name,
'text': file_data[ 'contents' ],
}
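    # Illustrative (made-up) example of the payload this builds up for Tern:
    #   { 'files': [ { 'type': 'full', 'name': 'foo.js', 'text': '...' } ],
    #     'query': { ... } }
    # where the optional 'query' member comes from the supplied |request|.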
file_data = request_data.get( 'file_data', {} )
full_request = {
'files': [ MakeIncompleteFile( x, file_data[ x ] )
for x in iterkeys( file_data )
if 'javascript' in file_data[ x ][ 'filetypes' ] ],
}
full_request.update( request )
response = requests.post( self._GetServerAddress(),
json = full_request )
if response.status_code != requests.codes.ok:
raise RuntimeError( response.text )
return response.json()
def _GetResponse( self, query, codepoint, request_data ):
"""Send a standard file/line request with the supplied query block, and
    return the server's response. If the server is not running, a ValueError is
    raised.
This method should be used for almost all requests. The exception is when
just updating file data in which case _PostRequest should be used directly.
The query block should contain the type and any parameters. The files,
position, etc. are added automatically.
NOTE: the |codepoint| parameter is usually the current cursor position,
though it should be the "completion start column" codepoint for completion
requests."""
def MakeTernLocation( request_data ):
return {
'line': request_data[ 'line_num' ] - 1,
'ch': codepoint - 1
}
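    # ycmd supplies 1-based line numbers and codepoint columns, whereas Tern
    # expects 0-based 'line'/'ch' offsets, hence the -1 adjustments above.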
full_query = {
'file': request_data[ 'filepath' ],
'end': MakeTernLocation( request_data ),
'lineCharPositions': True,
}
full_query.update( query )
return self._PostRequest( { 'query': full_query }, request_data )
# TODO: this function is way too long. Consider refactoring it.
def _StartServer( self ):
with self._server_state_mutex:
if self._ServerIsRunning():
return
_logger.info( 'Starting Tern server...' )
self._server_port = utils.GetUnusedLocalhostPort()
if _logger.isEnabledFor( logging.DEBUG ):
extra_args = [ '--verbose' ]
else:
extra_args = []
command = [ PATH_TO_NODE,
PATH_TO_TERN_BINARY,
'--port',
str( self._server_port ),
'--host',
SERVER_HOST,
'--persistent',
'--no-port-file' ] + extra_args
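      # To the best of our knowledge, '--persistent' stops Tern from shutting
      # itself down after a period of inactivity, and '--no-port-file' stops it
      # from writing a .tern-port file into the working directory; both are
      # desirable when ycmd manages the server's lifetime itself.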
_logger.debug( 'Starting tern with the following command: '
+ ' '.join( command ) )
try:
logfile_format = os.path.join( utils.PathToCreatedTempDir(),
u'tern_{port}_{std}.log' )
self._server_stdout = logfile_format.format(
port = self._server_port,
std = 'stdout' )
self._server_stderr = logfile_format.format(
port = self._server_port,
std = 'stderr' )
# We need to open a pipe to stdin or the Tern server is killed.
# See https://github.com/ternjs/tern/issues/740#issuecomment-203979749
# For unknown reasons, this is only needed on Windows and for Python
# 3.4+ on other platforms.
with utils.OpenForStdHandle( self._server_stdout ) as stdout:
with utils.OpenForStdHandle( self._server_stderr ) as stderr:
self._server_handle = utils.SafePopen( command,
stdin = PIPE,
stdout = stdout,
stderr = stderr )
except Exception:
_logger.warning( 'Unable to start Tern server: '
+ traceback.format_exc() )
self._Reset()
if self._server_port > 0 and self._ServerIsRunning():
_logger.info( 'Tern Server started with pid: ' +
str( self._server_handle.pid ) +
' listening on port ' +
str( self._server_port ) )
_logger.info( 'Tern Server log files are: ' +
self._server_stdout +
' and ' +
self._server_stderr )
self._do_tern_project_check = True
else:
_logger.warning( 'Tern server did not start successfully' )
def _RestartServer( self ):
with self._server_state_mutex:
self._StopServer()
self._StartServer()
def _StopServer( self ):
with self._server_state_mutex:
if self._ServerIsRunning():
_logger.info( 'Stopping Tern server with PID {0}'.format(
self._server_handle.pid ) )
self._server_handle.terminate()
try:
utils.WaitUntilProcessIsTerminated( self._server_handle,
timeout = 5 )
_logger.info( 'Tern server stopped' )
except RuntimeError:
_logger.exception( 'Error while stopping Tern server' )
self._Reset()
def _ServerIsRunning( self ):
return utils.ProcessIsRunning( self._server_handle )
def _GetType( self, request_data ):
query = {
'type': 'type',
}
response = self._GetResponse( query,
request_data[ 'column_codepoint' ],
request_data )
return responses.BuildDisplayMessageResponse( response[ 'type' ] )
def _GetDoc( self, request_data ):
# Note: we use the 'type' request because this is the best
# way to get the name, type and doc string. The 'documentation' request
    # doesn't return the 'name' (strangely), whereas the 'type' request returns
# the same docs with extra info.
query = {
'type': 'type',
'docFormat': 'full',
'types': True
}
response = self._GetResponse( query,
request_data[ 'column_codepoint' ],
request_data )
doc_string = 'Name: {name}\nType: {type}\n\n{doc}'.format(
name = response.get( 'name', 'Unknown' ),
type = response.get( 'type', 'Unknown' ),
doc = response.get( 'doc', 'No documentation available' ) )
return responses.BuildDetailedInfoResponse( doc_string )
def _GoToDefinition( self, request_data ):
query = {
'type': 'definition',
}
response = self._GetResponse( query,
request_data[ 'column_codepoint' ],
request_data )
return responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
response[ 'file' ] ) ),
response[ 'file' ],
response[ 'start' ][ 'line' ],
response[ 'start' ][ 'ch' ] ) )
def _GoToReferences( self, request_data ):
query = {
'type': 'refs',
}
response = self._GetResponse( query,
request_data[ 'column_codepoint' ],
request_data )
return [
responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
ref[ 'file' ] ) ),
ref[ 'file' ],
ref[ 'start' ][ 'line' ],
ref[ 'start' ][ 'ch' ] ) )
for ref in response[ 'refs' ]
]
def _Rename( self, request_data, args ):
if len( args ) != 1:
raise ValueError( 'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' )
query = {
'type': 'rename',
'newName': args[ 0 ],
}
response = self._GetResponse( query,
request_data[ 'column_codepoint' ],
request_data )
# Tern response format:
# 'changes': [
# {
# 'file'
# 'start' {
# 'line'
# 'ch' (codepoint offset)
# }
# 'end' {
# 'line'
# 'ch' (codepoint offset)
# }
# 'text'
# }
# ]
# ycmd response format:
#
# {
# 'fixits': [
# 'chunks': (list<Chunk>) [
# {
# 'replacement_text',
# 'range' (Range) {
# 'start_' (Location): {
# 'line_number_',
# 'column_number_', (byte offset)
# 'filename_'
# },
# 'end_' (Location): {
# 'line_number_',
# 'column_number_', (byte offset)
# 'filename_'
# }
# }
# }
# ],
# 'location' (Location) {
# 'line_number_',
# 'column_number_',
# 'filename_'
# }
#
# ]
# }
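    # As a purely illustrative example, a Tern change such as
    #   { 'file': 'a.js', 'start': { 'line': 0, 'ch': 4 },
    #     'end': { 'line': 0, 'ch': 7 }, 'text': 'newName' }
    # would become a chunk replacing line 1 of a.js from the byte column that
    # corresponds to codepoint 5 up to the one corresponding to codepoint 8,
    # with the replacement text 'newName'.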
def BuildRange( file_contents, filename, start, end ):
return responses.Range(
_BuildLocation( file_contents,
filename,
start[ 'line' ],
start[ 'ch' ] ),
_BuildLocation( file_contents,
filename,
end[ 'line' ],
end[ 'ch' ] ) )
def BuildFixItChunk( change ):
filename = os.path.abspath( change[ 'file' ] )
file_contents = utils.SplitLines( GetFileContents( request_data,
filename ) )
return responses.FixItChunk(
change[ 'text' ],
BuildRange( file_contents,
filename,
change[ 'start' ],
change[ 'end' ] ) )
# From an API perspective, Refactor and FixIt are the same thing - it just
# applies a set of changes to a set of files. So we re-use all of the
# existing FixIt infrastructure.
return responses.BuildFixItResponse( [
responses.FixIt(
responses.Location( request_data[ 'line_num' ],
request_data[ 'column_num' ],
request_data[ 'filepath' ] ),
[ BuildFixItChunk( x ) for x in response[ 'changes' ] ] ) ] )
def _BuildLocation( file_contents, filename, line, ch ):
# tern returns codepoint offsets, but we need byte offsets, so we must
# convert
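  # For example, in a line reading "var café = 1;" the character after 'café'
  # is at codepoint offset 9 but byte offset 10, because 'é' occupies two bytes
  # in UTF-8; CodepointOffsetToByteOffset performs exactly that mapping.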
return responses.Location(
line = line + 1,
column = utils.CodepointOffsetToByteOffset( file_contents[ line ],
ch + 1 ),
filename = os.path.realpath( filename ) )
|
rfguri/vimfiles
|
bundle/ycm/third_party/ycmd/ycmd/completers/javascript/tern_completer.py
|
Python
|
mit
| 22,105
| 0.026646
|
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from scipy.optimize import curve_fit
def countKey(key,listDataDicts):
outDict = {}
for row in listDataDicts:
try:
outDict[row[key]] += 1
except KeyError:
outDict[row[key]] = 1
return outDict
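# The usage columns appear to use large special codes (91 and above) for
# answers such as "did not use" or missing data rather than real day counts,
# which is presumably why the helpers below only accept values under 31, and
# why avgUse30DaysWithZeros additionally counts a code of 93 as zero days of
# use.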
def avgUse30Days(key, listDataDicts):
totalDays = 0
numberUsers = 0
for person in listDataDicts:
if int(person[key]) < 31 :
totalDays += int(person[key])
numberUsers += 1
return (1.0*totalDays/numberUsers)
def avgUse30DaysWithZeros(key, listDataDicts):
totalDays = 0
numberUsers = 0
for person in listDataDicts:
if ( int(person[key]) < 31 ):
totalDays += int(person[key])
numberUsers += 1
elif ( int(person[key]) == 93 ):
numberUsers += 1
else:
pass
return (1.0*totalDays/numberUsers)
def powerLaw(x,a,b):
return a*(x**(-b))
def expDecay(x,a,b):
return a*np.exp(b*x)
listDataDicts = []
with open('34933-0001-Data.tsv', 'rb') as tsvFile:
tsvReader = csv.DictReader(tsvFile,delimiter='\t')
for row in tsvReader:
listDataDicts.append(row)
ageFirstUseKeys = ['CIGTRY', 'SNUFTRY', 'CHEWTRY', 'CIGARTRY', 'ALCTRY', 'MJAGE', 'COCAGE', 'HERAGE', 'HALLAGE', 'INHAGE', 'ANALAGE', 'TRANAGE', 'STIMAGE', 'SEDAGE']
useLast30Keys = ['CIG30USE','SNF30USE','CHW30USE','CGR30USE','ALCDAYS','MJDAY30A','COCUS30A','HER30USE','HAL30USE','INHDY30A','PRDAYPMO','TRDAYPMO','STDAYPMO','SVDAYPMO']
xdata = []
ydata = []
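# Pair each substance's age of first use with its past-30-day usage frequency,
# skipping what look like special codebook values (age codes of 900+ and day
# counts of 31+) rather than genuine measurements.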
for person in listDataDicts:
for i in range(len(ageFirstUseKeys)):
if (int(person[ageFirstUseKeys[i]]) < 900) and (int(person[useLast30Keys[i]]) < 31):
xdata.append(int(person[ageFirstUseKeys[i]]))
ydata.append(int(person[useLast30Keys[i]]))
slope,intercept,rValue,pValue,stdErr = stats.linregress(xdata,ydata)
print "Drug First Use Age vs Usage Frequency Linear Regression"
print "Slope: %f, Intercept: %f, RSQ-Value: %f, P-Value: %f, Standard Error: %f,\n 95%% Confidence Interval: %f +- %f\n" %(slope,intercept,rValue*rValue,pValue,stdErr, slope, 1.96*stdErr)
'''# Curve fit with a power law
xfit = range(90)
popt1, pcov1 = curve_fit(powerLaw, xdata, ydata)
print "Power Law Curve fit: ",popt1,np.sqrt(np.diag(pcov1)),"\n"
fitLiney1 = np.zeros(len(xfit))
for i in range(len(xfit)):
fitLiney1[i] = powerLaw( xfit[i], popt1[0], popt1[1] )
'''
xdata2 = [ x for x in range(89) ]
ydata2 = [ (x*slope + intercept) for x in range(89) ]
plt.plot(xdata,ydata,'b.',xdata2,ydata2,'r-')
plt.title("Age of First Use vs Usage in the Last 30 Days")
plt.xlabel("Age of First Use")
plt.ylabel("Usage in the Past 30 Days)")
plt.legend(["Data","Linear Fit"])
plt.xlim(0,90)
plt.ylim(0,31)
plt.tight_layout()
plt.show()
|
cvanoort/USDrugUseAnalysis
|
Report1/Code/afu_use30.py
|
Python
|
isc
| 2,851
| 0.020694
|