repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
eeshangarg/zulip | zerver/migrations/0342_realm_demo_organization_scheduled_deletion_date.py | Python | apache-2.0 | 438 | 0 | # Generated by Django 3.2.5 on 2021-08-12 18:41
from django.db import migrations, models
class Migra | tion(migrations.Migration):
dependencies = [
("zerver", "0341_usergroup_is_system_group"),
]
operations = [
migrations.AddField(
model_name="realm",
name="demo_organization_scheduled_deletion_date",
field=models.DateTimeField(default=None, null=True) | ,
),
]
|
tscheff/Davis | davis_c.py | Python | mit | 10,939 | 0.003382 | """
Interface module for the C-version of davis.
Molecular Dynamics Simulation on a Sphere
Please see README.md for details.
MIT license
Copyright(c) 2015 - 2019 Tim Scheffler
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALING | S IN THE SOFTWARE.
"""
import ctypes as c
import numpy as np
import math
import queue
import threadi | ng
import fibonacci_sphere
import glob
import os
import sys
possible_clibs = glob.glob("davis_clib*")
if "win32" in sys.platform:
possible_clibs = [lib for lib in possible_clibs if "-win_" in lib]
elif "darwin" in sys.platform:
possible_clibs = [lib for lib in possible_clibs if "-darwin" in lib]
if len(possible_clibs) == 1:
lib_name = possible_clibs[0]
print("Loading %s" % lib_name)
clib = c.CDLL(os.path.join(".", lib_name))
elif len(possible_clibs) == 0:
print("Cannot find the davis clib part. Have you run the 'python setup.py build_ext -i' step?")
exit(-1)
else:
print("Found %d possible davis_clib modules (%s). Must be unique!" % (
len(possible_clibs), ", ".join(possible_clibs)))
exit(-1)
class Vec(c.Structure):
_fields_ = [("x", c.c_double), ("y", c.c_double), ("z", c.c_double)]
def __str__(self):
return "(%f, %f, %f)" % (self.x, self.y, self.z)
def magnitude2(self):
return self.x**2 + self.y**2 + self.z**2
class Particle(c.Structure):
_fields_ = [("r", Vec), ("v", Vec), ("a", Vec), ("next", c.c_long)]
class Stats(c.Structure):
_fields_ = [("ww_counter", c.c_long), ("real_ww_counter", c.c_long),
("E_pot", c.c_double)]
def __str__(self):
names = [x[0] for x in self._fields_]
return ", ".join( ("%s: %s" % (key, getattr(self, key))) for key in names)
def reset(self):
names = [x[0] for x in self._fields_]
for name in names:
setattr(self, name, 0)
def collect(self, other):
names = [x[0] for x in self._fields_]
for name in names:
my_val = getattr(self, name)
other_val = getattr(other, name)
setattr(self, name, my_val + other_val)
class Cells(object):
def __init__(self, binning, num_particles):
_make_cells = clib.Cells_new
_make_cells.restype = c.c_void_p
maker = lambda: c.cast(_make_cells(binning, num_particles), c.c_void_p)
self.pointer = maker()
def __del__(self):
clib.Cells_free(self.pointer)
class World:
"""Single-threaded simulation with force calculations based on cells."""
def __init__(self, num_particles, dt=0.1, gamma=0.0, cutoff=0.0, binning=None, *args, **kwargs):
self.ArrayType = Particle * num_particles
self.particles = self.ArrayType()
self.num_particles = c.c_long(num_particles)
self.dt = c.c_double(dt)
self.gamma = c.c_double(gamma)
self.cutoff = c.c_double(cutoff)
self.binning = binning
self.num_cells = binning**3
self.cells = Cells(binning, self.num_particles)
self.pos_ArrayType = c.c_double * (3 * num_particles)
self.positions = self.pos_ArrayType()
self.stats = Stats()
points = fibonacci_sphere.create_points(num_particles)
for p, point in zip(self.particles, points):
p.r.x, p.r.y, p.r.z = point
def timestep(self):
clib.dvs_advance(self.num_particles, self.particles, self.dt)
clib.dvs_populate_cells(self.num_particles, self.particles, self.cells.pointer)
clib.dvs_calc_forces(self.particles, self.cells.pointer, 0, self.num_cells,
self.cutoff, self.gamma, c.pointer(self.stats))
clib.dvs_correct(self.num_particles, self.particles, self.dt)
def get_3dPositions(self):
clib.dvs_visualise_positions(self.num_particles, self.particles, self.positions)
a = np.frombuffer(self.positions)
return a.reshape(-1, 3)
def stop(self):
pass
class BruteForceWorld(World):
"""Single-threaded simulation with O(N^2) force calculations"""
i_0 = c.c_long(0)
def timestep(self):
clib.dvs_advance(self.num_particles, self.particles, self.dt)
clib.dvs_calc_brute_forces(self.num_particles, self.particles,
self.i_0, self.num_particles, self.cutoff, self.gamma, c.pointer(self.stats))
clib.dvs_correct(self.num_particles, self.particles, self.dt)
class ParallelWorld(World):
"""Multi-threaded simulation with force calculations based on cells."""
uses_cells = True
def __init__(self, *args, **kwargs):
self.num_workers = kwargs.setdefault('num_workers', 1)
del kwargs['num_workers']
assert self.num_workers > 1
super().__init__(*args, **kwargs)
self.queue = queue.Queue()
self.particles_pw = [self.particles]
#self.stats_pw = [self.stats]
self.stats_pw = [Stats()]
for i in range(self.num_workers - 1):
self.particles_pw.append(self.ArrayType())
self.stats_pw.append(Stats())
delta = int(math.ceil(self.num_cells / (1.0*self.num_workers)))
self.intervals = []
cell0 = 0
for i in range(self.num_workers):
if i == self.num_workers - 1:
cell1 = self.num_cells
else:
cell1 = cell0 + delta
self.intervals.append((cell0, cell1))
t = threading.Thread(target=self.worker_starter(i))
t.daemon = True
t.start()
cell0 = cell1
def stop(self):
print("STOP")
for _ in range(self.num_workers):
self.queue.put('STOP')
def worker_starter(self, i):
# separate method for closure handling
def start():
print("started worker", i)
while True:
q = self.queue.get()
if q == 'STOP':
break
else:
start_cell, end_cell = q
clib.dvs_calc_forces(self.particles_pw[i], self.cells.pointer,
start_cell, end_cell,
self.cutoff, self.gamma,
c.pointer(self.stats_pw[i]))
self.queue.task_done()
return start
def timestep(self):
clib.dvs_advance(self.num_particles, self.particles, self.dt)
if self.uses_cells:
clib.dvs_populate_cells(self.num_particles, self.particles, self.cells.pointer)
for i in range(1, self.num_workers):
clib.dvs_copy_particles(self.num_particles, self.particles, self.particles_pw[i])
for i in self.intervals:
self.queue.put(i)
self.queue.join()
for i in range(1, self.num_workers):
clib.dvs_collect_forces(self.num_particles, self.particles, self.particles_pw[i])
self.stats.reset()
for i in range(0, self.num_workers):
self.stats.collect(self.stats_pw[i])
# ATTENTION!
# Calculating E_kin below will decrease performance massively!
# (approx 10x! in ParallelSimulation and 2x in BruteParallelSimulation)
# self.E_kin += sum(p.v.mag |
hzj123/56th | pombola/core/management/commands/core_import_basic_positions_csv.py | Python | agpl-3.0 | 2,345 | 0.010235 | # This is a very simple script to import basic CSV with the following columns:
#
# person_name
# position_title
# place_kind
# place_name
# organisation_kind
# organisation_name
#
# The various person etc entries will be created if needed (ie if there is no
# exact match).
#
# GOTCHAS
#
# * If the organisation or place already exist the associated place_kind will
# not be used, but will be created.
#
# TODO
#
# * Add tests
import csv
from django.core.management.base import LabelCommand, CommandError
from django.utils.text import slugify
from pombola.core.models import (Organisation, OrganisationKind, Identifier,
PlaceKind, Person,
PositionTitle, Position,
Place, PlaceKind)
def get_or_create(model, name, field="name", defaults={}):
if not name:
return None
defaults['slug'] = slugify(name)
kwargs = {
field: name
}
(obj, create | d) = model.objects.get_or_create(defaults=defaults, **kwargs)
return obj
class Command(LabelCommand):
| help = 'Import positions from a very basic CSV format'
args = '<positions CSV file>'
# option_list = LabelCommand.option_list + (
# make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
# )
def handle_label(self, input_filename, **options):
csv_file = csv.DictReader(open(input_filename, 'rb'))
for line in csv_file:
person = get_or_create(Person, line['person_name'], field="legal_name")
organisation_kind = get_or_create(OrganisationKind, line['organisation_kind'])
organisation = get_or_create(Organisation, line['organisation_name'], defaults={ "kind": organisation_kind })
place_kind = get_or_create(PlaceKind, line['place_kind'])
place = get_or_create(Place, line['place_name'], defaults={"kind": place_kind})
position_title = get_or_create(PositionTitle, line['position_title'])
# Now create the position
Position.objects.get_or_create(
person = person,
place = place,
organisation = organisation,
title = position_title,
category = "political",
)
|
jakemalley/training-log | manage.py | Python | mit | 4,168 | 0.010797 | # manage.py
# Jake Malley
# 15/01/2015
"""
Manage.py file uses Flask-Script to create a Manager
to allow us to run the app server and open a shell
inside the application context. It also provided the ability
to create custom commands. As well as perform database migrations.
"""
# Imports
from flask.ext.script import Manager,Server
from flask.ext.migrate import Migrate, MigrateCommand
from traininglog import app, db
# Create the manager object.
manager = Manager(app)
# Create the migration object. (For database migrations.)
migrate = Migrate(app, db)
# Get options for the server from the configuration.
try:
# Get host from the configuration object.
host = app.config['HOST']
except KeyError:
# If the option isn't present use the default.
host = '127.0.0.1'
try:
# Get the port from the configuration object.
port = app.config['PORT']
except KeyError:
# If the option isn't present use the default.
port=5000
# Create a development server to run the application.
server = Server(host=host, port=port)
# Specify custom commands for the manager.
@manager.command
def help():
"""
Displays a simple help message. Instructing how to run the server.
"""
print("""
To run the development server use $ python manage.py runserver
To run the production server use $ python manage.py tornadoserver
""")
@manager.command
def init_db():
"""
Create a new database as per the models specified
in the models.py file. Using the database file specified
in the config.py
"""
# Import all the models
from traininglog.models import Member, Exercise, RunningLookUp, CyclingLookUp, SwimmingLookUp
# Create the models.
db.create_all()
# Commit the changes.
db.session.commit()
# Generate the look up tables.
# As per the data for caloried_burned per hour for someone
# of weight 80KG (Data given on the task sheet.)
db.session.add(RunningLookUp(1, 472)) # 5 Mph (12 Minute mile)
db.session.add(RunningLookUp(2, 590)) # 6 Mph (10 Minute mile)
db.session.add(RunningLookUp(3, 679)) # 7 Mph (8.5 Minute mile)
db.session.add(RunningLookUp(4, 797)) # 8 Mph (7.5 Minute mile)
db.session.add(RunningLookUp(5, 885)) # 9 Mph (6.5 Minute mile)
db.session.add(RunningLookUp(6, 944)) # 10 Mph (6 Minute mile)
db.session.add(CyclingLookUp(1,236)) # <10 Mph, Leisure Cycling
db.session.add(CyclingLookUp(2,354)) # 10-11.9 Mph, Gentle
db.session.add(CyclingLookUp(3,472)) # 12-13.9 Mph, Moderate
db.session.add(CyclingLookUp(4,590)) # 14-15.9 Mph, Vigorous
db.session.add(CyclingLookUp(5,708)) # 16-20 Mph, Very Fast
db.session.add(CyclingLookUp(6,944)) # >20 Mph, Racing
db.session.add(SwimmingLookUp(1,413)) # Freestyle, Slow
db.session.add(SwimmingLookUp(2,590)) # Freestyle, Fast
db.session.add(SwimmingLookUp(3,413)) # Backstroke
db.session.add(SwimmingLookUp(4,590)) # Breaststroke
db.session.add(SwimmingLookUp(5,649)) # Butterfly
# Commit the Changes.
db.session.commit()
@manager.command
def tornadoserver():
"""
Create a tornado server to run the application
(used in production).
"""
# Import tornado for our tornado server.
from tornado.wsgi import WSGIContainer
from t | orna | do.httpserver import HTTPServer
from tornado.ioloop import IOLoop
# Import signal used to stop tornado with ctrl-c.
import signal
# Define a callback to stop the server.
def stop_tornado(signum, frame):
# Stop the loop.
IOLoop.instance().stop()
signal.signal(signal.SIGINT, stop_tornado)
# Create the HTTP server and WSGI Container.
http_server = HTTPServer(WSGIContainer(app))
# Listen on the port specified in the config.
http_server.listen(port)
# Start the loop.
IOLoop.instance().start()
# Add command for db migrations.
manager.add_command('db', MigrateCommand)
# Add the default runserver command for the application server
# we have specified.
manager.add_command('runserver', server)
# If it wasn't imported, but run directly.
if __name__ == "__main__":
# Run the script manager.
manager.run() |
Gorgel/khd_projects | khd_projects/khd_projects/urls.py | Python | mit | 890 | 0.007865 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
admin.au | todiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'openlc.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', include( | 'projects.urls', namespace="index")),
url(r'^blockly/', include('blockly.urls', namespace="blockly")),
url(r'^projects/', include('projects.urls', namespace="projects"))
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATIC_ROOT,
}),
)
|
ospaceteam/outerspace | server/lib/rsa/core.py | Python | gpl-2.0 | 1,834 | 0.003273 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybre | n A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Licens | e at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Core mathematical operations.
This is the actual core RSA implementation, which is only defined
mathematically on integers.
'''
import types
def assert_int(var, name):
if type(var) in (types.IntType, types.LongType):
return
raise TypeError('%s should be an integer, not %s' % (name, var.__class__))
def encrypt_int(message, ekey, n):
"""Encrypts a message using encryption key 'ekey', working modulo n"""
assert_int(message, 'message')
assert_int(ekey, 'ekey')
assert_int(n, 'n')
if message < 0:
raise ValueError('Only non-negative numbers are supported')
if message > n:
raise OverflowError("The message %i is too long for n=%i" % (message, n))
return pow(message, ekey, n)
def decrypt_int(cyphertext, dkey, n):
"""Decrypts a cypher text using the decryption key 'dkey', working
modulo n"""
if type(cyphertext) not in (types.IntType, types.LongType):
raise TypeError('cyphertext should be an integer, not %s' %
cyphertext.__type__)
assert_int(cyphertext, 'cyphertext')
assert_int(dkey, 'dkey')
assert_int(n, 'n')
message = pow(cyphertext, dkey, n)
return message
|
jijianwen/Learn | Linux/linux_tests/nfs_network_load/test.py | Python | gpl-2.0 | 4,473 | 0.00626 | #!/usr/bin/python
#
# Usage:
# sudo ./reproducer.py num_threads num_iterations_per_thread nfs_share
#
# Example:
# sudo ./reproducer.py 5 10 IPADDRESS:/path/to/dir
# Will run 5 threads, each with 10 iterations, writing to the NFS share at IPADDRESS:/path/to/dir
#
# Prerequisites:
# 1. Install a base trusty container using the ubuntu template:
# sudo lxc-create -n base -t ubuntu
# 2. Run the base container, attach to it, and install nfs-common via apt-get. Then shut down base container:
# sudo lxc-start -d --name=base
# ... wait for it to start up ...
# sudo lxc-attach --name=base -- apt-get -y install nfs-common
# sudo lxc-stop --name=base
# 3. Add the following to the default apparmor profile for containers in /etc/apparmor.d/lxc/lxc-default:
# mount fstype=nfs,
# mount fstype=nfs4,
# mount fstype=rpc_pipefs,
# 4. Source the new lxc-default profile:
# sudo apparmor_parser -r /etc/apparmor.d/lxc-containers
# 5. Have access to an NFS server on a seperate machine from the host running this script.
import os
import subprocess
import sys
import threading
import time
#
#class WorkerThread (threading.Thread):
#
# def __init__(self, id, num_iterations, nfs_server):
# threading.Thread.__init__(self)
# self.id = id
# self.num_iterations = num_iterations
# self.nfs_server = nfs_server
#
# def run(self):
# print "Starting thread%d" % self.id
# clone_name = "clone%d" % self.id
# for i in range(self.num_iterations):
# print "Thread%d at iteration %d/%d" % (self.id, i, self.num_iterations)
# os.system("lxc-clone -s base %s" % clone_name)
# os.system("lxc-start -d --name=%s" % clone_name)
# while True:
# output = subprocess.check_output("lxc-info -i --name=%s" % clone_name, shell=True)
# if "IP:" in output:
# break
# time.sleep(1)
# os.system("lxc-attach --name=%s -- mount %s /mnt" % (clone_name, self.nfs_server))
# os.system("lxc-attach --name=%s -- dd if=/dev/zero of=/mnt/%s-%d bs=1024 count=100000" % (clone_name, clone_name, i))
# os.system("lxc-attach --name=%s -- umount /mnt" % clone_name)
# os.system("lxc-stop -k --name=%s" % clone_name)
# os.system("lxc-destroy --name=%s" % clone_name)
# print "Thread%d done." % self.id
#
#num_threads = int(sys.argv[1])
#num_iterations = int(sys.argv[2])
#nfs_server = sys.argv[3]
#
#threads = []
#
#for i in range(num_threads):
# t = WorkerThread(i, num_iterations, nfs_server)
# threads.append(t)
# t.start()
#
#f | or t in threads:
# t.join()
# Re-write above code by making use of docker
# Run the build.sh to build the docker image first.
# # sh build.sh
import re
class WorkerThread (threading.Thread):
def __init__(self, id, num_iterations, nfs_server):
threading.Thread.__init__(self)
self.id = id
self.num_iterations = num_iterations
| self.nfs_server = nfs_server
def run(self):
print("Starting thread%d" % self.id)
clone_name = "clone%d" % self.id
for i in range(self.num_iterations):
print("Thread%d at iteration %d/%d" % (self.id, i, self.num_iterations))
os.system("docker run --name=%s --privileged -i -t -d nfstest_ubuntu bash" % clone_name)
while True:
output = subprocess.check_output("docker inspect -f '{{ .NetworkSettings.IPAddress }}' %s" % clone_name, shell=True).decode('utf-8')
pattern = re.compile("[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}")
if pattern.match(output) != None:
break
time.sleep(1)
os.system("docker exec %s mount %s /mnt" % (clone_name, self.nfs_server))
os.system("docker exec %s dd if=/dev/zero of=/mnt/jiji/%s-%d bs=1024 count=100000" % (clone_name, clone_name, i))
os.system("docker exec %s umount /mnt" % clone_name)
os.system("docker stop %s" % clone_name)
os.system("docker rm %s" % clone_name)
print("Thread%d done." % self.id)
num_threads = int(sys.argv[1])
num_iterations = int(sys.argv[2])
nfs_server = sys.argv[3]
threads = []
for i in range(num_threads):
t = WorkerThread(i, num_iterations, nfs_server)
threads.append(t)
t.start()
for t in threads:
t.join()
|
ArchaeoPY/ArchaeoPY | GUI_Templates/mplwidget.py | Python | gpl-2.0 | 1,485 | 0.003367 | #!/usr/bin/env python
# Python Qt4 bindings for GUI objects
from PyQt4 import QtGui
# import the Qt4Agg FigureCanvas object, that binds Figure to
# Qt4Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
class MplCanvas(FigureCanvas):
""" | Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# notify the sys | tem of updated policy
FigureCanvas.updateGeometry(self)
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.vbl = QtGui.QVBoxLayout()
# add mpl widget to the vertical box
self.vbl.addWidget(self.canvas)
# set the layout to the vertical box
self.setLayout(self.vbl) |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/__init__.py | Python | apache-2.0 | 37,904 | 0.001214 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import unknown_subtlv
from . import unreserved_bandwidths
from . import administrative_groups
class sub_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The Sub-TLVs included within the Traffic Engineering
LSA's sub-TLV
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__state",
"__unknown_subtlv",
"__unreserved_bandwidths",
"__administrative_groups",
)
_yang_name = "sub-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__unknown_subtlv = YANGDynClass(
base=unknown_subtlv.unknown_subtlv,
is_container="container",
yang_name="unknown-subtlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__unreserved_bandwidths = YANGDynClass(
base=unreserved_bandwidths.unreserved_bandwidths,
is_container="container",
yang_name="unreserved-bandwidths",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__administrative_groups = YANGDynClass(
base=administrative_groups.administrative_groups,
is_container="container",
yang_name="administrative-groups",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"traffic-engineering",
"tlvs",
"tlv",
"link",
"sub-tlvs",
"sub-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/state (container)
YANG Description: State parameters of the Link Sub-TLV
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the Link Sub-TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig. | net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
ba | se=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions |
Damicristi/Ising-Model-in-2D | Tools/Initialize_lattice_plot.py | Python | gpl-3.0 | 547 | 0.042357 | #Author: Damodar Rajbhandari
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plot
from random import *
L = 10
"""
This represents, there are "L" (eg. 3) either in
one row or column. Hence,
Total sites = L*L
"""
for i in range(L):
for j in range(L):
if randint(0, 1) > 0.5:
plot.scatter(i,j, color = 'red') # Dipole has spin up
else:
plot.scatter(i,j, color = 'black') # Dipole has | spin down
|
plot.xlabel('x →')
plot.ylabel('y →')
plot.title('Initial configuration of our lattice')
plot.show()
|
mhbu50/erpnext | erpnext/accounts/report/share_ledger/share_ledger.py | Python | gpl-3.0 | 1,816 | 0.026432 | # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(fil | ters=None):
if not filters: filters = {}
if not filters.get("date"):
frappe.throw(_("Please select date"))
columns = get_columns(filters)
date = filters.get("date")
data = []
if not filters.get("shareholder"):
pass
else:
transfers = get_all_transfers(date, filters.get("shareholder"))
for transfer in transfers:
if transfer.transfer_type == | 'Transfer':
if transfer.from_shareholder == filters.get("shareholder"):
transfer.transfer_type += ' to {}'.format(transfer.to_shareholder)
else:
transfer.transfer_type += ' from {}'.format(transfer.from_shareholder)
row = [filters.get("shareholder"), transfer.date, transfer.transfer_type,
transfer.share_type, transfer.no_of_shares, transfer.rate, transfer.amount,
transfer.company, transfer.name]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
_("Shareholder") + ":Link/Shareholder:150",
_("Date") + ":Date:100",
_("Transfer Type") + "::140",
_("Share Type") + "::90",
_("No of Shares") + "::90",
_("Rate") + ":Currency:90",
_("Amount") + ":Currency:90",
_("Company") + "::150",
_("Share Transfer") + ":Link/Share Transfer:90"
]
return columns
def get_all_transfers(date, shareholder):
condition = ' '
# if company:
# condition = 'AND company = %(company)s '
return frappe.db.sql("""SELECT * FROM `tabShare Transfer`
WHERE (DATE(date) <= %(date)s AND from_shareholder = %(shareholder)s {condition})
OR (DATE(date) <= %(date)s AND to_shareholder = %(shareholder)s {condition})
ORDER BY date""".format(condition=condition),
{'date': date, 'shareholder': shareholder}, as_dict=1)
|
z01nl1o02/tests | learn_svd.py | Python | gpl-2.0 | 1,675 | 0.015522 | import os,sys,cv2,pdb
from sklearn.decomposition import TruncatedSV | D
from pylab import *
def get_feature(imgpath):
img = cv2.imread(imgpath,0)
img = cv2.resize(img,(32,64))
img = np.float32(img)
img = img / 255
img = np.reshape(img, (1,32*64))
return img
def extract_sample_from_image(imgdir):
feats = []
for rdir, pdir, names in os.walk(imgdir+'pos'):
for name in names:
sname,ext = os.path.splitext(name)
if 0 == cmp(ext, '.jpg'):
| fname = os.path.join(rdir, name)
feats.append(get_feature(fname))
for rdir, pdir, names in os.walk(imgdir+'neg'):
for name in names:
sname,ext = os.path.splitext(name)
if 0 == cmp(ext, '.jpg'):
fname = os.path.join(rdir, name)
feats.append(get_feature(fname))
sample_num = len(feats)
sample_size = feats[0].shape[1]
samples = np.zeros((sample_num, sample_size))
for k in range(sample_num):
samples[k,:] = feats[k]
print 'samples ', samples.shape[0], samples.shape[1]
return samples
def run_svd(samples):
svd = TruncatedSVD(2)
svd.fit(samples)
newsamples = svd.transform(samples)
return (svd, newsamples)
def show_svd(transformed):
sample_num = transformed.shape[0]
for k in range(sample_num):
if k*2<sample_num:
mark = 'rx'
else:
mark = 'bo'
x,y = (transformed[k,0], transformed[k,1])
plot(x,y,mark)
show()
if __name__=="__main__":
samples = extract_sample_from_image('img/')
svd, transformed = run_svd(samples)
show_svd(transformed)
|
caw/curriculum | db/migrations/0053_auto_20161112_2146.py | Python | gpl-3.0 | 543 | 0.001842 | # -*- | coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-12 10:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choice set of YearLevel.name."""

    dependencies = [
        ('db', '0052_auto_20161112_2141'),
    ]

    operations = [
        migrations.AlterField(
            model_name='yearlevel',
            name='name',
            field=models.CharField(blank=True, choices=[(1, '1'), (2, '2'), (3, 'A'), (4, '3B'), (5, '4C'), (6, '5D')], max_length=30, null=True),
        ),
    ]
|
AdrianaDinca/bitcoin | qa/rpc-tests/import-rescan.py | Python | mit | 7,007 | 0.00157 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (start_nodes, connect_nodes, sync_blocks, assert_equal)
from decimal import Decimal
import collections
import enum
import itertools
import functools
# How a key/address is imported: via the single-purpose RPCs
# (importaddress/importpubkey/importprivkey) or via importmulti.
Call = enum.Enum("Call", "single multi")
# What gets imported: a bare address, a public key, or a private key.
Data = enum.Enum("Data", "address pub priv")
# Per-node test configuration: whether the import should trigger a rescan.
ImportNode = collections.namedtuple("ImportNode", "rescan")
def call_import_rpc(call, data, address, scriptPubKey, pubkey, key, label, node, rescan):
    """Helper that calls a wallet import RPC on a bitcoin node.

    Returns True when the import is watch-only (i.e. no private key was
    imported).
    """
    watchonly = data != Data.priv
    if call == Call.single:
        if data == Data.address:
            result = node.importaddress(address, label, rescan)
        elif data == Data.pub:
            result = node.importpubkey(pubkey, label, rescan)
        elif data == Data.priv:
            result = node.importprivkey(key, label, rescan)
        # The single-purpose import RPCs return nothing on success.
        assert_equal(result, None)
    elif call == Call.multi:
        request = {
            "scriptPubKey": {
                "address": address
            },
            "pubkeys": [pubkey] if data == Data.pub else [],
            "keys": [key] if data == Data.priv else [],
            "label": label,
            "watchonly": watchonly
        }
        result = node.importmulti([request], {"rescan": rescan})
        assert_equal(result, [{"success": True}])
    return watchonly
# List of RPCs that import a wallet key or address in various ways: one
# partial per (Call, Data) combination, still expecting (node, rescan) args.
IMPORT_RPCS = [functools.partial(call_import_rpc, call, data) for call, data in itertools.product(Call, Data)]

# List of bitcoind nodes that will import keys, one per rescan setting.
IMPORT_NODES = [
    ImportNode(rescan=True),
    ImportNode(rescan=False),
]
class ImportRescanTest(BitcoinTestFramework):
    """Check that wallet import RPCs pick up past transactions iff rescan=True."""

    def __init__(self):
        super().__init__()
        # One "source" node (index 0) plus one importing node per configuration.
        self.num_nodes = 1 + len(IMPORT_NODES)

    def setup_network(self):
        extra_args = [["-debug=1"] for _ in range(self.num_nodes)]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        # Connect every importing node to node 0, which mines and sends coins.
        for i in range(1, self.num_nodes):
            connect_nodes(self.nodes[i], 0)

    def run_test(self):
        # Create one transaction on node 0 with a unique amount and label for
        # each possible type of wallet import RPC.
        import_rpc_variants = []
        for i, import_rpc in enumerate(IMPORT_RPCS):
            label = "label{}".format(i)
            addr = self.nodes[0].validateaddress(self.nodes[0].getnewaddress(label))
            key = self.nodes[0].dumpprivkey(addr["address"])
            # Unique per-variant amount so balances identify the variant.
            amount = 24.9375 - i * .0625
            txid = self.nodes[0].sendtoaddress(addr["address"], amount)
            import_rpc = functools.partial(import_rpc, addr["address"], addr["scriptPubKey"], addr["pubkey"], key,
                                           label)
            import_rpc_variants.append((import_rpc, label, amount, txid, addr))

        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)

        # For each importing node and variation of wallet import RPC, invoke
        # the RPC and check the results from getbalance and listtransactions.
        for node, import_node in zip(self.nodes[1:], IMPORT_NODES):
            for import_rpc, label, amount, txid, addr in import_rpc_variants:
                watchonly = import_rpc(node, import_node.rescan)
                balance = node.getbalance(label, 0, True)
                # Only a rescanning node should see the pre-import transaction.
                if import_node.rescan:
                    assert_equal(balance, amount)
                else:
                    assert_equal(balance, 0)

                txs = node.listtransactions(label, 10000, 0, True)
                if import_node.rescan:
                    assert_equal(len(txs), 1)
                    assert_equal(txs[0]["account"], label)
                    assert_equal(txs[0]["address"], addr["address"])
                    assert_equal(txs[0]["amount"], amount)
                    assert_equal(txs[0]["category"], "receive")
                    assert_equal(txs[0]["label"], label)
                    assert_equal(txs[0]["txid"], txid)
                    assert_equal(txs[0]["confirmations"], 1)
                    assert_equal("trusted" not in txs[0], True)
                    if watchonly:
                        assert_equal(txs[0]["involvesWatchonly"], True)
                    else:
                        assert_equal("involvesWatchonly" not in txs[0], True)
                else:
                    assert_equal(len(txs), 0)

        # Create spends for all the imported addresses.
        spend_txids = []
        fee = self.nodes[0].getnetworkinfo()["relayfee"]
        for import_rpc, label, amount, txid, addr in import_rpc_variants:
            raw_tx = self.nodes[0].getrawtransaction(txid)
            decoded_tx = self.nodes[0].decoderawtransaction(raw_tx)
            # Find the output that paid the imported address.
            input_vout = next(out["n"] for out in decoded_tx["vout"]
                              if out["scriptPubKey"]["addresses"] == [addr["address"]])
            inputs = [{"txid": txid, "vout": input_vout}]
            outputs = {self.nodes[0].getnewaddress(): Decimal(amount) - fee}
            raw_spend_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            signed_spend_tx = self.nodes[0].signrawtransaction(raw_spend_tx)
            spend_txid = self.nodes[0].sendrawtransaction(signed_spend_tx["hex"])
            spend_txids.append(spend_txid)

        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)

        # Check the results from getbalance and listtransactions after the spends.
        for node, import_node in zip(self.nodes[1:], IMPORT_NODES):
            txs = node.listtransactions("*", 10000, 0, True)
            for (import_rpc, label, amount, txid, addr), spend_txid in zip(import_rpc_variants, spend_txids):
                balance = node.getbalance(label, 0, True)
                spend_tx = [tx for tx in txs if tx["txid"] == spend_txid]
                if import_node.rescan:
                    assert_equal(balance, amount)
                    assert_equal(len(spend_tx), 1)
                    assert_equal(spend_tx[0]["account"], "")
                    assert_equal(spend_tx[0]["amount"] + spend_tx[0]["fee"], -amount)
                    assert_equal(spend_tx[0]["category"], "send")
                    assert_equal("label" not in spend_tx[0], True)
                    assert_equal(spend_tx[0]["confirmations"], 1)
                    assert_equal("trusted" not in spend_tx[0], True)
                    assert_equal("involvesWatchonly" not in txs[0], True)
                else:
                    assert_equal(balance, 0)
                    assert_equal(spend_tx, [])
if __name__ == "__main__":
    # Entry point when run as a script.
    ImportRescanTest().main()
|
iulian787/spack | var/spack/repos/builtin/packages/pngwriter/package.py | Python | lgpl-2.1 | 1,625 | 0.003077 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pngwriter(CMakePackage):
    """PNGwriter is a very easy to use open source graphics library that uses
    PNG as its output format. The interface has been designed to be as simple
    and intuitive as possible. It supports plotting and reading pixels in the
    RGB (red, green, blue), HSV (hue, saturation, value/brightness) and CMYK
    (cyan, magenta, yellow, black) colour spaces, basic shapes, scaling,
    bilinear interpolation, full TrueType antialiased and rotated text support,
    bezier curves, opening existing PNG images and more.
    """

    homepage = "http://pngwriter.sourceforge.net/"
    url = "https://github.com/pngwriter/pngwriter/archive/0.5.6.tar.gz"
    git = "https://github.com/pngwriter/pngwriter.git"

    maintainers = ['ax3l']

    version('develop', branch='dev')
    version('master', branch='master')
    version('0.7.0', sha256='82d46eef109f434f95eba9cf5908710ae4e75f575fd3858178ad06e800152825')
    version('0.6.0', sha256='5107c6be0bfadf76ba4d01a553f7e060b5a7763ca7d9374ef3e7e59746b3911e')
    version('0.5.6', sha256='0c5f3c1fd6f2470e88951f4b8add64cf5f5a7e7038115dba69604139359b08f1')

    depends_on('libpng')
    depends_on('zlib')
    depends_on('freetype')

    def cmake_args(self):
        """Return extra CMake flags for the build."""
        spec = self.spec
        args = []

        if spec.satisfies('@0.7.0:'):
            # The FreeType switch is only passed for 0.7.0 and newer releases.
            args += ['-DPNGwriter_USE_FREETYPE:BOOL=ON']

        return args
|
TNG/svnfiltereddump | src/svnfiltereddump/__init__.py | Python | gpl-3.0 | 1,146 | 0.000873 |
from Main import run
#
# The Big Plan
#
# Decide what to do with a revision
from DumpController import DumpController, STRATEGY_DUMP_HEADER, STRATEGY_IGNORE, STRATEGY_SYNTHETIC_DELETES, STRATEGY_DUMP_SCAN, STRATEGY_BOOTSTRAP, DUMP_HEADER_PSEUDO_REV
# Generate the lumps for t | hat
from DumpHeaderGenerator import DumpHeaderGenerator
from BootsTrapper import BootsTrapper
from DumpFilter import DumpFilter, UnsupportedDumpVersionException
from SyntheticDeleter import SyntheticDeleter
from Revi | sionIgnorer import RevisionIgnorer
# Build the lumps
from LumpBuilder import LumpBuilder
# Fix length fields, drop empty revisions
from LumpPostProcessor import LumpPostProcessor
#
# SVN Abstraction layer
#
from SvnLump import SvnLump
from SvnDumpReader import SvnDumpReader
from SvnDumpWriter import SvnDumpWriter
from SvnRepository import SvnRepository
#
# Helpers
#
from Config import Config
from InterestingPaths import InterestingPaths
from RevisionMapper import RevisionMapper
from CheckedCommandFileHandle import CheckedCommandFileHandle
from ContentTin import ContentTin
from ParentDirectoryLumpGenerator import ParentDirectoryLumpGenerator
|
dhalperi/beam | sdks/python/apache_beam/runners/runner.py | Python | apache-2.0 | 12,102 | 0.006858 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PipelineRunner, an abstract base runner object."""
from __future__ import absolute_import
import logging
import os
import shelve
import shutil
import tempfile
__all__ = ['PipelineRunner', 'PipelineState', 'PipelineResult']
def _get_runner_map(runner_names, module_path):
"""Create a map of runner name in lower case to full import path to the
runner class.
"""
return {runner_name.lower(): module_path + runner_name
for runner_name in runner_names}
# Module paths under which each family of runner classes lives.
_DIRECT_RUNNER_PATH = 'apache_beam.runners.direct.direct_runner.'
_DATAFLOW_RUNNER_PATH = (
    'apache_beam.runners.dataflow.dataflow_runner.')
_TEST_RUNNER_PATH = 'apache_beam.runners.test.'

_KNOWN_DIRECT_RUNNERS = ('DirectRunner', 'EagerRunner')
_KNOWN_DATAFLOW_RUNNERS = ('DataflowRunner',)
_KNOWN_TEST_RUNNERS = ('TestDataflowRunner',)

# Lower-cased runner name -> fully qualified class path, for all known runners.
_RUNNER_MAP = {}
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DIRECT_RUNNERS,
                                   _DIRECT_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DATAFLOW_RUNNERS,
                                   _DATAFLOW_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_TEST_RUNNERS,
                                   _TEST_RUNNER_PATH))

_ALL_KNOWN_RUNNERS = (
    _KNOWN_DIRECT_RUNNERS + _KNOWN_DATAFLOW_RUNNERS + _KNOWN_TEST_RUNNERS)
def create_runner(runner_name):
  """For internal use only; no backwards-compatibility guarantees.

  Creates a runner instance from a runner class name.

  Args:
    runner_name: Name of the pipeline runner. Possible values are:
      DirectRunner, DataflowRunner and TestDataflowRunner.

  Returns:
    A runner object.

  Raises:
    ValueError: if an invalid runner name is used.
    ImportError: if the runner module cannot be imported (in particular the
      Dataflow runner when apache_beam[gcp] is not installed).
  """

  # Get the qualified runner name by using the lower case runner name. If that
  # fails try appending the name with 'runner' and check if it matches.
  # If that also fails, use the given runner name as is.
  runner_name = _RUNNER_MAP.get(
      runner_name.lower(),
      _RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))

  if '.' in runner_name:
    module, runner = runner_name.rsplit('.', 1)
    try:
      # NOTE(review): level=-1 enables Python 2 implicit relative imports;
      # under Python 3 this call would raise ValueError — confirm the
      # supported interpreter versions before changing it.
      return getattr(__import__(module, {}, {}, [runner], -1), runner)()
    except ImportError:
      if runner_name in _KNOWN_DATAFLOW_RUNNERS:
        raise ImportError(
            'Google Cloud Dataflow runner not available, '
            'please install apache_beam[gcp]')
      else:
        raise
  else:
    raise ValueError(
        'Unexpected pipeline runner: %s. Valid values are %s '
        'or the fully qualified name of a PipelineRunner subclass.' % (
            runner_name, ', '.join(_ALL_KNOWN_RUNNERS)))
class PipelineRunner(object):
  """A runner of a pipeline object.

  The base runner provides a run() method for visiting every node in the
  pipeline's DAG and executing the transforms computing the PValue in the node.

  A custom runner will typically provide implementations for some of the
  transform methods (ParDo, GroupByKey, Create, etc.). It may also
  provide a new implementation for clear_pvalue(), which is used to wipe out
  materialized values in order to reduce footprint.
  """

  def run(self, pipeline):
    """Execute the entire pipeline or the sub-DAG reachable from a node."""

    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.pipeline import PipelineVisitor

    class RunVisitor(PipelineVisitor):
      # Visits each primitive transform and delegates to run_transform().

      def __init__(self, runner):
        self.runner = runner

      def visit_transform(self, transform_node):
        try:
          self.runner.run_transform(transform_node)
        except:
          # Log which node failed before re-raising the original error.
          logging.error('Error while visiting %s', transform_node.full_label)
          raise

    pipeline.visit(RunVisitor(self))

  def apply(self, transform, input):
    """Runner callback for a pipeline.apply call.

    Args:
      transform: the transform to apply.
      input: transform's input (typically a PCollection).

    A concrete implementation of the Runner class may want to do custom
    pipeline construction for a given transform.  To override the behavior
    for a transform class Xyz, implement an apply_Xyz method with this same
    signature.
    """
    # Walk the class MRO so a handler registered for a base class also
    # matches subclasses; the first matching apply_<Class> method wins.
    for cls in transform.__class__.mro():
      m = getattr(self, 'apply_%s' % cls.__name__, None)
      if m:
        return m(transform, input)
    raise NotImplementedError(
        'Execution of [%s] not implemented in runner %s.' % (transform, self))

  def apply_PTransform(self, transform, input):
    # The base case of apply is to call the transform's expand.
    return transform.expand(input)

  def run_transform(self, transform_node):
    """Runner callback for a pipeline.run call.

    Args:
      transform_node: transform node for the transform to run.

    A concrete implementation of the Runner class must implement run_Abc for
    some class Abc in the method resolution order for every non-composite
    transform Xyz in the pipeline.
    """
    # Same MRO-based dispatch as apply(), but for run_<Class> handlers.
    for cls in transform_node.transform.__class__.mro():
      m = getattr(self, 'run_%s' % cls.__name__, None)
      if m:
        return m(transform_node)
    raise NotImplementedError(
        'Execution of [%s] not implemented in runner %s.' % (
            transform_node.transform, self))
class PValueCache(object):
"""For internal use only; no backwards-compatibility guarantees.
Local cache for arbitrary information computed for PValue objects."""
def __init__(self, use_disk_backed_cache=False):
# Cache of values computed while a runner executes a pipeline. This is a
# dictionary of PValues and their computed values. Note that in principle
# the runner could contain PValues from several pipelines without clashes
# since a PValue is associated with one and only one pipeline. The keys of
# the dictionary are tuple of PValue instance addresses obtained using id()
# and tag names converted to strings.
self._use_disk_backed_cache = use_disk_backed_cache
if use_disk_backed_cache:
self._tempdir = tempfile.mkdtemp()
self._cache = shelve.open(os.path.join(self._tempdir, 'shelve'))
else:
self._cache = {}
def __del__(self):
if self._use_disk_backed_cache:
self._cache.close()
shutil.rmtree(self._tempdir)
def __len__(self):
return len(self._cache)
def to_cache_key(self, transform, tag):
return transform.full_label, tag
def _ensure_pvalue_has_real_producer(self, pvalue):
"""Ensure the passed-in PValue has the real_producer attribute. |
Args:
pvalue: A PValue instance whose cached value is requested.
During the runner's execution only the | results of the primitive transforms
are cached. Whenever we are looking for a PValue that is the output of a
composite transform we need to find the output of its rightmost transform
part.
"""
if not hasattr(pvalue, 'real_producer'):
real_producer = pvalue.producer
while real_producer.parts:
real_producer = real_producer.parts[-1]
pvalue.real_producer = real_producer
def is_cached(self, pobj):
from apache_beam.pipeline import AppliedPTransform
if isinstance(pobj, AppliedPTransform):
transform = pobj
tag = None
else:
self._ensure_pvalue_has_real_producer(pobj)
transform = pobj.real_producer
tag = pobj.tag
return self.to_cache_key(transform, tag) i |
paulmand3l/elevendance | registration/urls.py | Python | mit | 154 | 0.006494 | from django.conf. | urls import patterns, include, url
urlpatterns = patterns('checki | n.views',
url(r'^checkin/$', 'checkin', name="checkin-checkin"),
)
|
showyou/anzu | common/model.py | Python | mit | 4,110 | 0.037796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# マルコフテーブル等定義
import sqlalchemy
from sqlalchemy.orm import scoped_session, sessionmaker, mapper
from sqlalchemy import MetaData
from sqlalchemy import Column, MetaData, Table, types
from datetime import datetime
# Empty mapper classes: each one is bound to its corresponding Table below
# via mapper() inside startSession() (classical SQLAlchemy mapping).
class OhayouTime(object):
    pass

class Markov(object):
    pass

class Tweet(object):
    pass

class RetQueue(object):
    pass

class Hot(object):
    pass

class Collocation(object):
    pass

class Reply(object):
    pass
# Guard so mapper() is only ever run once per process (see startSession()).
init = False
metadata = sqlalchemy.MetaData()

# Records of "good morning"-type events per user.
ohayouTime = Table("ohayouTime",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('user', types.Unicode(32)),
    Column('type', types.Unicode(32)),
    Column('datetime', types.DateTime, default=datetime.now),
    mysql_engine = 'MyISAM',
    mysql_charset = 'utf8'
)

# Markov-chain trigram table: (prev, now, next) with an occurrence count.
markovOneColumn = Table("markov",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('prev', types.Unicode(32)),
    Column('now', types.Unicode(32)),
    Column('next', types.Unicode(32)),
    Column('count', types.Float,default=1),
    Column('lastupdate', types.DateTime, default=datetime.now),
    mysql_engine = 'InnoDB',
    mysql_charset= 'utf8'
)

# Reply queue. Order is fixed.
retQueue = Table("retQueue",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('user', types.Unicode(32)),
    Column('text', types.Unicode(140)),
    Column('reply_id', types.BigInteger(20), default=0),
    mysql_engine = 'MyISAM',
    mysql_charset = 'utf8'
)

# List of currently "hot" words.
hot = Table("hot",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('word', types.Unicode(140)),
    Column('datetime',types.DateTime, default=datetime.now),
    mysql_engine = 'MyISAM',
    mysql_charset = 'utf8'
)

# Harvested tweets plus analysis bookkeeping flags.
tweet = Table("tweet",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('user', types.Unicode(32)),
    Column('text', types.Unicode(140)),
    Column('datetime', types.DateTime, default=datetime.now),
    Column('replyID', types.String(64), default=-1),
    Column('isAnalyze', types.SmallInteger, default=False),
    Column('isReplyAnalyze',types.SmallInteger, default=0),
    Column('tweetID', types.BigInteger(20)),
    mysql_engine = 'InnoDB',
    mysql_charset = 'utf8'
)

# Word co-occurrence counts: pair (a, b) with collocation/sentence tallies.
collocation = Table("collocation",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('a', types.Unicode(32)),
    Column('b', types.Unicode(32)),
    Column('colloc_count', types.Integer,default=1),
    Column('sentence_count',types.Integer,default=1),
    mysql_engine = 'InnoDB',
    mysql_charset = 'utf8'
)

# Reply pairs: a tweet and the source tweet it replied to.
reply = Table("reply",metadata,
    Column('id', types.Integer, primary_key=True),
    Column('tweet_id', types.BigInteger(20)),
    Column('reply_text', types.Text),
    Column('src_id', types.BigInteger(20)),
    Column('src_text', types.Text),
    Column('is_analyze', types.SmallInteger, default=False),
    mysql_engine = 'InnoDB',
    mysql_charset = 'utf8'
)
def startSession(conf):
    """Create the engine, map the model classes (once) and return a session factory.

    Args:
        conf: dict with keys "dbuser", "dbpass", "dbhost" and "db".

    Returns:
        A scoped_session bound to the configured MySQL database.
    """
    global init
    # Build the connection URL explicitly instead of chained `+` concatenation.
    url = "mysql://{}:{}@{}/{}?charset=utf8".format(
        conf["dbuser"], conf["dbpass"], conf["dbhost"], conf["db"])
    config = {"sqlalchemy.url": url, "sqlalchemy.echo": "False"}
    engine = sqlalchemy.engine_from_config(config)
    dbSession = scoped_session(
        sessionmaker(
            autoflush=True,
            autocommit=False,
            bind=engine
        )
    )
    # mapper() must only be called once per class, so guard repeated calls.
    if not init:
        mapper(Tweet, tweet)
        mapper(Hot, hot)
        mapper(Markov, markovOneColumn)
        mapper(RetQueue, retQueue)
        mapper(OhayouTime, ohayouTime)
        mapper(Collocation, collocation)
        mapper(Reply, reply)
        init = True
    metadata.create_all(bind=engine)
    print ("--start DB Session--")
    return dbSession
"""
# テスト内容
a = startSession()
>>> --start DB Session--
"""
|
Lyla-Fischer/xblock-sdk | workbench/test/test_runtime.py | Python | agpl-3.0 | 6,112 | 0.002291 | """Test Workbench Runtime"""
from unittest import TestCase
import mock
from django.conf import settings
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xblock.runtime import KvsFieldData
from xblock.reference.user_service import UserService
from ..runtime import WorkbenchRuntime, ScenarioIdManager, WorkbenchDjangoKeyValueStore
class TestScenarioIds(TestCase):
    """
    Test XBlock Scenario IDs
    """

    def setUp(self):
        # Test basic ID generation meets our expectations
        self.id_mgr = ScenarioIdManager()

    def test_no_scenario_loaded(self):
        # Without a scenario, IDs start with a bare "." prefix.
        self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d0")

    def test_should_increment(self):
        self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d0")
        self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d1")

    def test_slug_support(self):
        self.assertEqual(
            self.id_mgr.create_definition("my_block", "my_slug"),
            ".my_block.my_slug.d0"
        )
        self.assertEqual(
            self.id_mgr.create_definition("my_block", "my_slug"),
            ".my_block.my_slug.d1"
        )

    def test_scenario_support(self):
        self.test_should_increment()

        # Now that we have a scenario, our definition numbering starts over again.
        self.id_mgr.set_scenario("my_scenario")
        self.assertEqual(self.id_mgr.create_definition("my_block"), "my_scenario.my_block.d0")
        self.assertEqual(self.id_mgr.create_definition("my_block"), "my_scenario.my_block.d1")

        self.id_mgr.set_scenario("another_scenario")
        self.assertEqual(self.id_mgr.create_definition("my_block"), "another_scenario.my_block.d0")

    def test_usages(self):
        # Now make sure our usages are attached to definitions
        self.assertIsNone(self.id_mgr.last_created_usage_id())
        self.assertEqual(
            self.id_mgr.create_usage("my_scenario.my_block.d0"),
            "my_scenario.my_block.d0.u0"
        )
        self.assertEqual(
            self.id_mgr.create_usage("my_scenario.my_block.d0"),
            "my_scenario.my_block.d0.u1"
        )
        self.assertEqual(self.id_mgr.last_created_usage_id(), "my_scenario.my_block.d0.u1")

    def test_asides(self):
        # Aside definition/usage IDs must round-trip back to their aside type
        # and to the definition/usage they decorate.
        definition_id = self.id_mgr.create_definition('my_block')
        usage_id = self.id_mgr.create_usage(definition_id)

        aside_definition, aside_usage = self.id_mgr.create_aside(definition_id, usage_id, 'my_aside')

        self.assertEqual(self.id_mgr.get_aside_type_from_definition(aside_definition), 'my_aside')
        self.assertEqual(self.id_mgr.get_definition_id_from_aside(aside_definition), definition_id)
        self.assertEqual(self.id_mgr.get_aside_type_from_usage(aside_usage), 'my_aside')
        self.assertEqual(self.id_mgr.get_usage_id_from_aside(aside_usage), usage_id)
class TestKVStore(TestCase):
    """
    Test the Workbench KVP Store
    """

    def setUp(self):
        self.kvs = WorkbenchDjangoKeyValueStore()
        self.key = KeyValueStore.Key(
            scope=Scope.content,
            user_id="rusty",
            block_scope_id="my_scenario.my_block.d0",
            field_name="age"
        )

    def test_storage(self):
        # Full round-trip: absent -> set -> read back -> delete -> absent.
        self.assertFalse(self.kvs.has(self.key))
        self.kvs.set(self.key, 7)
        self.assertTrue(self.kvs.has(self.key))
        self.assertEqual(self.kvs.get(self.key), 7)
        self.kvs.delete(self.key)
        self.assertFalse(self.kvs.has(self.key))
class StubService(object):
    """Empty service to test loading additional services."""
    # Loaded dynamically by name via settings.WORKBENCH['services'] in the tests.
    pass
class ExceptionService(object):
    """Stub service that raises an exception on init. """
    def __init__(self):
        # Force construction to fail so service load-error handling can be tested.
        raise Exception("Kaboom!")
class TestServices(TestCase):
    """
    Test XBlock runtime services
    """

    def setUp(self):
        super(TestServices, self).setUp()
        self.xblock = mock.Mock()

    def test_default_services(self):
        runtime = WorkbenchRuntime('test_user')
        self._assert_default_services(runtime)

    @mock.patch.dict(settings.WORKBENCH['services'], {
        'stub': 'workbench.test.test_runtime.StubService'
    })
    def test_settings_adds_services(self):
        runtime = WorkbenchRuntime('test_user')

        # Default services should still be available
        self._assert_default_services(runtime)

        # An additional service should be provided
        self._assert_service(runtime, 'stub', StubService)

        # Check that the service has the runtime attribute set
        service = runtime.service(self.xblock, 'stub')
        self.assertIs(service.runtime, runtime)

    @mock.patch.dict(settings.WORKBENCH['services'], {
        'not_found': 'workbench.test.test_runtime.NotFoundService'
    })
    def test_could_not_find_service(self):
        runtime = WorkbenchRuntime('test_user')

        # Default services should still be available
        self._assert_default_services(runtime)

        # The additional service should NOT be available
        self.assertIs(runtime.service(self.xblock, 'not_found'), None)

    @mock.patch.dict(settings.WORKBENCH['services'], {
        'exception': 'workbench.test.test_runtime.ExceptionService'
    })
    def test_runtime_service_initialization_failed(self):
        runtime = WorkbenchRuntime('test_user')

        # Default services should still be available
        self._assert_default_services(runtime)

        # The additional service should NOT be available
        self.assertIs(runtime.service(self.xblock, 'exception'), None)

    def _assert_default_services(self, runtime):
        """Check that the default services are available. """
        self._assert_service(runtime, 'field-data', KvsFieldData)
        self._assert_service(runtime, 'user', UserService)

    def _assert_service(self, runtime, service_name, service_class):
        """Check that a service is loaded. """
        service_instance = runtime.service(self.xblock, service_name)
        self.assertIsInstance(service_instance, service_class)
|
olimp-web/quest_manager | quest_map/admin.py | Python | mit | 124 | 0.008065 | from django.contrib impor | t admin
f | rom .models import Station, Quest
# Expose the quest models in the Django admin with the default ModelAdmin.
admin.site.register(Quest)
admin.site.register(Station)
alphagov/digitalmarketplace-api | migrations/versions/1290_sf_allow_declaration_reuse_add_column.py | Python | mit | 747 | 0.006693 | """sf_allow_declaration_reuse_add_column
create supplier_frameworks.allow_declaration_reuse column as nullable in an initial, small
transaction. we will backfill it with defaults la | ter to minimize table locking time.
Revision ID: 1290
Revises: 1280
Create Date: 2019-06-10 17:00:02.464675
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1290'
down_revision = '1280'
def upgrade():
    # Create the column as nullable first so this ALTER TABLE is a small, fast
    # transaction; defaults are backfilled by a later migration to minimize
    # table locking time (see module docstring).
    op.add_column('supplier_frameworks', sa.Column('allow_declaration_reuse', sa.Boolean(), nullable=True))
def downgrade():
    # Reverse of upgrade(): drop the column again.
    op.drop_column('supplier_frameworks', 'allow_declaration_reuse')
|
polyaxon/polyaxon | platform/coredb/coredb/managers/runs.py | Python | apache-2.0 | 2,268 | 0.000441 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from coredb.abstracts.getter import get_run_model
from coredb.abstracts.runs import BaseRun
from polyaxon.lifecycle import V1StatusCondition, V1Statuses
from polyaxon.polyflow import V1CompiledOperation, V1RunKind
from polyaxon.schemas import V1RunPending
def create_run(
    project_id: int,
    user_id: int,
    name: str = None,
    description: str = None,
    readme: str = None,
    tags: List[int] = None,
    raw_content: str = None,
    meta_info: Dict = None,
) -> BaseRun:
    """Persist and return a new, unmanaged job run with an initial CREATED status."""
    created_condition = V1StatusCondition.get_condition(
        type=V1Statuses.CREATED,
        status="True",
        reason="ModelManager",
        message="Run is created",
    ).to_dict()
    run = get_run_model().objects.create(
        project_id=project_id,
        user_id=user_id,
        name=name,
        description=description,
        readme=readme,
        tags=tags,
        kind=V1RunKind.JOB,
        is_managed=False,
        raw_content=raw_content,
        meta_info=meta_info,
        status_conditions=[created_condition],
    )
    return run
def base_approve_run(run: BaseRun):
    """Resolve a run's pending state after approval.

    A run pending a build (while still CREATED) or an upload keeps pending
    APPROVAL if its compiled operation explicitly has is_approved == False;
    otherwise pending is cleared. No-op when the run is not pending.
    """
    pending = run.pending
    if not pending:
        return
    needs_approval = False
    awaiting_build = pending == V1RunPending.BUILD and run.status == V1Statuses.CREATED
    if (awaiting_build or pending == V1RunPending.UPLOAD) and run.content:
        compiled_operation = V1CompiledOperation.read(run.content)
        needs_approval = compiled_operation.is_approved is False
    run.pending = V1RunPending.APPROVAL if needs_approval else None
    run.save(update_fields=["pending", "updated_at"])
|
pytroll/satpy | utils/fetch_avhrr_calcoeffs.py | Python | gpl-3.0 | 4,773 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Fetch avhrr calibration coefficients."""
import datetime as dt
import os.path
import sys
import h5py
import urllib2
BASE_URL = "http://www.star.nesdis.noaa.gov/smcd/spb/fwu/homepage/" + \
"AVHRR/Op_Cal_AVHRR/"
URLS = {
"Metop-B":
{"ch1": BASE_URL + "Metop1_AVHRR_Libya_ch1.txt",
"ch2": BASE_URL + "Metop1_AVHRR_Libya_ch2.txt",
"ch3a": BASE_URL + "Metop1_AVHRR_Libya_ch3a.txt"},
"Metop-A":
{"ch1": BASE_URL + "Metop2_AVHRR_Libya_ch1.txt",
"ch2": BASE_URL + "Metop2_AVHRR_Libya_ch2.txt",
"ch3a": BASE_URL + "Metop2_AVHRR_Libya_ch3a.txt"},
"NOAA-16":
{"ch1": BASE_URL + "N16_AVHRR_Libya_ch1.txt",
"ch2": BASE_URL + "N16_AVHRR_Libya_ch2.txt"},
"NOAA-17":
{"ch1": BASE_URL + "N17_AVHRR_Libya_ch1.txt",
"ch2": BASE_URL + "N17_AVHRR_Libya_ch2.txt",
"ch3a": BASE_URL + "N17_AVHRR_Libya_ch3a.txt"},
"NOAA-18":
{"ch1": BASE_URL + "N18_AVHRR_Libya_ch1.txt",
"ch2": BASE_URL + "N18_AVHRR_Libya_ch2.txt"},
"NOAA-19":
{"ch1": BASE_URL + "N19_AVHRR_Libya_ch1.txt",
"ch2": BASE_URL + "N19_AVHRR_Libya_c | h2.txt"}
}
def get_page(url):
    """Retrieve the given page."""
    response = urllib2.urlopen(url)
    return response.read()
def get_coeffs(page):
    """Parse coefficients from the page.

    Returns a dict of parallel lists: 'datetime' ([year, month, day]),
    'slope1', 'intercept1', 'slope2' and 'intercept2'.
    """
    coeffs = {
        'datetime': [],
        'slope1': [],
        'intercept1': [],
        'slope2': [],
        'intercept2': [],
    }

    # Column positions of (slope_lo, int_lo, slope_hi, int_hi), discovered
    # from the header row; rows before the header are skipped.
    col_idx = None
    date_idx = 0
    for line in page.lower().split('\n'):
        fields = line.split()
        if not fields:
            continue

        if fields[0] == 'update':
            # Header row: remember where each coefficient column lives.
            col_idx = (fields.index('slope_lo'), fields.index('int_lo'),
                       fields.index('slope_hi'), fields.index('int_hi'))
            continue

        if col_idx is None:
            continue

        # In some cases the fields are connected; skip rows that are too short.
        if max(col_idx) >= len(fields):
            continue

        try:
            when = dt.datetime.strptime(fields[date_idx], "%m/%d/%Y")
        except ValueError:
            continue

        coeffs['datetime'].append([when.year, when.month, when.day])
        coeffs['slope1'].append(float(fields[col_idx[0]]))
        coeffs['intercept1'].append(float(fields[col_idx[1]]))
        coeffs['slope2'].append(float(fields[col_idx[2]]))
        coeffs['intercept2'].append(float(fields[col_idx[3]]))

    return coeffs
def get_all_coeffs():
    """Get all available calibration coefficients for the satellites."""
    coeffs = {}
    for platform, channels in URLS.items():
        coeffs.setdefault(platform, {})
        for chan, url in channels.items():
            print(url)
            coeffs[platform][chan] = get_coeffs(get_page(url))
    return coeffs
def save_coeffs(coeffs, out_dir=''):
    """Write the parsed calibration coefficients to one HDF5 file per platform."""
    for platform, channels in coeffs.items():
        fname = os.path.join(out_dir, "%s_calibration_data.h5" % platform)
        fid = h5py.File(fname, 'w')
        for chan, data in channels.items():
            group = fid.create_group(chan)
            for key in ('datetime', 'slope1', 'intercept1',
                        'slope2', 'intercept2'):
                group[key] = data[key]
        fid.close()
        print("Calibration coefficients saved for %s" % platform)
def main():
    """Entry point: fetch all coefficients and store them under sys.argv[1]."""
    target_dir = sys.argv[1]
    save_coeffs(get_all_coeffs(), out_dir=target_dir)
# Run as a script: download coefficients and write HDF5 files to sys.argv[1].
if __name__ == "__main__":
    main()
|
adamjace/envcheckr | tests/test_envcheckr.py | Python | mit | 983 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_envcheckr
----------------------------------
Tests for `envcheckr` module.
"""
import pytest
from envcheckr import envcheckr
def test_parse_lines():
    """parse_lines() returns one entry per line of the env file."""
    # Repaired a garbled "assert" token in this test.
    lines_a = envcheckr.parse_lines('tests/env')
    assert len(lines_a) == 3
    lines_b = envcheckr.parse_lines('tests/env.example')
    assert len(lines_b) == 7
def test_parse_key():
    """parse_key() extracts the variable name from each env line."""
    lines = envcheckr.parse_lines('tests/env')
    # Repaired a garbled token in the last assertion; also dropped the
    # redundant parentheses around the calls.
    assert envcheckr.parse_key(lines[0]) == 'FRUIT'
    assert envcheckr.parse_key(lines[1]) == 'DRINK'
    assert envcheckr.parse_key(lines[2]) == 'ANIMAL'
def test_get_missing_keys():
    """get_missing_keys() reports every key in env.example absent from env."""
    missing = envcheckr.get_missing_keys('tests/env', 'tests/env.example')
    expected = ['FOOD=Pizza\n', 'CODE=Python\n',
                'SPORT=Football\n', 'CITY=Brisbane\n']
    assert len(missing) == len(expected)
    for got, want in zip(missing, expected):
        assert got == want
|
pi-bot/.v2 | python-code/ultrasound.py | Python | gpl-3.0 | 680 | 0.017647 | import arduino
from arduino import Commands
from arduino import Arduino
from time import sleep
TRIGGER_PIN = arduino.A3
class Ultrasound():
    """Thin wrapper around the Arduino-connected ultrasound range finder."""

    def __init__(self):
        # Open the serial link to the Arduino once, up front.
        self.board = Arduino()
        self.board.connect()

    def getDistance(self):
        """Return the measured distance, capped at 100.

        Out-of-range echoes are reported by the firmware as 0, so a zero
        reading is mapped to the maximum distance instead.
        """
        # Repaired a garbled token below ("di | stance" -> "distance").
        distance = int(self.board.sendCommand(Commands.READ_ULTRASOUND, TRIGGER_PIN, 0))
        # large distances are reported as 0, so change to max distance
        if distance == 0:
            distance = 100
        # limit to 100
        elif distance > 100:
            distance = 100
        return distance
if __name__ == '__main__':
    # Manual smoke test: print one distance reading per second.
    ultra = Ultrasound()
    while True:
        print(ultra.getDistance())
        sleep(1)
|
cathyyul/sumo-0.18 | tools/output/generateITetrisIntersectionMetrics.py | Python | gpl-3.0 | 12,974 | 0.00316 | #!/usr/bin/env python
"""
@file generateITetrisIntersectionMetrics.py
@author Daniel Krajzewicz
@author Lena Kalleske
@author Michael Behrisch
@date 2007-10-25
@version $Id: generateITetrisIntersectionMetrics.py 14425 2013-08-16 20:11:47Z behrisch $
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from optparse import OptionParser
import os, sys
from numpy import mean
from xml.sax import parse, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
def getBasicStats(net, lanesInfo, T):
    """Aggregate per-lane detector measures into per-traffic-light statistics.

    :param net: network object; only ``net._tlss`` and the lanes referenced by
        each tls connection are used
    :param lanesInfo: mapping lane id -> dict with the per-interval lists
        ``mQueueLen``, ``nbStops`` and ``tWaitTime``
    :param T: total time span, used to normalise the mean values
    :return: dict mapping tls id -> dict with ``mQueueLen``, ``mWaitTime``,
        ``nbStops`` and ``tWaitTime``
    """
    tlsInfo = {}
    for tl in net._tlss:
        tlID = tl._id
        mQueueLen = []
        nbStops = []
        tWaitTime = []
        seenLanes = set()
        for conn in tl._connections:
            lane = conn[0]
            # Several connections may start at the same lane; count it once.
            if lane in seenLanes:
                continue
            seenLanes.add(lane)
            laneInfo = lanesInfo[lane.getID()]
            mQueueLen.append(sum(laneInfo['mQueueLen']))
            # Repaired garbled tokens in the two sums below.
            nbStops.append(sum(laneInfo['nbStops']))
            tWaitTime.append(sum(laneInfo['tWaitTime']))
        tlsInfo[tlID] = {}
        tlsInfo[tlID]['mQueueLen'] = mean(mQueueLen) / T
        tlsInfo[tlID]['mWaitTime'] = mean(tWaitTime) / T
        tlsInfo[tlID]['nbStops'] = sum(nbStops)
        tlsInfo[tlID]['tWaitTime'] = sum(tWaitTime)
    return tlsInfo
def mergeInfos(tlsInfoAll, tlsInfoOne, metric):
for tl in tlsInfoOne.keys():
tlsInfoAll[tl][metric] = tlsInfoOne[tl]
def getStatisticsOutput(tlsInfo, outputfile):
opfile = file(outputfile, 'w')
for tl in tlsInfo.keys():
opfile.write('Traffic Light %s\n' % tl)
opfile.write('=================\n')
opfile.write('mean queue length in front of the junction: %s\n' % tlsInfo[tl]['mQueueLen'])
opfile.write('mean waiting time in front of the junction: %s\n' % tlsInfo[tl]['mWaitTime'])
if 'noise' in tlsInfo[tl]:
opfile.write('mean noise emission: %s\n' % tlsInfo[tl]['noise'])
if 'CO' in tlsInfo[tl]:
opfile.write('mean CO emission: %s\n' % tlsInfo[tl]['CO'])
opfile.write('mean CO2 emission: %s\n' % tlsInfo[tl]['CO2'])
opfile.write('mean HC emission: %s\n' % tlsInfo[tl]['HC'])
opfile.write('mean PMx emission: %s\n' % tlsInfo[tl]['PMx'])
opfile.write('mean NOx emission: %s\n' % tlsInfo[tl]['NOx'])
opfile.write('mean fuel consumption: %s\n' % tlsInfo[tl]['fuel'])
opfile.write('number of stops: %s\n' % tlsInfo[tl]['nbStops'])
opfile.write('total waiting time at junction: %s\n\n' % tlsInfo[tl]['tWaitTime'])
def tlsIDToNodeID(net):
    """Map each traffic light id to the ids of the junction nodes it controls.

    Node ids are listed in first-seen order, without duplicates.
    """
    mapping = {}
    for tls in net._tlss:
        nodeIDs = []
        seen = set()
        for conn in tls._connections:
            # conn[0] is the incoming lane; follow it to its end node.
            nodeID = conn[0]._edge._to._id
            if nodeID not in seen:
                seen.add(nodeID)
                nodeIDs.append(nodeID)
        mapping[tls._id] = nodeIDs
    return mapping
class E2OutputReader(handler.ContentHandler):
    """SAX handler collecting per-lane queue/halting measures from e2 output.

    Detector ids are expected to look like ``e2det_<laneID>``; everything
    after the 6-character prefix is taken as the lane id. Intervals with an
    implausibly large end time (>= 1e8) are ignored.
    """

    def __init__(self):
        self._lanes = {}
        self._maxT = 0

    def startElement(self, name, attrs):
        if name == 'interval':
            detID = attrs['id']
            laneID = detID[6:len(detID)]
            # dict.has_key() is Python-2 only; 'in' works on both versions.
            if laneID not in self._lanes:
                self._lanes[laneID] = {}
                self._lanes[laneID]['mQueueLen'] = []
                self._lanes[laneID]['nbStops'] = []
                self._lanes[laneID]['tWaitTime'] = []
            if float(attrs['end']) < 100000000:
                self._lanes[laneID]['mQueueLen'].append(float(attrs['jamLengthInMetersSum']))
                self._lanes[laneID]['nbStops'].append(float(attrs['startedHalts']))
                self._lanes[laneID]['tWaitTime'].append(float(attrs['haltingDurationSum']))
                self._maxT = max(float(attrs['end']), self._maxT)
class HarmonoiseReader(handler.ContentHandler):
    """SAX handler aggregating per-node noise emissions per interval.

    Internal junction edges (ids starting with ':') are attributed to their
    junction node; ordinary edges contribute to both of their end nodes.
    Per-interval noise levels are combined energetically (sum of 10^(dB/10)).
    """

    def __init__(self, net, tlsID2NodeID):
        self._nodeIntervalNoise = {}
        self._maxT = 0
        self._net = net
        self._tlsNoise = {}
        self._tlsID2NodeID = tlsID2NodeID

    def startElement(self, name, attrs):
        if name == 'interval':
            self._maxT = max(float(attrs['end']), self._maxT)
        if name == 'edge':
            edgeID = attrs['id']
            noiseStr = attrs['noise']
            # Very long number strings are treated as "no data".
            if len(noiseStr) < 10:
                noise = float(noiseStr)
            else:
                noise = 0
            if edgeID[0] == ':':
                # Internal edge id has the form ':<nodeID>_<index>'.
                nodeID = edgeID[1:edgeID.find('_')]
                self._nodeIntervalNoise.setdefault(nodeID, []).append(noise)
            else:
                # BUGFIX: the original referenced the module-global 'net'
                # here instead of the network stored in __init__.
                edge = self._net.getEdge(edgeID)
                for node in (edge._from, edge._to):
                    self._nodeIntervalNoise.setdefault(node._id, []).append(noise)

    def endElement(self, name):
        if name == 'interval':
            self.sumIntervalNoise()
        if name == 'netstats':
            self.sumNoise()

    def sumIntervalNoise(self):
        """Fold this interval's per-node noise into one dB value per tls."""
        import math
        for tls in self._net._tlss:
            energy = 0
            tlsID = tls._id
            if tlsID not in self._tlsNoise:
                self._tlsNoise[tlsID] = []
            for nodeID in self._tlsID2NodeID[tlsID]:
                for noise in self._nodeIntervalNoise[nodeID]:
                    energy += pow(10, noise / 10)
            # BUGFIX: 'log' was undefined in the original file;
            # log(x)/log(10) is log10(x).
            self._tlsNoise[tlsID].append(10 * math.log10(energy))

    def sumNoise(self):
        """Average each tls's per-interval noise over the total time."""
        for tls in self._net._tlss:
            tlsID = tls._id
            self._tlsNoise[tlsID] = sum(self._tlsNoise[tlsID]) / self._maxT
class HBEFAReader(handler.ContentHandler):
def __init__(self, net, tlsID2NodeID):
self._maxT = 0
self._net = net
self._nodeIntervalCO = {}
self._nodeIntervalCO2 = {}
self._nodeIntervalHC = {}
self._nodeIntervalPMx = {}
self._nodeIntervalNOx = {}
self._nodeIntervalfuel = {}
self._tlsCO = {}
self._tlsCO2 = {}
self._tlsHC = {}
self._tlsPMx = {}
self._tlsNOx = {}
self._tlsfuel = {}
self._tlsID2NodeID = tlsID2NodeID
def startElement(self, name, attrs):
if name == 'interval':
self._maxT = max(float(attrs['end']), self._maxT)
if name == 'edge':
edgeID = attrs['id']
CO = float(attrs['CO_perVeh'])
CO2 = float(attrs['CO2_perVeh'])
HC = float(attrs['HC_perVeh'])
PMx = float(attrs['PMx_perVeh'])
NOx = float(attrs['NOx_perVeh'])
fuel = float(attrs['fuel_perVeh'])
if edgeID[0]==':':
nodeIDs = edgeID[1:edgeID.find('_')]
else:
fromNodeID = net.getEdge(edgeID)._from._id
toNodeID = net.getEdge(edgeID)._to._id
nodeIDs = [fromNodeID, toNodeID]
for nodeID in nodeIDs:
if nodeID not in self._nodeIntervalCO:
self._nodeIntervalCO[nodeID] = []
self._nodeIntervalCO2[nodeID] = []
self._nodeIntervalHC[nodeID] = [ |
sonicrules1234/sonicbot | oldplugins/randfact.py | Python | bsd-3-clause | 378 | 0.007937 | import re, urllib2
arguments = ["self", "info", "args"]
helpstring = "randfact"
minlevel = 1
def main(connection, info, args):
    """Fetch a random fun fact and message it to the requesting channel."""
    source = urllib2.urlopen("http://randomfunfacts.com/").read()
    # Repaired garbled tokens in the two lines below (incl. the "sender" key).
    fact = re.search(r"<strong><i>(.*)</i></strong>", source)
    connection.msg(info["channel"], "%s: %s" % (info["sender"], fact.group(1)))
|
grondo/flux-core | src/bindings/python/flux/resource/__init__.py | Python | lgpl-3.0 | 469 | 0 | ###############################################################
# Copyright 2020 Lawrence Livermore National Security, LLC
# (c | .f. AUTHORS, NOTICE.LLNS, COPYING)
#
# This file is part of the Flux resource manager framework.
# For details, see https://github.com/flux-framework.
#
# SPDX-License-Identifier: LGPL-3.0
###############################################################
from flux.resource.Rlist import Rlist
from flux.resource.ResourceSet import ResourceSet
|
helmuthb/devfest-at-site | handlers/api.py | Python | mit | 1,264 | 0.037975 | from google.appengine.ext import ndb
import settings
from core import model
import common
from webapp2_extras.i18n import gettext as _
class RPCHandler(common.BaseAPIHandler):
    """Dispatches GET /api/<action> requests to the same-named method."""

    def get(self, action, *args):
        # Flatten the query string into a plain dict of single values.
        args = self.request.GET
        for arg in args:
            args[arg] = self.request.get(arg)
        if action not in settings.APIS:
            self.prep_json_response(400, key = 'unsupported')
        else:
            getattr(self, action)(args)

    def signup_mailing_list(self, args):
        """Add an address to the mailing list unless it is already present."""
        if 'email' in args:
            if not model.EmailAddr.query().filter(model.EmailAddr.email == args['email']).get():
                model.EmailAddr(email = args['email']).put()
            self.prep_json_response(200, message = _("Thanks for signing up!"))
        else:
            self.prep_json_response(400, key = "noemail")

    @common.with_login
    def change_email_addr(self, args):
        """Update the signed-in user's email address."""
        if 'email' in args:
            # Repaired a garbled attribute name ("em | ail" -> "email").
            self.current_user.email = args['email']
            self.current_user.put()
            self.prep_json_response(200, message = _("Email updated!"))
        else:
            self.prep_json_response(400, key = "noemail")
class RESTHandler(common.BaseRESTHandler):
    # Placeholder REST endpoint: GET is accepted but intentionally does nothing yet.
    def get(self, *args, **kwargs):
        pass
|
andresailer/DIRAC | ResourceStatusSystem/scripts/dirac-rss-list-status.py | Python | gpl-3.0 | 5,951 | 0.043018 | #!/usr/bin/env python
"""
dirac-rss-list-status
Script that dumps the DB information for the elements into the standard output.
If returns information concerning the StatusType and Status attributes.
Usage:
dirac-rss-list-status
--element= Element family to be Synchronized ( Site, Resource or Node )
    --elementType=    ElementType narrows the search; None if default
--name= ElementName; None if default
--tokenOwner= Owner of the token; None if default
--statusType= StatusType; None if default
--status= Status; None if default
Verbosity:
-o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE..
"""
from DIRAC import gLogger, exit as DIRACExit, version
from DIRAC.Core.Base import Script
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.Core.Utilities.PrettyPrint import printTable
__RCSID__ = '$Id:$'
subLogger = None
switchDict = {}
def registerSwitches():
  '''
    Declares every command-line switch this script accepts with the DIRAC
    Script machinery (all use the long form only, no short option).
  '''
  switches = (
    ( 'element=', 'Element family to be Synchronized ( Site, Resource or Node )' ),
    ( 'elementType=', 'ElementType narrows the search; None if default' ),
    ( 'name=', 'ElementName; None if default' ),
    ( 'tokenOwner=', 'Owner of the token; None if default' ),
    ( 'statusType=', 'StatusType; None if default' ),
    ( 'status=', 'Status; None if default' ),
  )
  for switchName, description in switches:
    Script.registerSwitch( '', switchName, description )
def registerUsageMessage():
  '''
    Builds the usage message from the module __doc__ plus the DIRAC version
    and registers it with the Script machinery.
  '''
  hLine = '  ' + '=' * 78 + '\n'
  usageMessage = ''.join( [ hLine,
                            '  DIRAC %s\n' % version,
                            __doc__,
                            '\n', hLine ] )
  Script.setUsageMessage( usageMessage )
def parseSwitches():
  '''
    Parses the arguments passed by the user.

    Exits (code 1) when positional arguments are given, when the mandatory
    --element switch is missing, or when its value is not one of Site,
    Resource or Node. All optional switches default to None.

    :return: dict with the processed switches
  '''
  Script.parseCommandLine( ignoreErrors = True )
  args = Script.getPositionalArgs()
  if args:
    # Only switches are accepted; any positional argument is an error.
    subLogger.error( "Found the following positional args '%s', but we only accept switches" % args )
    subLogger.error( "Please, check documentation below" )
    Script.showHelp()
    DIRACExit( 1 )
  switches = dict( Script.getUnprocessedSwitches() )
  # Default values
  switches.setdefault( 'elementType', None )
  switches.setdefault( 'name', None )
  switches.setdefault( 'tokenOwner', None )
  switches.setdefault( 'statusType', None )
  switches.setdefault( 'status', None )
  if 'element' not in switches:
    subLogger.error( "element Switch missing" )
    subLogger.error( "Please, check documentation below" )
    Script.showHelp()
    DIRACExit( 1 )
  if not switches[ 'element' ] in ( 'Site', 'Resource', 'Node' ):
    subLogger.error( "Found %s as element switch" % switches[ 'element' ] )
    subLogger.error( "Please, check documentation below" )
    Script.showHelp()
    DIRACExit( 1 )
  subLogger.debug( "The switches used are:" )
  # NOTE(review): this script targets Python 2 ('iteritems'); under Python 3
  # map() is lazy, so these debug lines would never actually be emitted.
  map( subLogger.debug, switches.iteritems() )
  return switches
#...............................................................................
def getElements():
  '''
    Given the switches, gets a list of elements with their respective statustype
    and status attributes.

    Columns the user did not filter on are requested back from the DB so they
    can be shown in the output table.

    :return: S_OK/S_ERROR structure from ResourceStatusClient
  '''
  rssClient = ResourceStatusClient.ResourceStatusClient()
  meta = { 'columns' : [] }
  for key in ( 'Name', 'StatusType', 'Status', 'ElementType', 'TokenOwner' ):
    #Transforms from upper lower case to lower upper case
    if switchDict[ key[0].lower() + key[1:] ] is None:
      meta[ 'columns' ].append( key )
  # Each switch holds a comma-separated list; split it, or pass None when unset.
  elements = rssClient.selectStatusElement(
    switchDict[ 'element' ], 'Status',
    name = switchDict[ 'name' ].split(',') if switchDict['name'] else None,
    statusType = switchDict[ 'statusType' ].split(',') if switchDict['statusType'] else None,
    status = switchDict[ 'status' ].split(',') if switchDict['status'] else None,
    elementType = switchDict[ 'elementType' ].split(',') if switchDict['elementType'] else None,
    tokenOwner = switchDict[ 'tokenOwner' ].split(',') if switchDict['tokenOwner'] else None,
    meta = meta )
  return elements
def tabularPrint( elementsList ):
  '''
    Prints the list of elements on a tabular.

    Switches the user filtered on are echoed as "selection parameters";
    the remaining attributes become the table columns.
  '''
  subLogger.notice( '' )
  subLogger.notice( 'Selection parameters:' )
  subLogger.notice( '  %s: %s' % ( 'element'.ljust( 15 ), switchDict[ 'element' ] ) )
  titles = []
  for key in ( 'Name', 'StatusType', 'Status', 'ElementType', 'TokenOwner' ):
    #Transforms from upper lower case to lower upper case
    keyT = key[0].lower() + key[1:]
    if switchDict[ keyT ] is None:
      titles.append( key )
    else:
      subLogger.notice( '  %s: %s' % ( key.ljust( 15 ), switchDict[ keyT ] ) )
  subLogger.notice( '' )
  subLogger.notice( printTable( titles, elementsList, printOut = False,
                                numbering = False, columnSeparator = ' | ' ) )
#...............................................................................
def run():
  '''
    Main function of the script: query the elements and print them,
    exiting with code 1 if the query failed.
  '''
  elements = getElements()
  if not elements[ 'OK' ]:
    subLogger.error( elements )
    DIRACExit( 1 )
  elements = elements[ 'Value' ]
  tabularPrint( elements )
#...............................................................................
if __name__ == "__main__":
subLogger = gLogger.getSubLogger( __file__ )
#Script initialization
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
#Run script
run()
#Bye
DIRACExit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
sippy/b2bua | sippy/UaStateGeneric.py | Python | bsd-2-clause | 1,909 | 0.003667 | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class UaStateGeneric(object):
    """Base class for user-agent (UA) states.

    Concrete states override the event hooks below; the generic defaults
    ignore every incoming request, response and event by returning None.
    """
    sname = 'Generic'
    ua = None
    connected = False
    dead = False

    def __init__(self, ua):
        # Remember the owning user agent so states can act on it.
        self.ua = ua

    def recvRequest(self, req):
        """Hook for incoming requests; ignored in the generic state."""
        return None

    def recvResponse(self, resp, tr):
        """Hook for incoming responses; ignored in the generic state."""
        return None

    def recvEvent(self, event):
        """Hook for internal events; ignored in the generic state."""
        return None

    def cancel(self, rtime, req):
        """Hook for cancellation; ignored in the generic state."""
        return None

    def onStateChange(self, newstate):
        """Notification that the UA switched states; no-op by default."""
        pass

    def __str__(self):
        return self.sname
|
pfalcon/micropython | examples/rp2/pio_uart_tx.py | Python | mit | 1,250 | 0.0056 | # Example using PIO to create a UART TX interface
from machine import Pin
from rp2 import PIO, StateMachine, asm_pio
UART_BAUD = 115200
PIN_BASE = 10
NUM_UARTS = 8
@asm_pio(sideset_init=PIO.OUT_HIGH, out_init=PIO.OUT_HIGH, out_shiftdir=PIO.SHIFT_RIGHT)
def uart_tx():
    # PIO program: 8N1 UART transmitter, 8 state-machine cycles per bit.
    # (Repaired garbled tokens in the decorator and the pull() line.)
    # fmt: off
    # Block with TX deasserted until data available
    pull()
    # Initialise bit counter, assert start bit for 8 cycles
    set(x, 7)  .side(0)  [7]
    # Shift out 8 data bits, 8 execution cycles per bit
    label("bitloop")
    out(pins, 1)  [6]
    jmp(x_dec, "bitloop")
    # Assert stop bit for 8 cycles total (incl 1 for pull())
    nop()  .side(1)  [6]
    # fmt: on
# Now we add 8 UART TXs, on pins 10 to 17. Use the same baud rate for all of them.
# Each state machine runs at 8x the baud rate (uart_tx spends 8 cycles per bit)
# and drives its own TX pin, starting at PIN_BASE.
uarts = []
for i in range(NUM_UARTS):
    sm = StateMachine(
        i, uart_tx, freq=8 * UART_BAUD, sideset_base=Pin(PIN_BASE + i), out_base=Pin(PIN_BASE + i)
    )
    sm.active(1)
    uarts.append(sm)
# We can print characters from each UART by pushing them to the TX FIFO
def pio_uart_print(sm, s):
    """Send the string s one character code at a time through sm's TX FIFO."""
    for code in map(ord, s):
        sm.put(code)
# Print a different message from each UART
# (pushing to each state machine's TX FIFO; all eight transmit concurrently).
for i, u in enumerate(uarts):
    pio_uart_print(u, "Hello from UART {}!\n".format(i))
|
pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/setuptools/command/easy_install.py | Python | mit | 87,125 | 0.000161 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources.py31compat
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when this interpreter uses 64-bit pointers."""
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    if os.path.exists(p1) and os.path.exists(p2) and hasattr(os.path, 'samefile'):
        return os.path.samefile(p1, p2)

    # Fall back to a purely lexical comparison of normalized paths.
    def normalize(path):
        return os.path.normpath(os.path.normcase(path))

    return normalize(p1) == normalize(p2)
if six.PY2:
    # Python 2: str is already a byte string, so no encoding step is needed.
    def _to_ascii(s):
        return s

    def isascii(s):
        # Probe by decoding; non-ASCII bytes raise UnicodeError.
        try:
            six.text_type(s, 'ascii')
            return True
        except UnicodeError:
            return False
else:
    # Python 3: encode to bytes, which fails for non-ASCII code points.
    def _to_ascii(s):
        return s.encode('ascii')

    def isascii(s):
        try:
            s.encode('ascii')
            return True
        except UnicodeError:
            return False
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
    def initialize_options(self):
        """Reset every command option to its pre-parse default (mostly None/0)."""
        # the --user option seems to be an opt-in one,
        # so the default should be False.
        self.user = 0
        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None

        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
    def _delete_path(self, path):
        """Delete *path* (file, link, or directory tree), honouring --dry-run."""
        log.info("Deleting %s", path)
        if self.dry_run:
            return

        # A real directory gets recursive removal; links and files are unlinked.
        is_tree = os.path.isdir(path) and not os.path.islink(path)
        remover = rmtree if is_tree else os.unlink
        remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[: |
gmatteo/pymatgen | pymatgen/optimization/linear_assignment_numpy.py | Python | mit | 8,207 | 0.000731 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains an algorithm to solve the Linear Assignment Problem.
It has the same functionality as linear_assignment.pyx, but is much slower
as it is vectorized in numpy rather than cython
"""
__author__ = "Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Will Richards"
__email__ = "wrichards@mit.edu"
__date__ = "Jan 28, 2013"
import numpy as np
class LinearAssignment:
"""
This class finds the solution to the Linear Assignment Problem.
It finds a minimum cost matching between two sets, given a cost
matrix.
This class is an implementation of the LAPJV algorithm described in:
R. Jonker, A. Volgenant. A Shortest Augmenting Path Algorithm for
Dense and Sparse Linear Assignment Problems. Computing 38, 325-340
(1987)
.. attribute: min_cost:
The minimum cost of the matching
.. attribute: solution:
The matching of the rows to columns. i.e solution = [1, 2, 0]
would match row 0 to column 1, row 1 to column 2 and row 2
to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0]
"""
def __init__(self, costs, epsilon=1e-6):
"""
Args:
costs: The cost matrix of the problem. cost[i,j] should be the
cost of matching x[i] to y[j]. The cost matrix may be
rectangular
epsilon: Tolerance for determining if solution vector is < 0
"""
self.orig_c = np.array(costs, dtype=np.float64)
self.nx, self.ny = self.orig_c.shape
self.n = self.ny
self._inds = np.arange(self.n)
self.epsilon = abs(epsilon)
# check that cost matrix is square
if self.nx > self.ny:
raise ValueError("cost matrix must have at least as many columns as rows")
if self.nx == self.ny:
self.c = self.orig_c
else:
# Can run into precision issues if np.max is used as the fill value (since a
# value of this size doesn't necessarily end up in the solution). A value
# at least as large as the maximin is, however, guaranteed to appear so it
# is a safer choice. The fill value is not zero to avoid choosing the extra
# rows in the initial column reduction step
self.c = np.full((self.n, self.n), np.max(np.min(self.orig_c, axis=1)))
self.c[: self.nx] = self.orig_c
# initialize solution vectors
self._x = np.zeros(self.n, dtype=np.int_) - 1
self._y = self._x.copy()
# if column reduction doesn't find a solution, augment with shortest
# paths until one is found
if self._column_reduction():
self._augmenting_row_reduction()
# initialize the reduced costs
self._update_cred()
while -1 in self._x:
self._augment()
self.solution = self._x[: self.nx]
self._min_cost = None
@property
def min_cost(self):
"""
Returns the cost of the best assignment
"""
if self._min_cost:
return self._min_cost
self._min_cost = np.sum(self.c[np.arange(self.nx), self.solution])
return self._min_cost
def _column_reduction(self):
"""
Column reduction and reduction transfer steps from LAPJV algorithm
"""
# assign each column to its lowest cost row, ensuring that only row
# or column is assigned once
i1, j = np.unique(np.argmin(self.c, axis=0), return_index=True)
self._x[i1] = j
# if problem is solved, return
if len(i1) == self.n:
return False
self._y[j] = i1
# reduction_transfer
# tempc is array with previously assigned matchings masked
self._v = np.min(self.c, axis=0)
tempc = self.c.copy()
tempc[i1, j] = np.inf
mu = np.min(tempc[i1, :] - self._v[None, :], axis=1)
self._v[j] -= mu
return True
def _augmenting_row_reduction(self):
"""
Augmenting row reduction step from LAPJV algorithm
"""
unassigned = np.where(self._x == -1)[0]
for i in unassigned:
for _ in range(self.c.size):
# Time in this loop can be proportional to 1/epsilon
# This step is not strictly necessary, so cutoff early
# to avoid near-infinite loops
# find smallest 2 values and indices
temp = self.c[i] - self._v
j1 = np.argmin(tem | p)
u1 = temp[j1]
temp[j1] = np.inf
j2 = np.argmin(temp)
u2 = temp[j2]
if u1 < u2:
self._v | [j1] -= u2 - u1
elif self._y[j1] != -1:
j1 = j2
k = self._y[j1]
if k != -1:
self._x[k] = -1
self._x[i] = j1
self._y[j1] = i
i = k
if k == -1 or abs(u1 - u2) < self.epsilon:
break
def _update_cred(self):
"""
Updates the reduced costs with the values from the
dual solution
"""
ui = self.c[self._inds, self._x] - self._v[self._x]
self.cred = self.c - ui[:, None] - self._v[None, :]
def _augment(self):
"""
Finds a minimum cost path and adds it to the matching
"""
# build a minimum cost tree
_pred, _ready, istar, j, mu = self._build_tree()
# update prices
self._v[_ready] += self._d[_ready] - mu
# augment the solution with the minimum cost path from the
# tree. Follows an alternating path along matched, unmatched
# edges from X to Y
while True:
i = _pred[j]
self._y[j] = i
k = j
j = self._x[i]
self._x[i] = k
if i == istar:
break
self._update_cred()
def _build_tree(self):
"""
Builds the tree finding an augmenting path. Alternates along
matched and unmatched edges between X and Y. The paths are
stored in _pred (new predecessor of nodes in Y), and
self._x and self._y
"""
# find unassigned i*
istar = np.argmin(self._x)
# compute distances
self._d = self.c[istar] - self._v
_pred = np.zeros(self.n, dtype=np.int_) + istar
# initialize sets
# READY: set of nodes visited and in the path (whose price gets
# updated in augment)
# SCAN: set of nodes at the bottom of the tree, which we need to
# look at
# T0DO: unvisited nodes
_ready = np.zeros(self.n, dtype=np.bool)
_scan = np.zeros(self.n, dtype=np.bool)
_todo = np.zeros(self.n, dtype=np.bool) + True
while True:
# populate scan with minimum reduced distances
if True not in _scan:
mu = np.min(self._d[_todo])
_scan[self._d == mu] = True
_todo[_scan] = False
j = np.argmin(self._y * _scan)
if self._y[j] == -1 and _scan[j]:
return _pred, _ready, istar, j, mu
# pick jstar from scan (scan always has at least 1)
_jstar = np.argmax(_scan)
# pick i associated with jstar
i = self._y[_jstar]
_scan[_jstar] = False
_ready[_jstar] = True
# find shorter distances
newdists = mu + self.cred[i, :]
shorter = np.logical_and(newdists < self._d, _todo)
# update distances
self._d[shorter] = newdists[shorter]
# update predecessors
_pred[shorter] = i
for j in np.nonzero(np.logical_and(self._d == mu, _todo))[0]:
if self._y[j] == -1:
return _pred, _ready, istar, j, mu
_scan[j] = True
|
adampresley/trackathon | model/StringHelper.py | Python | mit | 668 | 0.031437 | import re, random, string
from Service import Service
class StringHelper(Service):
articles = ["a", "an", "the", "of", "is"]
def randomLetterString(self, numCharacters = 8):
return "".join(random.choice(string.ascii | _letters) for i in range(numCharacters))
def tagsToTuple(self, tags):
return tuple(self.titleCase(tag) for tag in tags.split(",") if tag.strip())
def titleCase(self, s):
wordList = s.split(" ")
resu | lt = [wordList[0].capitalize()]
for word in wordList[1:]:
result.append(word in self.articles and word or word.capitalize())
return " ".join(result)
def validEmail(self, email):
return re.match(r"[^@]+@[^@]+\.[^@]+", email)
|
Ingenico-ePayments/connect-sdk-python3 | ingenico/connect/sdk/domain/payment/definitions/refund_output.py | Python | mit | 9,736 | 0.004827 | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.domain.payment.definitions.order_output import OrderOutput
from ingenico.connect.sdk.domain.payment.definitions.refund_bank_method_specific_output import RefundBankMethodSpecificOutput
from ingenico.connect.sdk.domain.payment.definitions.refund_card_method_specific_output import RefundCardMethodSpecificOutput
from ingenico.connect.sdk.domain.payment.definitions.refund_cash_method_specific_output import RefundCashMethodSpecificOutput
from ingenico.connect.sdk.domain.payment.definitions.refund_e_invoice_method_specific_output import RefundEInvoiceMethodSpecificOutput
from ingenico.connect.sdk.domain.payment.definitions.refund_e_wallet_method_specific_output import RefundEWalletMethodSpecificOutput
from ingenico.connect.sdk.domain.payment.definitions.refund_mobile_method_specific_output import RefundMobileMethodSpecificOutput
class RefundOutput(OrderOutput):
__amount_paid = None
__bank_refund_method_specific_output = None
__card_refund_method_specific_output = None
__cash_refund_method_specific_output = None
__e_invoice_refund_method_specific_output = None
__e_wallet_refund_method_specific_output = None
__mobile_refund_method_specific_output = None
__payment_method = None
@property
def amount_paid(self):
"""
| Amount paid
Type: int
"""
return self.__amount_paid
@amount_paid.setter
def amount_paid(self, value):
self.__amount_paid = value
@property
def bank_refund_method_specific_output(self):
"""
| Object containing specific bank refund details
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.refund_bank_method_specific_output.RefundBankMethodSpecificOutput`
"""
return self.__bank_refund_method_specific_output
@bank_refund_method_specific_output.setter
def bank_refund_method_specific_output(self, value):
self.__bank_refund_method_specific_output = value
@property
def card_refund_method_specific_output(self):
"""
| Object containing specific card refund details
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.refund_card_method_specific_output.RefundCardMethodSpecificOutput`
"""
return self.__card_refund_method_specific_output
@card_refund_method_specific_output.setter
def card_refund_method_specific_output(self, value):
self.__card_refund_method_specific_output = value
@property
def cash_refund_method_specific_output(self):
"""
| Object containing specific cash refund details
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.refund_cash_method_specific_output.RefundCashMethodSpecificOutput`
"""
return self.__cash_refund_method_specific_output
@cash_refund_method_specific_output.setter
def cash_refund_method_specific_output(self, value):
self.__cash_refund_method_specific_output = value
@property
def e_invoice_refund_method_specific_output(self):
"""
| Object containing specific e-invoice refund details
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.refund_e_invoice_method_specific_output.RefundEInvoiceMethodSpecificOutput`
"""
return self.__e_invoice_refund_method_specific_output
@e_invoice_refund_method_specific_output.setter
def e_invoice_refund_method_specific_output(self, value):
self.__e_invoice_refund_method_specific_output = value
@property
def e_wallet_refund_method_specific_output(self):
"""
| Object containing specific eWallet refund details
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.refund_e_wallet_method_specific_output.RefundEWalletMethodSpecificOutput`
"""
return self.__e_wallet_refund_method_specific_output
@e_wallet_refund_method_specific_output.setter
def e_wallet_refund_method_specific_output(self, value):
self.__e_wallet_refund_method_specific_output = value
@property
def mobile_refund_method_specific_output(self):
"""
| Object containing specific mobile refund details
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.refund_mobile_method_specific_output.RefundMobileMethodSpecificOutput`
"""
return self.__mobile_refund_method_specific_output
@mobile_refund_method_specific_output.setter
def mobile_refund_method_specific_output(self, value):
self.__mobile_refund_method_specific_output = value
@property
def payment_method(self):
"""
| Payment method identifier used by the our payment engine with the following possible values:
* bankRefund
* bankTransfer
* card
* cash
* directDebit
* eInvoice
* invoice
* redirect
Type: str
"""
return self.__payment_method
@payment_method.setter
def payment_method(self, value):
self.__payment_method = value
def to_dictionary(self):
dictionary = super(RefundOutput, self).to_dictionary()
if self.amount_paid is not None:
dictionary['amountPaid'] = self.amount_paid
if self.bank_refund_method_specific_output is not None:
dictionary['bankRefundMethodSpecificOutput'] = self.bank_refund_method_specific_output.to_dictionary()
if self.card_refund_method_specific_output is not None:
dictionary['cardRefundMethodSpecificOutput'] = self.card_refund_method_specific_output.to_dictionary()
if self.cash_refund_method_specific_output is not None:
dictionary['cashRefundMethodSpecificOutput'] = self.cash_refund_method_specific_output.to_dictionary()
if self.e_invoice_refund_method_specific_output is not None:
dictionary['eInvoiceRefundMethodSpecificOutput'] = self.e_invoice_refund_method_specific_output.to_dictionary()
if self.e_wallet_refund_method_specific_output is not None:
dictionary['eWalletRefundMethodSpecificOutput'] = self.e_wallet_refund_method_specific_output.to_dictionary()
if self.mobile_refund_method_specific_output is not None:
dictionary['mobileRefundMethodSpecificOutput'] = self.mobile_refund_method_specific_output.to_dictionary()
if self.payment_method is not None:
dictionary['paymentMethod'] = self.payment_method
return dictionary |
def from_dictionary(self, dictionary):
super(RefundOutput, self).from_dictionary(dictionary)
if 'amountPaid' in dictionary:
self.amount_paid = dictionary['amountPaid']
if 'bankRefundMethodSpecificOutput' in dictionary:
if not isinstance(dictionary['bankRefundMethodSpecificOutput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['bankRefundMethodSpecificOutput']))
| value = RefundBankMethodSpecificOutput()
self.bank_refund_method_specific_output = value.from_dictionary(dictionary['bankRefundMethodSpecificOutput'])
if 'cardRefundMethodSpecificOutput' in dictionary:
if not isinstance(dictionary['cardRefundMethodSpecificOutput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['cardRefundMethodSpecificOutput']))
value = RefundCardMethodSpecificOutput()
self.card_refund_method_specific_output = value.from_dictionary(dictionary['cardRefundMethodSpecificOutput'])
if 'cashRefundMethodSpecificOutput' in dictionary:
if not isinstance(dictionary['cashRefundMethodSpecificOutput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['cashRefundMethodSpecificOutput']))
value = RefundCashMethodSpecificOutput()
self.cash_refund_method_specific_output = value. |
moreati/pyscard | smartcard/Examples/scard-api/sample_control.py | Python | lgpl-2.1 | 4,389 | 0.000911 | #! /usr/bin/env python
"""
Sample for python PCSC wrapper module: send a Control Code to a card or
reader
__author__ = "Ludovic Rousseau"
Copyright 2007-2010 Ludovic Rousseau
Author: Ludovic Rousseau, mailto:ludovic.rousseau@free.fr
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import print_function
from smartcard.scard import *
from smartcard.util import toBytes
try:
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to establish context: ' + SCardGetErrorMessage(hresult))
print('Context established!')
try:
hresult, readers = SCardListReaders(hcontext, [])
if hresult != SCARD_S_SUCCESS:
raise e | rror(
'Failed to list readers: ' + SCardGetErrorMessage(hresult))
print('PCSC Readers:', readers)
if len(readers) < 1:
raise error('No | smart card readers')
for zreader in readers:
print('Trying to Control reader:', zreader)
try:
hresult, hcard, dwActiveProtocol = SCardConnect(
hcontext, zreader, SCARD_SHARE_DIRECT, SCARD_PROTOCOL_T0)
if hresult != SCARD_S_SUCCESS:
raise error(
'Unable to connect: ' + SCardGetErrorMessage(hresult))
print('Connected with active protocol', dwActiveProtocol)
try:
if 'winscard' == resourceManager:
# IOCTL_SMARTCARD_GET_ATTRIBUTE = SCARD_CTL_CODE(2)
hresult, response = SCardControl(
hcard,
SCARD_CTL_CODE(2),
toBytes("%.8lx" % SCARD_ATTR_VENDOR_NAME))
if hresult != SCARD_S_SUCCESS:
raise error(
'SCardControl failed: ' +\
SCardGetErrorMessage(hresult))
r = ""
for i in range(len(response)):
r += "%c" % response[i]
print('SCARD_ATTR_VENDOR_NAME:', r)
elif 'pcsclite' == resourceManager:
# get firmware on Gemplus readers
hresult, response = SCardControl(
hcard,
SCARD_CTL_CODE(1),
[0x02])
if hresult != SCARD_S_SUCCESS:
raise error(
'SCardControl failed: ' + \
SCardGetErrorMessage(hresult))
r = ""
for i in range(len(response)):
r += "%c" % response[i]
print('Control:', r)
finally:
hresult = SCardDisconnect(hcard, SCARD_UNPOWER_CARD)
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to disconnect: ' + \
SCardGetErrorMessage(hresult))
print('Disconnected')
except error as message:
print(error, message)
finally:
hresult = SCardReleaseContext(hcontext)
if hresult != SCARD_S_SUCCESS:
raise error(
'Failed to release context: ' + \
SCardGetErrorMessage(hresult))
print('Released context.')
except error as e:
print(e)
import sys
if 'win32' == sys.platform:
print('press Enter to continue')
sys.stdin.read(1)
|
remybaranx/qtaste | TestSuites/TestSuite_QTaste/EngineSuite/QTASTE_DATA/QTASTE_DATA_02/TestScript.py | Python | gpl-3.0 | 1,538 | 0.015605 | # encoding= utf-8
# Copyright 2007-2009 QSpin - www.qspin.be
#
# This file is part of QTaste framework.
#
# QTaste is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QTaste is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GN | U Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with QTaste. If not, see <http://www.gnu.org/licenses/>.
##
# QTaste Data driven test: Check the TIMEOUT data handling.
# <p>
# This test case has the goal to verify that the TIMEOUT data value is used if it is defined in the CSV file.<p>
# This test will execute a test case that takes more time than the defined TIMEOUT data
# @preparation None
# @data TI | MEOUT [Integer] specify the TIMEOUT value to 5
##
from qtaste import *
def Step1():
"""
@step In CSV file, define TIMEOUT to 5
@expected None
"""
pass
def Step2():
"""
@step Call the verb neverReturn()
@expected Test is "Failed", reason: <i>Test execution timeout.</i><p>
Script call stack is reported.<p>
Elapsed time is more or less 5 seconds.
"""
testAPI.getEngineTest().neverReturn()
doStep(Step1)
doStep(Step2)
|
arvinsahni/ml4 | flask/app/__init__.py | Python | mit | 73 | 0.013699 | from flask import Flask
app = Flask(__name__)
from app im | port vizarvin
| |
tiagocardosos/stoq | stoqlib/lib/test/test_stringutils.py | Python | gpl-2.0 | 2,658 | 0.004891 | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
__tests__ = 'stoqlib.lib.stringutils'
import unittest
from stoqlib.lib.stringutils import next_value_for, max_value_for
class TestStringUtils(unittest.TestCase):
def test_next_value_for(self):
# Trivial cases
self.assertEqual(next_value_for(u''), u'1')
self.assertEqual(next_value_for(u'1'), u'2')
self.assertEqual(next_value_for(u'999'), u'1000')
# Ending with digit cases
self.assertEqual(next_value_for(u'A999'), u'A1000')
self.assertEqual(next_value_for(u'A8'), u'A9')
self.assertEqual(next_value_for(u'A9'), u'A10')
self.assertEqual(next_value_for(u'A99'), u'A100')
self.assertEqual(next_value_for(u'A199'), u'A200')
self.assertEqual(next_value_for(u'999A1'), u'999A2')
self.assertEqual(next_value_for(u'A009'), u'A010')
self.assertEqual(next_value_for(u'AB0099'), u'AB0100')
# Ending with alphanumeric cases
self.assertEqual(next_value_for(u'999A'), u'999B')
self.assertEqual(next_value_for(u'A999A'), u'A999B')
self.assertEqual(next_value_for(u'A99AZ'), u'A99B0')
self.assertEqual(next_value_for(u'A999Z'), u'A10000')
self.assertEqual(next_value_for(u'A999-A'), u'A999-B')
self.assertEqual(next_value_for(u'A999-Z'), u'A999-00')
# Not handled cases
self.assertEqual(next_value_for(u'A999-'), u'A999-0')
def test_max_value_for(self):
self.assertEqual(max_value_for([u'']), u'')
self.assertEqual(max_value_for([u'1']), u'1')
self.assertEqual(max_value_for([u'1', u'2'] | ), u'2')
self.assertEqual(max_value_for([u'9', u'10']), u'10')
| self.assertEqual(max_value_for([u'009', u'10']), u'010')
self.assertEqual(max_value_for([u'a09', u'999']), u'a09')
|
ElsaMJohnson/pythonprograms | galaxyperlin.py | Python | mit | 4,116 | 0.054665 | #!/home/elsa/Ureka/variants/common/bin/python
#Code to generate an image with perlin noise as background.
#Used in UO scientific computing course Spring 2016
#Perlin code and noise package is from Casey Duncan
#https://github.com/caseman/noise/examples/2dtexture.py
#Remaining code by Elsa M. Johnson
from noise import pnoise2, snoise2
import numpy as np
from matplotlib import pyplot as plt
import random as random
from PIL import Image
import matplotlib.image as mpimg
#Create noise -
#size is image size
#octaves creates different sized noise regions
def nbackground(sx=1000,sy=1000,octaves=50):
# size=1000
# array = np.zeros((size, size), np.float)
array = np.zeros((sx, sy), np.float)
# octaves = 50
freq = 16.0 * octaves
for y in xrange(sy):
for x in xrange(sx):
data=snoise2(x / freq, y / freq, octaves) * 127.0 + 128.0
array[x, y] += data
return array
plt.imshow(array, cmap=plt.cm.Greys_r)
#To get the pasting right, use cpaste, paste isn't working.
#Now creating a small image note that the image must be smaller than sx and sy
#make sure scspiral.png is in directory
#Will later make the size more variable
#the
def mkimg(infile='scspiral.png',sz=1000):
data =[]
# infile = 'scspiral.png'
im=Image.open(infile).convert('L')
plt.imshow(im)
imlist = list(im.getdata())
x=int(im.size[1])
y=im.size[0]
im.format
data=np.array(imlist).reshape(x,y)
cdata=data[50:150,50:150] #cropping image a bit
#pad with zeros to fit noise image:
xx,yy=cdata.shape #size of object
#randomly pick a beginning number for
#location of image and pad with zeros
xlim = sz-xx-10
ylim = sz-yy-10
# These were specific numbers based on the image
# Which in the end might be better than random placement
# begx=436
# begy=596
begx=random.randint(1,xlim)
begy=random.randint(1,ylim)
print 'center of embedded image',begx+50,begy+50
# Create limits to pad with zeros
zx1 = begx-1
zx2 = sz-zx1-xx
zy1 = begy-1
zy2 = sz-zy1-yy
bimg=np.lib.pad(cdata,((zx1,zx2),(zy1,zy2)),'constant',constant_values=(0,0))
return bimg
# This combines both images and scales the added image based on the S/N ratio
# imarr is the image from mkimg and noisearr is from nbackground
# sz = is the size of box for doing S/N calculations and s2n is the desired
# S/N ratio
def combineimg(imarr,noisearr,s2n=5.0,thresh=100,sz=10,):
b=imarr
b[b<thresh]=0
x,y=np.where(b==b.max())
sig=b[x[0]-5:x[0]+5, y[0]-5:y[0]+5].sum()
nse=noisearr[x[0]-5:x[0]+5, y[0]-5:y[0]+5].sum()
#quadratic formula to find fct so that S/N is correct
fs | = (s2n*s2n +np.sqrt(s2n**4.+4*ns | e*s2n*s2n))/2
fct=sig/fs
b=b/fct
#note to find location of b max: where(b==b.max())
totimg = b+noisearr
plt.figure()
plt.imshow(totimg,cmap=plt.cm.Greys_r)
return totimg
#Next routine calculates the mean and stand dev of random pixels
def imgstats(arr,sz=100):
# imm=np.mean(arr[x-sz/2:x+sz/2,y-sz/2:y+sz/2])
# ims=np.std(arr[x-sz/2:x+sz/2,y-sz/2:y+sz/2])
ax,ay=arr.shape
begx=np.random.randint(1,ax-sz)
begy=np.random.randint(1,ay-sz)
rm = np.mean(arr[begx:begx+sz,begy:begy+sz])
rs = np.std(arr[begx:begx+sz,begy:begy+sz])
# print 'mean,std about image', imm,ims
print 'random center, mean, std',begx,begy,rm,rs
#previous values
#np.mean(stuff[436:536,596:696])
#np.std(stuff[436:536,596:696])
#np.mean(stuff[461:511,621:671])
#np.std(stuff[461:511,621:671])
def svimg(totarr):
#print it out:
x,y=totarr.shape
vl = np.around(totarr.flatten(),5)#round to 5 digits
xx = np.repeat(np.arange(x),x)+1
yy = np.tile(np.arange(y),y)+1
big =np.column_stack((xx,yy,vl))
np.savetxt("noisyimage.txt",big,fmt=('%4.1f','%4.1f','%10.5f'))
##Add this if you want to
##read it out to make sure it works
##Otherwise slows down routine.
#row,col,data=np.loadtxt("noisyimage.txt",unpack=True)
#rsize = int(max(row))
#csize = int(max(col))
#data=np.array(data).reshape(rsize,csize)
# plt.imshow(data, interpolation='None',cmap=plt.cm.Greys_r)
def main():
noiseimg = nbackground()
hiddenimg = mkimg()
timg = combineimg(hiddenimg,noiseimg)
imgstats(timg)
svimg(timg)
main()
plt.show()
|
saulshanabrook/django-dumper | test/models.py | Python | mit | 2,381 | 0.00042 | from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
try: # new import added in Django 1.7
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
except ImportError:
from django.contrib.contenttypes import generic
GenericForeignKey = generic.GenericForeignKey
GenericRelation = generic.GenericRelation
import dumper
class LoggingModel(models.Model):
text = models.CharField(max_length=200)
def __unicode__(self):
return self.text
class SimpleModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
def get_absolute_url(self):
return reverse('simple-detail', kwargs={'slug': self.slug})
def dependent_paths(self):
yield self.get_absolute_url()
for model in self.related_set.all():
yield model.get_absolute_url()
class RelatedModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
related = models.ManyToManyField(Sim | pleModel, related_name='related_set')
def dependent_paths(self):
yield self.get_absolute_url()
def get_absolute_url(self):
return reverse('related-detail', kwargs={'slug': self.slug})
class GenericRelationModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
conten | t_object = GenericForeignKey('content_type', 'object_id')
def dependent_paths(self):
yield self.content_object.get_absolute_url()
class RelatedToGenericModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
generic_related = GenericRelation(GenericRelationModel)
def get_absolute_url(self):
return reverse('related-to-generic-detail', kwargs={'slug': self.slug})
class GenericRelationNotRegisteredModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def dependent_paths(self):
pass
dumper.register(SimpleModel)
dumper.register(RelatedModel)
dumper.register(GenericRelationModel)
|
JimJiangX/BoneDragon | example/tests/db/base.py | Python | apache-2.0 | 912 | 0 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
| # a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Example DB te | st base class."""
from example.common import context as example_context
from example.tests import base
class DbTestCase(base.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.context = example_context.get_admin_context()
|
tmcrosario/odoo-sicon | sicon/report/unrelated_documents.py | Python | agpl-3.0 | 2,065 | 0 | from odoo import api, fields, models, tools
class UnrelatedDocumentsReport(models.Model):
_name = "sicon.unrelated_documents.report"
_description = 'Documents not related yet to any concession'
_auto = False
dependence_id = fields.Many2one(comodel_name='tmc.dependence',
readonly=True)
document_type_id = fields.Many2one(comodel_name='tmc.document_type',
readonly=True)
number = fields.Integer(readonly=True)
period = fields.Integer(readonly=True)
document_object = fields.Char(readonly=True)
name = fields.Char(string='Document', readonly=True)
_depends = {
'tmc.document': ['name', 'document_object', 'main_topic_ids'],
'sicon.event': ['document_id']
}
def init(self):
tools.drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute("""
CREATE OR REPLACE VIEW sicon_unrelated_documents_report AS (
SELECT
doc.id,
doc.document_object,
doc.name
FROM (
tmc_document doc
LEFT JOIN document_main_topic_rel rel
ON (rel.tmc_document_id = doc.id)
LEFT JOIN tmc_document_topic doc_topic
ON (rel.tmc_document_topic_id = doc_topic.id)
LEFT JOIN tmc_dependence dep
O | N doc.dependence_id = dep.id
LEFT JOIN tmc_document_type doc_type
ON doc.document_type_id = doc_type.id
| )
WHERE doc_topic.name = 'Concesiones Generales'
AND doc_type.abbreviation = 'DEC'
AND doc.id NOT IN (
SELECT
document_id
FROM sicon_event e WHERE document_id IS NOT NULL)
ORDER BY doc.period, doc.number
)
""")
|
weso/CWR-DataApi | tests/parser/dictionary/encoder/record/test_publisher_for_writer.py | Python | mit | 1,487 | 0 | # -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import PublisherForWriterDictionaryEncoder
from cwr.interested_party import PublisherForWriterRecord
"""
Publisher for Writer record to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestPublisherForWriterRecordDictionaryEncoding(unitte | st.TestCase):
def setUp(self):
self._encoder = PublisherForWriterDictionaryEncoder()
def test_encoded(self):
data = Pub | lisherForWriterRecord(record_type='SPU',
transaction_sequence_n=3,
record_sequence_n=15,
publisher_ip_n='111',
writer_ip_n='222',
submitter_agreement_n='333',
society_assigned_agreement_n='444')
encoded = self._encoder.encode(data)
self.assertEqual('SPU', encoded['record_type'])
self.assertEqual(3, encoded['transaction_sequence_n'])
self.assertEqual(15, encoded['record_sequence_n'])
self.assertEqual('111', encoded['publisher_ip_n'])
self.assertEqual('222', encoded['writer_ip_n'])
self.assertEqual('333', encoded['submitter_agreement_n'])
self.assertEqual('444', encoded['society_assigned_agreement_n'])
|
jmvasquez/redashtest | tests/models/test_api_keys.py | Python | bsd-2-clause | 634 | 0.001577 | from tests import BaseTestCase
from redash.models import ApiKey
class TestApiKeyGetByObject(BaseTestCase):
| def test_returns_none_if_not_exists(self):
dashboard | = self.factory.create_dashboard()
self.assertIsNone(ApiKey.get_by_object(dashboard))
def test_returns_only_active_key(self):
dashboard = self.factory.create_dashboard()
api_key = self.factory.create_api_key(object=dashboard, active=False)
self.assertIsNone(ApiKey.get_by_object(dashboard))
api_key = self.factory.create_api_key(object=dashboard)
self.assertEqual(api_key, ApiKey.get_by_object(dashboard))
|
funbaker/astropy | astropy/io/votable/tests/exception_test.py | Python | bsd-3-clause | 1,250 | 0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from ....tests.helper import catch_warnings
from .. import converters
from .. import exceptions
from .. import tree
def test_reraise():
def fail():
raise RuntimeError("This failed")
try:
try:
fail()
except RuntimeError as e:
exceptions.vo_reraise(e, additional="From here")
except RuntimeError as e:
assert "From here" in str(e)
| else:
assert False
def test_parse_vowarning():
config = {'pedantic': True,
'filename': 'foo.xml'}
pos = (42, 64)
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
| config=config, pos=pos)
c = converters.get_converter(field, config=config, pos=pos)
parts = exceptions.parse_vowarning(str(w[0].message))
match = {
'number': 47,
'is_exception': False,
'nchar': 64,
'warning': 'W47',
'is_something': True,
'message': 'Missing arraysize indicates length 1',
'doc_url': 'io/votable/api_exceptions.html#w47',
'nline': 42,
'is_warning': True
}
assert parts == match
|
mk23/snmpy | lib/snmpy/module/disk_utilization.py | Python | mit | 1,260 | 0.001587 | import datetime
import logging
import os
import snmpy.module
import subprocess
LOG = logging.getLogger()
class disk_utilization(snmpy.module.TableModule):
def __init__(self, conf):
conf['table'] = [
{'dev': 'string'},
{'wait': 'integer'},
{'util': 'integer'},
]
snmpy.module.TableModule.__init__(self, conf)
def update(self):
os.environ['LC_TIME'] = 'POSIX'
disk = {}
date = datetime.datetime.now() - datetime.timedelta(minutes=20)
comm = [self.conf.get('sar_command', '/usr/bin/sar'), '-d', '-f', self.conf.get('sysstat_log', '/var/log/sysstat/sa%02d') % date.day, '-s', date.strftime('%H:%M:00')]
LOG.debug('running sar command | : %s', ' '.join(comm))
for line in subprocess.check_output(comm, stderr=open(os.devnull, 'w')).split('\n'):
LOG.debug('line: | %s', line)
part = line.split()
if part and part[0] != 'Average:' and part[1].startswith('dev'):
disk[part[-9]] = [int(float(part[-3])), int(float(part[-1]))]
for line in open('/proc/diskstats'):
name = 'dev{}-{}'.format(*line.split()[0:2])
self.append([line.split()[2]] + disk.get(name, [0, 0]))
|
opps/opps-feedcrawler | setup.py | Python | mit | 1,484 | 0.000674 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from setuptools import setup, find_packages
from opps import feedcrawler
install_requires = ["opps"]
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: Opps",
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
'Topic :: Software Development :: Libraries :: Python Modules']
try:
long_description = open('README.md').read()
except:
long_description = feedcrawler.__description__
setup(
name='opps-feedcrawler',
namespace_packages=['opps', 'opps.feedcrawler'],
version=feedcrawler.__version__,
description=feedcrawler.__description__,
long_description=long_description,
classifiers=classifiers,
keywords='rss parser opps cms django apps magazines websites',
author=feedcrawler.__author__,
author_email=feedcrawler.__email__,
url='http://oppsproject.org',
download_url="https://github.com/ | opps/opps-feedcrawler/tarball/master",
license=feedcrawler.__license__,
packages=find_packages(exclude=('doc', 'docs',)),
package_dir={'opps': 'opps'},
install_requires | =install_requires,
)
|
dukhlov/oslo.messaging | oslo_messaging/_drivers/protocols/amqp/drivertasks.py | Python | apache-2.0 | 4,126 | 0 | # Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version | 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://w | ww.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import time
from oslo_messaging._drivers.protocols.amqp import controller
from oslo_messaging._i18n import _LW
from oslo_messaging import exceptions
from six import moves
LOG = logging.getLogger(__name__)
class SendTask(controller.Task):
    """A task that sends a message to a target, and optionally waits for a
    reply message. The caller may block until the remote confirms receipt or
    the reply message has arrived.
    """
    def __init__(self, target, request, wait_for_reply, deadline):
        super(SendTask, self).__init__()
        self._target = target
        self._request = request
        # Absolute wall-clock time (seconds since epoch) after which execute()
        # aborts the send; falsy means "no deadline".
        self._deadline = deadline
        self._wait_for_reply = wait_for_reply
        # Hands the result dict (or error) from the eventloop thread back to
        # the caller blocked in wait(); Queue is thread-safe.
        self._results_queue = moves.queue.Queue()
    def wait(self, timeout):
        """Wait for the send to complete, and, optionally, a reply message from
        the remote. Will raise MessagingTimeout if the send does not complete
        or no reply is received within timeout seconds. If the request has
        failed for any other reason, a MessagingException is raised.
        """
        try:
            result = self._results_queue.get(timeout=timeout)
        except moves.queue.Empty:
            # Nothing arrived in time; report which phase we were waiting on.
            if self._wait_for_reply:
                reason = "Timed out waiting for a reply."
            else:
                reason = "Timed out waiting for send to complete."
            raise exceptions.MessagingTimeout(reason)
        # result is a dict placed on the queue by the controller:
        # {"status": "OK", "response": ...} on success, {"error": exc} otherwise.
        if result["status"] == "OK":
            return result.get("response", None)
        raise result["error"]
    def execute(self, controller):
        """Runs on eventloop thread - sends request."""
        # Skip the send entirely if the caller's TTL has already expired by
        # the time this task is scheduled.
        if not self._deadline or self._deadline > time.time():
            controller.request(self._target, self._request,
                               self._results_queue, self._wait_for_reply)
        else:
            LOG.warning(_LW("Send request to %s aborted: TTL expired."),
                        self._target)
class ListenTask(controller.Task):
    """Create a subscription for a target on the controller's eventloop.

    Messages that arrive from the target are placed on the listener's
    incoming queue.
    """
    def __init__(self, target, listener, notifications=False):
        """Record the subscription request; it is applied in execute()."""
        super(ListenTask, self).__init__()
        self._target = target
        self._listener = listener
        self._notifications = notifications
    def execute(self, controller):
        """Run on the eventloop thread - subscribe to the target.

        Inbound messages are queued to the listener's incoming queue.
        """
        # Notification subscriptions use a dedicated controller entry point;
        # otherwise fall back to the regular RPC subscription.
        if self._notifications:
            subscribe = controller.subscribe_notifications
        else:
            subscribe = controller.subscribe
        subscribe(self._target, self._listener.incoming)
class ReplyTask(controller.Task):
    """A task that sends 'response' message to 'address'."""
    def __init__(self, address, response, log_failure):
        super(ReplyTask, self).__init__()
        self._address = address
        self._response = response
        self._log_failure = log_failure
        # Set once the eventloop thread has handed the response to the
        # controller; wait() blocks on it.
        self._wakeup = threading.Event()
    def execute(self, controller):
        """Run on the eventloop thread - send the response message."""
        controller.response(self._address, self._response)
        self._wakeup.set()
    def wait(self):
        """Block the calling thread until the controller sent the message."""
        self._wakeup.wait()
|
andnovar/networkx | networkx/algorithms/centrality/__init__.py | Python | bsd-3-clause | 382 | 0 | from .betweenness import *
from | .betweenness_subset import *
from .closeness import *
from .subgraph_alg import *
from .current_flow_closeness import *
from .current_flow_betweenness import *
from .current_flow_betweenness_subset import *
from .degree_alg import *
from .dispersion import *
from .eigenvector import *
from .h | armonic import *
from .katz import *
from .load import *
|
aptivate/econsensus | django/econsensus/publicweb/tests/settings_test.py | Python | gpl-3.0 | 9,245 | 0.002055 | from django.test.testcases import SimpleTestCase
from publicweb.extra_models import (NotificationSettings, OrganizationSettings,
NO_NOTIFICATIONS, FEEDBACK_MAJOR_CHANGES)
from django.contrib.auth.models import User, AnonymousUser
from organizations.models import Organization
from django.db.models.fields.related import OneToOneField
from publicweb.tests.factories import UserFactory, OrganizationFactory
from mock import patch, MagicMock
from django.test.client import RequestFactory
from publicweb.views import UserNotificationSettings
from publicweb.forms import NotificationSettingsForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def create_fake_organization(**kwargs):
    # Stand-in for Organization.objects.get/filter in the @patch decorators
    # below: returns an *unsaved* OrganizationFactory instance, so the tests
    # never touch the database.
    return OrganizationFactory.build(**kwargs)
class SettingsTest(SimpleTestCase):
def test_notification_settings_have_user_field(self):
self.assertTrue(hasattr(NotificationSettings, 'user'))
def test_notification_settings_are_linked_to_user(self):
self.assertEqual(NotificationSettings.user.field.rel.to, User)
def test_notification_settings_have_organization_field(self):
self.assertTrue(hasattr(NotificationSettings, 'organization'))
def test_notification_settings_are_linked_to_organization(self):
self.assertEqual(
NotificationSettings.organization.field.rel.to, Organization)
def test_organization_settings_have_organization_field(self):
self.assertTrue(hasattr(OrganizationSettings, 'organization'))
def test_organization_settings_are_linked_to_organization(self):
self.assertEqual(
OrganizationSettings.organization.field.rel.to, Organization)
def test_each_organization_has_only_one_set_of_settings(self):
self.assertIsInstance(
OrganizationSettings.organization.field, OneToOneField)
def test_notification_settings_are_unique_for_an_organization_and_user(self):
self.assertEqual((('user', 'organization'),),
NotificationSettings()._meta.unique_together)
def test_notifitication_settings_default_value_is_main_items_only(self):
the_settings = NotificationSettings()
self.assertEqual(FEEDBACK_MAJOR_CHANGES,
the_settings.notification_level)
    # Replace the Organization manager so the view resolves the organization
    # without touching the database; get/filter both return unsaved fakes.
    @patch('publicweb.views.Organization.objects',
        new=MagicMock(
            spec=Organization.objects,
            get=create_fake_organization,
            filter=create_fake_organization
        )
    )
    def test_notification_settings_view_uses_a_form(self):
        user = UserFactory.build(id=1)
        organization = create_fake_organization(id=2, slug='test')
        request = RequestFactory().get('/')
        request.user = user
        # Call the class-based view directly and inspect the rendered context.
        context = UserNotificationSettings.as_view()(
            request,
            org_slug=organization.slug
        ).context_data
        self.assertIn('form', context)
    def test_notifcation_settings_view_redirects_to_organization_list(self):
        # NOTE(review): "notifcation" typo in the method name; kept to avoid
        # changing the externally visible test id.
        # After a successful save the view should send users back to the
        # organization list page.
        notification_settings_view = UserNotificationSettings()
        self.assertEqual(reverse('organization_list'),
                         notification_settings_view.get_success_url())
    def test_user_notification_settings_view_context_contains_organisation(self):
        # Build the view by hand (no request cycle) and check that
        # get_context_data exposes the organization it was given.
        notification_settings_view = UserNotificationSettings()
        notification_settings_view.object = MagicMock(spec=NotificationSettings)
        notification_settings_view.organization = create_fake_organization(id=2)
        context = notification_settings_view.get_context_data()
        self.assertIn('organization', context)
        self.assertTrue(context['organization'])
    # Same manager patch as above: organization lookups return unsaved fakes.
    @patch('publicweb.views.Organization.objects',
        new=MagicMock(
            spec=Organization.objects,
            get=create_fake_organization,
            filter=create_fake_organization
        )
    )
    def test_notification_settings_view_uses_notification_settings_form(self):
        user = UserFactory.build(id=1)
        organization = create_fake_organization(id=2, slug='test')
        request = RequestFactory().get('/')
        request.user = user
        context = UserNotificationSettings.as_view()(
            request,
            org_slug=organization.slug
        ).context_data
        # The form in the context must be the dedicated settings form class.
        self.assertIsInstance(context['form'], NotificationSettingsForm)
    def test_notification_settings_view_requires_login(self):
        # Anonymous users must be redirected (login required) rather than
        # shown the settings page.
        request = RequestFactory().get('/')
        user = AnonymousUser()
        organization = create_fake_organization(id=2)
        request.user = user
        response = UserNotificationSettings.as_view()(request,
            organization=organization.id)
        self.assertIsInstance(response, HttpResponseRedirect)
    # Patch both the Organization manager (so lookups return unsaved fakes)
    # and the view's model (so the saved object is a MagicMock we can assert
    # on; mock.patch passes it to the test as ``settings_obj``).
    @patch('publicweb.views.Organization.objects',
        new=MagicMock(
            spec=Organization.objects,
            get=create_fake_organization,
            filter=create_fake_organization
        )
    )
    @patch('publicweb.views.UserNotificationSettings.model',
        return_value=MagicMock(
            spec=NotificationSettings,
            _meta=MagicMock(fields=[], many_to_many=[]),
            root_id=None
        )
    )
    def test_posting_valid_data_saves_settings(self, settings_obj):
        organization = create_fake_organization(id=2, slug='test')
        # ``unicode`` below means this test suite targets Python 2.
        request = RequestFactory().post(
            reverse('notification_settings', args=[organization.slug]),
            {'notification_level': unicode(NO_NOTIFICATIONS)}
        )
        user = UserFactory.build(id=1)
        request.user = user
        # This patch depends on the UsertNotificationSettings.model patch
        # It needs to return the object created by that patch, which is passed
        # in as a parameter.
        # The only way I've found to handle the dependency is to do this patch
        # here
        with patch('publicweb.views.UserNotificationSettings.model.objects',
                get=lambda organization, user: settings_obj):
            UserNotificationSettings.as_view()(
                request,
                org_slug=organization.slug
            )
        # A valid POST must end in the settings object being saved.
        self.assertTrue(settings_obj.save.called)
    # Same double patch as the valid-data test above; here the POST carries
    # no data, so form validation must fail.
    @patch('publicweb.views.Organization.objects',
        new=MagicMock(
            spec=Organization.objects,
            get=create_fake_organization,
            filter=create_fake_organization
        )
    )
    @patch('publicweb.views.UserNotificationSettings.model',
        return_value=MagicMock(
            spec=NotificationSettings,
            _meta=MagicMock(fields=[], many_to_many=[]),
            root_id=None
        )
    )
    def test_posting_invalid_data_returns_form_with_errors(self, settings_obj):
        user = UserFactory.build(id=1)
        organization = create_fake_organization(id=2, slug='test')
        request = RequestFactory().post(
            reverse('notification_settings', args=[organization.id]))
        request.user = user
        # This patch depends on the UsertNotificationSettings.model patch
        # It needs to return the object created by that patch, which is passed
        # in as a parameter.
        # The only way I've found to handle the dependency is to do this patch
        # here
        with patch('publicweb.views.UserNotificationSettings.model.objects',
                get=lambda organization, user: settings_obj):
            response = UserNotificationSettings.as_view()(
                request,
                org_slug=organization.slug
            )
        # Invalid POST: the view re-renders the form and it carries errors.
        self.assertIn('form', response.context_data)
        self.assertTrue(response.context_data['form'].errors)
@patch('publicweb.views.Organization.objects',
new=MagicMock(
spec=Organization.objects,
get=create_fake_organization,
filter=create_fake_organization
)
)
@patch('publicweb.views.UserNotificationSettings.model',
return_value=MagicMock(
spec=NotificationSettings,
_meta=MagicMock(fields=[], many_to_many=[]),
r |
botherder/volatility | volatility/plugins/linux/ifconfig.py | Python | gpl-2.0 | 3,392 | 0.008255 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Vola | tility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Publ | ic License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.plugins.linux.common as linux_common
import volatility.debug as debug
import volatility.obj as obj
class linux_ifconfig(linux_common.AbstractLinuxCommand):
    """Gathers active interfaces"""
    def _get_devs_base(self):
        # Legacy kernels: walk the singly linked device list anchored at the
        # global ``dev_base`` symbol.
        net_device_ptr = obj.Object("Pointer", offset = self.addr_space.profile.get_symbol("dev_base"), vm = self.addr_space)
        net_device = net_device_ptr.dereference_as("net_device")
        for net_dev in linux_common.walk_internal_list("net_device", "next", net_device):
            yield net_dev
    def _get_devs_namespace(self):
        # Newer kernels: iterate every network namespace, then every device
        # registered inside that namespace.
        nslist_addr = self.addr_space.profile.get_symbol("net_namespace_list")
        nethead = obj.Object("list_head", offset = nslist_addr, vm = self.addr_space)
        # walk each network namespace
        # http://www.linuxquestions.org/questions/linux-kernel-70/accessing-ip-address-from-kernel-ver-2-6-31-13-module-815578/
        for net in nethead.list_of_type("net", "list"):
            # walk each device in the current namespace
            for net_dev in net.dev_base_head.list_of_type("net_device", "dev_list"):
                yield net_dev
    def _gather_net_dev_info(self, net_dev):
        # Yields one (label, ip, mac, promisc) tuple per IPv4 address
        # configured on the device.
        mac_addr = net_dev.mac_addr
        promisc = str(net_dev.promisc)
        in_dev = obj.Object("in_device", offset = net_dev.ip_ptr, vm = self.addr_space)
        for dev in in_dev.devices():
            ip_addr = dev.ifa_address.cast('IpAddress')
            name = dev.ifa_label
            yield (name, ip_addr, mac_addr, promisc)
    def calculate(self):
        linux_common.set_plugin_members(self)
        # newer kernels
        # Pick the walking strategy based on which global symbol the kernel
        # profile actually provides.
        if self.addr_space.profile.get_symbol("net_namespace_list"):
            func = self._get_devs_namespace
        elif self.addr_space.profile.get_symbol("dev_base"):
            func = self._get_devs_base
        else:
            debug.error("Unable to determine ifconfig information")
        for net_dev in func():
            for (name, ip_addr, mac_addr, promisc) in self._gather_net_dev_info(net_dev):
                yield (name, ip_addr, mac_addr, promisc)
    def render_text(self, outfd, data):
        # One table row per (interface, address) pair produced by calculate().
        self.table_header(outfd, [("Interface", "16"),
                          ("IP Address", "20"),
                          ("MAC Address", "18"),
                          ("Promiscous Mode", "5")])
        for (name, ip_addr, mac_addr, promisc) in data:
            self.table_row(outfd, name, ip_addr, mac_addr, promisc)
|
wearespindle/quickly.press | manage.py | Python | mit | 276 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"qu | ickly.settings")
f | rom django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
tseaver/google-cloud-python | websecurityscanner/synth.py | Python | apache-2.0 | 1,969 | 0.00965 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of | this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()

# ----------------------------------------------------------------------------
# Generate websecurityscanner GAPIC layer
# ----------------------------------------------------------------------------
for version in ("v1alpha", "v1beta"):
    library = gapic.py_library(
        "websecurityscanner",
        version,
        config_path=(
            f"/google/cloud/websecurityscanner"
            f"/artman_websecurityscanner_{version}.yaml"
        ),
        artman_output_name=f"websecurityscanner-{version}",
        include_protos=True,
    )
    # Copy every generated artifact for this version into the repo.
    generated_paths = (
        f"google/cloud/websecurityscanner_{version}/proto",
        f"google/cloud/websecurityscanner_{version}/gapic",
        f"google/cloud/websecurityscanner_{version}/*.py",
        f"docs/gapic/{version}",
        f"tests/unit/gapic/{version}",
        "google/cloud/websecurityscanner.py",
    )
    for generated_path in generated_paths:
        s.move(library / generated_path)

# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
s.move(common.py_library(unit_cov_level=97, cov_level=100))

s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/RightScale/ShowServer.py | Python | gpl-2.0 | 4,124 | 0.005092 | # -*- coding: utf-8 -*-
############ | ###################################################################
#
# ShowServer
# Display a comrephensive set of information about the querried server such as: state information, serv | er templates used, SSH key href, etc.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ShowServer(Choreography):
    # Thin Choreography wrapper: the factory methods below wire the Temboo
    # runtime to the input/result/execution classes defined further down.
    def __init__(self, temboo_session):
        """
        Create a new instance of the ShowServer Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ShowServer, self).__init__(temboo_session, '/Library/RightScale/ShowServer')
    def new_input_set(self):
        # Factory for the typed input container used by execute().
        return ShowServerInputSet()
    def _make_result_set(self, result, path):
        return ShowServerResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return ShowServerChoreographyExecution(session, exec_id, path)
class ShowServerInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ShowServer
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter records its value under the corresponding Temboo input
    # name via the base-class InputSet._set_input().
    def set_AccountID(self, value):
        """
        Set the value of the AccountID input for this Choreo. ((required, string) The RightScale Account ID.)
        """
        super(ShowServerInputSet, self)._set_input('AccountID', value)
    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) The RightScale account password.)
        """
        super(ShowServerInputSet, self)._set_input('Password', value)
    def set_ServerID(self, value):
        """
        Set the value of the ServerID input for this Choreo. ((required, integer) The RightScale Server ID that is to be stopped.)
        """
        super(ShowServerInputSet, self)._set_input('ServerID', value)
    def set_SubDomain(self, value):
        """
        Set the value of the SubDomain input for this Choreo. ((conditional, string) The Rightscale sub-domain appropriate for your Rightscale account. Defaults to "my" for legacy accounts. Other sub-domains include: jp-8 (Legacy Cloud Platform), us-3, us-4 (Unified Cloud Platform).)
        """
        super(ShowServerInputSet, self)._set_input('SubDomain', value)
    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) The RightScale username.)
        """
        super(ShowServerInputSet, self)._set_input('Username', value)
class ShowServerResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ShowServer Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, json_string):
        """Deserialize *json_string* (a JSON document) and return the result.

        The parameter was previously named ``str``, shadowing the builtin;
        it has been renamed (call sites are positional, so behavior is
        unchanged).
        """
        return json.loads(json_string)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Rightscale in XML format.)
        """
        return self._output.get('Response', None)
class ShowServerChoreographyExecution(ChoreographyExecution):
    # Binds an in-flight Choreo execution to the ShowServer-specific
    # result-set class so results are parsed with the right accessors.
    def _make_result_set(self, response, path):
        return ShowServerResultSet(response, path)
|
pmisik/buildbot | master/buildbot/test/unit/test_mq_simple.py | Python | gpl-2.0 | 2,849 | 0 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.mq import simple
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
class SimpleMQ(TestReactorMixin, unittest.TestCase):
    """Unit tests for buildbot.mq.simple.SimpleMQ message routing and
    service-shutdown behaviour."""
    @defer.inlineCallbacks
    def setUp(self):
        self.setup_test_reactor()
        self.master = fakemaster.make_master(self)
        self.mq = simple.SimpleMQ()
        self.mq.setServiceParent(self.master)
        yield self.mq.startService()
    @defer.inlineCallbacks
    def tearDown(self):
        # Some tests stop the service themselves; only stop if still running.
        if self.mq.running:
            yield self.mq.stopService()
    @defer.inlineCallbacks
    def test_forward_data(self):
        # An exact-topic consumer receives produced messages.
        callback = mock.Mock()
        yield self.mq.startConsuming(callback, ('a', 'b'))
        # _produce returns a deferred
        yield self.mq.produce(('a', 'b'), 'foo')
        # calling produce should eventually call the callback with decoding of
        # topic
        callback.assert_called_with(('a', 'b'), 'foo')
    @defer.inlineCallbacks
    def test_forward_data_wildcard(self):
        # A None component in the filter acts as a wildcard for that slot.
        callback = mock.Mock()
        yield self.mq.startConsuming(callback, ('a', None))
        # _produce returns a deferred
        yield self.mq.produce(('a', 'b'), 'foo')
        # calling produce should eventually call the callback with decoding of
        # topic
        callback.assert_called_with(('a', 'b'), 'foo')
    @defer.inlineCallbacks
    def test_waits_for_called_callback(self):
        # If the consumer's deferred has already fired, stopService completes
        # immediately.
        def callback(_, __):
            return defer.succeed(None)
        yield self.mq.startConsuming(callback, ('a', None))
        yield self.mq.produce(('a', 'b'), 'foo')
        d = self.mq.stopService()
        self.assertTrue(d.called)
    @defer.inlineCallbacks
    def test_waits_for_non_called_callback(self):
        # stopService must wait for an outstanding consumer deferred and only
        # complete once that deferred fires.
        d1 = defer.Deferred()
        def callback(_, __):
            return d1
        yield self.mq.startConsuming(callback, ('a', None))
        yield self.mq.produce(('a', 'b'), 'foo')
        d = self.mq.stopService()
        self.assertFalse(d.called)
        d1.callback(None)
        self.assertTrue(d.called)
|
Elico-Corp/openerp-7.0 | stock_back2back_order_proc/stock.py | Python | agpl-3.0 | 26,042 | 0.008064 | # -*- encoding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from osv import fields, orm, osv
from tools.translate import _
import netsvc
import tools
class stock_location(orm.Model):
    _inherit = "stock.location"
    # retention_mode drives how stock_picking (below) propagates chained
    # moves through this location: 'retention' waits for the full ordered
    # quantity, 'thru' forwards whatever quantity was actually shipped.
    _columns = {
        'retention_mode': fields.selection(
            [('retention', 'Retention Mode'), ('thru', 'Thru mode')],
            'Retention Mode',
            required=True,
            help="In 'Retention mode' the system wait for the\
            whole quantity before the stuff is processed.\n"
            "In 'Thru mode' the shipped quantity is processed regardless\
            of the ordered quantity."
        ),
    }
    _defaults = {
        'retention_mode': 'retention',
    }
class stock_picking(orm.Model):
_inherit = "stock.picking"
def get_move_chain(self, cr, uid, move_id, context=None, move_obj=False):
'''Recursively get the chained moves
@return list of the chained moves
'''
if not move_obj:
move_obj = self.pool.get('stock.move')
move_tbc = move_obj.browse(cr, uid, move_id, context, move_obj)
if move_tbc.move_dest_id: # If there is move_dest_id in the chain
move_chain = self.get_move_chain(cr, uid, move_tbc.move_dest_id.id, context)
else:
move_chain = []
move_chain.append(move_tbc)
return move_chain
    def copy_pick_chain(self, cr, uid, all_moves, context=None):
        '''Copy all the picking related to this order
        @return the dictionary of couple: old_pick_id => new_pick_id
        '''
        new_picks = {}
        all_chained_moves = []
        sequence_obj = self.pool.get('ir.sequence')
        # Collect every move reachable from the given moves through their
        # destination chains.
        for move in all_moves:
            all_chained_moves.extend(self.get_move_chain(cr, uid, move.id, context))
        # Duplicate each distinct picking once, as an empty draft copy; moves
        # are attached later by copy_move_chain/update_move_chain_pick.
        # (dict.has_key is Python-2 only; this module targets OpenERP 7.)
        for move in all_chained_moves:
            if move.picking_id.id and not new_picks.has_key(move.picking_id.id):
                pick_tbc = self.browse(cr, uid, move.picking_id.id, context)
                new_note = ((pick_tbc.note if pick_tbc.note else '') + ' Copy of stock.pick[%d].') % move.picking_id.id
                new_pick_id = self.copy(cr, uid, move.picking_id.id, {
                    'state': 'draft',
                    'note': new_note,
                    # Fresh name from the per-type picking sequence.
                    'name': sequence_obj.get(cr, uid, 'stock.picking.%s'%(pick_tbc.type)),
                    'move_lines' : [],
                })
                new_picks[move.picking_id.id] = new_pick_id
        return new_picks
def copy_move_chain(self, cr, uid, move_id, product_qty, | new_picks, context=None, move_obj=False):
'''Recursively copy the chained move until a location in retention mode or the end.
@return id of the new first move.
'''
if not move_obj:
move_obj = self.pool.get('stock.move')
move_tbc = move_obj.browse(cr, | uid, move_id, context)
move_dest_id = False
if move_tbc.move_dest_id and move_tbc.location_dest_id.retention_mode == 'thru': # If there is move_dest_id in the chain and the current location is in thru mode, we need to make a copy of that, then use it as new move_dest_id.
move_dest_id = self.copy_move_chain(cr, uid, move_tbc.move_dest_id.id, product_qty, new_picks, context, move_obj)
my_picking_id = (new_picks[move_tbc.picking_id.id] if new_picks.has_key(move_tbc.picking_id.id) else False)
new_note = ((move_tbc.note if move_tbc.note else '') + ' Copy of stock.move[%d].') % move_id
new_move_id = move_obj.copy(cr, uid, move_id, {
'move_dest_id': move_dest_id,
'state': 'waiting',
'note': new_note,
'move_history_ids': False, # Don't inherit child, populate it in next step. The same to next line.
'move_history_ids2': False,
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : my_picking_id,
'price_unit': move_tbc.price_unit,
})
if move_dest_id: # Create the move_history_ids (child) if there is.
move_obj.write(cr, uid, [new_move_id], {'move_history_ids': [(4, move_dest_id)]})
return new_move_id
    def update_move_chain_pick(self, cr, uid, move_id, vals, new_picks, context=None):
        '''Recursively update the new chained move with the new related picking by the first move id until a location in retention mode or the end.
        @return True if ok.
        '''
        # Despite the docstring, this walks the chain iteratively. Note that
        # ``vals`` is mutated in place on every iteration (picking_id is
        # overwritten per move).
        move_obj = self.pool.get('stock.move')
        move_tbu = move_obj.browse(cr, uid, move_id, context)
        while True:
            vals.update(picking_id=new_picks[move_tbu.picking_id.id])
            move_obj.write(cr, uid, [move_tbu.id], vals, context)
            # Stop at the end of the chain or at the first location that is
            # not in 'thru' retention mode.
            if not move_tbu.move_dest_id or move_tbu.location_dest_id.retention_mode != 'thru':
                break
            move_tbu = move_tbu.move_dest_id
        return True
    def update_move_chain(self, cr, uid, move_id, vals, context=None):
        '''Recursively update the old chained move by the first move id until a location in retention mode or the end.
        @return True if ok.
        '''
        # Collect the ids of every move in the 'thru' portion of the chain,
        # then apply ``vals`` to all of them with a single write().
        ids = [move_id]
        move_obj = self.pool.get('stock.move')
        move_tbu = move_obj.browse(cr, uid, move_id, context)
        while move_tbu.move_dest_id and move_tbu.location_dest_id.retention_mode == 'thru':
            ids.append(move_tbu.move_dest_id.id)
            move_tbu = move_tbu.move_dest_id
        move_obj.write(cr, uid, ids, vals, context)
        return True
def isPickNotEmpty(self, cr, uid, pick_id, move_obj, context=None):
cpt = move_obj.search(
cr, uid,
[('picking_id', '=', pick_id)],
context=context, count=True)
return cpt > 0
def check_production_node_move_chain(
self, cr, uid, move_tbc, context=None):
if move_tbc.location_id.usage == 'production' or \
move_tbc.location_dest_id.usage == 'production':
return True
return False
def has_production_mode(self, cr, uid, all_moves, context=None):
for move in all_moves:
if self.check_production_node_move_chain(cr, uid, move, context):
return True
return False
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
like partner_id, address_id, delivery_date,
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picks = False
complete, too_many, too_few, all_moves = [], [], [], []
move_product_qty = {}
prodlot_ids = {}
product_avail = {}
for move in pick.move_lines:
|
nsalomonis/AltAnalyze | import_scripts/peakAnnotation.py | Python | apache-2.0 | 4,178 | 0.0146 | import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import unique
import traceback
""" Intersecting Coordinate Files """
def cleanUpLine(line):
    """Return *line* with newline/carriage-return characters, literal
    backslash-c sequences and double quotes stripped out.

    Fix: the original called ``string.replace`` from the ``string`` module,
    a function that was removed in Python 3; the ``str.replace`` method is
    behaviorally identical and works on both Python 2 and 3.
    """
    # Keep the original replacement order: '\n' first, so a backslash and a
    # 'c' separated only by a newline still collapse into '\\c' and get
    # removed, exactly as before.
    for junk in ('\n', '\\c', '\r', '"'):
        line = line.replace(junk, '')
    return line
def eCLIPimport(folder):
    """Scan every .bed file in *folder*, export its 3'-UTR peaks, and write a
    per-dataset summary of annotation frequencies.

    Relies on the module-level ``coding_db`` created in the __main__ block.
    Python 2 only (print statement, xreadlines, map-returns-list).
    Returns None; per-dataset counts are kept in a local dict and written to
    summary.txt.
    """
    eCLIP_dataset_peaks={}
    annotations=[]
    files = unique.read_directory(folder)
    for file in files:
        if '.bed' in file:
            peaks={}
            dataset = file[:-4]
            print dataset
            # key_db: (chr, start, strand) -> {gene: [annotations]}
            key_db={}
            fn = unique.filepath(folder+'/'+file)
            # 3'-UTR peak lines are echoed verbatim into a parallel bed file.
            eo = export.ExportFile(folder+'/3-prime-peaks/'+file)
            for line in open(fn,'rU').xreadlines():
                data = cleanUpLine(line)
                t = string.split(data,'\t')
                chr = t[0]
                start = int(t[1])
                end = int(t[2])
                strand = t[5]
                # Third-from-last column: "<gene>.<ver>...;<annotation>".
                gene = string.split(t[-3],'.')[0]
                annotation = string.split(t[-3],';')[-1]
                if 'three_prime_utrs' in annotation:
                    eo.write(line)
                if annotation not in annotations:
                    annotations.append(annotation)
                symbol = t[-2]
                key = chr,start,strand
                #"""
                # NOTE(review): coding_type is computed but never used below;
                # looks like leftover from a commented-out filter.
                if gene in coding_db:
                    coding_type = coding_db[gene][-1]
                    if 'protein_coding' in coding_type:
                        coding_type = 'protein_coding'
                ##"""
                if key in key_db:
                    gene_db = key_db[key]
                    if gene in gene_db:
                        gene_db[gene].append(annotation)
                    else:
                        gene_db[gene]=[annotation]
                else:
                    gene_db={}
                    gene_db[gene]=[annotation]
                    key_db[key]=gene_db
            # For each peak position keep only the gene with the most
            # annotations, then tally that gene's annotations.
            for key in key_db:
                ranking=[]
                for gene in key_db[key]:
                    ranking.append((len(key_db[key][gene]),gene))
                ranking.sort()
                gene = ranking[-1][-1]
                for annotation in key_db[key][gene]:
                    if annotation in peaks:
                        peaks[annotation]+=1
                    else:
                        peaks[annotation]=1
            eCLIP_dataset_peaks[dataset]=peaks
            eo.close()
    annotations.sort()
    # Summary matrix: one row per dataset, one column per annotation class,
    # values normalised to fractions of the dataset's total peak count.
    eo = export.ExportFile(folder+'/summary-annotations/summary.txt')
    header = string.join(['RBP']+map(str,annotations),'\t')+'\n'
    eo.write(header)
    for dataset in eCLIP_dataset_peaks:
        annot=[]
        peaks = eCLIP_dataset_peaks[dataset]
        for annotation in annotations:
            if annotation in peaks:
                annot.append(peaks[annotation])
            else:
                annot.append(0)
        annot = map(lambda x: (1.000*x/sum(annot)), annot)
        values = string.join([dataset]+map(str,annot),'\t')+'\n'
        eo.write(values)
    eo.close()
if __name__ == '__main__':
    ################ Comand-line arguments ################
    import getopt
    CLIP_dir = None
    species = 'Hs'
    """ Usage:
    bedtools intersect -wb -a /Clip_merged_reproducible_ENCODE/K562/AARS-human.bed -b /annotations/combined/hg19_annotations-full.bed > /test.bed
    """
    if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
        print 'WARNING!!!! Too commands supplied.'
    else:
        options, remainder = getopt.getopt(sys.argv[1:],'', ['species=','clip='])
        #print sys.argv[1:]
        for opt, arg in options:
            if opt == '--species':
                species = arg
            elif opt == '--clip':
                CLIP_dir = arg
    # NOTE(review): execution falls through to the import/annotation steps
    # even when no arguments were supplied, in which case CLIP_dir is still
    # None and eCLIPimport(None) will fail — confirm whether a sys.exit()
    # was intended after the warning above.
    import ExpressionBuilder
    coding_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
    dataset_peaks = eCLIPimport(CLIP_dir)
|
Kronuz/sublime-rst-completion | indent_list_item.py | Python | bsd-3-clause | 3,382 | 0.003253 | import re
import sublime
import sublime_plugin
class IndentListItemCommand(sublime_plugin.TextCommand):
bullet_pattern = r'([-+*]|([(]?(\d+|#|[a-y]|[A-Y]|[MDCLXVImdclxvi]+))([).]))'
bullet_pattern_re = re.compile(bullet_pattern)
line_pattern_re = re.compile(r'^\s*' + bullet_pattern)
spaces_re = re.compile(r'^\s*')
def run(self, edit, reverse=False):
for region in self.view.sel():
if region.a != region.b:
continue
line = self.view.line(region)
line_content = self.view.substr(line)
new_line = line_content
m = self.line_pattern_re.match(new_line)
if not m:
return
# Determine how to indent (tab or spaces)
tab_str = self.view.settings().get('tab_size', 4) * ' '
sep_str = ' ' if m.group(4) else ''
prev_line = self.view.line(sublime.Region(line.begin() - 1, line.begin() - 1))
prev_line_content = self.view.substr(prev_line)
prev_prev_line = self.view.line(sublime.Region(prev_line.begin() - 1, prev_line.begin() - 1))
prev_prev_line_content = self.view.substr(prev_prev_line)
if not reverse:
# Do the indentation
new_line = self.bullet_pattern_re.sub(tab_str + sep_str + r'\1', new_line)
# Insert the new item
if prev_line_content:
new_line = '\n' + new_line
else:
if not new_line.startswith(tab_str):
continue
# Do the unindentation
new_line = re.sub(tab_str + sep_str + self.bullet_pattern, r'\1', new_line)
# Insert the new item
if prev_line_content:
new_line = '\n' + new_line
else:
prev_spaces = self.spaces_re.match(prev_prev_line_content).group(0)
spac | es = self.spaces_re.match(new_line).group(0)
if prev_spaces == spaces:
line = sublime.Region(line.begin() - 1, line.end())
endings = ['.', ')']
# Transform the bullet to the next/previous bu | llet type
if self.view.settings().get('list_indent_auto_switch_bullet', True):
bullets = self.view.settings().get('list_indent_bullets', ['*', '-', '+'])
def change_bullet(m):
bullet = m.group(1)
try:
return bullets[(bullets.index(bullet) + (1 if not reverse else -1)) % len(bullets)]
except ValueError:
pass
n = m.group(2)
ending = endings[(endings.index(m.group(4)) + (1 if not reverse else -1)) % len(endings)]
if n.isdigit():
return '${1:a}' + ending
elif n != '#':
return '${1:0}' + ending
return m.group(2) + ending
new_line = self.bullet_pattern_re.sub(change_bullet, new_line)
self.view.replace(edit, line, '')
self.view.run_command('insert_snippet', {'contents': new_line})
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, 'text.restructuredtext'))
|
arthurSena/processors | tests/fixtures/api/fda_applications.py | Python | mit | 543 | 0.001842 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytest
@pytest.fixture
def fda_application(conn, organization):
fda_application = {
'id': 'ANDA018659',
'organisation_id': organization,
'drug_name': 'ALLOPURINOL',
'a | ctive_ingredients': 'ALLOPURINOL',
}
fda_application_id = conn['database']['fda_applications'].insert(fda_application) |
return fda_application_id
|
alexjarosch/sia-fluxlim | sia_fluxlim/__init__.py | Python | gpl-3.0 | 57 | 0 | from sia_fluxlim | .oggm_flowline import MUSCLSuperBeeMod | el
|
turdusmerula/kipartman | kipartman/dialogs/panel_modules.py | Python | gpl-3.0 | 13,205 | 0.064142 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Dec 22 2017)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.dataview
### | ########################################################################
## Class PanelModules
###########################################################################
class PanelModules ( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 1086,756 ), style = wx.TAB_TRAVERSAL )
|
bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.m_splitter2 = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D|wx.SP_LIVE_UPDATE )
self.m_splitter2.Bind( wx.EVT_IDLE, self.m_splitter2OnIdle )
self.m_splitter2.SetMinimumPaneSize( 300 )
self.panel_path = wx.Panel( self.m_splitter2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer2 = wx.BoxSizer( wx.VERTICAL )
bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
self.button_refresh_categories = wx.BitmapButton( self.panel_path, wx.ID_ANY, wx.Bitmap( u"resources/refresh.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
bSizer4.Add( self.button_refresh_categories, 0, wx.ALL|wx.ALIGN_BOTTOM, 5 )
bSizer2.Add( bSizer4, 0, wx.ALIGN_RIGHT, 5 )
self.tree_libraries = wx.dataview.DataViewCtrl( self.panel_path, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.dataview.DV_SINGLE )
bSizer2.Add( self.tree_libraries, 1, wx.ALL|wx.EXPAND, 5 )
self.panel_path.SetSizer( bSizer2 )
self.panel_path.Layout()
bSizer2.Fit( self.panel_path )
self.menu_libraries = wx.Menu()
self.menu_libraries_add_folder = wx.MenuItem( self.menu_libraries, wx.ID_ANY, u"Add folder", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_libraries.Append( self.menu_libraries_add_folder )
self.menu_libraries_add_library = wx.MenuItem( self.menu_libraries, wx.ID_ANY, u"Add library", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_libraries.Append( self.menu_libraries_add_library )
self.menu_libraries_rename = wx.MenuItem( self.menu_libraries, wx.ID_ANY, u"Rename", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_libraries.Append( self.menu_libraries_rename )
self.menu_libraries_remove = wx.MenuItem( self.menu_libraries, wx.ID_ANY, u"Remove", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_libraries.Append( self.menu_libraries_remove )
self.menu_libraries.AppendSeparator()
self.menu_libraries_add_module = wx.MenuItem( self.menu_libraries, wx.ID_ANY, u"Add module", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_libraries.Append( self.menu_libraries_add_module )
self.panel_path.Bind( wx.EVT_RIGHT_DOWN, self.panel_pathOnContextMenu )
self.m_panel3 = wx.Panel( self.m_splitter2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer3 = wx.BoxSizer( wx.VERTICAL )
self.module_splitter = wx.SplitterWindow( self.m_panel3, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D )
self.module_splitter.Bind( wx.EVT_IDLE, self.module_splitterOnIdle )
self.panel_modules = wx.Panel( self.module_splitter, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer12 = wx.BoxSizer( wx.VERTICAL )
self.filters_panel = wx.Panel( self.panel_modules, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer161 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText15 = wx.StaticText( self.filters_panel, wx.ID_ANY, u"Filters: ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText15.Wrap( -1 )
bSizer161.Add( self.m_staticText15, 0, wx.ALL, 5 )
self.filters_panel.SetSizer( bSizer161 )
self.filters_panel.Layout()
bSizer161.Fit( self.filters_panel )
bSizer12.Add( self.filters_panel, 0, wx.EXPAND|wx.RIGHT|wx.LEFT, 5 )
bSizer7 = wx.BoxSizer( wx.VERTICAL )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
bSizer10 = wx.BoxSizer( wx.HORIZONTAL )
self.toolbar_module = wx.ToolBar( self.panel_modules, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TB_FLAT )
self.toolbar_module.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.toolbar_module.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.toggle_module_path = self.toolbar_module.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u"resources/tree_mode.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_CHECK, wx.EmptyString, wx.EmptyString, None )
self.toolbar_module.AddSeparator()
self.toggle_show_both_changes = self.toolbar_module.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u"resources/show_both.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_CHECK, wx.EmptyString, wx.EmptyString, None )
self.toggle_show_conflict_changes = self.toolbar_module.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u"resources/show_conf.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_CHECK, wx.EmptyString, wx.EmptyString, None )
self.toggle_show_incoming_changes = self.toolbar_module.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u"resources/show_in.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_CHECK, wx.EmptyString, wx.EmptyString, None )
self.toggle_show_outgoing_changes = self.toolbar_module.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u"resources/show_out.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_CHECK, wx.EmptyString, wx.EmptyString, None )
self.toolbar_module.Realize()
bSizer10.Add( self.toolbar_module, 1, wx.EXPAND, 5 )
bSizer61 = wx.BoxSizer( wx.HORIZONTAL )
self.search_modules = wx.SearchCtrl( self.panel_modules, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
self.search_modules.ShowSearchButton( True )
self.search_modules.ShowCancelButton( False )
self.search_modules.SetMinSize( wx.Size( 200,-1 ) )
bSizer61.Add( self.search_modules, 0, wx.ALL|wx.EXPAND, 5 )
self.button_refresh_modules = wx.BitmapButton( self.panel_modules, wx.ID_ANY, wx.Bitmap( u"resources/refresh.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW )
bSizer61.Add( self.button_refresh_modules, 0, wx.ALL, 5 )
bSizer10.Add( bSizer61, 0, 0, 5 )
bSizer11.Add( bSizer10, 1, wx.EXPAND, 5 )
bSizer7.Add( bSizer11, 0, wx.EXPAND, 5 )
self.tree_modules = wx.dataview.DataViewCtrl( self.panel_modules, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.dataview.DV_MULTIPLE )
bSizer7.Add( self.tree_modules, 1, wx.ALL|wx.EXPAND, 5 )
bSizer12.Add( bSizer7, 1, wx.EXPAND, 5 )
self.panel_modules.SetSizer( bSizer12 )
self.panel_modules.Layout()
bSizer12.Fit( self.panel_modules )
self.module_splitter.Initialize( self.panel_modules )
bSizer3.Add( self.module_splitter, 1, wx.EXPAND, 5 )
self.m_panel3.SetSizer( bSizer3 )
self.m_panel3.Layout()
bSizer3.Fit( self.m_panel3 )
self.m_splitter2.SplitVertically( self.panel_path, self.m_panel3, 294 )
bSizer1.Add( self.m_splitter2, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer1 )
self.Layout()
self.menu_modules = wx.Menu()
self.menu_modules_update = wx.MenuItem( self.menu_modules, wx.ID_ANY, u"Update", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_modules_update.SetBitmap( wx.Bitmap( u"resources/update.png", wx.BITMAP_TYPE_ANY ) )
self.menu_modules.Append( self.menu_modules_update )
self.menu_modules_force_update = wx.MenuItem( self.menu_modules, wx.ID_ANY, u"Force update", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_modules_force_update.SetBitmap( wx.Bitmap( u"resources/update.png", wx.BITMAP_TYPE_ANY ) )
self.menu_modules.Append( self.menu_modules_force_update )
self.menu_modules.AppendSeparator()
self.menu_modules_commit = wx.MenuItem( self.menu_modules, wx.ID_ANY, u"Commit", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_modules_commit.SetBitmap( wx.Bitmap( u"resources/commit.png", wx.BITMAP_TYPE_ANY ) )
self.menu_modules.Append( self.menu_modules_commit )
self.m |
emmdim/guifiAnalyzer | traffic/tests/testUrl.py | Python | gpl-3.0 | 295 | 0.016949 |
import urllib2
url = "http://ifolderlinks.ru/404"
req = urllib2.Req | uest(url)
#try:
response = urllib2.urlopen(req,timeout=3)
#except urllib2.HTTPError as e:
# print 'The server couldn\'t fulfill the request.'
# print 'Error code: ', e.code
print response | .info()
#print response.read()
|
LibreTime/libretime | playout/setup.py | Python | agpl-3.0 | 1,522 | 0.000657 | from os import chdir
from pathlib import Path
from setuptools import setup
# Change directory since setuptools uses relative paths
here = Path(__file__).parent.resolve()
chdir(here)
setup(
name="libretime-playout",
version="1.0",
description="LibreTime Playout",
author="LibreTime Contributors",
url="https://github.com/libretime/libretime",
project_urls={
"Bug Tracker": "https://github.com/libretime/libretime/issues",
"Documentation": "https://libretime.org",
"Source Code": "https://github.com/libretime/libretime",
},
license="AGPLv3",
packages=[
"libretim | e_playout",
"libretime_playout.notify",
"libretime_liquidsoap",
],
package_data={"": ["**/*.liq", "*.cfg", "*.types"]},
entry_points={
"console_scripts": [
"libretime- | playout=libretime_playout.main:cli",
"libretime-liquidsoap=libretime_liquidsoap.main:cli",
"libretime-playout-notify=libretime_playout.notify.main:cli",
]
},
python_requires=">=3.6",
install_requires=[
"amqplib",
"configobj",
"defusedxml",
"kombu",
"mutagen",
"packaging",
"pytz",
"requests",
"typing-extensions",
],
extras_require={
"dev": [
f"libretime-api-client @ file://localhost{here.parent / 'api_client'}",
f"libretime-shared @ file://localhost{here.parent / 'shared'}",
],
},
zip_safe=False,
)
|
bryndivey/ohmanizer | electronics/migrations/0011_auto_20141120_2136.py | Python | apache-2.0 | 2,732 | 0.001098 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('electronics', '0010_auto_20141120_0704'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reference', models.CharField(max_length=500)),
('shipping_num', models.CharField(max_length=20, blank=True)),
('supplier', models.CharField(max_length=100, blank=True)),
('notes', models.TextField(default=b'', blank=True)),
('date', models.DateField(default=datetime.datetime.now)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OrderComponent',
fields=[
('id', models.AutoField(verbose_name='ID', | serialize=False, auto_created=True, primary_key=True)),
('quantity', models.IntegerField(default=1)),
('price', models.IntegerField(blank=Tr | ue)),
('component', models.ForeignKey(to='electronics.Component')),
('order', models.ForeignKey(to='electronics.Order')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WishItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('notes', models.TextField(default=b'', blank=True)),
('component', models.ForeignKey(to='electronics.Component')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='component',
name='notes',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='componenttype',
name='notes',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='parameter',
name='notes',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='stock',
name='notes',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
]
|
partofthething/home-assistant | homeassistant/components/vera/climate.py | Python | apache-2.0 | 4,982 | 0.000401 | """Support for Vera thermostats."""
from typing import Any, Callable, List, Optional
import pyvera as veraApi
from homeassistant.components.climate import (
| DOMAIN as PLATFORM_DOMAIN,
ENTITY_ID_FORMAT,
ClimateEntity,
)
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
f | rom homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert
from . import VeraDevice
from .common import ControllerData, get_controller_data
FAN_OPERATION_LIST = [FAN_ON, FAN_AUTO]
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
SUPPORT_HVAC = [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF]
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up the sensor config entry."""
controller_data = get_controller_data(hass, entry)
async_add_entities(
[
VeraThermostat(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
],
True,
)
class VeraThermostat(VeraDevice[veraApi.VeraThermostat], ClimateEntity):
"""Representation of a Vera Thermostat."""
def __init__(
self, vera_device: veraApi.VeraThermostat, controller_data: ControllerData
):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def supported_features(self) -> Optional[int]:
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
mode = self.vera_device.get_hvac_mode()
if mode == "HeatOn":
return HVAC_MODE_HEAT
if mode == "CoolOn":
return HVAC_MODE_COOL
if mode == "AutoChangeOver":
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return SUPPORT_HVAC
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting."""
mode = self.vera_device.get_fan_mode()
if mode == "ContinuousOn":
return FAN_ON
return FAN_AUTO
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return a list of available fan modes."""
return FAN_OPERATION_LIST
def set_fan_mode(self, fan_mode) -> None:
"""Set new target temperature."""
if fan_mode == FAN_ON:
self.vera_device.fan_on()
else:
self.vera_device.fan_auto()
self.schedule_update_ha_state()
@property
def current_power_w(self) -> Optional[float]:
"""Return the current power usage in W."""
power = self.vera_device.power
if power:
return convert(power, float, 0.0)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
vera_temp_units = self.vera_device.vera_controller.temperature_units
if vera_temp_units == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self.vera_device.get_current_temperature()
@property
def operation(self) -> str:
"""Return current operation ie. heat, cool, idle."""
return self.vera_device.get_hvac_mode()
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self.vera_device.get_current_goal_temperature()
def set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self.vera_device.set_temperature(kwargs.get(ATTR_TEMPERATURE))
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self.vera_device.turn_off()
elif hvac_mode == HVAC_MODE_HEAT_COOL:
self.vera_device.turn_auto_on()
elif hvac_mode == HVAC_MODE_COOL:
self.vera_device.turn_cool_on()
elif hvac_mode == HVAC_MODE_HEAT:
self.vera_device.turn_heat_on()
self.schedule_update_ha_state()
|
mozilla/mozilla-ignite | apps/challenges/migrations/0019_add_judge_group.py | Python | bsd-3-clause | 13,599 | 0.007501 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Give members of the judge group permission to judge all submissions.
Create the group and the permission if necessary.
"""
Group, Permission = orm['auth.Group'], orm['auth.Permission']
ContentType = orm['contenttypes.ContentType']
try:
submissions = ContentType.objects.get(app_label='challenges',
model='submission')
except ContentType.DoesNotExist:
submissions = ContentType.objects.create(app_label='challenges',
model='submission',
name='submission')
judge_group, _ = Group.objects.get_or_create(name='Judges')
judge_perm, _ = Permission.objects.get_or_create(
codename='judge_submission',
name='Can judge submissions',
content_type=submissions)
# Judges gonna judge
judge_group.permissions.add(judge_perm)
def backwards(self, orm):
"Write your backwards methods here."
# Don't want to muck up existing data, and the migration is idempotent
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'challenges.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'challenges.challenge': {
'Meta': {'object_name': 'Challenge'},
'allow_voting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField' | , [], {}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageF | ield', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'challenges.externallink': {
'Meta': {'object_name': 'ExternalLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'challenges.phase': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('challenge', 'name'),)", 'object_name': 'Phase'},
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': "orm['challenges.Challenge']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 17, 3, 23, 56, 440637)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 17, 3, 23, 56, 440577)'})
},
'challenges.submission': {
'Meta': {'ordering': "['-id']", 'object_name': 'Submission'},
'brief_description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Category']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 17, 11, 23, 56, 445407)'}),
'description': ('django.db.models.fields.TextField', [], {}),
'flagged_offensive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flagged_offensive_reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_winner': ('django.db.models.fields.BooleanField |
jmcomber/FlaskDB | flaskr/__init__.py | Python | mit | 4,142 | 0.00169 | #!/usr/bin/python3
# -*- coding: latin-1 -*-
import os
import sys
import psycopg2
import json
from bson import json_util
from pymongo import MongoClient
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
def create_app():
app = Flask(__name__)
return app
app = create_app()
# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "test"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]
'''# Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
POSTGRESDATABASE = "grupo1"
POSTGRESUSER = "grupo1"
POSTGRESPASS = "grupo1"
postgresdb = psycopg2.connect(
database=POSTGRESDATABASE,
user=POSTGRESUSER,
password=POSTGRESPASS)
'''
#Cambiar por Path Absoluto en el servidor
QUERIES_FILENAME = '/var/www/FlaskDB/queries'
# QUERIES_FILENAME = 'queries'
@app.route("/", methods=['GET','POST'])
def home():
if request.method == "GET":
with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
json_file = json.load(queries_file)
pairs = [(x["name"],
x["database"],
x["description"],
x["query"]) for x in json_file]
return render_template('file.html', results=pairs)
else:
with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
json_file = json.load(queries_file)
pairs = []
for x in json_file:
if "PARAMETER1" in x["query"]:
try:
dia = request.form['dia']
mes = request.form['mes']
ano = request.form['ano']
mes = str(mes)
if len(mes) < 2:
mes = "0" + mes
dia = str(dia)
if len(dia) < 2:
dia = "0" + dia
ano = str(ano)
param = "{}-{}-{}".format(ano, mes, dia)
pairs.append((x["name"],
x["database"],
x["description"],
param.join(x["query"].split("PARAMETER1"))))
except Exception as e:
print(e)
elif "PARAMETER2" in x["query"]:
try:
param = request.form['text']
param2 = request.form['text2']
aux = param.join(x["query"].split("PARAMETER2"))
aux = param2.join(aux.split("PARAMETER3"))
pairs.append((x["name"],
x["database"],
x["description"],
aux))
except Exception as e:
print(e)
else:
try:
param = request.form['text3']
pairs.append((x["name"],
x["database"],
x["description"],
param.join(x["query"].split("PARAMETER4"))))
| except Exception as e:
print(e)
return render_template('file.html', results=pairs)
@app.route("/mongo | ", methods=['GET','POST'])
def mongo():
query = request.args.get("query")
print(query)
results = eval('mongodb.'+query)
results = json_util.dumps(results, sort_keys=True, indent=4)
if "find" in query:
return render_template('mongo.html', results=results)
else:
return "ok"
@app.route("/postgres")
def postgres():
query = request.args.get("query")
cursor = postgresdb.cursor()
cursor.execute(query)
results = [[a for a in result] for result in cursor]
print(results)
return render_template('postgres.html', results=results)
@app.route("/example")
def example():
return render_template('example.html')
if __name__ == "__main__":
app.run()
|
kinow-io/kinow-python-sdk | kinow_client/apis/directors_api.py | Python | apache-2.0 | 71,299 | 0.002216 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DirectorsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def attach_director_to_category(self, category_id, director_id, **kwargs):
"""
Attach director to category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_category(category_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_director_to_category_with_http_info(category_id, director_id, **kwargs)
else:
(data) = self.attach_director_to_category_with_http_info(category_id, director_id, **kwargs)
return data
def attach_director_to_category_with_http_info(self, category_id, director_id, **kwargs):
"""
Attach director to category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_category_with_http_info(category_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['category_id', 'director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method attach_director_to_category" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'category_id' is set
if ('category_id' not in params) or (params['category_id'] is None):
raise ValueError("Missing the required parameter `category_id` when calling `attach_director_to_category`")
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `attach_director_to_category`")
collection_formats = {}
resource_path = '/categories/{category_id}/directors'.replace('{format}', 'json')
path_params = {}
if 'category_id' in params:
path_params['category_id'] = params['category_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'director_id' in params:
form_params.append(('director_id', params['director_id']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def attach_director_to_product(self, product_id, director_id, **kwargs):
"""
Attach director to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_product(product_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_director_to_product_with_http_info(product_id, director_id, **kwargs)
else:
(data) = self.attach_director_to_product_with_http_info(product_id, director_id, **kwargs)
return data
def attach_director_to_product_with_http_info(self, product_id, director_id, **kwargs):
"""
Attach director to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def ca | llback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_product_with_http_info(product_id, director_id, callback=callback_function)
:param callback function: The callback | function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_req |
Forage/Gramps | gramps/plugins/drawreport/descendtree.py | Python | gpl-2.0 | 66,269 | 0.007092 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2009-2010 Craig J. Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Graphical Reports/Familial Tree
Reports/Graphical Reports/Personal Tree
"""
from __future__ import division
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import TextOption
from gramps.gen.plug.menu import NumberOption
from gramps.gen.plug.menu import EnumeratedListOption
from gramps.gen.plug.menu import StringOption
from gramps.gen.plug.menu import BooleanOption
from gramps.gen.plug.menu import PersonOption
from gramps.gen.plug.menu import FamilyOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
# Shorthand for the points -> centimeters conversion used throughout.
PT2CM = ReportUtils.pt2cm

#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# Localized abbreviations printed before birth/death/marriage dates.
_BORN = _('short for born|b.')
_DIED = _('short for died|d.')
_MARR = _('short for married|m.')
# Internal identifier under which this report registers its options.
_RPT_NAME = 'descend_chart'

from gramps.plugins.lib.libtreebase import *
#------------------------------------------------------------------------
#
# Box classes
#
#------------------------------------------------------------------------
class DescendantBoxBase(BoxBase):
    """
    Common base for every box on the descendant chart.

    Adds the style string (``boxstr``) plus the linked-list attributes
    (``next``, ``father``) the layout code relies on.
    """

    def __init__(self, boxstr):
        BoxBase.__init__(self)
        self.boxstr = boxstr
        self.next = None
        self.father = None

    def calc_text(self, database, person, family):
        """Fill ``self.text`` with the display lines for this box."""
        connect = GuiConnect()
        line_builder = connect.calc_lines(database)
        self.text = line_builder.calc_lines(
            person, family, connect.working_lines(self))
class PersonBox(DescendantBoxBase):
    """A printable box holding one person on the chart."""

    def __init__(self, level, boldable = 0):
        # ``boldable`` is kept for interface compatibility; callers use
        # set_bold() to actually switch the style.
        DescendantBoxBase.__init__(self, "CG2-box")
        self.level = level

    def set_bold(self):
        """Switch this box to the bold graphic style."""
        self.boxstr = "CG2b-box"
class FamilyBox(DescendantBoxBase):
    """A printable box holding family (marriage) information."""

    def __init__(self, level):
        DescendantBoxBase.__init__(self, "CG2-fam-box")
        self.level = level
class PlaceHolderBox(BoxBase):
    """
    An invisible box: it reserves page space so printable boxes never
    spill into areas that must stay empty.
    """

    def __init__(self, level):
        BoxBase.__init__(self)
        self.boxstr = "None"
        self.level = level
        self.line_to = None
        self.next = None

    def calc_text(self, database, person, family):
        """No-op: a placeholder never displays any text."""
        return
#------------------------------------------------------------------------
#
# Titles Class(es)
#
#------------------------------------------------------------------------
class DescendantTitleBase(TitleBox):
    """Shared helpers for building descendant-chart titles."""

    def __init__(self, dbase, doc, boxstr = "CG2-Title"):
        TitleBox.__init__(self, doc, boxstr)
        self.database = dbase

    def descendant_print(self, person_list, person_list2 = None):
        """ calculate the Descendant title
        Person_list will always be passed
        If in the Family reports and there are two families, person_list2
        will be used.

        :param person_list: people of the first family
        :param person_list2: people of the second family, or None/[]
        :returns: the localized title string
        """
        # Fix: avoid the mutable-default-argument pitfall; normalize here.
        if person_list2 is None:
            person_list2 = []
        if len(person_list) == len(person_list2) == 1:
            person_list = person_list + person_list2
            person_list2 = []
        names = self._get_names(person_list)

        if person_list2:
            names2 = self._get_names(person_list2)
            if len(names) + len(names2) == 3:
                if len(names) == 1:
                    title = _("Descendant Chart for %(person)s and "
                              "%(father1)s, %(mother1)s") % \
                                {'person':  names[0],
                                 'father1': names2[0],
                                 'mother1': names2[1],
                                }
                else:  # Should be 2 items in names list
                    title = _("Descendant Chart for %(person)s, %(father1)s "
                              "and %(mother1)s") % \
                                {'father1': names[0],
                                 'mother1': names[1],
                                 'person':  names2[0],
                                }
            else:  # Should be 2 items in both names and names2 lists
                title = _("Descendant Chart for %(father1)s, %(father2)s "
                          "and %(mother1)s, %(mother2)s") % \
                            {'father1': names[0],
                             'mother1': names[1],
                             'father2': names2[0],
                             'mother2': names2[1],
                            }
        else:  # No person_list2: Just one family
            if len(names) == 1:
                title = _("Descendant Chart for %(person)s") % \
                          {'person': names[0]}
            else:  # Should be two items in names list
                title = _("Descendant Chart for %(father)s and %(mother)s") % \
                          {'father': names[0],
                           'mother': names[1],
                          }
        return title

    def get_parents(self, family_id):
        """ For a family_id, return the father and mother """
        family1 = self.database.get_family_from_gramps_id(family_id)
        father_h = family1.get_father_handle()
        mother_h = family1.get_mother_handle()

        parents = [self.database.get_person_from_handle(handle)
                   for handle in [father_h, mother_h] if handle]

        return parents
class TitleNone(TitleNoDisplay):
    """Title object used when the chart carries no visible title."""

    def __init__(self, dbase, doc):
        TitleNoDisplay.__init__(self, doc, "CG2-Title")

    def calc_title(self, persons):
        """Set the (never displayed) report title."""
        self.text = 'Descendant Graph'
class TitleDPY(DescendantTitleBase):
"""Descendant (Person yes start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, person_id):
"""Calculate the title of the report"""
center = self.database.get_person_from_gramps_id(person_id)
family2_h = center.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
person_list = None
if family2:
father2_h = family2.get_father_handle()
mother2_h = family2.get_mother_handle()
person_list = [self.database.get_person_from_handle(handle)
for handle in [father2_h, mother2_h] if handle]
if not |
dabear/torrentstatus | torrentstatus/plugins/builtin.torrent.onstart.settorrentlabels.py | Python | lgpl-3.0 | 3,296 | 0.003337 | from torrentstatus.plugin import iTorrentAction
from torrentstatus.utorrent.connection import Connection
from contextlib import contextmanager
from torrentstatus.bearlang import BearLang
from torrentstatus.settings import config, labels_config
from torrentstatus.utils import intTryParse
@contextmanager
def utorrent_connection(host, username, password):
    """Yield ``(connection, error)`` for a uTorrent web-UI session.

    Exactly one of the pair is None: on a failed connect the error is
    yielded, otherwise the live connection object.
    """
    try:
        handle = Connection(host, username, password).utorrent(None)
    except Exception as err:
        yield None, err
    else:
        try:
            yield handle, None
        finally:
            pass
def get_new_torrent_labels(labels, args):
    """Transforms torrent labels and args passing them into a BearLang Instance

    Parameters:
        labels (Dict) A dict of label and rules for that label
        args   (Dict) A dict of arguments, will be passed to Bearlang
    Returns:
        a list of labels that match the rules defined.
    """
    matched = []
    for label, ruleset in labels.items():
        # multiple rules accepted when configparser uses MultiOrderedDict
        for raw_rule in ruleset.split("\n"):
            rule = raw_rule.strip()
            is_match = BearLang(rule, args).execute()
            print("\nrule:{0}, label:{1}, ismatch: {2}\n".format(rule, label, is_match))
            if is_match:
                matched.append(label)
    return matched
# Server-wide settings loaded once at import time from the main config file.
settings = config.getSettingsAsDict()
class SetLabelsOnStart(iTorrentAction):
    """Plugin hook: (re)label a torrent in uTorrent's web UI when it starts."""

    def onstart(self, pluginconfig, utorrentargs):
        # Expose the parsed argument namespace as a plain dict so the rule
        # language can look fields up by name.
        tempargs = vars(utorrentargs)

        # Use labels definition from config file and match them up against
        # provided input to the main script
        labels = labels_config.getSettingsAsDict()

        new_labels = get_new_torrent_labels(labels, tempargs)

        #only connect to utorrent if we need to do a label change
        if new_labels and intTryParse(settings["webui_enable"]) == 1:
            with utorrent_connection(settings["webui_host"],
                                     settings["webui_username"],
                                     settings["webui_password"]) as (conn, err):
                if err:
                    print("Could not connect to webui, make sure webui_host, "
                          "webui_username and webui_password is correctly "
                          "defined in configuration file. Error:{0}".format(err))
                else:
                    print("Connection to utorrent web ui ok")
                    print ("Got torrent '{0}' with hash {1} and tracker {2}. \n Setting new_labels: {3}"
                           .format(utorrentargs.torrentname, utorrentargs.hash, utorrentargs.tracker, new_labels))

                    if utorrentargs.debug:
                        print("debug mode on, not doing update")
                        # NOTE(review): returns None (not True/False) in debug mode.
                        return

                    #remove existing label
                    conn.torrent_set_props([{utorrentargs.hash: {'label': ''}}])
                    #set new labels
                    for new_label in new_labels:
                        conn.torrent_set_props([{utorrentargs.hash: {'label': new_label}}])
            return True
        else:
            print("Not trying to connect to webui")
            return False
| |
gslab-econ/gslab_python | gslab_scons/log_paths_dict.py | Python | mit | 5,515 | 0.009066 | import os
import sys
import scandir
import pymmh3 as mmh3
import misc
def log_paths_dict(d, record_key = 'input', nest_depth = 1, sep = ':',
                   cl_args_list = sys.argv):
    '''
    Records contents of dictionary d at record_key on nest_depth.
    Assumes unnested elements of d follow human-name: file-path.
    Values of d at record_key can be string or (nested) dict.
    '''
    # Do nothing during an SCons dry run.
    if misc.is_scons_dry_run(cl_args_list = cl_args_list):
        return None

    flat = misc.flatten_dict(d)
    entries = [(key, val) for key, val in sorted(flat.items())
               if key.count(sep) >= nest_depth and val not in [None, 'None', '']]
    for name, path in entries:
        # Only record entries whose nest_depth-th key segment is record_key.
        if name.split(sep)[nest_depth] == record_key:
            record_dir(path, name)
    return None
def record_dir(inpath, name,
               include_checksum = False,
               file_limit = 5000,
               outpath = 'state_of_input.log'):
    '''
    Record relative path, size, and (optionally) checksum of all files
    within inpath (relative paths are from inpath), appending the info
    in |-delimited format to outpath under a heading made from name.
    '''
    inpath, name, this_file_only, do_walk = check_inpath(inpath, name)
    if do_walk:
        files_info = walk(inpath, include_checksum, file_limit, this_file_only)
    else:
        # inpath was missing at runtime; write the heading only.
        files_info = None
    check_outpath(outpath)
    write_log(name, files_info, outpath)
    return None
def check_inpath(inpath, name):
    '''
    Check that inpath exists as file or directory.
    If it is a file, switch inpath to the file's directory and remember
    to record only that single file.

    Returns (inpath, name, this_file_only, do_walk).
    '''
    target_file = None
    walkable = True
    if os.path.isfile(inpath):
        target_file = inpath
        inpath = os.path.dirname(inpath)
    elif not os.path.isdir(inpath):
        # Neither file nor directory: flag it in the name, skip walking.
        name = name + ', could not find at runtime.'
        walkable = False
    return inpath, name, target_file, walkable
def check_outpath(outpath):
    '''
    Ensure that the parent directory of outpath exists,
    creating it (and any intermediates) if needed.
    '''
    parent = os.path.dirname(outpath)
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
    return None
def walk(inpath, include_checksum, file_limit, this_file_only):
    '''
    Walk through inpath (same visit order as os.walk) collecting paths of
    all subdirs and info on all files, until there are no more directories
    or info on file_limit files has been gathered.
    '''
    pending = [inpath]
    files_info, file_limit = prep_files_info(include_checksum, file_limit)
    while pending and do_more_files(files_info, file_limit):
        pending, files_info = scan_dir_wrapper(
            pending, files_info, inpath, include_checksum, file_limit,
            this_file_only)
    return files_info
def prep_files_info( | include_checksum, file_limit):
'''
Create a header for the file characteristics to grab.
Adjusts file_limit for existence of header.
'''
files_info = [['file path', 'file size in bytes']]
if include_checksum:
files_info[0].append('MurmurHash3')
f | ile_limit += 1
return files_info, file_limit
def do_more_files(files_info, file_limit):
    '''
    True while files_info holds fewer than file_limit entries.
    '''
    return len(files_info) < file_limit
def scan_dir_wrapper(dirs, files_info, inpath, include_checksum, file_limit,
                     this_file_only):
    '''
    Pop the next directory off the queue, scan it, and push any
    subdirectories back on so walk()'s while loop keeps going.
    '''
    next_dir = dirs.pop(0)
    subdirs, files_info = scan_dir(
        next_dir, files_info, inpath, include_checksum, file_limit,
        this_file_only)
    dirs += subdirs
    return dirs, files_info
def scan_dir(dir_to_scan, files_info, inpath, include_checksum, file_limit,
             this_file_only):
    '''
    Collect subdirectory paths and per-file info for one directory,
    skipping version-control trees and stopping at the file limit.
    '''
    subdirs = []
    for entry in scandir.scandir(dir_to_scan):
        if entry.is_dir(follow_symlinks = False):
            # Never descend into VCS metadata directories.
            if '.git' in entry.path or '.svn' in entry.path:
                continue
            subdirs.append(entry.path)
        elif entry.is_file() and (this_file_only is None or
                                  this_file_only == entry.path):
            files_info.append(
                get_file_information(entry, inpath, include_checksum))
            if not do_more_files(files_info, file_limit):
                break
    return subdirs, files_info
def get_file_information(f, inpath, include_checksum):
    '''
    Grabs path and size from scandir file object.
    Will compute file's checksum if asked.

    Returns [relative path, size] or [relative path, size, checksum],
    all as strings.
    '''
    # Path is reported relative to the walk root.
    f_path = os.path.relpath(f.path, inpath).strip()
    f_size = str(f.stat().st_size)
    f_info = [f_path, f_size]
    if include_checksum:
        # NOTE(review): 'rU' (universal-newlines) mode is deprecated and was
        # removed in Python 3.11; this code predates that.
        with open(f.path, 'rU') as infile:
            # 128-bit MurmurHash3 of the file contents with a fixed seed.
            f_checksum = str(mmh3.hash128(infile.read(), 2017))
        f_info.append(f_checksum)
    return f_info
return f_info
def write_log(name, files_info, outpath):
    '''
    Append a heading built from name, followed by the |-delimited file
    rows (blank when files_info is None), to outpath.
    '''
    heading = misc.make_heading(name)
    if files_info is None:
        body = ''
    else:
        body = '\n'.join('|'.join(row) for row in files_info)
    with open(outpath, 'ab') as f:
        f.write(heading)
        f.write(body)
        f.write('\n\n')
    return None
|
EugenePig/gcloud-python | gcloud/bigquery/test_job.py | Python | apache-2.0 | 50,613 | 0.00002 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class _Base(object):
    """Mix-in of shared fixture constants and helpers for the job tests."""

    PROJECT = 'project'
    SOURCE1 = 'http://example.com/source1.csv'
    # NOTE(review): 'datset_name' looks like a typo for 'dataset_name';
    # kept as-is since tests only need a consistent value.
    DS_NAME = 'datset_name'
    TABLE_NAME = 'table_name'
    JOB_NAME = 'job_name'

    def _makeOne(self, *args, **kw):
        # Instantiate the class under test (subclasses define _getTargetClass).
        return self._getTargetClass()(*args, **kw)

    def _setUpConstants(self):
        # Lazy imports keep module import free of gcloud dependencies.
        import datetime
        from gcloud._helpers import UTC

        self.WHEN_TS = 1437767599.006
        self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
            tzinfo=UTC)
        self.ETAG = 'ETAG'
        self.JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_NAME)
        self.RESOURCE_URL = 'http://example.com/path/to/resource'
        self.USER_EMAIL = 'phred@example.com'

    def _makeResource(self, started=False, ended=False):
        # Build a server-style job resource dict; start/end timestamps are
        # only present once the job has started/ended.
        self._setUpConstants()
        resource = {
            'configuration': {
                self.JOB_TYPE: {
                },
            },
            'statistics': {
                'creationTime': self.WHEN_TS * 1000,
                self.JOB_TYPE: {
                }
            },
            'etag': self.ETAG,
            'id': self.JOB_ID,
            'jobReference': {
                'projectId': self.PROJECT,
                'jobId': self.JOB_NAME,
            },
            'selfLink': self.RESOURCE_URL,
            'user_email': self.USER_EMAIL,
        }

        if started or ended:
            resource['statistics']['startTime'] = self.WHEN_TS * 1000

        if ended:
            resource['statistics']['endTime'] = (self.WHEN_TS + 1000) * 1000

        return resource

    def _verifyInitialReadonlyProperties(self, job):
        # A freshly constructed job has no server-assigned metadata yet.
        # root elements of resource
        self.assertEqual(job.etag, None)
        self.assertEqual(job.job_id, None)
        self.assertEqual(job.self_link, None)
        self.assertEqual(job.user_email, None)

        # derived from resource['statistics']
        self.assertEqual(job.created, None)
        self.assertEqual(job.started, None)
        self.assertEqual(job.ended, None)

        # derived from resource['status']
        self.assertEqual(job.error_result, None)
        self.assertEqual(job.errors, None)
        self.assertEqual(job.state, None)

    def _verifyReadonlyResourceProperties(self, job, resource):
        # Each read-only property must mirror the resource when the key is
        # present and be None otherwise.
        from datetime import timedelta

        self.assertEqual(job.job_id, self.JOB_ID)

        statistics = resource.get('statistics', {})

        if 'creationTime' in statistics:
            self.assertEqual(job.created, self.WHEN)
        else:
            self.assertEqual(job.created, None)

        if 'startTime' in statistics:
            self.assertEqual(job.started, self.WHEN)
        else:
            self.assertEqual(job.started, None)

        if 'endTime' in statistics:
            self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
        else:
            self.assertEqual(job.ended, None)

        if 'etag' in resource:
            self.assertEqual(job.etag, self.ETAG)
        else:
            self.assertEqual(job.etag, None)

        if 'selfLink' in resource:
            self.assertEqual(job.self_link, self.RESOURCE_URL)
        else:
            self.assertEqual(job.self_link, None)

        if 'user_email' in resource:
            self.assertEqual(job.user_email, self.USER_EMAIL)
        else:
            self.assertEqual(job.user_email, None)
class TestLoadTableFromStorageJob(unittest2.TestCase, _Base):
JOB_TYPE = 'load'
    def _getTargetClass(self):
        # Imported lazily so the module under test loads at call time.
        from gcloud.bigquery.job import LoadTableFromStorageJob
        return LoadTableFromStorageJob
    def _setUpConstants(self):
        # Load-job-specific statistics expected in a finished job resource.
        super(TestLoadTableFromStorageJob, self)._setUpConstants()
        self.INPUT_FILES = 2
        self.INPUT_BYTES = 12345
        self.OUTPUT_BYTES = 23456
        self.OUTPUT_ROWS = 345
    def _makeResource(self, started=False, ended=False):
        # Extend the base resource with load-specific output statistics,
        # which the service only reports once the job has ended.
        resource = super(TestLoadTableFromStorageJob, self)._makeResource(
            started, ended)
        if ended:
            resource['statistics']['load']['inputFiles'] = self.INPUT_FILES
            resource['statistics']['load']['inputFileBytes'] = self.INPUT_BYTES
            resource['statistics']['load']['outputBytes'] = self.OUTPUT_BYTES
            resource['statistics']['load']['outputRows'] = self.OUTPUT_ROWS
        return resource
def _verifyBooleanConfigProperties(self, job, config):
if 'allowJaggedRows' in config:
self.assertEqual(job.allow_jagged_rows,
config['allowJaggedRows'])
else:
self.assertTrue(job.allow_jagged_rows is None)
if 'allowQuotedNewlines' in config:
self.assertEqual(job.allow_quoted_newlines,
config['allowQuotedNewlines'])
else:
self.assertTrue(job.allow_quoted_newlines is None)
if 'ignoreUnknownValues' in config:
self.assertEqual(job.ignore_unknown_values,
config['ignoreUnknownValues'])
else:
self.assertTrue(job.ignore_unknown_values is None)
def _verifyEnumConfigProperties(self, job, config):
if 'createDisposition' in config:
self.assertEqual(job.create_disposition,
config['createDisposition'])
else:
self.assertTrue(job.create_disposition is None)
if 'encoding' in config:
self.assertEqual(job.encoding,
config['encoding'])
else:
self.assertTrue(job.encoding is None)
if 'sourceFormat' in config:
self.assertEqual(job.source_format,
config['sourceFormat'])
else:
self.assertTrue(job.source_format is None)
if 'writeDisposition' in config:
self.assertEqual(job.write_disposition,
config['writeDisposition'])
else:
self.assertTrue(job.write_disposition is None)
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('load')
self._verifyBooleanConfigProperties(job, config)
self._verifyEnumConfigProperties(job, config)
if 'fieldDelimiter' in config:
self.assertEqual(job.field_delimiter,
config['fieldDelimiter'])
else:
self.assertTrue(job.field_delimiter is None)
if 'maxBadRecords' in config:
self.assertEqual(job.max_bad_records,
config['maxBadRecords'])
else:
self.assertTrue(job.max_bad_records is None)
if 'quote' in config:
self.assertEqual(job.quote_character,
config['quote'])
else:
self.assertTrue(job.quote_character is None)
if 'skipLeadingRows' in config:
self.assertEqual(job.skip_leading_rows,
config['skipLeadingRows'])
else:
self.assertTrue(job.skip_leading_rows is None)
def test_ctor(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
self.assertTrue(job.destination is table)
self.assertEqual(list(job.source_uris), [self.SOURCE1])
self.assertTrue(job._client is client)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME))
self. |
kurainooni/nw.js | test/remoting/package/test.py | Python | mit | 2,877 | 0.004519 | import | time
import os
import shutil
import zipfile
import platform
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
t | estdir = os.path.dirname(os.path.abspath(__file__))
nwdist = os.path.join(os.path.dirname(os.environ['CHROMEDRIVER']), 'nwdist')
appdir = os.path.join(testdir, 'app')
pkg1 = os.path.join(testdir, 'pkg1')
pkg2 = os.path.join(testdir, 'pkg2')
pkg3 = os.path.join(testdir, 'pkg3')
try:
shutil.rmtree(pkg1)
shutil.rmtree(pkg2)
shutil.rmtree(pkg3)
except:
pass
def compress(from_dir, to_file):
    """Zip everything under from_dir into to_file.

    Archive member names are relative to from_dir.
    """
    from_dir = os.path.normpath(from_dir)
    to_file = os.path.normpath(to_file)
    with zipfile.ZipFile(to_file, 'w', compression=zipfile.ZIP_DEFLATED) as archive:
        for root, dirs, files in os.walk(from_dir):
            for name in files:
                full_path = os.path.join(root, name)
                archive.write(full_path, full_path.replace(from_dir + os.sep, ''))
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy src into dst, creating dst if needed.

    Existing destination files are only overwritten when the source is
    more than one second newer (cheap incremental copy).
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            copytree(src_path, dst_path, symlinks, ignore)
        elif (not os.path.exists(dst_path)
              or os.stat(src_path).st_mtime - os.stat(dst_path).st_mtime > 1):
            shutil.copy2(src_path, dst_path)
# --- Test 1: app shipped as a plain (uncompressed) package directory -----
os.mkdir(pkg1)
print "copying %s to %s" % (nwdist, pkg1)
copytree(nwdist, pkg1)
if platform.system() == 'Darwin':
    # On macOS the app payload lives inside the .app bundle's Resources.
    appnw = os.path.join(pkg1, 'nwjs.app', 'Contents', 'Resources', 'app.nw')
    print "copying %s to %s" % (appdir, appnw)
    copytree(appdir, appnw)
else:
    print "copying %s to %s" % (appdir, pkg1)
    copytree(appdir, pkg1)

#chrome_options = Options()
#chrome_options.add_argument("nwapp=" + pkg1)

# Drive the packaged app via chromedriver and check that the page
# reported success in its #result element.
driver_path=os.path.join(pkg1, 'chromedriver')
driver = webdriver.Chrome(executable_path=driver_path)
time.sleep(1)
try:
    print driver.current_url
    result = driver.find_element_by_id('result')
    print result.get_attribute('innerHTML')
    assert("success" in result.get_attribute('innerHTML'))
finally:
    driver.quit()

######## test compressed package
# --- Test 2: same app, but zipped into app.nw / package.nw ---------------
os.mkdir(pkg2)
print "copying %s to %s" % (nwdist, pkg2)
copytree(nwdist, pkg2)
if platform.system() == 'Darwin':
    appnw = os.path.join(pkg2, 'nwjs.app', 'Contents', 'Resources', 'app.nw')
    print "compressing %s to %s" % (appdir, appnw)
    compress(appdir, appnw)
else:
    package_nw = os.path.join(pkg2, 'package.nw')
    print "compressing %s to %s" % (appdir, package_nw)
    compress(appdir, package_nw)
driver_path=os.path.join(pkg2, 'chromedriver')
driver2 = webdriver.Chrome(executable_path=driver_path)
time.sleep(1)
try:
    print driver2.current_url
    result = driver2.find_element_by_id('result')
    print result.get_attribute('innerHTML')
    assert("success" in result.get_attribute('innerHTML'))
finally:
    driver2.quit()
Nickito12/stepmania-server | smserver/chat_commands/general.py | Python | mit | 14,861 | 0.007604 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
from smserver import models
from smserver import smutils
from smserver.chathelper import with_color
from smserver.chatplugin import ChatPlugin
from smserver.models import ranked_chart
from smserver.models.ranked_chart import Diffs
class ChatHelp(ChatPlugin):
    """List every chat command the requesting user may run."""

    command = "help"
    helper = "Show help"

    def __call__(self, serv, message):
        for cmd_name, plugin in sorted(serv.server.chat_commands.items()):
            if plugin.can(serv):
                serv.send_message("/%s: %s" % (cmd_name, plugin.helper), to="me")
class ChatUserListing(ChatPlugin):
    """List online users (scoped to the current room when in one)."""

    command = "users"
    helper = "List users"

    def __call__(self, serv, message):
        query = serv.session.query(models.User).filter_by(online=True)
        if serv.room:
            query = query.filter_by(room_id=serv.room.id)
            capacity = serv.room.max_users
        else:
            capacity = serv.server.config.server.get("max_users")

        online_users = query.all()
        serv.send_message("%s/%s players online" % (len(online_users), capacity), to="me")

        for user in online_users:
            serv.send_message(
                "%s (in %s)" % (
                    user.fullname_colored(serv.conn.room),
                    user.enum_status.name),
                to="me")
class ChatTimestamp(ChatPlugin):
    """Toggle chat timestamps for this connection and its users."""

    command = "timestamp"
    helper = "Enable chat timestamps"

    def __call__(self, serv, message):
        new_state = not serv.conn.chat_timestamp
        if new_state:
            serv.send_message("Chat timestamp enabled", to="me")
        else:
            serv.send_message("Chat timestamp disabled", to="me")
        serv.conn.chat_timestamp = new_state
        for user in serv.active_users:
            user.chat_timestamp = new_state
class ShowOffset(ChatPlugin):
    """Toggle per-user display of the average offset after a song."""

    command = "showoffset"
    helper = "Enable average offset display after a song"

    def __call__(self, serv, message):
        for user in serv.active_users:
            # Flip the flag, then confirm the new state to the requester.
            user.show_offset = not user.show_offset
            if user.show_offset:
                # Fixed user-facing typo: "diplay" -> "display".
                serv.send_message("Offset display enabled", to="me")
            else:
                serv.send_message("Offset display disabled", to="me")
class Profile(ChatPlugin):
    """Show profile info for the caller (no argument) or a named user."""

    command = "profile"
    helper = "Display profile information"

    def __call__(self, serv, message):
        if not message:
            # No name given: show each of the caller's own profiles.
            for user in serv.active_users:
                serv.send_message("Name: %s" % with_color(user.name), to="me")
                serv.send_message("XP: %20.0f" % user.xp, to="me")
                serv.send_message("Rank: %s" % user.skillrank, to="me")
                serv.send_message("Rating: %12.2f" % user.rating, to="me")
            return

        target = serv.session.query(models.User).filter_by(name=message).first()
        if not target:
            serv.send_message("Could not find user %s" % with_color(message), to="me")
            return
        serv.send_message("Name: %s" % with_color(target.name), to="me")
        serv.send_message("XP: %s" % target.xp, to="me")
        serv.send_message("Rank: %s" % target.skillrank, to="me")
        serv.send_message("Rating: %12.2f" % target.rating, to="me")
class FriendNotification(ChatPlugin):
    """Toggle online/offline notifications for the user's friends."""

    command = "friendnotif"
    helper = "Enable notifications whenever a friend gets on/off line"

    def __call__(self, serv, message):
        for user in serv.active_users:
            user.friend_notifications = not user.friend_notifications
            if user.friend_notifications:
                serv.send_message("Friend notifications enabled", to="me")
            else:
                serv.send_message("Friend notifications disabled", to="me")
#This lags a lot if there are many ranked charts
# class RankedCharts(ChatPlugin):
# command = "rankedcharts"
# helper = "Show all ranked songs"
# def __call__(self, serv, message):
# charts = serv.session.query(models.RankedChart, models.Song.title).join(models.Song).all()
# serv.send_message("Ranked Charts:", to="me")
# for chart in charts:
# serv.send_message("Title: " + chart[1] + " Pack: " + chart[0].pack_name + " Diff: " + Diffs(chart[0].diff).name, to="me")
class AddFriend(ChatPlugin):
    """Send (or accept) a friend request: /addfriend user."""

    command = "addfriend"
    helper = "Add a friend. /addfriend user"

    def __call__(self, serv, message):
        for user in serv.active_users:
            if not user:
                return
            newfriend = serv.session.query(models.User).filter_by(name=message).first()
            if not newfriend:
                serv.send_message("Unknown user %s" % with_color(message), to="me")
                return
            if newfriend.name == user.name:
                serv.send_message("Cant befriend yourself", to="me")
                return
            # Look up friendship rows in either direction.
            friendships = serv.session.query(models.Friendship).filter( \
                ((models.Friendship.user1_id == user.id) & (models.Friendship.user2_id == newfriend.id)) | \
                (models.Friendship.user2_id == user.id) & (models.Friendship.user1_id == newfriend.id))
            if not friendships.first():
                # No relation yet: create a pending (state 0) request.
                serv.session.add(models.Friendship(user1_id = user.id, user2_id = newfriend.id, state = 0))
                serv.send_message("Friend request sent to %s" % with_color(message), to="me")
            else:
                friendships = friendships.all()
                friendship = friendships[0]
                if len(friendships) != 1:
                    # Two rows can exist when ignore records (state 2) were
                    # created in both directions: remove our own ignore and
                    # keep working with the other row.
                    # Bug fix: the original indexed the unbound name
                    # `friendship` here (NameError); use `friendships`.
                    if friendships[0].state == 2:
                        if friendships[0].user1_id == user.id:
                            Unignore.__call__(self, serv, message)
                        friendship = friendships[1]
                    elif friendships[1].state == 2:
                        if friendships[1].user1_id == user.id:
                            Unignore.__call__(self, serv, message)
                        friendship = friendships[0]
                if friendship.state == 1:
                    serv.send_message("%s is already friends with you" % with_color(message), to="me")
                    return
                if friendship.state == 2:
                    # The other user is ignoring us.
                    serv.send_message("Cant send %s a friend request" % with_color(message), to="me")
                    return
                if friendship.user1_id == user.id:
                    serv.send_message("Already sent a friend request to %s" % with_color(message), to="me")
                    return
                # Pending request from the other side: accept it.
                friendship.state = 1
                serv.send_message("Accepted friend request from %s" % with_color(message), to="me")
            serv.session.commit()
class Ignore(ChatPlugin):
command = "ignore"
helper = "Ignore someone(Can't send friend requests or pm). /ignore user"
def __call__(self, serv, message):
for user in serv.active_users:
if not user:
return
newignore = serv.session.query(models.User).filter_by(name=message).first()
if not newignore:
serv.send_message("Unknown user %s" % with_color(message), to="me")
return
if newignore.name == user.name:
serv.send_message("Cant ignore yourself", to="me")
return
friendships = serv.session.query(models.Friendship).filter( \
((models.Friendship.user1_id == user.id) & (models.Friendship.user2_id == newignore.id)) | \
(models.Friendship.user2_id == user.id) & (models.Friendship.user1_id == newignore.id))
friendship = friendships.first()
if not friendship:
serv.session.add(models.Friendship(user1_id = user.id, user2_id = newignore.id, state = 2))
serv.send_message("%s ignored" % with_ |
keenondrums/sovrin-node | sovrin_node/test/upgrade/test_pool_upgrade_no_loop_reinstall.py | Python | apache-2.0 | 1,232 | 0.000812 | from copy import deepcopy
import pytest
from sovrin_node.test import waits
from stp_core.loop.eventually import eventually
from plenum.common.constants import VERSION
from sovrin_common.constants import REINSTALL
from sovrin_node.test.upgrade.helper import bumpedVersion, checkUpgradeScheduled, \
ensureUpgradeSent, check_no_loop
from sovrin_node.server.upgra | de_log import UpgradeLog
import sovrin_node
def test_upgrade_does_not_get_into_loop_if_reinstall(
looper,
tconf,
nodeSet,
validUpgrade,
trustee | ,
trusteeWallet,
monkeypatch):
new_version = bumpedVersion()
upgr1 = deepcopy(validUpgrade)
upgr1[VERSION] = new_version
upgr1[REINSTALL] = True
# An upgrade scheduled, it should pass
ensureUpgradeSent(looper, trustee, trusteeWallet, upgr1)
looper.run(
eventually(
checkUpgradeScheduled,
nodeSet,
upgr1[VERSION],
retryWait=1,
timeout=waits.expectedUpgradeScheduled()))
# here we make nodes think they have upgraded successfully
monkeypatch.setattr(sovrin_node.__metadata__, '__version__', new_version)
check_no_loop(nodeSet, UpgradeLog.UPGRADE_SUCCEEDED)
|
rdkit/rdkit-orig | rdkit/Chem/SATIS.py | Python | bsd-3-clause | 3,432 | 0.015734 | # $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Functionality for SATIS typing atoms
"""
from rdkit import Chem
_debug = 0
#
# These are SMARTS patterns for the special cases used in
# SATIS typing.
#
aldehydePatt = Chem.MolFromSmarts('[CD2]=[OD1]')
ketonePatt = Chem.MolFromSmarts('[CD3]=[OD1]')
amidePatt = Chem.MolFromSmarts('[CD3](=[OD1])-[#7]')
esterPatt = Chem.MolFromSmarts('C(=[OD1])-O-[#6]')
carboxylatePatt = Chem.MolFromSmarts('C(=[OD1])-[OX1]')
carboxylPatt = Chem.MolFromSmarts('C(=[OD1])-[OX2]')
specialCases = ((carboxylatePatt,97),
(esterPatt,96),
(carboxylPatt,98),
(amidePatt,95),
(ketonePatt,94),
(aldehydePatt,93))
def SATISTypes(mol,neighborsToInclude=4):
""" returns SATIS codes for all atoms in a molecule
The SATIS definition used is from:
J. Chem. Inf. Comput. Sci. _39_ 751-757 (1999)
each SATIS code is a string consisting of _neighborsToInclude_ + 1
2 digit numbers
**Arguments**
- mol: a molecule
- neighborsToInclude (optional): the number of neighbors to include
in the SATIS codes
**Returns**
a list of strings nAtoms long
"""
global specialCases
nAtoms = mol.GetNumAtoms()
atomicNums = [0]*nAtoms
atoms = mol.GetAtoms()
for i in xrange(nAtoms):
atomicNums[i] = atoms[i].GetAtomicNum()
nSpecialCases = len(specialCases)
specialCaseMatches = [None]*nSpecialCases
for i,(patt,idx) in enumerate(specialCases):
if mol.HasSubstructMatch(patt):
specialCaseMatches[i] = mol.GetSubstructMatches(patt)
else:
specialCaseMatches[i] = ()
codes = [None]*nAtoms
for i in range(nAtoms):
code = [99]*(neighborsToInclude+1)
atom = atoms[i]
atomIdx = atom.GetIdx()
code[0] = min(atom.GetAtomicNum(),99)
bonds = atom.GetBonds()
nBonds = len(bonds)
otherIndices = [-1]*nBonds
if _debug: print code[0],
for j in range(nBonds):
otherIndices[j] = bonds[j].GetOtherAtom(atom).GetIdx()
if _debug: print otherIndices[j],
if _debug: print
otherNums = [atomicNums[x] for x in otherIndices] + \
[1]*atom.GetTotalNumHs()
otherNums.sort()
nOthers = len(otherNums)
if nOthers > neighborsToInclude:
otherNums.reverse()
otherNums = otherNums[:neighborsToInclude]
otherNums.reverse()
for j in range(neighborsToInclude):
code[j+1] = min(otherNums[j],99)
else:
for j in range(nOthers):
code[j+1] = min(otherNums[j],99)
if nOthers < neighborsToInclude and code[0] in [6,8]:
found = 0
for j in range(nSpecialCases):
for matchTuple in specialCaseMatches[j]:
if atomIdx in matchTuple:
code[-1] = specialCas | es[j][1]
found = 1
break
if found:
break
codes[i] = ''.join(['%02d'%(x) for x in code])
return codes
if __name__ == '__main__':
smis = ['CC(=O)NC','CP(F)(Cl)(Br)(O)',
'O=CC(=O)C','C(=O)OCC(=O)O','C(=O)[O-]']
for smi in smis | :
print smi
m = Chem.MolFromSmiles(smi)
codes = SATISTypes(m)
print codes
|
alumarcu/dream-framework | dream/core/models/team.py | Python | gpl-3.0 | 598 | 0 | from django.db.models import Model, CharField, DateTimeField, ForeignKey
from django.utils.translation im | port ugettext_lazy as _
from dream.core.definitions import GENDER_CHOICES, GENDER_UNDEFINED
from . import Club
class Team(Model):
club = ForeignKey(Club)
name = CharField(_('team name'), max_length=60)
gender = CharField(
_('gender'),
max_length=1,
choices=GENDER_CHOICES,
default=GENDER_UNDEFINED
)
created = DateTimeField(auto_now_add=True)
mod | ified = DateTimeField(auto_now=True)
def __str__(self):
return self.name
|
lightd22/smartDraft | src/models/base_model.py | Python | apache-2.0 | 599 | 0.006678 | import tensorflow as tf
class Ba | seModel():
def __init__(self, name, path):
self._name = name
self._path_to_model = path
self._graph = tf.Graph()
self.sess = tf.Session(graph=self._graph)
def _ | _del__(self):
try:
self.sess.close()
del self.sess
finally:
print("Model closed..")
def build_model(self):
raise NotImplementedError
def init_saver(self):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
|
dsparrow27/zoocore | zoo/libs/utils/timeutils.py | Python | gpl-3.0 | 1,936 | 0.001033 | from datetime import datetime, timedelta
def formatFrameToTime(start, current, fr | ameRate):
total = current - start
seconds = float(total) / float(frameRate)
minutes = int(seconds / 60.0)
seconds -= minutes * 60
return ":".join(["00", str(minutes).zfill(2),
str(round(seconds, 1)).zfill(2),
str(int(current)).zfill(2)])
def formatModifiedDateTime(dateTime):
"""Format a data/time into a nice human-friendly string.
:para | m dateTime: The datetime instance to be formatted
:type dateTime: :class:`datatime`
:returns A string representing the datetime in a nice format
:rtype: str
.. code-block:: python
from datetime import datetime
now = datetime.now()
format_modified_date_time_str(now)
# result: 'Today, 9:23am'
"""
date = dateTime.date()
timeDiff = datetime.now().date() - date
if timeDiff < timedelta(days=1):
date_str = "Today"
elif timeDiff < timedelta(days=2):
date_str = "Yesterday"
else:
date_str = "{:d}{} {}".format(date.day,
daySuffix(date.day),
date.strftime("%b %Y"))
# format the modified time into a 12-hour am/pm format
dataTime = dateTime.time()
hour = dataTime.hour
suffix = "am" if hour < 12 else "pm"
hour = hour if hour == 12 else hour % 12 # 0-11am, 12pm, 1-11pm
date_str += (", {:d}:{:02d}{}".format(hour, dataTime.minute, suffix))
return date_str
def daySuffix(day):
"""Figure out the suffix to use for the specified day of the month (e.g. 1st, 3rd,
15th, 32nd, etc.)
:param day: The day of the month
:type day: int
:returns: A string containing the shorthand suffix for the day of the month
:rtype: str
"""
return ["th", "st", "nd", "rd"][day % 10 if not 11 <= day <= 13 and day % 10 < 4 else 0]
|
alphagov/stagecraft | stagecraft/apps/dashboards/tests/factories/factories.py | Python | mit | 1,931 | 0 | import factory
from ...models import Dashboard, Link, ModuleType, Module
from ....organisati | on.tests.factories import NodeFactory, NodeTypeFactory
class DashboardFactory(factory.DjangoModelFactory):
class Meta:
model = Dashboard
status = 'published'
title = "title"
slug = factory.S | equence(lambda n: 'slug%s' % n)
class LinkFactory(factory.DjangoModelFactory):
class Meta:
model = Link
url = factory.Sequence(lambda n: 'https://www.gov.uk/link-%s' % n)
title = 'Link title'
link_type = 'transaction'
dashboard = factory.SubFactory(DashboardFactory)
class ModuleTypeFactory(factory.DjangoModelFactory):
class Meta:
model = ModuleType
name = factory.Sequence(lambda n: 'name %s' % n)
schema = {}
class ModuleFactory(factory.DjangoModelFactory):
class Meta:
model = Module
type = factory.SubFactory(ModuleTypeFactory)
dashboard = factory.SubFactory(DashboardFactory)
slug = factory.Sequence(lambda n: 'slug{}'.format(n))
title = 'title'
info = []
options = {}
order = factory.Sequence(lambda n: n)
class DepartmentTypeFactory(NodeTypeFactory):
name = 'department'
class AgencyTypeFactory(NodeTypeFactory):
name = 'agency'
class ServiceTypeFactory(NodeTypeFactory):
name = 'service'
class DepartmentFactory(NodeFactory):
name = factory.Sequence(lambda n: 'department-%s' % n)
typeOf = factory.SubFactory(DepartmentTypeFactory)
class AgencyFactory(NodeFactory):
name = factory.Sequence(lambda n: 'agency-%s' % n)
typeOf = factory.SubFactory(AgencyTypeFactory)
class AgencyWithDepartmentFactory(AgencyFactory):
parent = factory.SubFactory(DepartmentFactory)
class ServiceFactory(NodeFactory):
parent = factory.SubFactory(AgencyWithDepartmentFactory)
name = factory.Sequence(lambda n: 'service-%s' % n)
typeOf = factory.SubFactory(ServiceTypeFactory)
|
jmesteve/saas3 | openerp/addons/xml_export/__init__.py | Python | agpl-3.0 | 64 | 0 | #
# X | ML Export, (C) Agaplan 2011
#
import models
| import wizard
|
m48/sgl | docs/source/conf.py | Python | gpl-3.0 | 11,475 | 0.006536 | # -*- coding: utf-8 -*-
#
# sgl documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 12 15:33:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path | here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. T | hey can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
# 'sphinx.ext.viewcode',
]
# Misc config
napoleon_numpy_docstring = False
autodoc_member_order = "bysource"
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sgl'
copyright = u'2016, m48'
author = u'm48'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sgldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sgl.tex', u'sgl Documentation',
u'm48', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses |
Edanprof/ScoreM | script_execute.py | Python | gpl-3.0 | 768 | 0.002604 | from s | elenium import webdriver
import os
driver = webdriver.PhantomJS()
driver.get("http://info.nowgoal.com/en/team/summar | y.aspx?TeamID=59")
driver.execute_script('leftSide(selectTeamID, arrTeam);\
if (coach.length>0){ mainTitle(teamDetail, coach[0][2 + lang], coach[0][0]);\
} else { mainTitle(teamDetail,"", 0);\
}\
var mainDiv=document.getElementById("div_Table2");\
if (leagueData.length == 0) {\
mainDiv.innerHTML = "no data";\
} else {\
mainDiv.innerHTML = ShowLeaScore() + ShowLeaLetGoal() + ShowBigSmall();\
}\
rightSide();')
print driver.execute_script('return document.body.innerHTML;')
|
rwl/PyCIM | CIM15/IEC61970/WiresPhaseModel/SwitchPhase.py | Python | mit | 3,967 | 0.003025 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDE | RS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN | THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class SwitchPhase(PowerSystemResource):
"""Single phase of a multi-phase switch when its attributes might be different per phase.Single phase of a multi-phase switch when its attributes might be different per phase.
"""
def __init__(self, phaseSide1="C", phaseSide2="C", normalOpen=False, Switch=None, *args, **kw_args):
"""Initialises a new 'SwitchPhase' instance.
@param phaseSide1: Phase of this SwitchPhase on the “from” (Switch.Terminal.sequenceNumber=1) side. Should be a phase contained in that terminal’s Terminal.phases attribute. Values are: "C", "N", "s1", "B", "s2", "A"
@param phaseSide2: Phase of this SwitchPhase on the “to” (Switch.Terminal.sequenceNumber=2) side. Should be a phase contained in that terminal’s Terminal.phases attribute. Values are: "C", "N", "s1", "B", "s2", "A"
@param normalOpen: Used in cases when no Measurement for the status value is present. If the SwitchPhase has a status measurement the Discrete.normalValue is expected to match with this value.
@param Switch:
"""
#: Phase of this SwitchPhase on the “from” (Switch.Terminal.sequenceNumber=1) side. Should be a phase contained in that terminal’s Terminal.phases attribute. Values are: "C", "N", "s1", "B", "s2", "A"
self.phaseSide1 = phaseSide1
#: Phase of this SwitchPhase on the “to” (Switch.Terminal.sequenceNumber=2) side. Should be a phase contained in that terminal’s Terminal.phases attribute. Values are: "C", "N", "s1", "B", "s2", "A"
self.phaseSide2 = phaseSide2
#: Used in cases when no Measurement for the status value is present. If the SwitchPhase has a status measurement the Discrete.normalValue is expected to match with this value.
self.normalOpen = normalOpen
self._Switch = None
self.Switch = Switch
super(SwitchPhase, self).__init__(*args, **kw_args)
_attrs = ["phaseSide1", "phaseSide2", "normalOpen"]
_attr_types = {"phaseSide1": str, "phaseSide2": str, "normalOpen": bool}
_defaults = {"phaseSide1": "C", "phaseSide2": "C", "normalOpen": False}
_enums = {"phaseSide1": "SinglePhaseKind", "phaseSide2": "SinglePhaseKind"}
_refs = ["Switch"]
_many_refs = []
def getSwitch(self):
return self._Switch
def setSwitch(self, value):
if self._Switch is not None:
filtered = [x for x in self.Switch.SwitchPhases if x != self]
self._Switch._SwitchPhases = filtered
self._Switch = value
if self._Switch is not None:
if self not in self._Switch._SwitchPhases:
self._Switch._SwitchPhases.append(self)
Switch = property(getSwitch, setSwitch)
|
Mikescher/Project-Euler_Befunge | compiled/Python3/Euler_Problem-091.py | Python | mit | 1,054 | 0.050285 | #!/usr/bin/env python3
# transpiled with BefunCompile v1.3.0 (c) 2017
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
x0=50
x1=0
x2=50
x3=88
x4=88
def _0():
return 1
def _1():
global x3
global x0
global x4
x3=x0
x4=x3+1
return 2
def _2():
global x3
global x4
global x2
global x0
return (3)if((((1)if((x3*(x4-x3))>(x2*x2))else(0))+((1)if(x4>x0)else(0)))!=0)else(7)
def _3():
global t0
global x3
global | t1
t0=x3-1
t1=6
x3=x3-1
return (6)if((t0)!=0)else(4)
def _4():
global t0
global x2
global t1
t0=x2-1
t1=x2-1
x2=t1
return (1)if((t0)!=0)else(5)
def _5():
global x1
global x0
print(x1+(3*x0*x0),end=" ",flush=True)
return 8
def _6():
global x4
global x3
x4=x3+1
return 2
def _7():
global x1
global x3
global x4
global x2
x1=x | 1+(((0)if(tm((x3-x4)*x3,x2)!=0)else(1))*2)
x4=x4+1
return 2
m=[_0,_1,_2,_3,_4,_5,_6,_7]
c=0
while c<8:
c=m[c]()
|
ivotkv/neolixir | neolixir/dummy.py | Python | mit | 2,382 | 0.007976 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
class DummyEntity(object):
__slots__ = ['id', 'properties']
def __init__(self, id, properties=None):
self.id = id
self.properties = properties or {}
| def __repr__(self):
return "<{0} (0x{1:x}): ({2}) {3}>".format(self.__class__.__name__, id(self),
self.id, self.properties)
class DummyNode(DummyEntity):
__slots__ = []
class DummyRelationship(DummyEntit | y):
__slots__ = ['start_node', 'type', 'end_node']
def __init__(self, id, start_node, type, end_node, properties=None):
self.id = id
self.start_node = start_node
self.type = type
self.end_node = end_node
self.properties = properties or {}
def __repr__(self):
return "<{0} (0x{1:x}): ({2})-[{3}:{4}]->({5}) {6}>".format(self.__class__.__name__, id(self),
getattr(self.start_node, 'id', None),
self.id, self.type,
getattr(self.end_node, 'id', None),
self.properties)
|
jmbergmann/yogi | yogi-python/yogi/private/tcp.py | Python | gpl-3.0 | 10,200 | 0.002647 | from .connections import *
import threading
import weakref
class TcpConnection(NonLocalConnection):
def __init__(self, handle: c_void_p):
NonLocalConnection.__init__(self, handle)
yogi.YOGI_CreateTcpClient.restype = api_result_handler
yogi.YOGI_CreateTcpClient.argtypes = [POINTER(c_void_p), c_void_p, c_void_p, c_uint]
yogi.YOGI_AsyncTcpConnect.restype = api_result_handler
yogi.YOGI_AsyncTcpConnect.argtypes = [c_void_p, c_char_p, c_uint, c_int, CFUNCTYPE(None, c_int, c_void_p, c_void_p),
c_void_p]
yogi.YOGI_CancelTcpConnect.restype = api_result_handler
yogi.YOGI_CancelTcpConnect.argtypes = [c_void_p]
class TcpClient(Object):
def __init__(self, scheduler: Scheduler, identification: Optional[str] = None):
handle = c_void_p()
if identification is None:
yogi.YOGI_CreateTcpClient(byref(handle), scheduler._handle, c_void_p(), 0)
else:
buffer = create_string_buffer(identification.encode('utf-8'))
yogi.YOGI_CreateTcpClient(byref(handle), scheduler._handle, buffer, sizeof(buffer))
Object.__init__(self, handle)
self._scheduler = scheduler
self._identification = identification
@property
def scheduler(self) -> Scheduler:
return self._scheduler
@property
def identification(self) -> Optional[str]:
return self._identification
def async_connect(self, host: str, port: int, handshake_timeout: Optional[float],
completion_handler: Callable[[Result, Optional[TcpConnection]], None]) -> None:
def fn(res, connection_handle):
connection = None
if res:
connection = TcpConnection(connection_handle)
completion_handler(res, connection)
with WrappedCallback(yogi.YOGI_AsyncTcpConnect.argtypes[4], fn) as clb_fn:
yogi.YOGI_AsyncTcpConnect(self._handle, host.encode('utf-8'), port, make_api_timeout(handshake_timeout),
clb_fn, c_void_p())
| def cancel_connect(self) -> None:
yogi.YOGI_CancelTcpConnect(self._hand | le)
yogi.YOGI_CreateTcpServer.restype = api_result_handler
yogi.YOGI_CreateTcpServer.argtypes = [POINTER(c_void_p), c_void_p, c_char_p, c_uint, c_void_p, c_uint]
yogi.YOGI_AsyncTcpAccept.restype = api_result_handler
yogi.YOGI_AsyncTcpAccept.argtypes = [c_void_p, c_int, CFUNCTYPE(None, c_int, c_void_p, c_void_p), c_void_p]
yogi.YOGI_CancelTcpAccept.restype = api_result_handler
yogi.YOGI_CancelTcpAccept.argtypes = [c_void_p]
class TcpServer(Object):
def __init__(self, scheduler: Scheduler, address: str, port: int, identification: Optional[str] = None):
handle = c_void_p()
if identification is None:
yogi.YOGI_CreateTcpServer(byref(handle), scheduler._handle, address.encode('utf-8'), port, c_void_p(), 0)
else:
buffer = create_string_buffer(identification.encode('utf-8'))
yogi.YOGI_CreateTcpServer(byref(handle), scheduler._handle, address.encode('utf-8'), port, buffer,
sizeof(buffer))
Object.__init__(self, handle)
self._scheduler = scheduler
self._address = address
self._port = port
self._identification = identification
@property
def scheduler(self) -> Scheduler:
return self._scheduler
@property
def address(self) -> str:
return self._address
@property
def port(self) -> int:
return self._port
@property
def identification(self) -> Optional[str]:
return self._identification
def async_accept(self, handshake_timeout: Optional[float],
completion_handler: Callable[[Result, Optional[TcpConnection]], None]) -> None:
def fn(res, connection_handle):
connection = None
if res:
connection = TcpConnection(connection_handle)
completion_handler(res, connection)
with WrappedCallback(yogi.YOGI_AsyncTcpAccept.argtypes[2], fn) as clb_fn:
yogi.YOGI_AsyncTcpAccept(self._handle, make_api_timeout(handshake_timeout), clb_fn, c_void_p())
def cancel_accept(self) -> None:
yogi.YOGI_CancelTcpAccept(self._handle)
class AutoConnectingTcpClient:
def __init__(self, endpoint: Endpoint, host: str, port: int, timeout: Optional[float] = None,
identification: Optional[str] = None):
# TODO: Allow ProcessInterface and Configuration as ctor parameters
self._endpoint = endpoint
self._host = host
self._port = port
self._timeout = timeout
self._identification = identification
self._connect_observer = None
self._disconnect_observer = None
self._client = TcpClient(endpoint.scheduler, identification)
self._cv = threading.Condition()
self._reconnectThread = threading.Thread(target=self._reconnect_thread_fn, args=(weakref.ref(self), self._cv),
name="Reconnect Thread")
self._reconnectThreadInitialised = False
self._running = False
self._connection = None
self._connected = False
self._reconnectThread.start()
with self._cv:
while not self._reconnectThreadInitialised:
self._cv.wait()
@property
def endpoint(self) -> Endpoint:
return self._endpoint
@property
def host(self) -> str:
return self._host
@property
def port(self) -> int:
return self._port
@property
def timeout(self) -> Optional[float]:
return self._timeout
@property
def identification(self) -> Optional[str]:
return self._identification
@property
def connect_observer(self) -> Callable[[Result, Optional[TcpConnection]], None]:
with self._cv:
return self._connect_observer
@connect_observer.setter
def connect_observer(self, fn: Callable[[Result, Optional[TcpConnection]], None]):
with self._cv:
self._connect_observer = fn
@property
def disconnect_observer(self) -> Callable[[Failure], None]:
with self._cv:
return self._disconnect_observer
@disconnect_observer.setter
def disconnect_observer(self, fn: Callable[[Failure], None]):
with self._cv:
self._disconnect_observer = fn
@property
def connected(self) -> bool:
return self._connected
@classmethod
def _reconnect_thread_fn(cls, weak_self, cv):
with cv:
weak_self()._reconnectThreadInitialised = True
cv.notify()
while True:
cv.wait()
self = weak_self()
if not self or not self._running:
break
if self._connection is not None:
self._connection.destroy()
self._connection = None
self = None
cv.wait(timeout=1.0)
self = weak_self()
if not self or not self._running:
return
self._start_connect()
self = None
def _start_connect(self):
# TODO: logging
cls = type(self)
weak_self = weakref.ref(self)
self._client.async_connect(self._host, self._port, self._timeout,
lambda res, conn: cls._on_connect_completed(weak_self, res, conn))
@classmethod
def _on_connect_completed(cls, weak_self, res, connection):
self = weak_self()
if not self or res == Canceled():
return
with self._cv:
if not self._running:
return
if res == Success():
try:
connection.assign(self._endpoint, self._timeout)
connection.async_await_death(lambda err: cls._on_connection_died(weak_self, err))
self._connection = connection
# TODO: Logging
self._connected = True
if self._ |
RRMoelker/codecook-bash | config.py | Python | mit | 277 | 0.018051 | # -*- coding: utf-8 -*
"""
Config file path resolution
""" |
import os
from xdg import BaseDirectory
def get_config_file():
dir_full_path = BaseDirectory.save_data_path('codecook-bash.config')
file_full_path = os | .path.join(dir_full_path, 'user.config')
return file_full_path |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/applications/plot_tomography_l1_reconstruction.py | Python | mit | 5,461 | 0.001282 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from scipy import sparse
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.

    Parameters
    ----------

    l_x : int
        linear size of image array

    n_dir : int
        number of angles at which projections are acquired.  Float values
        (the module calls this with ``l / 7.``) are truncated to int,
        because modern NumPy requires an integer sample count in
        ``np.linspace``.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
        Each row accumulates, for one detector cell at one angle, the
        linear-interpolation weights of the pixels it sees.
    """
    # np.linspace raises TypeError for a float num on NumPy >= 1.18;
    # truncate explicitly to stay compatible with float callers.
    n_dir = int(n_dir)
    X, Y = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    data_unravel_indices = np.arange(l_x ** 2)
    # Duplicated so one boolean mask can select both bracketing indices
    # returned by _weights (lower half then upper half).
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Rotate pixel centres; only the coordinate along the projection
        # axis matters for binning into detector cells.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # Discard contributions falling outside the detector range.
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data(l_x=None):
    """ Synthetic binary data: the boundary pixels of random smooth blobs.

    Parameters
    ----------
    l_x : int, optional
        Linear image size.  Defaults to the module-level ``l`` so the
        existing zero-argument call keeps working.

    Returns
    -------
    ndarray of bool, shape (l_x, l_x)
        True exactly on object boundaries, giving a sparse image.
    """
    if l_x is None:
        l_x = l
    rs = np.random.RandomState(0)
    # Must be an int: modern NumPy rejects float dimensions in rs.rand.
    n_pts = 36
    x, y = np.ogrid[0:l_x, 0:l_x]
    mask_outer = (x - l_x / 2) ** 2 + (y - l_x / 2) ** 2 < (l_x / 2) ** 2
    mask = np.zeros((l_x, l_x))
    points = l_x * rs.rand(2, n_pts)
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    # float(n_pts) preserves the original float sigma (n_pts was 36.).
    mask = ndimage.gaussian_filter(mask, sigma=l_x / float(n_pts))
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # Boolean subtraction is a TypeError in modern NumPy; XOR gives the same
    # boundary (pixels in res but not in its erosion, since the erosion is
    # a subset of res).
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross va | lidation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel( | ))
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
|
soerendip42/rdkit | rdkit/VLib/NodeLib/SmartsMolFilter.py | Python | bsd-3-clause | 2,727 | 0.017968 | # $Id$
#
# Copyright (C) 2003 Rational Discovery LLC
# All Rights Reserved
#
from rdkit import RDConfig
import sys,os,types
from rdkit import Chem
from rdkit.VLib.Filter import FilterNode
class SmartsFilter(FilterNode):
  """ filter out molecules matching one or more SMARTS patterns

  There is a count associated with each pattern. Molecules are
  allowed to match the pattern up to this number of times.

  Assumptions:

    - inputs are molecules

  Sample Usage:
    >>> smis = ['C1CCC1','C1CCC1C=O','CCCC','CCC=O','CC(=O)C','CCN','NCCN','NCC=O']
    >>> mols = [Chem.MolFromSmiles(x) for x in smis]
    >>> from rdkit.VLib.Supply import SupplyNode
    >>> suppl = SupplyNode(contents=mols)
    >>> ms = [x for x in suppl]
    >>> len(ms)
    8

  We can pass in SMARTS strings:
    >>> smas = ['C=O','CN']
    >>> counts = [1,2]
    >>> filt = SmartsFilter(patterns=smas,counts=counts)
    >>> filt.AddParent(suppl)
    >>> ms = [x for x in filt]
    >>> len(ms)
    5

  Alternatively, we can pass in molecule objects:
    >>> mols =[Chem.MolFromSmarts(x) for x in smas]
    >>> counts = [1,2]
    >>> filt.Destroy()
    >>> filt = SmartsFilter(patterns=mols,counts=counts)
    >>> filt.AddParent(suppl)
    >>> ms = [x for x in filt]
    >>> len(ms)
    5

  Negation does what you'd expect:
    >>> filt.SetNegate(1)
    >>> ms = [x for x in filt]
    >>> len(ms)
    3

  """
  def __init__(self,patterns=[],counts=[],**kwargs):
    # NOTE(review): mutable default arguments; harmless here because both
    # lists are only read, but patterns=None/counts=None would be safer.
    FilterNode.__init__(self,func=self.filter,**kwargs)
    self._initPatterns(patterns,counts)

  def _initPatterns(self,patterns,counts):
    """Normalize inputs into self._patterns, a tuple of (query mol, count).

    SMARTS strings are compiled with Chem.MolFromSmarts; already-compiled
    molecule objects are stored as-is.  Raises ValueError for a bad SMARTS
    or for a counts list whose length does not match patterns.  An empty
    counts list defaults every pattern's count to 1.
    """
    nPatts = len(patterns)
    if len(counts) and len(counts)!=nPatts:
      raise ValueError('if counts is specified, it must match patterns in length')
    if not len(counts):
      counts = [1]*nPatts
    targets = [None]*nPatts
    for i in range(nPatts):
      p = patterns[i]
      c = counts[i]
      # Python 2 idiom: types.StringTypes covers str and unicode.
      if type(p) in types.StringTypes:
        m = Chem.MolFromSmarts(p)
        if not m:
          raise ValueError('bad smarts: %s'%(p))
        p = m
      targets[i] = p,c
    self._patterns = tuple(targets)

  def filter(self,cmpd):
    """Return 1 if cmpd matches any pattern at least its required count.

    The patterns are OR'd together, so the first sufficient match wins.
    """
    # NOTE(review): 'neg' is unused here; negation is presumably applied by
    # the FilterNode base class when Negate() is set -- confirm.
    neg = self.Negate()
    res = 0
    #sys.stderr.write('\tFILTER: %s\n'%(Chem.MolToSmiles(cmpd)))
    for patt,count in self._patterns:
      ms = cmpd.GetSubstructMatches(patt)
      nMatches = len(ms)
      if nMatches >= count:
        # this query is an or, so we short circuit true:
        res = 1
        break
    return res
#------------------------------------
#
# doctest boilerplate
#
def _test():
  """Run this module's doctests against __main__ and return (failed, tried)."""
  import doctest,sys
  return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
|
michaelconnor00/gbdxtools | tests/unit/test_catalog.py | Python | mit | 9,207 | 0.006951 | '''
Authors: Donnie Marino, Kostas Stamatiou
Contact: dmarino@digitalglobe.com
Unit tests for the gbdxtools.Catalog class
'''
from gbdxtools import Interface
from gbdxtools.catalog import Catalog
from auth_mock import get_mock_gbdx_session
import vcr
import unittest
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
class TestCatalog(unittest.TestCase):
@classmethod
def setUpClass(cls):
mock_gbdx_session = get_mock_gbdx_session(token="dummytoken")
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
def test_init(self):
c = Catalog(self.gbdx)
self.assertTrue(isinstance(c, Catalog))
@vcr.use_cassette('tests/unit/cassettes/test_catalog_get_address_coords.yaml', filter_headers=['authorization'])
def test_catalog_get_address_coords(self):
c = Catalog(self.gbdx)
lat, lng = c.get_address_coords('Boulder, CO')
self.assertTrue(lat == 40.0149856)
self.assertTrue(lng == -105.2705456)
@vcr.use_cassette('tests/unit/cassettes/test_catalog_get_record.yaml', filter_headers=['authorization'])
def test_catalog_get_record(self):
c = Catalog(self.gbdx)
catid = '1040010019B4A600'
record = c.get(catid)
self.assertEqual(record['identifier'], '1040010019B4A600')
self.assertEqual(record['type'], 'DigitalGlobeAcquisition')
self.assertTrue('inEdges' not in list(record.keys()))
@vcr.use_cassette('tests/unit/cassettes/test_catalog_get_record_with_relationships.yaml', filter_headers=['authorization'])
def test_catalog_get_record_with_relationships(self):
c = Catalog(self.gbdx)
catid = '1040010019B4A600'
record = c.get(catid, includeRelationships=True)
self.assertEqual(record['identifier'], '1040010019B4A600')
self.assertEqual(record['type'], 'DigitalGlobeAcquisition')
self.assertTrue('inEdges' in list(record.keys()))
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_point.yaml', filter_headers=['authorization'])
def test_catalog_search_point(self):
c = Catalog(self.gbdx)
lat = 40.0149856
lng = -105.2705456
results = c.search_point(lat, lng)
self.assertEqual(len(results),310)
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_address.yaml', filter_headers=['authorization'])
def test_catalog_search_address(self):
c = Catalog(self.gbdx)
results = c.search_address('Boulder, CO')
self.assertEqual(len(results), 310)
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_wkt_only.yaml',filter_headers=['authorization'])
def test_catalog_search_wkt_only(self):
c = Catalog(self.gbdx)
results = c.search(searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
assert len(results) == 395
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_wkt_and_startDate.yaml',filter_headers=['authorization'])
def test_catalog_search_wkt_and_startDate(self):
c = Catalog(self.gbdx)
results = c.search(searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))",
startDate='2012-01-01T00:00:00.000Z')
assert len(results) == 317
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_wkt_and_endDate.yaml',filter_headers=['authorization'])
def test_catalog_search_wkt_and_endDate(self):
c = Catalog(self.gbdx)
results = c.search(searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))",
endDate='2012-01-01T00:00:00.000Z')
assert len(results) == 78
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_startDate_and_endDate_only_more_than_one_week_apart.yaml',filter_headers=['authorization'])
def test_catalog_search_startDate_and_endDate_only_more_than_one_week_apart(self):
c = Catalog(self.gbdx)
try:
results = c.search(startDate='2004-01-01T00:00:00.000Z',
endDate='2012-01-01T00:00:00.000Z')
except Exception as e:
pass
else:
raise Exception('failed test')
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_startDate_and_endDate_only_less_than_one_week_apart.yaml',filter_headers=['authorization'])
def test_catalog_search_startDate_and_endDate_only_less_than_one_week_apart(self):
c = Catalog(self.gbdx)
results = c.search(startDate='2008-01-01T00:00:00.000Z',
e | ndDate='2008-01-03T00:00:00.000Z')
assert len(results) == 759
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_filters1.yaml',filter_headers=['authorization'])
def test_catalog_search_filters1(self):
c = Catalog(self.gbdx)
filters = [
"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
"cloudCover < 1 | 0",
"offNadirAngle < 10"
]
results = c.search(startDate='2008-01-01T00:00:00.000Z',
endDate='2012-01-03T00:00:00.000Z',
filters=filters,
searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
for result in results:
assert result['properties']['sensorPlatformName'] in ['WORLDVIEW01','QUICKBIRD02']
assert float(result['properties']['cloudCover']) < 10
assert float(result['properties']['offNadirAngle']) < 10
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_filters2.yaml',filter_headers=['authorization'])
def test_catalog_search_filters2(self):
c = Catalog(self.gbdx)
filters = [
"sensorPlatformName = 'WORLDVIEW03'"
]
results = c.search(filters=filters,
searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
for result in results:
assert result['properties']['sensorPlatformName'] in ['WORLDVIEW03']
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_types1.yaml',filter_headers=['authorization'])
def test_catalog_search_types1(self):
c = Catalog(self.gbdx)
types = [ "LandsatAcquisition" ]
results = c.search(types=types,
searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
for result in results:
assert result['type'] == 'LandsatAcquisition'
@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_huge_aoi.yaml',filter_headers=['authorization'])
def test_catalog_search_huge_aoi(self):
"""
Search an AOI the size of utah, broken into multiple smaller searches
"""
c = Catalog(self.gbdx)
results = c.search(searchAreaWkt = "POLYGON((-113.88427734375 40.36642741921034,-110.28076171875 40.36642741921034,-110.28076171875 37.565262680889965,-113.88427734375 37.565262680889965,-113.88427734375 40.36642741921034))")
assert len(results) == 2736
@vcr.use_cassette('tests/unit/cassettes/test_catalog_get_data_location_DG.yaml',filter_headers=['authorization'])
def test_catalog_get_data_location_DG(self):
c = Catalog(self.gbdx)
s3path = c.get_data_location(catalog_id='1030010045539700')
assert s3path == 's3://receiving-dgcs-tdgplatform-com/055158926010_01_003/055158926010_01'
|
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/health_icd10/tests/__init__.py | Python | gpl-3.0 | 36 | 0 | fr | om test_health_icd10 import suite
| |
tiancj/emesene | emesene/gui/qt4ui/widgets/StatusButton.py | Python | gpl-3.0 | 2,828 | 0.001414 | # -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import e3
import gui
import extension
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
class StatusButton(QtGui.QToolButton):
'''a button that when clicked displays a popup that allows the user to
select a status'''
NAME = 'Status Button'
DESCRIPTION = 'A button to select the status'
AUTHOR = 'Jose Rostagno'
WEBSITE = 'www.emesene.org'
def __init__(self, session=None):
QtGui.QToolButton.__init__(self, None)
self.session = session
# a cache of gtk.Images to not load the images everytime we change
# our status
self.cache_imgs = {}
self.setAutoRaise(True)
StatusMenu = extension.get_default('menu status')
self.menu = StatusMenu(self.set_status)
self.invertStatus = {}
for stat in e3.status.STATUS:
self.invertStatus[unicode(e3.status.STATUS[stat])] = stat
if self.session:
self.status = self.session.account.status
else:
self.status = e3.sta | tus.OFFLINE
self.set_status(self | .status)
self.menu.triggered.connect(self.statusactionchange)
self.setMenu(self.menu)
# show status menu on button click
self.clicked.connect(self.showMenu)
def statusactionchange(self, action):
status = self.invertStatus[str(action.text())]
self.set_status(status)
def set_status(self, stat):
'''load an image representing a status and store it on cache'''
current_status = -1
if self.session:
current_status = self.session.account.status
if stat not in self.cache_imgs:
qt_icon = QtGui.QIcon(\
gui.theme.image_theme.status_icons[stat])
self.cache_imgs[stat] = qt_icon
else:
qt_icon = self.cache_imgs[stat]
self.setIcon(qt_icon)
if stat not in e3.status.ALL or stat == current_status:
return
self.status = stat
if self.session:
self.session.set_status(stat)
|
tgerdes/toolbot | toolbot/adapter/irc.py | Python | mit | 3,433 | 0 | import irc3
from toolbot.adapter import Adapter
from toolbot.message import (
TextMessage,
EnterMessage,
LeaveMessage,
TopicMessage)
@irc3.plugin
class Irc3ToToolbot:
requires = ['irc3.plugins.core', ]
def __init__(self, bot):
self.bot = bot
@irc3.event(irc3.rfc.PRIVMSG)
def message(self, mask, event, target, data):
adapter = self.bot.config['adapter']
bot = adapter.bot
user = bot.brain.userForId(mask, name=mask.nick, room=target)
adapter.receive(TextMessage(user, data))
@irc3.event(irc3.rfc.JOIN)
def join(self, mask, channel):
adapter = self.bot.config['adapter']
bot = adapter.bot
if channel.st | artswith(":"):
channel = channel[1:]
user = bot.brain.userForId(mask, name=mask.n | ick, room=channel)
adapter.receive(EnterMessage(user))
@irc3.event(irc3.rfc.PART)
def part(self, mask, channel, data):
adapter = self.bot.config['adapter']
bot = adapter.bot
if channel.startswith(":"):
channel = channel[1:]
user = bot.brain.userForId(mask, name=mask.nick, room=channel)
adapter.receive(LeaveMessage(user, data))
@irc3.event(irc3.rfc.QUIT)
def quit(self, mask, data):
adapter = self.bot.config['adapter']
bot = adapter.bot
user = bot.brain.userForId(mask, name=mask.nick)
adapter.receive(LeaveMessage(user, data))
@irc3.event(r':(?P<mask>\S+) TOPIC (?P<channel>\S+)( :(?P<data>.*)|$)')
def topic(self, mask, channel, data):
adapter = self.bot.config['adapter']
bot = adapter.bot
user = bot.brain.userForId(mask, name=mask.nick, room=channel)
adapter.receive(TopicMessage(user, data))
@irc3.event(irc3.rfc.RPL_TOPIC)
def topic_rpl(self, srv, me, channel, data):
# TODO: store topic? Wait for RPL_TOPICWHOTIME? also?
pass
@irc3.event(r"^:(?P<srv>\S+) 353 (?P<me>\S+) (?P<mode>[@*=]) "
r"(?P<channel>\S+) :(?P<data>.*)")
def rpl_namreply(self, srv, me, mode, channel, data):
names = data.split(" ")
for name in names:
if name.startswith('@') or name.startswith('+'):
name = name[1:]
# TODO: store names?
class IrcAdapter(Adapter):
    """Toolbot adapter that bridges the bot to an IRC network via irc3.

    Outgoing traffic goes through the irc3.IrcBot instance; incoming events
    are delivered by the Irc3ToToolbot plugin, which finds this adapter via
    the shared 'adapter' config entry.
    """
    def __init__(self, bot):
        super().__init__(bot)
        # NOTE(review): host/port/autojoins are hard-coded to a local dev
        # server -- presumably meant to come from configuration.
        self.irc = irc3.IrcBot(
            nick=bot.name,
            autojoins=['#irc3'],
            host='localhost', port=6667, ssl=False,
            includes=[__name__],
            adapter=self)

    def send(self, envelope, *strings):
        """Send each string as a PRIVMSG to the envelope's room."""
        for string in strings:
            self.irc.privmsg(envelope['room'], string)

    def emote(self, envelope, *strings):
        """Send each string as a CTCP ACTION ("/me") message."""
        self.send(envelope, *("\u0001ACTION {}\u0001".format(string)
                              for string in strings))

    def reply(self, envelope, *strings):
        """Send each string prefixed with the addressed user's nick."""
        self.send(envelope, *("{}: {}".format(envelope['user'].name, string)
                              for string in strings))

    def topic(self, envelope, *strings):
        """Set the channel topic to the strings joined with ' / '."""
        data = ":" + " / ".join(strings)
        channel = envelope['room']
        self.irc.send("TOPIC {} {}".format(channel, data))

    def run(self, loop):
        """Open the IRC connection (irc3 schedules itself on the event loop)."""
        self.irc.create_connection()

    def close(self):
        """Quit and tear down the transport if a connection was established.

        NOTE(review): getattr without a default raises AttributeError when
        'protocol' was never set -- this likely wants getattr(..., None).
        """
        if getattr(self.irc, 'protocol'):
            self.irc.quit("quitting")
            self.irc.protocol.transport.close()
|
incuna/django-user-management | user_management/api/avatar/serializers.py | Python | bsd-2-clause | 2,513 | 0.000796 | from django.contrib.auth import get_user_model
from django.db import models
from imagekit.cachefiles import ImageCacheFile
from imagekit.registry import generator_registry
from imagekit.templatetags.imagekit import DEFAULT_THUMBNAIL_GENERATOR
from rest_framework import serializers
User = get_user_model()
class ThumbnailField(serializers.ImageField):
"""
Image field that returns an images url.
Pass get parameters to thumbnail the image.
Options are:
width: Specify the width (in pixels) to resize / crop to.
height: Specify the height (in pixels) to resize / crop to.
crop: Whether to crop or not [1,0]
anchor: Where to anchor the crop [t,r,b,l]
upscale: Whether to upscale or not [1,0 | ]
If no options are specified the users avatar is returned.
To crop to 100x100 anchored to the top right:
?width=100&height=100&crop=1&anchor=tr
"""
def __init__(self, *args, **kwargs):
self.generator_id = kwargs.pop('generator_id', DEFAULT_THUMBNA | IL_GENERATOR)
super(ThumbnailField, self).__init__(*args, **kwargs)
def get_generator_kwargs(self, query_params):
width = int(query_params.get('width', 0)) or None
height = int(query_params.get('height', 0)) or None
return {
'width': width,
'height': height,
'anchor': query_params.get('anchor', None),
'crop': query_params.get('crop', None),
'upscale': query_params.get('upscale', None)
}
def generate_thumbnail(self, source, **kwargs):
generator = generator_registry.get(
self.generator_id,
source=source,
**kwargs)
return ImageCacheFile(generator)
def to_native(self, image):
if not image.name:
return None
request = self.context.get('request', None)
if request is None:
return image.url
kwargs = self.get_generator_kwargs(request.query_params)
if kwargs.get('width') or kwargs.get('height'):
image = self.generate_thumbnail(image, **kwargs)
return request.build_absolute_uri(image.url)
class AvatarSerializer(serializers.ModelSerializer):
    """Serialize a user's avatar image, thumbnailed via request query params."""
    # Override default field_mapping to map ImageField to ThumbnailField so
    # the avatar URL supports width/height/crop/anchor/upscale parameters.
    # As there is only one field this is the only mapping needed.
    field_mapping = {
        models.ImageField: ThumbnailField,
    }

    class Meta:
        model = User
        fields = ('avatar',)
|
bwhite/picarus | server/build_site.py | Python | apache-2.0 | 2,094 | 0.00382 | #!/usr/bin/env python
import glob
import subprocess
import argparse
def _read_file(path):
    """Return the full contents of *path*, closing the handle deterministically."""
    with open(path) as f:
        return f.read()


def _write_file(path, contents):
    """Write *contents* to *path*, closing (and flushing) the handle."""
    with open(path, 'w') as f:
        f.write(contents)


def render_app():
    """Assemble the static site: static/app.html, js/tabs.js, static/style.css.

    Reads app_template.html plus one .html/.js pair per tab from tabs/,
    splices the tab templates into the '{{ TEMPLATES }}' placeholder, and
    concatenates the CSS files from css/.  The original used bare open()
    calls that leaked file handles; all I/O now goes through helpers that
    close files deterministically.
    """
    template_names = 'data_prefixes data_projects data_usage models_list models_create models_single models_slice process_thumbnail process_delete process_exif process_modify process_copy workflow_classifier jobs_list jobs_crawlFlickr jobs_annotationClass jobs_annotationQA visualize_thumbnails visualize_metadata visualize_exif visualize_locations visualize_times visualize_annotations evaluate_classifier'.split()
    templates = []
    scripts = []
    for template_name in template_names:
        templates.append(_read_file('tabs/%s.html' % template_name))
        scripts.append(_read_file('tabs/%s.js' % template_name))
    app_template = _read_file('app_template.html')
    _write_file('static/app.html',
                app_template.replace('{{ TEMPLATES }}', '\n'.join(templates)))
    _write_file('js/tabs.js', '\n'.join(scripts))
    preinclude_css = ['bootstrap.min.css', 'hint.min.css', 'custom.css']
    _write_file('static/style.css',
                '\n'.join(_read_file('css/' + x) for x in preinclude_css))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store | _true',
help="Don't minify the source")
args = parser.parse_args()
render_app()
preinclude = ['jquery.min.js', 'bootstrap.min.js', 'undersc | ore-min.js', 'underscore.string.min.js', 'backbone-min.js', 'base64.js', 'jquery.cookie.min.js']
preinclude = ['js/' + x for x in preinclude]
postinclude = ['picarus_api.js', 'app.js']
postinclude = ['js/' + x for x in postinclude]
a = preinclude + list(set(glob.glob('js/*.js')) - set(preinclude) - set(postinclude)) + postinclude
if args.debug:
open('static/compressed.js', 'wb').write(';\n'.join([open(x, 'rb').read() for x in a]))
else:
a= ' '.join(['--js %s' % x for x in a])
cmd = 'java -jar compiler.jar %s --js_output_file static/compressed.js' % a
subprocess.call(cmd.split(' '))
if __name__ == '__main__':
main()
|
pymedusa/Medusa | ext/github/Hook.py | Python | gpl-3.0 | 9,634 | 0.004463 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Wan Liuyang <tsfdye@gmail.com>                                #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. | #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import absolute_import
import six
import github.GithubObject
import github.HookResponse
class Hook(github.GithubObject.CompletableGithubObject):
"""
This class represents Hooks. The reference can be found here http://developer.github.com/v3/repos/hooks
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value, "url": self._url.value})
@property
def active(self):
"""
:type: bool
"""
self._completeIfNotSet(self._active)
return self._active.value
@property
def config(self):
"""
:type: dict
"""
self._completeIfNotSet(self._config)
return self._config.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def events(self):
"""
:type: list of string
"""
self._completeIfNotSet(self._events)
return self._events.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def last_response(self):
"""
:type: :class:`github.HookResponse.HookResponse`
"""
self._completeIfNotSet(self._last_response)
return self._last_response.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def test_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._test_url)
return self._test_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def ping_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._ping_url)
return self._ping_url.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(
self,
name,
config,
events=github.GithubObject.NotSet,
add_events=github.GithubObject.NotSet,
remove_events=github.GithubObject.NotSet,
active=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None
"""
assert isinstance(name, (str, six.text_type)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(
isinstance(element, (str, six.text_type)) for element in events
), events
assert add_events is github.GithubObject.NotSet or all(
isinstance(element, (str, six.text_type)) for element in add_events
), add_events
assert remove_events is github.GithubObject.NotSet or all(
isinstance(element, (str, six.text_type)) for element in remove_events
), remove_events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if add_events is not github.GithubObject.NotSet:
post_parameters["add_events"] = add_events
if remove_events is not github.GithubObject.NotSet:
post_parameters["remove_events"] = remove_events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url, input=post_parameters
)
self._useAttributes(data)
def test(self):
"""
:calls: `POST /repos/:owner/:repo/hooks/:id/tests <http://developer.github.com/v3/repos/hooks>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("POST", self.url + "/tests")
def ping(self):
"""
:calls: `POST /repos/:owner/:repo/hooks/:id/pings <http://developer.github.com/v3/repos/hooks>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("POST", self.url + "/pings")
def _initAttributes(self):
self._active = github.GithubObject.NotSet
self._config = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._events = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._last_response = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._test_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._ping_url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "active" in attributes: # pragma no branch
self._ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.