| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
from turtle import *
from itertools import cycle
from math import sqrt, sin, radians

def cercle(rayon, couleur):
    # Draw a filled circle of the given radius and colour.
    fillcolor(couleur)
    pencolor(couleur)
    begin_fill()
    circle(rayon)
    end_fill()

def positionne_tortue(pas):
    # Move the turtle up by `pas` pixels without drawing.
    penup()
    left(90)
    forward(pas)
    right(90)
    pendown()

colormode(255)
blanc_etoile = (240, 234, 236)
couleurs = cycle([(230, 50, 53), (244, 244, 244), (230, 50, 53), (107, 138, 195)])
rayon = 240
pas = 40
speed(10)
# initialise the turtle
penup()
right(90)
forward(rayon)
left(90)
pendown()
# draw the concentric circles
while rayon > 120:
    cercle(rayon, next(couleurs))
    positionne_tortue(pas)
    rayon = rayon - pas
cercle(rayon, next(couleurs))
phi = (1 + sqrt(5)) / 2
coefficient = sqrt(3 - phi)
cote_pentagone = coefficient * rayon
cote_oppose = cote_pentagone / 2
hypothenus = cote_oppose / sin(radians(54))  # math.sin expects radians; 54 was meant as degrees
penup()
left(90)
forward(2 * rayon)
right(126)
pendown()
pencolor(blanc_etoile)
fillcolor(blanc_etoile)
sommets = dict()
sommets[1] = position()
forward(coefficient * rayon)
right(180 - 108)
sommets[2] = position()
forward(coefficient * rayon)
right(180 - 108)
sommets[3] = position()
forward(coefficient * rayon)
right(180 - 108)
sommets[4] = position()
forward(coefficient * rayon)
right(180 - 108)
sommets[5] = position()
forward(coefficient * rayon)
hideturtle()
done()
| TGITS/programming-workouts | erri/python/lesson_39/bouclier.py | Python | mit | 1,438 |
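A quick check of the pentagon geometry used in the sample above: the side of a regular pentagon inscribed in a circle of radius R is 2·R·sin(36°), which is exactly the √(3−φ)·R factor the script computes. A self-contained sketch (not part of the dataset row):

```python
from math import sqrt, sin, radians

phi = (1 + sqrt(5)) / 2
R = 120  # radius of the innermost circle in the sample
print(sqrt(3 - phi) * R)         # side length via the golden-ratio identity
print(2 * R * sin(radians(36)))  # same value from the chord formula
```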
from .base_encrypted_field import BaseEncryptedField
from .irreversible_rsa_encryption_field import IrreversibleRsaEncryptionField
from .restricted_rsa_encryption_field import RestrictedRsaEncryptionField
from .local_aes_encryption_field import LocalAesEncryptionField
from .local_rsa_encryption_field import LocalRsaEncryptionField
from .encrypted_identity_field import EncryptedIdentityField
from .encrypted_lastname_field import EncryptedLastnameField
from .encrypted_firstname_field import EncryptedFirstnameField
from .encrypted_char_field import EncryptedCharField
from .encrypted_aes_char_field import EncryptedAesCharField
from .encrypted_other_char_field import EncryptedOtherCharField
from .encrypted_text_field import EncryptedTextField
from .salt_field import SaltField
from .encrypted_integer_field import EncryptedIntegerField
from .encrypted_decimal_field import EncryptedDecimalField
from .encrypted_date_field import EncryptedDateField
| botswana-harvard/edc-crypto-fields | edc_crypto_fields/fields/__init__.py | Python | gpl-2.0 | 953 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the mem -C and --chdir options change directory before
globbing for files.
"""
import os.path
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.subdir('logs')
lines = [
' pre-read post-read pre-build post-build\n'
]
line_fmt = ' 1000 2000 3000 4000 %s\n'
for i in range(9):
    logfile_name = os.path.join('logs', 'foo-%s.log' % i)
    test.fake_logfile(logfile_name)
    lines.append(line_fmt % logfile_name)
expect = ''.join(lines)
test.run(arguments = 'mem -C logs foo-*.log', stdout = expect)
test.run(arguments = 'mem --chdir logs foo-?.log', stdout = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| andrewyoung1991/scons | test/scons-time/mem/chdir.py | Python | mit | 1,948 |
"""Stocastic graph."""
# Copyright (C) 2010-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = "Aric Hagberg <aric.hagberg@gmail.com>"
__all__ = ['stochastic_graph']
@not_implemented_for('multigraph')
@not_implemented_for('undirected')
def stochastic_graph(G, copy=True, weight='weight'):
    """Return a right-stochastic representation of G.

    A right-stochastic graph is a weighted digraph in which, for every node,
    the weights of the outgoing edges sum to 1.

    Parameters
    ----------
    G : directed graph
        A NetworkX DiGraph
    copy : boolean, optional
        If True, make a copy of the graph; otherwise modify the original graph
    weight : edge attribute key (optional, default='weight')
        Edge data key used for weight. If no attribute is found for an edge,
        the edge weight is set to 1. Weights must be positive numbers.
    """
    import warnings
    if copy:
        W = nx.DiGraph(G)
    else:
        W = G  # reference original graph, no copy
    degree = W.out_degree(weight=weight)
    for (u, v, d) in W.edges(data=True):
        if degree[u] == 0:
            warnings.warn('zero out-degree for node %s' % u)
            d[weight] = 0.0
        else:
            d[weight] = float(d.get(weight, 1.0)) / degree[u]
    return W
| jni/networkx | networkx/generators/stochastic.py | Python | bsd-3-clause | 1,460 |
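For context, a minimal sketch of what `stochastic_graph` does to out-edge weights; the graph below is made up for illustration:

```python
import networkx as nx

G = nx.DiGraph()
G.add_edge('a', 'b', weight=3.0)
G.add_edge('a', 'c', weight=1.0)

W = nx.stochastic_graph(G)  # copy=True by default, so G is untouched
# Out-edge weights of 'a' are normalized to sum to 1.
print(W['a']['b']['weight'], W['a']['c']['weight'])  # 0.75 0.25
```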
"""Appliance update plugin
If update_urls is set in the env, re-trigger the update_rhel configuration
step to update the appliance with the new URLs
"""
import os
import pytest
def pytest_parallel_configured():
    if pytest.store.parallelizer_role != 'master' and 'update_urls' in os.environ:
        pytest.store.write_line('updating appliance before testing')
        pytest.store.current_appliance.update_rhel(*str(os.environ['update_urls']).split())
| thom-at-redhat/cfme_tests | fixtures/update_appliance.py | Python | gpl-2.0 | 460 |
import matplotlib.pyplot as plt
import numpy as np
import urllib
SDSS_File = '/Users/compastro/jenkins/SDSS_z+04_no_4363.csv'
SDSS_Data = np.genfromtxt(SDSS_File,skip_header=2, delimiter = ',',dtype=float,unpack=True)
NII_6583 = SDSS_Data[28,:]
Ha_6562 = SDSS_Data[27,:]
OIII_5006 = SDSS_Data[20,:]
Hb_4861 = SDSS_Data[18,:]
OIII_4363 = SDSS_Data[14,:]
OIII_Hb = np.log10(OIII_5006/Hb_4861)
NII_Ha = np.log10(NII_6583/Ha_6562)
plt.figure()
plt.xlim(-1.5,0.5)
plt.ylim(-1,1.5)
#plt.scatter(NII_Ha,OIII_Hb,s=30,c='b')
x=np.linspace(-1.5,0.3,50)
y=((.61/(x-.47))+1.19)
plt.plot(x,y,color='k')
x3=np.linspace(-1,-0.2,50)
y3=((.61/(x3-.05)+1.3))
plt.plot(x3,y3,linestyle='--',color='red')
counter=0
for i in range(0, len(SDSS_Data[0, :])):
    if OIII_5006[i] / OIII_4363[i] < 100.0:
        plt.scatter(NII_Ha[i], OIII_Hb[i], color='r')
        counter = counter + 1
    elif OIII_5006[i] / OIII_4363[i] > 100.0 and OIII_5006[i] / OIII_4363[i] < 1000.0:
        plt.scatter(NII_Ha[i], OIII_Hb[i], color='g')
        counter = counter + 1
    elif OIII_5006[i] / OIII_4363[i] > 1000.0:
        plt.scatter(NII_Ha[i], OIII_Hb[i], color='k')
        counter = counter + 1
    else:
        print("error")
print(counter)
plt.ylabel(r"log ([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("BPT Diagram")
plt.show()
| crichardson17/emgtemp | den_u_sims/no_4363_no_sims_plots.py | Python | mit | 1,383 |
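The two curves plotted in the sample match the standard BPT demarcations (the solid one follows Kewley et al. 2001, the dashed one Kauffmann et al. 2003). The per-point classification loop can also be written vectorized; a sketch with made-up flux arrays, where the exact-boundary cases fold into the nearest bin:

```python
import numpy as np
import matplotlib.pyplot as plt

# Made-up stand-ins for the line-flux arrays in the sample.
rng = np.random.default_rng(0)
OIII_5006 = rng.uniform(1.0, 2000.0, 50)
OIII_4363 = rng.uniform(1.0, 2.0, 50)
NII_Ha = rng.uniform(-1.5, 0.5, 50)
OIII_Hb = rng.uniform(-1.0, 1.5, 50)

ratio = OIII_5006 / OIII_4363
colors = np.select([ratio < 100.0, ratio < 1000.0], ['r', 'g'], default='k')
plt.scatter(NII_Ha, OIII_Hb, c=colors, s=30)
plt.show()
```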
""" Discovers Belkin Wemo devices. """
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
    """ Adds support for discovering Belkin WeMo platform devices. """

    def info_from_entry(self, entry):
        """ Returns the most important info from a uPnP entry. """
        device = entry.description.find('device')
        return (device.find('friendlyName').text,
                device.find('modelName').text,
                entry.values['location'])

    def get_entries(self):
        """ Returns all Belkin WeMo entries. """
        return self.find_by_device_description(
            {'manufacturer': 'Belkin International Inc.'})
| toddeye/netdisco | netdisco/discoverables/belkin_wemo.py | Python | mit | 658 |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from datetime import timedelta
from sslscout.models import Profile, SiteGroup, Site, CheckEngine, SiteCheck
from sslscout.engines import www_ssllabs_com, sslcheck_globalsign_com
from threading import Thread
import os, socket, sys, datetime
class Command(BaseCommand):
    args = 'none'
    help = 'Find idle engines and sites that need checking and run checks'

    ### function to run sitechecks
    def handle(self, *args, **options):
        ### open a listening socket (instead of writing a pidfile)
        pidsocket = "/tmp/sslscout-engine-%s.sock" % settings.ENVIRONMENT
        if os.path.exists(pidsocket):
            ### bail out
            self.stdout.write('socket %s already exists, bailing out' % pidsocket)
            sys.exit(1)
        else:
            try:
                s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
                s.bind(pidsocket)
                os.unlink(pidsocket)
            except:
                self.stdout.write('unable to bind pidsocket %s, bailing out' % pidsocket)
                sys.exit(1)

        ### get a list of active engines
        engines = CheckEngine.objects.filter(active=True)
        enginethreads = []
        for engine in engines:
            ### check if this engine already has a job running
            if SiteCheck.objects.filter(finish_time=None, engine=engine).count() > 0:
                ### skip this engine
                self.stdout.write('engine %s is already busy running a job' % engine.name)
                continue

            ### find a site that needs checking
            sites = Site.objects.all()
            for site in sites:
                try:
                    ### find the latest sitecheck for this hostname with this engine
                    latest_sitecheck = SiteCheck.objects.filter(engine=engine, hostname=site.hostname).latest('finish_time')
                except SiteCheck.DoesNotExist:
                    ### no previous checks registered for this hostname
                    latest_sitecheck = None

                ### if we have a latest_sitecheck, find out if it is more than interval_hours old
                if latest_sitecheck:
                    if latest_sitecheck.finish_time + timedelta(hours=site.sitegroup.interval_hours) > timezone.now():
                        ### not yet, skip this site
                        continue

                ### OK, time to do a new check for this site
                print "############ starting new sitecheck thread for site %s with engine %s" % (site.hostname, engine.name)
                sitecheck = SiteCheck(hostname=site.hostname, engine=engine)
                sitecheck.save()
                if engine.engineclass == 'www_ssllabs_com':
                    thread = www_ssllabs_com(sitecheck.id)
                elif engine.engineclass == 'sslcheck_globalsign_com':
                    thread = sslcheck_globalsign_com(sitecheck.id)
                else:
                    self.stdout.write('unknown engine, error')
                    continue  # skip this site; `thread` would otherwise be unbound here
                thread.start()
                enginethreads.append(thread)
                break

        ### finished looping through engines, wait for any spawned threads to finish
        if len(enginethreads) > 0:
            self.stdout.write('waiting for %s threads to finish...' % len(enginethreads))
            for et in enginethreads:
                et.join()
            ### all threads finished
            self.stdout.write('all threads finished')
        else:
            print "no threads started"
        print "############ done"
| tykling/sslscout | src/sslscout/management/commands/runengines.py | Python | bsd-3-clause | 4,070 |
from flask import current_app
from simplecoin import create_app
from simplecoin.tasks import celery
from celery.bin.worker import main
app = create_app(celery=True)
with app.app_context():
    # import celerybeat settings
    celery.conf.update(current_app.config['celery'])
    current_app.logger.info("Celery worker powering up... BBBVVVRRR!")
    main(app=celery)
| simplecrypto/simplecoin | simplecoin/celery_entry.py | Python | mit | 371 |
import os
from utils.enums import DeployStrategy
DEBUG = False
# Server primary configuration
SERVER_CONFIG = {
# Port of service
"PORT": 7722,
# Mongo Section
"MONGO_HOST": "192.168.100.1",
"MONGO_PORT": 27017,
"MONGO_USER": "superuser",
"MONGO_PWD": "******",
# Resource
"RESOURCE_DIR": "./resource",
# Log Section
"LOG_DIR": "/log/",
"LOG_FILE_NAME": "deploy_server",
# Biz Section
"TAG_LIST_SIZE": 10 # size of tag list in admin interface
}
# Configuration of Redis
REDIS = {
"HOST": "192.168.100.5",
"PORT": 6379,
"DBID": 3
}
# Webhook secret of github
GITHUB = {
"SECRET": "********"
}
# SMTP to send email
EMAIL = {
"SMTP": "smtp.exmail.qq.com",
"USER": "zqhua@zqhua.cn",
"PASSWORD": "********"
}
# ! Configuration of repos. Use a list if watching more than one repo.
REPOSITORY = {
"repoA": { # repo name
"GIT_PATH": "/home/deploy/_github/repoA/", # path where repo resides in, needed in both production/test mode
"DEPLOY_PATH": "/home/deploy/_online/", # path where deploy to, needed in production mode
"PACKAGE_PATH": "/home/deploy/_package/", # path where packages save to, need in production mode
"BACKUP_PATH": "/home/deploy/_backup/", # path where backup tar file save to, need in production mode
"STRATEGY": DeployStrategy.PRO_MODE, # mode switcher
"BRANCH": "master", # branch filter
# Services to restart when files change. The key is the first child directory of
# the repo root ('*' matches anything else, checked last); the value is the
# service name in supervisor. None means no service needs a restart; a list is
# also supported when multiple services must restart.
"SERVICES": {
"admin": "admin:admin_3377",
"api": "api:api_2919",
"dw": None,
"config": ["mf2:mf2_3333", "poster:poster_2234", "telesales:telesales_3335"],
"*": "ts:ts_3335",
},
# Service restart priority: the key is the service name in supervisor, the
# value is the priority level; lower numbers restart first.
"SERVICES_PRI": {
"admin:admin_3377": 3,
"api:api_2919": 1,
"poster:poster_2234": 2,
"pyds:pyds_3355": 2,
"telesales:telesales_3335": 3,
"mf2:mf2_3333": 2,
},
# map from hostname to roles of host
"HOSTS": {
"zqhua01": ["web", "data"],
"zqhua02": ["web", "data", "weixin"]
},
# map from host role to service names
"HOST_ROLE": {
"web": [
"admin:admin_3377",
"api:api_2919",
"mf2:mf2_3333",
"telesales:telesales_3335"
],
"data": [
"pyds:pyds_3355",
],
},
# Command Strings to run after NPM or package install
"POST_ACTIONS": [
{"cmd": "npm start", "cwd": "/home/deploy/foo"},
],
# Name of an exclude file containing file patterns that rsync should skip
"EXCLUDE_FILENAME": None
}
}
LOGGING = {
"version": 1,
"formatters": {
"verbose": {
"format": "[%(levelname)s][%(module)s-%(lineno)d][thread-%(thread)d]%(asctime)s %(name)s:%(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose"
},
"file": {
"level": "DEBUG",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + SERVER_CONFIG["LOG_FILE_NAME"] + '.log'
},
"err_file": {
"level": "ERROR",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + SERVER_CONFIG["LOG_FILE_NAME"] + '.err'
},
"t_access_file": {
"level": "ERROR",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + 'tornado.access'
},
"t_error_file": {
"level": "ERROR",
"class": "logging.handlers.TimedRotatingFileHandler",
"when": "D",
"formatter": "verbose",
"filename": SERVER_CONFIG["LOG_DIR"] + os.sep + 'tornado.error'
}
},
"loggers": {
"DeployServer": {
"handlers": ["console", "file", "err_file"],
"propagate": False,
"level": "DEBUG"
},
"tornado.access": {
"handlers": ["t_access_file"],
"propagate": False
},
"tornado": {
"handlers": ["t_error_file"],
"propagate": False
}
}
}
| magus0219/niner | config/example.py | Python | mit | 5,016 |
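The LOGGING dict in this sample follows the stdlib `logging.config.dictConfig` schema; a minimal sketch of how an entry point might consume the module (the import path is assumed, and `LOG_DIR` must exist for the file handlers to open):

```python
import logging.config

import config.example as settings  # hypothetical import path for the module above

logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger("DeployServer")
logger.info("deploy server starting on port %s", settings.SERVER_CONFIG["PORT"])
```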
import webapp2
from handlers import MainPage, BlogFront, NewPost, PostPage, Register
from handlers import Login, Logout, Like, PostEdit, PostDelete, CommentEdit
from handlers import CommentDelete
app = webapp2.WSGIApplication([('/', MainPage),
('/blog/?', BlogFront),
('/blog/([0-9]+)', PostPage),
('/blog/newpost', NewPost),
('/signup', Register),
('/login', Login),
('/logout', Logout),
('/like/([0-9]+)', Like),
('/edit/([0-9]+)', PostEdit),
('/delete/([0-9]+)', PostDelete),
('/c_edit/([0-9]+)', CommentEdit),
('/c_delete/([0-9]+)', CommentDelete)],
debug=True)
| YuhanLin1105/Multi-User-Blog | multi_blog.py | Python | mit | 940 |
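webapp2 passes the groups captured by a route regex as positional arguments to the handler method, which is how `('/blog/([0-9]+)', PostPage)` delivers the post id; a sketch of a matching handler (the handler body is assumed):

```python
import webapp2

class PostPage(webapp2.RequestHandler):
    def get(self, post_id):
        # The group captured by '/blog/([0-9]+)' arrives here as a string.
        self.response.write('post %s' % post_id)
```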
# Generated by Django 2.2 on 2021-06-11 08:25
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('donation', '0004_donorinfo'),
    ]

    operations = [
        migrations.AddField(
            model_name='donorinfo',
            name='is_indian',
            field=models.BooleanField(default=False),
        ),
    ]
| PARINetwork/pari | donation/migrations/0005_donorinfo_is_indian.py | Python | bsd-3-clause | 383 |
# Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
import time
import sh
from string import Template
from ferry.install import FERRY_HOME
from ferry.config.hadoop.hiveconfig import *
from ferry.config.hadoop.metastore import *
class HadoopInitializer(object):
    def __init__(self, system):
        """
        Create a new initializer.
        """
        self.system = system
        self.template_dir = None
        self.template_repo = None
        self.container_data_dir = HadoopConfig.data_directory
        self.container_log_dir = HadoopConfig.log_directory
        self.hive_client = HiveClientInitializer(system)
        self.hive_ms = MetaStoreInitializer(system)
        self.hive_client.template_dir = FERRY_HOME + '/data/templates/hive-metastore/'
        self.hive_ms.template_dir = FERRY_HOME + '/data/templates/hive-metastore/'

    def new_host_name(self, instance_id):
        """
        Generate a new hostname
        """
        return 'hadoop' + str(instance_id)

    def _execute_service(self, containers, entry_point, fabric, cmd):
        """
        Start the service on the containers.
        """
        yarn_master = entry_point['yarn']
        hdfs_master = None

        # Now start the HDFS cluster.
        if entry_point['hdfs_type'] == 'hadoop':
            hdfs_master = entry_point['hdfs']
            for c in containers:
                if c.service_type == 'hadoop':
                    if c.internal_ip == hdfs_master:
                        output = fabric.cmd([c], '/service/sbin/startnode %s namenode' % cmd)
                    elif c.internal_ip != yarn_master:
                        output = fabric.cmd([c], '/service/sbin/startnode %s datanode' % cmd)
            # Now wait a couple seconds to make sure
            # everything has started.
            time.sleep(5)
        elif entry_point['hdfs_type'] == 'gluster':
            mount_url = entry_point['gluster_url']
            output = fabric.cmd(containers,
                                '/service/sbin/startnode %s gluster %s' % (cmd, mount_url))

        # Now start the YARN cluster.
        for c in containers:
            if c.service_type == 'hadoop' or c.service_type == 'yarn':
                if c.internal_ip == yarn_master:
                    output = fabric.cmd([c], '/service/sbin/startnode %s yarnmaster' % cmd)
                elif c.internal_ip != hdfs_master:
                    output = fabric.cmd([c], '/service/sbin/startnode %s yarnslave' % cmd)

        # Now start the Hive metastore.
        for c in containers:
            if c.service_type == 'hive':
                self.hive_ms._execute_service([c], None, fabric, cmd)

    def start_service(self, containers, entry_point, fabric):
        self._execute_service(containers, entry_point, fabric, "start")

    def restart_service(self, containers, entry_point, fabric):
        self._execute_service(containers, entry_point, fabric, "restart")

    def stop_service(self, containers, entry_point, fabric):
        output = fabric.cmd(containers, '/service/sbin/startnode stop')

    def _generate_config_dir(self, uuid, container):
        """
        Generate a new configuration directory name.
        """
        return 'hadoop_' + str(uuid) + '_' + str(container['data_ip'])

    def get_public_ports(self, num_instances):
        """
        Ports to expose to the outside world.
        """
        return []

    def get_internal_ports(self, num_instances):
        """
        Ports needed for communication within the network.
        This is usually used for internal IPC.
        """
        return ["0-65535"]

    def get_working_ports(self, num_instances):
        """
        Ports necessary to get things working.
        """
        ports = []
        ports.append(HadoopConfig.YARN_SCHEDULER)
        ports.append(HadoopConfig.YARN_TRACKER)
        ports.append(HadoopConfig.YARN_ADMIN)
        ports.append(HadoopConfig.YARN_IPC)
        ports.append(HadoopConfig.YARN_LOCALIZER)
        ports.append(HadoopConfig.YARN_RESOURCE)
        ports.append(HadoopConfig.YARN_NODE)
        ports.append(HadoopConfig.YARN_TRACKER)
        ports.append(HadoopConfig.YARN_HTTP)
        ports.append(HadoopConfig.YARN_HTTPS)
        ports.append(HadoopConfig.YARN_JOB_HISTORY)
        ports.append(HadoopConfig.YARN_JOB_HISTORY_HTTP)
        ports.append(HadoopConfig.HDFS_MASTER)
        ports.append(HadoopConfig.HDFS_HTTP)
        ports.append(HadoopConfig.HDFS_TRANSFER)
        ports.append(HadoopConfig.HDFS_IPC)
        ports.append(HadoopConfig.DATA_HTTP)
        ports.append(HadoopConfig.SECOND_HTTP)
        ports.append(HadoopConfig.YARN_RPC_PORTS)
        ports.append(HadoopConfig.HIVE_META)
        ports.append(HadoopConfig.HIVE_SERVER)
        return ports

    def get_total_instances(self, num_instances, layers):
        """
        Get the total number of instances. For Hadoop we must have additional
        containers for the YARN master, HDFS master, and possibly the Hive
        metastore.
        """
        instances = []
        for i in range(num_instances + 2):
            instances.append('hadoop')
        if len(layers) > 0 and layers[0] == "hive":
            instances.append('hive')
        return instances

    def generate(self, num):
        """
        Generate a new configuration
        """
        return HadoopConfig(num)

    def _generate_gluster_core_site(self, new_config_dir, container):
        """
        Generate the core-site configuration for a local filesystem.
        """
        core_in_file = open(self.template_dir + '/core-site.xml.template', 'r')
        core_out_file = open(new_config_dir + '/core-site.xml', 'w+')
        changes = {"DEFAULT_NAME": "file:///",
                   "DATA_TMP": "/service/data/%s/tmp" % container['host_name']}
        for line in core_in_file:
            s = Template(line).substitute(changes)
            core_out_file.write(s)
        core_in_file.close()
        core_out_file.close()

    def _generate_core_site(self, hdfs_master, new_config_dir):
        """
        Generate the core-site configuration.
        """
        core_in_file = open(self.template_dir + '/core-site.xml.template', 'r')
        core_out_file = open(new_config_dir + '/core-site.xml', 'w+')
        default_name = "%s://%s:%s" % ("hdfs",
                                       hdfs_master['data_ip'],
                                       HadoopConfig.HDFS_MASTER)
        changes = {"DEFAULT_NAME": default_name,
                   "DATA_TMP": "/service/data/tmp"}
        for line in core_in_file:
            s = Template(line).substitute(changes)
            core_out_file.write(s)
        core_in_file.close()
        core_out_file.close()

    def _generate_hdfs_site(self, config, hdfs_master, new_config_dir):
        """
        Generate the hdfs-site configuration.
        """
        hdfs_in_file = open(self.template_dir + '/hdfs-site.xml.template', 'r')
        hdfs_out_file = open(new_config_dir + '/hdfs-site.xml', 'w+')
        changes = {"DATA_DIR": config.data_directory}
        for line in hdfs_in_file:
            s = Template(line).substitute(changes)
            hdfs_out_file.write(s)
        hdfs_in_file.close()
        hdfs_out_file.close()

    def _generate_httpfs_site(self, config, new_config_dir):
        """
        Generate the httpfs-site configuration.
        """
        in_file = open(self.template_dir + '/httpfs-site.xml.template', 'r')
        out_file = open(new_config_dir + '/httpfs-site.xml', 'w+')
        changes = {}
        for line in in_file:
            s = Template(line).substitute(changes)
            out_file.write(s)
        in_file.close()
        out_file.close()

    def _generate_yarn_site(self, yarn_master, new_config_dir, container=None):
        """
        Generate the yarn-site configuration.
        """
        yarn_in_file = open(self.template_dir + '/yarn-site.xml.template', 'r')
        yarn_out_file = open(new_config_dir + '/yarn-site.xml', 'w+')
        changes = {"YARN_MASTER": yarn_master['data_ip']}

        # Get memory information.
        mem = self.system.get_total_memory()
        if mem < 1024:
            logging.warning("hadoop requires at least 1024MB (%sMB given)" % str(mem))
            mem = 1024
        changes['MEM'] = mem
        changes['CMEM'] = max(mem / 8, 512)
        changes['RMEM'] = 2 * changes['CMEM']
        changes['ROPTS'] = '-Xmx' + str(int(0.8 * changes['RMEM'])) + 'm'
        cores = self.system.get_num_cores() / 2
        if cores < 1:
            cores = 1
        changes['CORES'] = cores

        # Generate the staging table. This differs depending on whether
        # we need to be container specific or not.
        if container:
            changes['DATA_STAGING'] = '/service/data/%s/staging' % container['host_name']
        else:
            changes['DATA_STAGING'] = '/service/data/staging'
        for line in yarn_in_file:
            s = Template(line).substitute(changes)
            yarn_out_file.write(s)
        yarn_in_file.close()
        yarn_out_file.close()

    def _generate_log4j(self, new_config_dir):
        in_file = open(self.template_dir + '/log4j.properties', 'r')
        out_file = open(new_config_dir + '/log4j.properties', 'w+')
        for line in in_file:
            out_file.write(line)
        in_file.close()
        out_file.close()

    def _generate_yarn_env(self, yarn_master, new_config_dir):
        """
        Generate the yarn-env configuration.
        """
        yarn_in_file = open(self.template_dir + '/yarn-env.sh.template', 'r')
        yarn_out_file = open(new_config_dir + '/yarn-env.sh', 'w+')
        for line in yarn_in_file:
            yarn_out_file.write(line)
        yarn_in_file.close()
        yarn_out_file.close()

    def _generate_mapred_env(self, new_config_dir):
        """
        Generate the mapred-env configuration.
        """
        in_file = open(self.template_dir + '/mapred-env.sh', 'r')
        out_file = open(new_config_dir + '/mapred-env.sh', 'w+')
        for line in in_file:
            out_file.write(line)
        in_file.close()
        out_file.close()

    def _generate_mapred_site(self, yarn_master, config, containers, new_config_dir, container=None):
        """
        Generate the mapred-site configuration.
        """
        mapred_in_file = open(self.template_dir + '/mapred-site.xml.template', 'r')
        mapred_out_file = open(new_config_dir + '/mapred-site.xml', 'w+')
        changes = {"HISTORY_SERVER": yarn_master['data_ip']}

        # Get memory information.
        mem = self.system.get_total_memory()
        if mem < 1024:
            mem = 1024
        changes['MMEM'] = max(mem / 8, 512)
        changes['RMEM'] = 2 * changes['MMEM']
        changes['MOPTS'] = '-Xmx' + str(int(0.8 * changes['MMEM'])) + 'm'
        changes['ROPTS'] = '-Xmx' + str(int(0.8 * changes['RMEM'])) + 'm'

        # These are the mapred variables.
        cores = self.system.get_num_cores()
        if cores < 1:
            cores = 1
        # changes['NODE_REDUCES'] = mem / ( len(containers) - 2 ) / 2
        changes['NODE_REDUCES'] = cores
        changes['NODE_MAPS'] = changes['NODE_REDUCES'] * 4
        changes['JOB_MAPS'] = changes['NODE_MAPS'] * (len(containers) - 2)
        changes['JOB_REDUCES'] = changes['NODE_REDUCES'] * (len(containers) - 2)

        # Generate the temp area. This differs depending on whether
        # we need to be container specific or not.
        if container:
            changes['DATA_TMP'] = '/service/data/%s/tmp' % container['host_name']
        else:
            changes['DATA_TMP'] = '/service/data/tmp'
        for line in mapred_in_file:
            s = Template(line).substitute(changes)
            mapred_out_file.write(s)
        mapred_in_file.close()
        mapred_out_file.close()

    def _apply_hive_metastore(self, config, containers):
        """
        Apply the Hive metastore configuration
        """
        return self.hive_ms.apply(config, containers)

    def _apply_hive_client(self, config, containers):
        """
        Apply the Hive client configuration
        """
        return self.hive_client.apply(config, containers)

    def _apply_hadoop(self, config, containers):
        """
        Apply the Hadoop configuration
        """
        entry_point = {'type': 'hadoop'}

        # Pick out the various master nodes. The Hadoop configuration assumes
        # that the first two containers are used for metadata purposes.
        yarn_master = containers[0]
        hdfs_master = containers[1]

        # Remember the entry points
        entry_point['yarn'] = str(yarn_master['data_ip'])
        entry_point['hdfs'] = str(hdfs_master['data_ip'])
        entry_point['instances'] = []

        # Create a new configuration directory, and place
        # into the template directory.
        config_dirs = []
        for c in containers:
            new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid, c)
            try:
                sh.mkdir('-p', new_config_dir)
            except:
                sys.stderr.write('could not create config dir ' + new_config_dir)

            # Only add the container to the instances list once.
            entry_point['instances'].append([c['data_ip'], c['host_name']])

            # Generate some mapred-site config
            self._generate_mapred_site(yarn_master, config, containers, new_config_dir)
            self._generate_mapred_env(new_config_dir)

            # Now generate the yarn config files
            self._generate_yarn_site(yarn_master, new_config_dir)
            self._generate_yarn_env(yarn_master, new_config_dir)

            # Now generate the core config
            self._generate_core_site(hdfs_master, new_config_dir)

            # Now generate the HDFS config
            self._generate_hdfs_site(config, hdfs_master, new_config_dir)

            # Now generate the HTTPFS config
            self._generate_httpfs_site(config, new_config_dir)

            # Generate the log4j config
            self._generate_log4j(new_config_dir)
            config_dirs.append([c['container'],
                                new_config_dir + '/*',
                                config.config_directory])
        return config_dirs, entry_point

    def _find_hadoop_storage(self, containers):
        """
        Find a Hadoop compatible storage entry.
        """
        for c in containers:
            for s in c['storage']:
                if s['type'] == 'gluster' or s['type'] == 'hadoop':
                    return s

    def _apply_yarn(self, config, containers):
        """
        Apply the YARN-only configuration
        """
        entry_point = {'type': 'yarn'}

        # Pick out the master node. The configuration assumes
        # that the first container is used for metadata purposes.
        yarn_master = containers[0]
        entry_point['yarn'] = str(yarn_master['data_ip'])
        entry_point['instances'] = []

        # Create a new configuration directory, and place
        # into the template directory.
        config_dirs = []
        for c in containers:
            new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid, c)
            try:
                sh.mkdir('-p', new_config_dir)
            except:
                sys.stderr.write('could not create config dir ' + new_config_dir)

            # Slaves file used to figure out who hosts the actual work/data
            for server in containers:
                entry_point['instances'].append([server['data_ip'], server['host_name']])

            # Generate the log4j config
            self._generate_log4j(new_config_dir)

            # Generate some mapred-site config
            self._generate_mapred_site(yarn_master, config, containers, new_config_dir, c)
            self._generate_mapred_env(new_config_dir)

            # Now generate the yarn config files
            self._generate_yarn_site(yarn_master, new_config_dir, c)
            self._generate_yarn_env(yarn_master, new_config_dir)

            # Now we need to configure additional storage parameters. For example,
            # for Gluster, etc.
            storage_entry = self._find_hadoop_storage(containers)
            entry_point['hdfs_type'] = storage_entry['type']
            if storage_entry['type'] == 'gluster':
                url = self._apply_gluster(config, storage_entry, new_config_dir, c)
                entry_point['gluster_url'] = url
            config_dirs.append([c['container'],
                                new_config_dir + '/*',
                                config.config_directory])
        return config_dirs, entry_point

    def _apply_hive(self, config, hadoop_entry, hadoop_dirs, hadoop_containers, hive_containers):
        # First configure the metastore service
        ms_config = MetaStoreConfig(1)
        ms_config.uuid = config.uuid
        ms_config.hadoop_dirs = hadoop_dirs
        ms_dirs, ms_entry = self._apply_hive_metastore(ms_config, hive_containers)

        # Now configure the Hive client. This configuration
        # gets applied to the Hadoop containers.
        hive_config = HiveClientConfig(1)
        hive_config.uuid = config.uuid
        hive_config.hadoop_config_dir = config.config_directory
        hive_config.metastore = ms_entry['db']
        hive_dirs, hive_entry = self._apply_hive_client(hive_config, hadoop_containers)
        hive_dirs.extend(ms_dirs)
        return hive_dirs, hive_config

    def _apply_gluster(self, config, storage_entry, new_config_dir, container):
        # We assume that the new configuration directory has already
        # been created. In the future, may want to check for this.
        self._generate_gluster_core_site(new_config_dir, container)

        # The mount URL specifies how to connect to Gluster.
        mount_url = "%s:/%s" % (storage_entry['gluster'], storage_entry['volume'])
        return mount_url

    def apply(self, config, containers):
        """
        Apply the configuration to the instances
        """
        # First separate the Hadoop and Hive containers.
        hadoop_containers = []
        hive_containers = []
        for c in containers:
            if c['type'] == 'hadoop' or c['type'] == 'yarn':
                hadoop_containers.append(c)
            elif c['type'] == 'hive':
                hive_containers.append(c)

        if 'storage' in hadoop_containers[0]:
            # This Hadoop instance is being applied to an existing
            # storage mechanism. So just configure YARN.
            hadoop_dirs, hadoop_entry = self._apply_yarn(config, hadoop_containers)
        else:
            # This Hadoop instance is being applied for both storage
            # and compute. Right now there's no way to just instantiate HDFS.
            hadoop_dirs, hadoop_entry = self._apply_hadoop(config, hadoop_containers)
            hadoop_entry['hdfs_type'] = 'hadoop'

        hive_entry = {}
        if len(hive_containers) > 0:
            # We also need to configure some Hive services
            hive_dirs, hive_config = self._apply_hive(config, hadoop_entry, hadoop_dirs, hadoop_containers, hive_containers)

            # Now merge the configuration dirs.
            hadoop_dirs.extend(hive_dirs)
            hadoop_entry['db'] = hive_config.metastore
        return hadoop_dirs, hadoop_entry


class HadoopConfig(object):
    data_directory = '/service/data/main'
    log_directory = '/service/data/logs'
    tmp_directory = '/service/data/tmp'
    config_directory = '/service/conf/hadoop'

    YARN_SCHEDULER = '8030'
    YARN_TRACKER = '8031'
    YARN_IPC = '8032'
    YARN_ADMIN = '8033'
    YARN_LOCALIZER = '8040'
    YARN_RESOURCE = '8041'
    YARN_NODE = '8042'
    YARN_HTTP = '8088'
    YARN_HTTPS = '8090'
    # Not sure this is necessary?
    YARN_TRACKER = '8025'
    YARN_JOB_HISTORY = '10020'
    YARN_JOB_HISTORY_HTTP = '19888'
    HDFS_MASTER = '9000'
    HDFS_HTTP = '50070'
    HDFS_TRANSFER = '50010'
    HDFS_IPC = '50020'
    DATA_HTTP = '50075'
    SECOND_HTTP = '50090'
    YARN_RPC_PORTS = '50100-50200'
    HIVE_META = '9083'
    HIVE_SERVER = '10000'

    def __init__(self, num):
        self.num = num
        self.data_directory = HadoopConfig.data_directory
        self.log_directory = HadoopConfig.log_directory
        self.tmp_directory = HadoopConfig.tmp_directory
        self.config_directory = HadoopConfig.config_directory
        self.system_info = None
| jhorey/ferry | ferry/config/hadoop/hadoopconfig.py | Python | apache-2.0 | 21,276 |
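Every `_generate_*` method above fills a `*.template` file through `string.Template`; a self-contained sketch of that substitution pattern (the template line is made up):

```python
from string import Template

# A made-up template line in the style of core-site.xml.template.
line = '<value>hdfs://$DEFAULT_NAME:$PORT</value>'
changes = {"DEFAULT_NAME": "10.0.0.2", "PORT": "9000"}
print(Template(line).substitute(changes))  # <value>hdfs://10.0.0.2:9000</value>
```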
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-22 10:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('server', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='user_input',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='question',
            name='answer',
            field=models.TextField(blank=True, help_text='Answer', null=True),
        ),
        migrations.AlterField(
            model_name='question',
            name='question',
            field=models.TextField(blank=True, help_text='Text that show as question', null=True),
        ),
    ]
| jgsogo/neutron | webapp/server/migrations/0002_auto_20160522_1236.py | Python | gpl-2.0 | 863 |
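For context on `preserve_default=False` above: the `default=''` is used only to backfill existing rows while the column is added, and is not kept on the model. A sketch of the model declaration this migration implies (assumed from the migration state, not taken from the repo):

```python
from django.db import models

class Question(models.Model):
    # No default here: '' above was a one-off backfill value for existing rows.
    user_input = models.TextField()
    answer = models.TextField(blank=True, help_text='Answer', null=True)
    question = models.TextField(blank=True, help_text='Text that show as question', null=True)
```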
# let setuptools do the monkeypatching for wheel
import setuptools
import distutils.core
import distutils.sysconfig  # needed explicitly for get_python_lib() below
import os
from platform import system
import shutil
from distutils.core import Extension
from distutils.command.install_lib import install_lib
from distutils.command.build_ext import build_ext
from os.path import join, isdir, exists, abspath, dirname

curdir = abspath(dirname(__file__))

class MyInstall(install_lib):
    def install(self):
        if system() == 'Windows':
            library = 'build/python/pycapnqml.pyd'
        else:
            library = 'build/python/pycapnqml.so'
        install_dir = os.path.abspath(self.install_dir)
        if not os.path.exists(install_dir):
            os.makedirs(install_dir)
        shutil.copy(library, install_dir)
        target = join(install_dir, 'capnqml')
        if not exists(target):
            shutil.copytree('bindings/python/capnqml', target)
        return [join(install_dir, library.split(os.sep)[-1])]

class MyBuildExtension(build_ext):
    def run(self):
        pass

def recursive_list(dirpath, prepend_path):
    data_list = []
    fs = []
    for f in os.listdir(dirpath):
        fpath = join(dirpath, f)
        if isdir(fpath):
            data_list.extend(recursive_list(fpath, join(prepend_path, f)))
        else:
            fs.append(fpath)
    if len(fs) != 0:
        data_list.append((prepend_path, fs))
    return data_list

def get_datafiles():
    module_path = join(distutils.sysconfig.get_python_lib(), 'capnqml')
    ret = []
    if system() == 'Windows':
        curlst = []
        ret.append((module_path, [join(curdir, 'build/python/pycapnqml.pyd')]))
        for f in os.listdir(join(curdir, 'build/qtdlls')):
            if f.endswith('dll'):
                curlst.append(join(curdir, 'build/qtdlls', f))
        ret.append(('', curlst))
        curlst = recursive_list(join(curdir, 'build/qtdlls/qml'), 'qml')
        ret.extend(curlst)
        curlst = []
        for f in os.listdir(join(curdir, 'build/qtdlls/platforms')):
            if f.endswith('dll'):
                curlst.append(join(curdir, 'build/qtdlls/platforms', f))
        ret.append(('platforms', curlst))
    else:
        files = [
            join(curdir, 'src/schema/json.capnp'),
            join(curdir, 'src/schema/error.capnp'),
            join(curdir, 'src/schema/message.capnp'),
            join(curdir, 'build/capnqml-zmqlauncher')
        ]
        ret.append((module_path, files))
    return ret

module = Extension('pycapnqml', sources=[])

with open(os.path.join(curdir, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
distutils.core.setup(
name='capnqml',
description='Python interface to capnqml - A library to link any programming language with Qml over capnproto',
long_description=long_description,
url='https://github.com/raffber/capnqml',
author='Raphael Bernhard',
author_email='beraphae@gmail.com',
license='LGPL',
ext_modules=[module],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: LGPL License',
'Programming Language :: Python :: 3.4',
],
cmdclass={
'build_ext': MyBuildExtension,
'install_lib': MyInstall
},
keywords='PyQt5 capnproto',
package_dir = {'': 'bindings/python'},
packages=['capnqml'],
data_files=get_datafiles()
)
| raffber/capnqml | setup.py | Python | mpl-2.0 | 3,678 |
import Base
import sys
import industrial_lib
time_of_day='_day'
(landing_platform,bar,weap) = industrial_lib.MakeCorisc (time_of_day,'bases/bartender_union.py')
| vinni-au/vega-strike | data/bases/university_ISO_sunset.py | Python | gpl-2.0 | 162 |
from pybeans.const import UNDEFINED
from pybeans.exceptions import EncodingException
from pybeans.nodes import *
class SchemaEncoder(object):
    visitors = None

    @classmethod
    def create_instance(cls):
        instance = cls()
        instance.visitors = {
            StrNode: instance._visit_value,
            UnicodeNode: instance._visit_value,
            IntNode: instance._visit_value,
            BoolNode: instance._visit_value,
            FloatNode: instance._visit_value,
            DecimalNode: instance._visit_value,
            TupleNode: instance._visit_tuple,
            ListNode: instance._visit_list,
            DictNode: instance._visit_dict,
            BeanNode: instance._visit_bean,
        }
        return instance

    def __call__(self, node, value, data):
        return self._visit_node(node, value, data)

    def _visit_bean(self, node, value, data):
        attr_node_list = node.bean.__pybeansschema__.get_nodes()
        for attr, node in attr_node_list:
            data[attr] = self._visit_node(node, getattr(value, attr, node.default), data.get(attr, {}), attr)
        return data

    @staticmethod
    def _visit_value(node, value, data):
        if node.encode:
            value = node.encode(value)
        return value

    def _visit_tuple(self, node, value, data):
        assert not data
        ret = []
        for item, item_node in zip(value, node.node_list):
            ret.append(self._visit_node(item_node, item, {}))
        return self._visit_value(node, value, data)

    def _visit_list(self, node, value, data):
        if not data:
            data = list()
        for item in value:
            data.append(self._visit_node(node.node, item, {}))
        return data

    def _visit_dict(self, node, value, data):
        for key, item in value.iteritems():
            key = self._visit_node(node.key_node, key, {})
            data[key] = self._visit_node(node.value_node, item, {})
        return data

    def _visit_node(self, node, value, data, attr=None):
        if value is UNDEFINED:
            raise EncodingException('{0} is not defined'.format(attr))
        if value is None:
            return value
        if value is NotImplemented:
            value = node.default
        visitor = self._get_visitor(node)
        return visitor(node, value, data)

    def _get_visitor(self, node):
        return self.visitors[type(node)]
| cordis/pybeans | pybeans/encoder.py | Python | mit | 2,421 |
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2017 William H. Majoros (martiandna@gmail.com).
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import os
import ProgramName
from SlurmWriter import SlurmWriter
from Rex import Rex
rex=Rex()
ROOT="/home/bmajoros/PopSTARR/graham"
MEM=50000
NICE=500
jobName="TRIM"
maxParallel=1000
THREADS=31
TRIMMOMATIC="java -jar /data/reddylab/software/Trimmomatic-0.33/Trimmomatic-0.33/trimmomatic-0.33.jar PE"
#=========================================================================
# main()
#=========================================================================
if(len(sys.argv)!=5):
    exit(ProgramName.get()+" <adapters.fasta> <fastq-in> <fastq-out> <full-path-to-slurms>\n")
(adaptersFasta,fastqIn,fastqOut,slurmDir)=sys.argv[1:]

files=os.listdir(fastqIn)
writer=SlurmWriter()
for file in files:
    if(not rex.find("(.*[_-])R1([_-].*)\.fastq.gz",file)): continue
    file1=file
    file2=rex[1]+"R2"+rex[2]+".fastq.gz"
    cmd=TRIMMOMATIC+" -threads "+str(THREADS)+" -phred33 "+\
        fastqIn+"/"+file1+" "+fastqIn+"/"+file2+" "+\
        fastqOut+"/"+rex[1]+"_FWD_paired.fq.gz "+\
        fastqOut+"/"+rex[1]+"_FWD_unpaired.fq.gz "+\
        fastqOut+"/"+rex[1]+"_REV_paired.fq.gz "+\
        fastqOut+"/"+rex[1]+"_REV_unpaired.fq.gz "+\
        "ILLUMINACLIP:"+adaptersFasta+\
        ":2:30:10:8:TRUE HEADCROP:1 LEADING:30 TRAILING:30 "+\
        "SLIDINGWINDOW:4:15 MINLEN:36"
    writer.addCommand("cd "+ROOT+"\n"+cmd)
writer.nice(NICE) # turns on "nice" (sets it to 100 by default)
writer.mem(MEM)
writer.threads(THREADS)
writer.setQueue("new,all")
writer.writeArrayScript(slurmDir,jobName,maxParallel,
"#SBATCH --exclude=x2-01-1,x2-01-2,x2-01-3,x2-01-4,x2-02-1,x2-02-2,x2-02-3,x2-02-4,x2-03-1 ")
| ReddyLab/POPSTARR2 | make-trim-slurms.py | Python | gpl-3.0 | 2,439 |
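The R1/R2 pairing in the script hinges on the two capture groups around `R1`; an equivalent sketch with the stdlib `re` module (the filename is made up):

```python
import re

name = 'sampleA_R1_001.fastq.gz'  # made-up read-1 filename
m = re.search(r'(.*[_-])R1([_-].*)\.fastq\.gz', name)
if m:
    # Rebuild the mate filename by swapping R1 for R2 between the groups.
    mate = m.group(1) + 'R2' + m.group(2) + '.fastq.gz'
    print(mate)  # sampleA_R2_001.fastq.gz
```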
#
# Copyright (c) 2008--2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import os
import sys
from spacewalk.common.rhnLog import log_debug, log_error
class Loader:
    # Class that saves the state of imported objects
    _imports = {}

    def load(self, dir, interface_signature='rpcClasses'):
        # The key we use for caching
        root_dir = "/usr/share/rhn"
        key = (dir, root_dir, interface_signature)
        if self._imports.has_key(key):
            return self._imports[key]

        dirname = "%s/%s" % (root_dir, dir)

        # We need to import things
        if root_dir is not None and root_dir not in sys.path:
            sys.path.append(root_dir)
        fromcomps = dir.split('/')
        _imports = {}

        # Keep track of the modules we've already tried to load, to avoid loading
        # them twice
        modules = []

        # Load each module (that is not internal - i.e. doesn't start with _)
        for module in os.listdir(dirname):
            log_debug(5, "Attempting to load module %s from %s %s" % (
                module, '.'.join(fromcomps), dirname))
            if module[0] in ('_', '.'):
                # We consider it 'internal' and we don't load it
                log_debug(6, "Ignoring module %s" % module)
                continue

            # Importing files or directories with . in them is broken, so keep
            # only the first part
            module = module.split('.', 1)[0]
            if module in modules:
                log_debug(6, "Already tried to load module %s" % (module, ))
                continue

            # Add it to the list, so we don't load it again
            modules.append(module)

            # We use fromclause to build the full module path
            fromclause = '.'.join(fromcomps + [module])

            # Try to import the module
            try:
                m = __import__(fromclause, {}, {}, [module])
            except ImportError, e:
                log_error("Error importing %s: %s" % (module, e))
                log_debug(6, "Details: sys.path: %s" % (sys.path, ))
                continue
            if not hasattr(m, interface_signature):
                # The module does not support our API
                log_error("Module %s doesn't support our API" % (module, ))
                continue
            log_debug(5, "Module %s loaded" % (module, ))
            _imports[module] = getattr(m, interface_signature)
        self._imports[key] = _imports
        return _imports

def load(dir, root_dir=None, interface_signature='rpcClasses'):
    """
    Load modules (handlers) beneath the handlers/ tree.
    root_dir: which directory to use as a top-level directory
    """
    l = Loader()
    return l.load(dir, interface_signature=interface_signature)
| dmacvicar/spacewalk | backend/server/rhnImport.py | Python | gpl-2.0 | 3,346 |
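The `__import__(fromclause, {}, {}, [module])` call above is the pre-`importlib` idiom for getting the leaf submodule instead of the top-level package; a sketch of the modern equivalent (the dotted path is just a stand-in):

```python
import importlib

# __import__('a.b.c', {}, {}, ['c']) returns module a.b.c; without the
# fromlist it would return the top-level package 'a'.
m = importlib.import_module('xml.etree.ElementTree')
print(m.__name__)  # xml.etree.ElementTree
```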
"""
Tests for transformers.py
"""
from mock import MagicMock, patch
from nose.plugins.attrib import attr
from unittest import TestCase
from ..block_structure import BlockStructureModulestoreData
from ..exceptions import TransformerException
from ..transformers import BlockStructureTransformers
from .helpers import (
ChildrenMapTestMixin, MockTransformer, MockFilteringTransformer, mock_registered_transformers
)
@attr('shard_2')
class TestBlockStructureTransformers(ChildrenMapTestMixin, TestCase):
    """
    Test class for testing BlockStructureTransformers
    """
    class UnregisteredTransformer(MockTransformer):
        """
        Mock transformer that is not registered.
        """
        pass

    def setUp(self):
        super(TestBlockStructureTransformers, self).setUp()
        self.transformers = BlockStructureTransformers(usage_info=MagicMock())
        self.registered_transformers = [MockTransformer(), MockFilteringTransformer()]

    def add_mock_transformer(self):
        """
        Adds the registered transformers to the self.transformers collection.
        """
        with mock_registered_transformers(self.registered_transformers):
            self.transformers += self.registered_transformers

    def test_add_registered(self):
        self.add_mock_transformer()
        self.assertIn(
            self.registered_transformers[0],
            self.transformers._transformers['no_filter']  # pylint: disable=protected-access
        )
        self.assertIn(
            self.registered_transformers[1],
            self.transformers._transformers['supports_filter']  # pylint: disable=protected-access
        )

    def test_add_unregistered(self):
        with self.assertRaises(TransformerException):
            self.transformers += [self.UnregisteredTransformer()]
        self.assertEquals(self.transformers._transformers['no_filter'], [])  # pylint: disable=protected-access
        self.assertEquals(self.transformers._transformers['supports_filter'], [])  # pylint: disable=protected-access

    def test_collect(self):
        with mock_registered_transformers(self.registered_transformers):
            with patch(
                'openedx.core.lib.block_structure.tests.helpers.MockTransformer.collect'
            ) as mock_collect_call:
                self.transformers.collect(block_structure=MagicMock())
                self.assertTrue(mock_collect_call.called)

    def test_transform(self):
        self.add_mock_transformer()
        with patch(
            'openedx.core.lib.block_structure.tests.helpers.MockTransformer.transform'
        ) as mock_transform_call:
            self.transformers.transform(block_structure=MagicMock())
            self.assertTrue(mock_transform_call.called)

    def test_is_collected_outdated(self):
        block_structure = self.create_block_structure(
            self.SIMPLE_CHILDREN_MAP,
            BlockStructureModulestoreData
        )
        with mock_registered_transformers(self.registered_transformers):
            self.assertTrue(self.transformers.is_collected_outdated(block_structure))
            self.transformers.collect(block_structure)
            self.assertFalse(self.transformers.is_collected_outdated(block_structure))
| Learningtribes/edx-platform | openedx/core/lib/block_structure/tests/test_transformers.py | Python | agpl-3.0 | 3,251 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_binding(base_resource):
    """ Binding class showing the resources that can be bound to crvserver_binding.
    """
    def __init__(self) :
        self._name = ""
        self.crvserver_filterpolicy_binding = []
        self.crvserver_cmppolicy_binding = []
        self.crvserver_lbvserver_binding = []
        self.crvserver_policymap_binding = []
        self.crvserver_cspolicy_binding = []
        self.crvserver_crpolicy_binding = []

    @property
    def name(self) :
        ur"""Name of a cache redirection virtual server about which to display detailed information.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        ur"""Name of a cache redirection virtual server about which to display detailed information.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def crvserver_policymap_bindings(self) :
        ur"""policymap that can be bound to crvserver.
        """
        try :
            return self._crvserver_policymap_binding
        except Exception as e:
            raise e

    @property
    def crvserver_lbvserver_bindings(self) :
        ur"""lbvserver that can be bound to crvserver.
        """
        try :
            return self._crvserver_lbvserver_binding
        except Exception as e:
            raise e

    @property
    def crvserver_filterpolicy_bindings(self) :
        ur"""filterpolicy that can be bound to crvserver.
        """
        try :
            return self._crvserver_filterpolicy_binding
        except Exception as e:
            raise e

    @property
    def crvserver_cmppolicy_bindings(self) :
        ur"""cmppolicy that can be bound to crvserver.
        """
        try :
            return self._crvserver_cmppolicy_binding
        except Exception as e:
            raise e

    @property
    def crvserver_cspolicy_bindings(self) :
        ur"""cspolicy that can be bound to crvserver.
        """
        try :
            return self._crvserver_cspolicy_binding
        except Exception as e:
            raise e

    @property
    def crvserver_crpolicy_bindings(self) :
        ur"""crpolicy that can be bound to crvserver.
        """
        try :
            return self._crvserver_crpolicy_binding
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(crvserver_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.crvserver_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(self, service, name) :
        ur""" Use this API to fetch crvserver_binding resource.
        """
        try :
            if type(name) is not list :
                obj = crvserver_binding()
                obj.name = name
                response = obj.get_resource(service)
            else :
                if name and len(name) > 0 :
                    obj = [crvserver_binding() for _ in range(len(name))]
                    response = [None] * len(name)  # was referenced before assignment in the original
                    for i in range(len(name)) :
                        obj[i].name = name[i]
                        response[i] = obj[i].get_resource(service)
            return response
        except Exception as e:
            raise e

class crvserver_binding_response(base_response) :
    def __init__(self, length=1) :
        self.crvserver_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.crvserver_binding = [crvserver_binding() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/cr/crvserver_binding.py | Python | apache-2.0 | 4,757 |
from .core import jackknife
from .parallel import fold
| obmarg/toolz | toolz/sandbox/__init__.py | Python | bsd-3-clause | 55 |
# -*- coding: utf-8 -*-
def toflat(obj, ns=""):
res = {}
for key in obj:
if type(obj[key]) is dict:
subdict = toflat(obj[key], "%s%s" % (ns,key[0].upper()+key[1:]))
for k in subdict:
res[k[0].upper()+k[1:]] = subdict[k]
else:
res["%s%s" % (ns, key[0].upper()+key[1:])] = str(obj[key])
return res
def todict(obj):
res = {}
for key in obj:
if type(obj[key]) is dict:
subdict = todict(obj[key])
for k in subdict:
res[k] = subdict[k]
else:
res[key] = obj[key]
return res
| fraoustin/flask-monitor | flask_monitor/util.py | Python | gpl-2.0 | 634 |
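A quick sketch of what the two helpers produce on a nested dict, assuming `toflat` and `todict` from the sample are in scope (the input is made up):

```python
cfg = {"server": {"port": 80, "debug": False}, "name": "demo"}

print(toflat(cfg))  # {'ServerPort': '80', 'ServerDebug': 'False', 'Name': 'demo'}
print(todict(cfg))  # {'port': 80, 'debug': False, 'name': 'demo'}
```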
import sys
import unittest
import threading
import os
from nose.tools import eq_
from pydev_imports import StringIO, SimpleXMLRPCServer
from pydev_localhost import get_localhost
from pydev_console_utils import StdIn
import socket
# make it as if we were executing from the directory above this one
sys.argv[0] = os.path.dirname(sys.argv[0])
# twice the dirname to get the previous level from this file.
sys.path.insert(1, os.path.join(os.path.dirname(sys.argv[0])))
# PyDevFrontEnd depends on singleton in IPython, so you
# can't make multiple versions. So we reuse front_end for
# all the tests
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdout = sys.stdout = StringIO()
stderr = sys.stderr = StringIO()
from pydev_ipython_console_011 import PyDevFrontEnd
s = socket.socket()
s.bind(('', 0))
client_port = s.getsockname()[1]
s.close()
front_end = PyDevFrontEnd(get_localhost(), client_port)
def addExec(code, expected_more=False):
more = front_end.addExec(code)
eq_(expected_more, more)
class TestBase(unittest.TestCase):
def setUp(self):
front_end.input_splitter.reset()
stdout.truncate(0)
stdout.seek(0)
stderr.truncate(0)
stderr.seek(0)
def tearDown(self):
pass
class TestPyDevFrontEnd(TestBase):
def testAddExec_1(self):
addExec('if True:', True)
def testAddExec_2(self):
addExec('if True:\n testAddExec_a = 10\n', True)
def testAddExec_3(self):
assert 'testAddExec_a' not in front_end.getNamespace()
addExec('if True:\n testAddExec_a = 10\n\n')
assert 'testAddExec_a' in front_end.getNamespace()
eq_(front_end.getNamespace()['testAddExec_a'], 10)
def testGetNamespace(self):
assert 'testGetNamespace_a' not in front_end.getNamespace()
addExec('testGetNamespace_a = 10')
assert 'testGetNamespace_a' in front_end.getNamespace()
eq_(front_end.getNamespace()['testGetNamespace_a'], 10)
def testComplete(self):
unused_text, matches = front_end.complete('%')
assert len(matches) > 1, 'at least one magic should appear in completions'
def testCompleteDoesNotDoPythonMatches(self):
# Test that IPython's completions do not do the things that
# PyDev's completions will handle
addExec('testComplete_a = 5')
addExec('testComplete_b = 10')
addExec('testComplete_c = 15')
unused_text, matches = front_end.complete('testComplete_')
assert len(matches) == 0
def testGetCompletions_1(self):
# Test the merged completions include the standard completions
addExec('testComplete_a = 5')
addExec('testComplete_b = 10')
addExec('testComplete_c = 15')
res = front_end.getCompletions('testComplete_', 'testComplete_')
matches = [f[0] for f in res]
assert len(matches) == 3
eq_(set(['testComplete_a', 'testComplete_b', 'testComplete_c']), set(matches))
def testGetCompletions_2(self):
# Test that we get IPython completions in results
# we do this by checking kw completion which PyDev does
# not do by default
addExec('def ccc(ABC=123): pass')
res = front_end.getCompletions('ccc(', '')
matches = [f[0] for f in res]
assert 'ABC=' in matches
def testGetCompletions_3(self):
# Test that magics return IPYTHON magic as type
res = front_end.getCompletions('%cd', '%cd')
assert len(res) == 1
eq_(res[0][3], '12') # '12' == IToken.TYPE_IPYTHON_MAGIC
assert len(res[0][1]) > 100, 'docstring for %cd should be a reasonably long string'
class TestRunningCode(TestBase):
def testPrint(self):
addExec('print("output")')
eq_(stdout.getvalue(), 'output\n')
def testQuestionMark_1(self):
addExec('?')
assert len(stdout.getvalue()) > 1000, 'IPython help should be pretty big'
def testQuestionMark_2(self):
addExec('int?')
assert stdout.getvalue().find('Convert') != -1
def testGui(self):
from pydev_ipython.inputhook import get_inputhook, set_stdin_file
set_stdin_file(sys.stdin)
assert get_inputhook() is None
addExec('%gui tk')
        # We can't test that the GUI actually works here: we aren't connected
        # over XML-RPC, so there is nowhere for the input hook to run.
assert get_inputhook() is not None
addExec('%gui none')
assert get_inputhook() is None
def testHistory(self):
''' Make sure commands are added to IPython's history '''
addExec('a=1')
addExec('b=2')
_ih = front_end.getNamespace()['_ih']
eq_(_ih[-1], 'b=2')
eq_(_ih[-2], 'a=1')
addExec('history')
hist = stdout.getvalue().split('\n')
eq_(hist[-1], '')
eq_(hist[-2], 'history')
eq_(hist[-3], 'b=2')
eq_(hist[-4], 'a=1')
def testEdit(self):
''' Make sure we can issue an edit command '''
called_RequestInput = [False]
called_OpenEditor = [False]
def startClientThread(client_port):
class ClientThread(threading.Thread):
def __init__(self, client_port):
threading.Thread.__init__(self)
self.client_port = client_port
def run(self):
class HandleRequestInput:
def RequestInput(self):
called_RequestInput[0] = True
return '\n'
def OpenEditor(self, name, line):
called_OpenEditor[0] = (name, line)
return True
handle_request_input = HandleRequestInput()
import pydev_localhost
client_server = SimpleXMLRPCServer((pydev_localhost.get_localhost(), self.client_port), logRequests=False)
client_server.register_function(handle_request_input.RequestInput)
client_server.register_function(handle_request_input.OpenEditor)
client_server.serve_forever()
client_thread = ClientThread(client_port)
client_thread.setDaemon(True)
client_thread.start()
return client_thread
startClientThread(client_port)
orig_stdin = sys.stdin
sys.stdin = StdIn(self, get_localhost(), client_port)
try:
filename = 'made_up_file.py'
addExec('%edit ' + filename)
eq_(called_OpenEditor[0], (os.path.abspath(filename), 0))
assert called_RequestInput[0], "Make sure the 'wait' parameter has been respected"
finally:
sys.stdin = orig_stdin
if __name__ == '__main__':
    # Just calling unittest.main() did not work when this file is run directly
    # (reason unclear), so the suites are run explicitly below. Note that the
    # test using "from pydev_ipython.inputhook import get_inputhook,
    # set_stdin_file" fails in this mode, although it passes when launched
    # from PyDev with Ctrl+F9.
unittest.TextTestRunner(verbosity=1).run(unittest.makeSuite(TestRunningCode))
unittest.TextTestRunner(verbosity=1).run(unittest.makeSuite(TestPyDevFrontEnd))
| AMOboxTV/AMOBox.LegoBuild | script.module.pydevd/lib/tests/test_pydev_ipython_011.py | Python | gpl-2.0 | 7,272 |
import numpy as nm
from sfepy.terms.terms import Term, terms
from sfepy.base.base import get_default
def grad_as_vector(grad):
grad = grad.transpose((0, 1, 3, 2))
sh = grad.shape
return grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
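
# Shape sketch (assumption for illustration: gradients arrive shaped
# (n_el, n_qp, dim, n_c), as produced by Term.get(..., 'grad') below);
# grad_as_vector() stacks the transposed gradient into one column vector
# per quadrature point:
#
#     >>> g = nm.zeros((2, 4, 3, 3))      # 2 cells, 4 QP, dim = n_c = 3
#     >>> grad_as_vector(g).shape
#     (2, 4, 9, 1)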
class AdjDivGradTerm(Term):
r"""
Gateaux differential of :math:`\Psi(\ul{u}) = \int_{\Omega} \nu\
\nabla \ul{v} : \nabla \ul{u}` w.r.t. :math:`\ul{u}` in the direction
:math:`\ul{v}` or adjoint term to `dw_div_grad`.
:Definition:
.. math::
w \delta_{u} \Psi(\ul{u}) \circ \ul{v}
:Arguments:
- material_1 : :math:`w` (weight)
- material_2 : :math:`\nu` (viscosity)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_adj_div_grad'
arg_types = ('material_1', 'material_2', 'virtual', 'parameter')
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'virtual' : ('D', None), 'parameter' : 'D'}
function = staticmethod(terms.term_ns_asm_div_grad)
def get_fargs(self, mat1, mat2, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if diff_var is None:
grad = grad_as_vector(self.get(state, 'grad'))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat1 * mat2, vg, fmode
class AdjConvect1Term(Term):
r"""
The first adjoint term to nonlinear convective term `dw_convect`.
:Definition:
.. math::
\int_{\Omega} ((\ul{v} \cdot \nabla) \ul{u}) \cdot \ul{w}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_adj_convect1'
arg_types = ('virtual', 'state', 'parameter' )
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D', 'parameter' : 'D'}
function = staticmethod(terms.dw_adj_convect1)
def get_fargs(self, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_w = self.get(state, 'val')
grad_u = self.get(parameter, 'grad') # No transposition here!
fmode = diff_var is not None
return val_w, grad_u, vg, fmode
class AdjConvect2Term(Term):
r"""
The second adjoint term to nonlinear convective term `dw_convect`.
:Definition:
.. math::
\int_{\Omega} ((\ul{u} \cdot \nabla) \ul{v}) \cdot \ul{w}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_adj_convect2'
arg_types = ('virtual', 'state', 'parameter' )
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D', 'parameter' : 'D'}
function = staticmethod(terms.dw_adj_convect2)
def get_fargs(self, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_w = self.get(state, 'val')
val_u = self.get(parameter, 'val')
fmode = diff_var is not None
return val_w, val_u, vg, fmode
class SUPGCAdjStabilizationTerm(Term):
r"""
Adjoint term to SUPG stabilization term `dw_st_supg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ [ ((\ul{v} \cdot \nabla)
\ul{u}) ((\ul{u} \cdot \nabla) \ul{w}) + ((\ul{u} \cdot \nabla)
\ul{u}) ((\ul{v} \cdot \nabla) \ul{w}) ]
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_st_adj_supg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter' : 'D'}
function = staticmethod(terms.dw_st_adj_supg_c)
def get_fargs(self, mat, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, vg = self.get_approximation(state)
val_u = self.get(parameter, 'val')
grad_u = self.get(parameter, 'grad').transpose((0, 1, 3, 2)).copy()
conn = ap.get_connectivity(self.region, self.integration)
fmode = diff_var is not None
return state(), val_u, grad_u, mat, vg, conn, fmode
class SUPGPAdj1StabilizationTerm(Term):
r"""
The first adjoint term to SUPG stabilization term `dw_st_supg_p`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p (\ul{v} \cdot
\nabla \ul{w})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`p`
"""
name = 'dw_st_adj1_supg_p'
arg_types = ('material', 'virtual', 'state', 'parameter')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter' : 1}
function = staticmethod(terms.dw_st_adj1_supg_p)
def get_fargs(self, mat, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap_w, vg_w = self.get_approximation(state)
grad_p = self.get(parameter, 'grad')
conn_w = ap_w.get_connectivity(self.region, self.integration)
fmode = diff_var is not None
return state(), grad_p, mat, vg_w, conn_w, fmode
class SUPGPAdj2StabilizationTerm(Term):
r"""
The second adjoint term to SUPG stabilization term `dw_st_supg_p`
as well as adjoint term to PSPG stabilization term `dw_st_pspg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla r (\ul{v} \cdot \nabla
\ul{u})
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{u}`
- state : :math:`r`
"""
name = 'dw_st_adj2_supg_p'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 1, 'parameter' : 'D'}
function = staticmethod(terms.dw_st_adj2_supg_p)
def get_fargs(self, mat, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap_r, vg_r = self.get_approximation(state)
vg_u, _ = self.get_mapping(parameter)
grad_u = self.get(parameter, 'grad').transpose((0, 1, 3, 2)).copy()
conn_r = ap_r.get_connectivity(self.region, self.integration)
fmode = diff_var is not None
return grad_u, state(), mat, vg_u, vg_r, conn_r, fmode
class SDDotVolumeTerm(Term):
r"""
Sensitivity (shape derivative) of dot product of scalars or vectors.
:Definition:
.. math::
\int_{\Omega_D} p q (\nabla \cdot \ul{\Vcal}) \mbox{ , }
\int_{\Omega_D} (\ul{u} \cdot \ul{w}) (\nabla \cdot \ul{\Vcal})
:Arguments:
- parameter_1 : :math:`p` or :math:`\ul{u}`
- parameter_2 : :math:`q` or :math:`\ul{w}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
"""
name = 'd_sd_volume_dot'
arg_types = ('parameter_1', 'parameter_2', 'parameter_mesh_velocity')
arg_shapes = {'parameter_1' : 'D', 'parameter_2' : 'D',
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_volume_dot)
def get_fargs(self, par1, par2, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par1)
val1 = self.get(par1, 'val')
val2 = self.get(par2, 'val')
div_mv = self.get(par_mv, 'div')
return val1, val2, div_mv, vg, get_default(term_mode, 1)
def get_eval_shape(self, par1, par2, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par1)
return (n_el, 1, 1, 1), par1.dtype
class SDDivTerm(Term):
r"""
Sensitivity (shape derivative) of Stokes term `dw_stokes` in 'div' mode.
Supports the following term modes: 1 (sensitivity) or 0 (original term
value).
:Definition:
.. math::
\int_{\Omega_D} p [ (\nabla \cdot \ul{w}) (\nabla \cdot \ul{\Vcal})
- \pdiff{\Vcal_k}{x_i} \pdiff{w_i}{x_k} ]
:Arguments:
- parameter_u : :math:`\ul{u}`
- parameter_p : :math:`p`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
"""
name = 'd_sd_div'
arg_types = ('parameter_u', 'parameter_p', 'parameter_mesh_velocity')
arg_shapes = {'parameter_u' : 'D', 'parameter_p' : 1,
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_div)
def get_fargs(self, par_u, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
div_u = self.get(par_u, 'div')
grad_u = grad_as_vector(self.get(par_u, 'grad'))
val_p = self.get(par_p, 'val')
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (div_u, grad_u, val_p, div_mv, grad_mv, vg,
get_default(term_mode, 1))
def get_eval_shape(self, par_u, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDDivGradTerm(Term):
r"""
Sensitivity (shape derivative) of diffusion term `dw_div_grad`.
Supports the following term modes: 1 (sensitivity) or 0 (original term
value).
:Definition:
.. math::
w \nu \int_{\Omega_D} [ \pdiff{u_i}{x_k} \pdiff{w_i}{x_k}
(\nabla \cdot \ul{\Vcal})
- \pdiff{\Vcal_j}{x_k} \pdiff{u_i}{x_j} \pdiff{w_i}{x_k}
        - \pdiff{u_i}{x_k} \pdiff{\Vcal_l}{x_k} \pdiff{w_i}{x_l} ]
:Arguments:
- material_1 : :math:`w` (weight)
- material_2 : :math:`\nu` (viscosity)
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
"""
name = 'd_sd_div_grad'
arg_types = ('material_1', 'material_2', 'parameter_u', 'parameter_w',
'parameter_mesh_velocity')
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_div_grad)
def get_fargs(self, mat1, mat2, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
grad_u = grad_as_vector(self.get(par_u, 'grad'))
grad_w = grad_as_vector(self.get(par_w, 'grad'))
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (grad_u, grad_w, div_mv, grad_mv, mat1 * mat2, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat1, mat2, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDConvectTerm(Term):
r"""
Sensitivity (shape derivative) of convective term `dw_convect`.
Supports the following term modes: 1 (sensitivity) or 0 (original term
value).
:Definition:
.. math::
\int_{\Omega_D} [ u_k \pdiff{u_i}{x_k} w_i (\nabla \cdot \Vcal)
- u_k \pdiff{\Vcal_j}{x_k} \pdiff{u_i}{x_j} w_i ]
:Arguments:
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
"""
name = 'd_sd_convect'
arg_types = ('parameter_u', 'parameter_w', 'parameter_mesh_velocity')
arg_shapes = {'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_convect)
def get_fargs(self, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
val_u = self.get(par_u, 'val')
grad_u = grad_as_vector(self.get(par_u, 'grad'))
val_w = self.get(par_w, 'val')
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (val_u, grad_u, val_w, div_mv, grad_mv, vg,
get_default(term_mode, 1))
def get_eval_shape(self, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class NSOFMinGradTerm(Term):
name = 'd_of_ns_min_grad'
arg_types = ('material_1', 'material_2', 'parameter')
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'parameter' : 1}
function = staticmethod(terms.d_of_nsMinGrad)
def get_fargs(self, weight, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = grad_as_vector(self.get(parameter, 'grad'))
return grad, weight * mat, vg
def get_eval_shape(self, weight, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
return (1, 1, 1, 1), parameter.dtype
class NSOFSurfMinDPressTerm(Term):
r"""
Sensitivity of :math:`\Psi(p)`.
:Definition:
.. math::
\delta \Psi(p) = \delta \left( \int_{\Gamma_{in}}p -
\int_{\Gamma_{out}}bpress \right)
:Arguments:
- material_1 : :math:`w` (weight)
- material_2 : :math:`bpress` (given pressure)
- parameter : :math:`p`
"""
name = 'd_of_ns_surf_min_d_press'
arg_types = ('material_1', 'material_2', 'parameter')
arg_shapes = {'material_1' : 1, 'material_2' : 1,
'parameter' : 1}
integration = 'surface'
function = staticmethod(terms.d_of_nsSurfMinDPress)
def get_fargs(self, weight, bpress, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(parameter)
val_p = self.get(parameter, 'val')
return val_p, weight, bpress, sg, 0
def get_eval_shape(self, weight, bpress, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
return (1, 1, 1, 1), parameter.dtype
class NSOFSurfMinDPressDiffTerm(NSOFSurfMinDPressTerm):
r"""
Gateaux differential of :math:`\Psi(p)` w.r.t. :math:`p` in the
direction :math:`q`.
:Definition:
.. math::
w \delta_{p} \Psi(p) \circ q
:Arguments:
- material : :math:`w` (weight)
- virtual : :math:`q`
"""
name = 'dw_of_ns_surf_min_d_press_diff'
arg_types = ('material', 'virtual')
arg_shapes = {'material' : 1, 'virtual' : (1, None)}
def get_fargs(self, weight, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(virtual)
aux = nm.array([0], ndmin=4, dtype=nm.float64)
return aux, weight, 0.0, sg, 1
class SDGradDivStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization term `dw_st_grad_div`.
:Definition:
.. math::
\gamma \int_{\Omega_D} [ (\nabla \cdot \ul{u}) (\nabla \cdot \ul{w})
(\nabla \cdot \ul{\Vcal})
- \pdiff{u_i}{x_k} \pdiff{\Vcal_k}{x_i} (\nabla \cdot \ul{w})
- (\nabla \cdot \ul{u}) \pdiff{w_i}{x_k} \pdiff{\Vcal_k}{x_i} ]
:Arguments:
- material : :math:`\gamma`
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_grad_div'
arg_types = ('material', 'parameter_u', 'parameter_w',
'parameter_mesh_velocity')
arg_shapes = {'material' : '1, 1',
'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_st_grad_div)
def get_fargs(self, mat, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
div_u = self.get(par_u, 'div')
grad_u = grad_as_vector(self.get(par_u, 'grad'))
div_w = self.get(par_w, 'div')
grad_w = grad_as_vector(self.get(par_w, 'grad'))
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (div_u, grad_u, div_w, grad_w, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDSUPGCStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization term `dw_st_supg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ [ (\ul{b} \cdot \nabla u_k)
(\ul{b} \cdot \nabla w_k) (\nabla \cdot \Vcal) -
(\ul{b} \cdot \nabla \Vcal_i) \pdiff{u_k}{x_i}
        (\ul{b} \cdot \nabla w_k) - (\ul{b} \cdot \nabla u_k)
(\ul{b} \cdot \nabla \Vcal_i) \pdiff{w_k}{x_i} ]
:Arguments:
- material : :math:`\delta_K`
- parameter_b : :math:`\ul{b}`
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_supg_c'
arg_types = ('material', 'parameter_b', 'parameter_u', 'parameter_w',
'parameter_mesh_velocity')
arg_shapes = {'material' : '1, 1',
'parameter_b' : 'D', 'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_st_supg_c)
def get_fargs(self, mat, par_b, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
val_b = self.get(par_b, 'val')
grad_u = self.get(par_u, 'grad').transpose((0, 1, 3, 2)).copy()
grad_w = self.get(par_w, 'grad').transpose((0, 1, 3, 2)).copy()
div_mv = self.get(par_mv, 'div')
grad_mv = self.get(par_mv, 'grad').transpose((0, 1, 3, 2)).copy()
return (val_b, grad_u, grad_w, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_b, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDPSPGCStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization terms `dw_st_supg_p` or
`dw_st_pspg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\
[ \pdiff{r}{x_i} (\ul{b} \cdot \nabla u_i) (\nabla \cdot \Vcal) -
\pdiff{r}{x_k} \pdiff{\Vcal_k}{x_i} (\ul{b} \cdot \nabla u_i)
        - \pdiff{r}{x_i} (\ul{b} \cdot \nabla \Vcal_k) \pdiff{u_i}{x_k} ]
:Arguments:
- material : :math:`\delta_K`
- parameter_b : :math:`\ul{b}`
- parameter_u : :math:`\ul{u}`
- parameter_r : :math:`r`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_pspg_c'
arg_types = ('material', 'parameter_b', 'parameter_u', 'parameter_r',
'parameter_mesh_velocity')
arg_shapes = {'material' : '1, 1',
'parameter_b' : 'D', 'parameter_u' : 'D', 'parameter_r' : 1,
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_st_pspg_c)
def get_fargs(self, mat, par_b, par_u, par_r, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
val_b = self.get(par_b, 'val')
grad_u = self.get(par_u, 'grad').transpose((0, 1, 3, 2)).copy()
grad_r = self.get(par_r, 'grad')
div_mv = self.get(par_mv, 'div')
grad_mv = self.get(par_mv, 'grad').transpose((0, 1, 3, 2)).copy()
return (val_b, grad_u, grad_r, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_b, par_u, par_r, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDPSPGPStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization term `dw_st_pspg_p`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ [ (\nabla r \cdot \nabla p)
(\nabla \cdot \Vcal) - \pdiff{r}{x_k} (\nabla \Vcal_k \cdot \nabla p) -
(\nabla r \cdot \nabla \Vcal_k) \pdiff{p}{x_k} ]
:Arguments:
- material : :math:`\tau_K`
- parameter_r : :math:`r`
- parameter_p : :math:`p`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_pspg_p'
arg_types = ('material', 'parameter_r', 'parameter_p',
'parameter_mesh_velocity')
arg_shapes = {'material' : '1, 1',
'parameter_r' : 1, 'parameter_p' : 1,
'parameter_mesh_velocity' : 'D'}
function = staticmethod(terms.d_sd_st_pspg_p)
def get_fargs(self, mat, par_r, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_p)
grad_r = self.get(par_r, 'grad')
grad_p = self.get(par_p, 'grad')
div_mv = self.get(par_mv, 'div')
grad_mv = self.get(par_mv, 'grad').transpose((0, 1, 3, 2)).copy()
return (grad_r, grad_p, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_r, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_p)
return (n_el, 1, 1, 1), par_p.dtype
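
# Hedged usage sketch (the names `pb`, `i`, `Omega` and the variable
# arguments are hypothetical; assumes the usual sfepy Problem.evaluate()
# conventions):
#
#     val = pb.evaluate('d_sd_div.i.Omega(u, p, Vc)',
#                       u=par_u, p=par_p, Vc=par_mesh_velocity,
#                       term_mode=1)
#
# term_mode=1 requests the shape sensitivity; term_mode=0 recovers the
# original term value, matching the get_default(term_mode, 1) defaults above.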
| RexFuzzle/sfepy | sfepy/terms/terms_adj_navier_stokes.py | Python | bsd-3-clause | 22,730 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "storytest.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| rich9005/CmpE_272_Text_to_Braille | storytest/manage.py | Python | gpl-2.0 | 252 |
#!/usr/bin/env python3
""" NumPy is the fundamental package for array computing with Python.
It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
Besides its obvious scientific uses, NumPy can also be used as an efficient
multi-dimensional container of generic data. Arbitrary data-types can be
defined. This allows NumPy to seamlessly and speedily integrate with a wide
variety of databases.
All NumPy wheels distributed on PyPI are BSD licensed.
"""
DOCLINES = (__doc__ or '').split("\n")
import os
import sys
import subprocess
import textwrap
import sysconfig
if sys.version_info[:2] < (3, 6):
raise RuntimeError("Python version >= 3.6 required.")
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 1
MINOR = 19
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
if not GIT_REVISION:
# this shouldn't happen but apparently can (see gh-8512)
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing "
"numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
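
# Illustrative result: for an unreleased tree at git revision 'abc1234...'
# this returns ('1.19.0.dev0+abc1234', 'abc1234...'); once ISRELEASED is
# True it is simply ('1.19.0', <revision>).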
def write_version_py(filename='numpy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy.lib.NumpyVersion`
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('numpy')
config.add_data_files(('numpy', 'LICENSE.txt'))
config.add_data_files(('numpy', 'numpy/__init__.pxd'))
config.get_version('numpy/version.py') # sets config.version
return config
def check_submodules():
""" verify that the submodules are checked out and clean
use `git submodule update --init`; on failure
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for l in f:
if 'path' in l:
p = l.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError(f'Submodule {p} missing')
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError(f'Submodule not clean: {line}')
class concat_license_files():
"""Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation
Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see
gh-13447). This makes GitHub state correctly how NumPy is licensed.
"""
def __init__(self):
self.f1 = 'LICENSE.txt'
self.f2 = 'LICENSES_bundled.txt'
def __enter__(self):
"""Concatenate files and remove LICENSES_bundled.txt"""
with open(self.f1, 'r') as f1:
self.bsd_text = f1.read()
with open(self.f1, 'a') as f1:
with open(self.f2, 'r') as f2:
self.bundled_text = f2.read()
f1.write('\n\n')
f1.write(self.bundled_text)
def __exit__(self, exception_type, exception_value, traceback):
"""Restore content of both files"""
with open(self.f1, 'w') as f:
f.write(self.bsd_text)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
with concat_license_files():
sdist.run(self)
def get_build_overrides():
"""
Custom build commands to add `-std=c99` to compilation
"""
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
def _is_using_gcc(obj):
is_gcc = False
if obj.compiler.compiler_type == 'unix':
cc = sysconfig.get_config_var("CC")
if not cc:
cc = ""
compiler_name = os.path.basename(cc)
is_gcc = "gcc" in compiler_name
return is_gcc
class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name, libraries):
if _is_using_gcc(self):
args = build_info.get('extra_compiler_args') or []
args.append('-std=c99')
build_info['extra_compiler_args'] = args
build_clib.build_a_library(self, build_info, lib_name, libraries)
class new_build_ext(build_ext):
def build_extension(self, ext):
if _is_using_gcc(self):
if '-std=c99' not in ext.extra_compile_args:
ext.extra_compile_args.append('-std=c99')
build_ext.build_extension(self, ext)
return new_build_clib, new_build_ext
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
for d in ('random',):
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'numpy/{0}'.format(d)],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
    Return a boolean indicating whether to run the build (avoid parsing
    Cython and template files if False).
"""
args = sys.argv[1:]
if not args:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
for command in info_commands:
if command in args:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')
for command in good_commands:
if command in args:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in args:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
              - `pip install numpy` (last NumPy release on PyPI)
"""))
return True
if '--help' in args or '-h' in sys.argv[1]:
print(textwrap.dedent("""
NumPy-specific help
-------------------
To install NumPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest NumPy release
            from PyPI, use `pip install numpy`.
For help with build/installation issues, please ask on the
numpy-discussion mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/numpy/numpy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python runtests.py` (to build and test)
- `python runtests.py --no-build` (to test installed numpy)
- `>>> numpy.test()` (run tests for installed numpy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
with `twine upload -s <filenames>` instead.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
build_sphinx="""
`setup.py build_sphinx` is not supported, use the
Makefile under doc/""",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in args:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# Commands that do more than print info, but also don't need Cython and
# template parsing.
other_commands = ['egg_info', 'install_egg_info', 'rotate']
for command in other_commands:
if command in args:
return False
# If we got here, we didn't detect what setup.py command was given
import warnings
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates", stacklevel=2)
return True
def setup_package():
src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# Rewrite the version file every time
write_version_py()
# The f2py scripts that will be installed
if sys.platform == 'win32':
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
]
else:
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1],
'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
]
cmdclass={"sdist": sdist_checked,
}
metadata = dict(
name = 'numpy',
maintainer = "NumPy Developers",
maintainer_email = "numpy-discussion@python.org",
description = DOCLINES[0],
long_description = "\n".join(DOCLINES[2:]),
url = "https://www.numpy.org",
author = "Travis E. Oliphant et al.",
download_url = "https://pypi.python.org/pypi/numpy",
project_urls={
"Bug Tracker": "https://github.com/numpy/numpy/issues",
"Documentation": "https://docs.scipy.org/doc/numpy/",
"Source Code": "https://github.com/numpy/numpy",
},
license = 'BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
cmdclass=cmdclass,
        python_requires='>=3.6',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds
},
)
if "--force" in sys.argv:
run_build = True
sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
if run_build:
# patches distutils, even though we don't use it
import setuptools # noqa: F401
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
        if 'sdist' not in sys.argv:
# Generate Cython sources, unless we're generating an sdist
generate_cython()
metadata['configuration'] = configuration
# Customize extension building
cmdclass['build_clib'], cmdclass['build_ext'] = get_build_overrides()
else:
from setuptools import setup
# Version number is added to metadata inside configuration() if build
# is run.
metadata['version'] = get_version_info()[0]
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
# This may avoid problems where numpy is installed via ``*_requires`` by
# setuptools, the global namespace isn't reset properly, and then numpy is
# imported later (which will then fail to load numpy extension modules).
# See gh-7956 for details
del builtins.__NUMPY_SETUP__
| endolith/numpy | setup.py | Python | bsd-3-clause | 17,447 |
from __future__ import absolute_import
import datetime
import time
from celery.events.state import Task
from .search import satisfies_search_terms
def iter_tasks(events, limit=None, type=None, worker=None, state=None,
sort_by=None, received_start=None, received_end=None,
started_start=None, started_end=None, search_terms=None):
i = 0
tasks = events.state.tasks_by_timestamp()
if sort_by is not None:
tasks = sort_tasks(tasks, sort_by)
convert = lambda x: time.mktime(
datetime.datetime.strptime(x, '%Y-%m-%d %H:%M').timetuple()
)
    # search_terms may be None (the default); normalize it before .get()
    search_terms = search_terms or {}
    any_value_search_term = search_terms.get('any', None)
result_search_term = search_terms.get('result', None)
kwargs_search_terms = search_terms.get('kwargs', None)
for uuid, task in tasks:
if type and task.name != type:
continue
if worker and task.worker and task.worker.hostname != worker:
continue
if state and task.state != state:
continue
if received_start and task.received and\
task.received < convert(received_start):
continue
if received_end and task.received and\
task.received > convert(received_end):
continue
if started_start and task.started and\
task.started < convert(started_start):
continue
if started_end and task.started and\
task.started > convert(started_end):
continue
if not satisfies_search_terms(task, any_value_search_term, result_search_term, kwargs_search_terms):
continue
yield uuid, task
i += 1
if i == limit:
break
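
# Hedged usage sketch (`events` is the flower events state holder; the
# filter values below are illustrative only):
#
#     for uuid, task in iter_tasks(events, limit=10, state='SUCCESS',
#                                  sort_by='-received', search_terms={}):
#         print(uuid, task.name)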
# Map each sortable field to its type; calling the type (str() -> '',
# float() -> 0.0) supplies a default when the task attribute is None.
sort_keys = {'name': str, 'state': str, 'received': float, 'started': float}
def sort_tasks(tasks, sort_by):
assert sort_by.lstrip('-') in sort_keys
reverse = False
if sort_by.startswith('-'):
sort_by = sort_by.lstrip('-')
reverse = True
for task in sorted(tasks,
key=lambda x: getattr(x[1], sort_by) or sort_keys[sort_by](),
reverse=reverse):
yield task
def get_task_by_id(events, task_id):
if hasattr(Task, '_fields'): # Old version
return events.state.tasks.get(task_id)
else:
_fields = Task._defaults.keys()
task = events.state.tasks.get(task_id)
if task is not None:
task._fields = _fields
return task
| raphaelmerx/flower | flower/utils/tasks.py | Python | bsd-3-clause | 2,482 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2015 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Worker
"""
import sys
import tempfile
import unittest
sys.path.append('.')
import TestCleaner
from bleachbit import CLI
from bleachbit.Worker import *
class WorkerTestCase(unittest.TestCase):
"""Test case for module Worker"""
def test_TestActionProvider(self):
"""Test Worker using Action.TestActionProvider"""
ui = CLI.CliCallback()
(fd, filename) = tempfile.mkstemp(prefix='bleachbit-test-worker')
os.write(fd, '123')
os.close(fd)
self.assert_(os.path.exists(filename))
astr = '<action command="test" path="%s"/>' % filename
cleaner = TestCleaner.action_to_cleaner(astr)
backends['test'] = cleaner
operations = {'test': ['option1']}
worker = Worker(ui, True, operations)
run = worker.run()
while run.next():
pass
self.assert_(not os.path.exists(filename),
"Path still exists '%s'" % filename)
self.assertEqual(worker.total_special, 3)
self.assertEqual(worker.total_errors, 2)
if 'posix' == os.name:
self.assertEqual(worker.total_bytes, 4096 + 10 + 10)
self.assertEqual(worker.total_deleted, 3)
elif 'nt' == os.name:
self.assertEqual(worker.total_bytes, 3 + 3 + 10 + 10)
self.assertEqual(worker.total_deleted, 4)
def test_deep_scan(self):
"""Test for deep scan"""
# load cleaners from XML
import bleachbit.CleanerML
bleachbit.CleanerML.load_cleaners()
# DeepScan itself is tested elsewhere, so replace it here
import bleachbit.DeepScan
SaveDeepScan = bleachbit.DeepScan.DeepScan
self.scanned = 0
self_assertequal = self.assertEqual
self_assert = self.assert_
def increment_count():
self.scanned = self.scanned + 1
class MyDeepScan:
def add_search(self, dirname, regex):
self_assertequal(dirname, os.path.expanduser('~'))
self_assert(
regex in ('^Thumbs\\.db$', '^Thumbs\\.db:encryptable$'))
def scan(self):
increment_count()
yield True
bleachbit.DeepScan.DeepScan = MyDeepScan
# test
operations = {'deepscan': ['thumbs_db']}
ui = CLI.CliCallback()
worker = Worker(ui, False, operations).run()
while worker.next():
pass
self.assertEqual(1, self.scanned)
# clean up
bleachbit.DeepScan.DeepScan = SaveDeepScan
def test_multiple_options(self):
"""Test one cleaner with two options"""
ui = CLI.CliCallback()
(fd, filename1) = tempfile.mkstemp(prefix='bleachbit-test-worker')
os.close(fd)
self.assert_(os.path.exists(filename1))
(fd, filename2) = tempfile.mkstemp(prefix='bleachbit-test-worker')
os.close(fd)
self.assert_(os.path.exists(filename2))
astr1 = '<action command="delete" search="file" path="%s"/>' % filename1
astr2 = '<action command="delete" search="file" path="%s"/>' % filename2
cleaner = TestCleaner.actions_to_cleaner([astr1, astr2])
backends['test'] = cleaner
operations = {'test': ['option1', 'option2']}
worker = Worker(ui, True, operations)
run = worker.run()
while run.next():
pass
self.assert_(not os.path.exists(filename1),
"Path still exists '%s'" % filename1)
self.assert_(not os.path.exists(filename2),
"Path still exists '%s'" % filename2)
self.assertEqual(worker.total_special, 0)
self.assertEqual(worker.total_errors, 0)
self.assertEqual(worker.total_deleted, 2)
def suite():
return unittest.makeSuite(WorkerTestCase)
if __name__ == '__main__':
unittest.main()
| uudiin/bleachbit | tests/TestWorker.py | Python | gpl-3.0 | 4,656 |
# NOTE: this must inherit from object (a new-style class) to work correctly
# as a descriptor under Python 2.7
class CachedProperty(object):
""" A property that is only computed once per instance and
then stores the result in _cached_properties of the object.
Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
propname = self.func.__name__
if not hasattr(obj, '_cached_properties'):
obj._cached_properties = {}
if propname not in obj._cached_properties:
obj._cached_properties[propname] = self.func(obj)
return obj._cached_properties[propname]
@staticmethod
def clear(obj):
"""clears cache of obj"""
if hasattr(obj, '_cached_properties'):
obj._cached_properties = {}
    @staticmethod
    def is_cached(obj, propname):
        return hasattr(obj, '_cached_properties') and propname in obj._cached_properties
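
# Minimal usage sketch (illustrative class and numbers):
#
#     class Circle(object):
#         def __init__(self, r):
#             self.r = r
#
#         @CachedProperty
#         def area(self):
#             print("computing")           # runs only once per instance
#             return 3.14159 * self.r ** 2
#
#     c = Circle(2)
#     c.area                     # prints "computing", returns ~12.566
#     c.area                     # served from the cache, no print
#     CachedProperty.clear(c)    # next access recomputes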
| psy0rz/zfs_autobackup | zfs_autobackup/CachedProperty.py | Python | gpl-3.0 | 1,252 |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as signature hash functions.
This file is modified from python-bitcoinlib.
"""
import hashlib
import struct
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
"""Convert number to bitcoin-specific little endian format."""
# We need v.bit_length() bits, plus a sign bit for every nonzero number.
n_bits = v.bit_length() + (v != 0)
# The number of bytes for that is:
n_bytes = (n_bits + 7) // 8
# Convert number to absolute value + sign in top bit.
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
# Serialize to bytes
return encoded_v.to_bytes(n_bytes, 'little')
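
# Worked examples following the rules above (doctest style):
#
#     >>> bn2vch(0), bn2vch(1), bn2vch(-1)
#     (b'', b'\x01', b'\x81')
#     >>> bn2vch(127), bn2vch(128)   # 128 needs an extra byte for the sign bit
#     (b'\x7f', b'\x80\x00')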
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bytes([len(d)]) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff + 1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
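
# Round trip between small integers and their opcodes (doctest style):
#
#     >>> CScriptOp.encode_op_n(5) == OP_5
#     True
#     >>> OP_5.decode_op_n()
#     5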
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_SMALLINTEGER: 'OP_SMALLINTEGER',
OP_PUBKEYS: 'OP_PUBKEYS',
OP_PUBKEYHASH: 'OP_PUBKEYHASH',
OP_PUBKEY: 'OP_PUBKEY',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum:
__slots__ = ("value",)
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes([len(r)]) + r
@staticmethod
def decode(vch):
result = 0
# We assume valid push_size and minimal encoding
value = vch[1:]
if len(value) == 0:
return result
for i, byte in enumerate(value):
result |= int(byte) << 8 * i
if value[-1] >= 0x80:
# Mask for all but the highest result bit
num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
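
# Round-trip examples (doctest style; encode() prepends the push length
# byte, and 128 gains a padding byte so the sign bit stays clear):
#
#     >>> CScriptNum.encode(CScriptNum(128))
#     b'\x02\x80\x00'
#     >>> CScriptNum.decode(b'\x02\x80\x00')
#     128
#     >>> CScriptNum.decode(CScriptNum.encode(CScriptNum(-1)))
#     -1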
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
__slots__ = ()
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bytes([other])
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bytes([CScriptOp(OP_0)])
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes([CScriptOp.encode_op_n(other)])
elif other == -1:
other = bytes([OP_1NEGATE])
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = self[i]
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = self[i]
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % o.hex()
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def LegacySignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
|
ahmedbodi/vertcoin
|
test/functional/test_framework/script.py
|
Python
|
mit
| 21,580
|
# -*- coding: utf-8 -*-
"""
An email representation based on a database record.
"""
from html2text import HTML2Text
from django.template.loader import render_to_string
from modoboa.lib.email_utils import Email
from .sql_connector import SQLconnector
from .utils import fix_utf8_encoding, smart_text
class SQLemail(Email):
"""The SQL version of the Email class."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.qtype = ""
self.qreason = ""
qreason = self.msg["X-Amavis-Alert"]
if qreason:
if "," in qreason:
self.qtype, qreason = qreason.split(",", 1)
elif qreason.startswith("BAD HEADER SECTION "):
# Workaround for amavis <= 2.8.0 :p
self.qtype = "BAD HEADER SECTION"
qreason = qreason[19:]
qreason = " ".join([x.strip() for x in qreason.splitlines()])
self.qreason = qreason
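        # e.g. a hypothetical header value "Spam, score=6.2" would yield
        # qtype == "Spam" and qreason == "score=6.2" after the split above.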
def _fetch_message(self):
return SQLconnector().get_mail_content(self.mailid)
@property
def body(self):
if self._body is None:
super(SQLemail, self).body
self._body = fix_utf8_encoding(self._body)
        # If there's no plain-text version available, attempt to make one by
        # sanitising the HTML version. The output isn't always pretty, but it
        # is readable, better than a blank screen, and helps the user decide
        # whether the message is spam or ham.
if self.dformat == "plain" and not self.contents["plain"] \
and self.contents["html"]:
h = HTML2Text()
h.ignore_tables = True
h.images_to_alt = True
mail_text = h.handle(self.contents["html"])
self.contents["plain"] = self._post_process_plain(
smart_text(mail_text))
self._body = self.viewmail_plain()
self._body = fix_utf8_encoding(self._body)
return self._body
def render_headers(self, **kwargs):
context = {
"qtype": self.qtype,
"qreason": self.qreason,
"headers": self.headers,
}
return render_to_string("modoboa_amavis/mailheaders.html", context)
|
modoboa/modoboa-amavis
|
modoboa_amavis/sql_email.py
|
Python
|
mit
| 2,249
|
from PyQt4 import QtGui
from dynamics_ui import Ui_dynamic
from config import Config
class DynamicsWindow(QtGui.QDialog):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self,parent)
self.ui = Ui_dynamic()
self.ui.setupUi(self)
self.init_table()
self.ui.addButton_2.clicked.connect(self.addItem)
self.ui.removeButton.clicked.connect(self.removeItem)
self.ui.saveButton_3.clicked.connect(self.saveItem)
def addItem(self):
self.ui.tableWidget.insertRow(0)
def saveItem(self):
i = 0
json = {}
while i < self.ui.tableWidget.rowCount():
json[self.ui.tableWidget.item(i,0).text()] = float(self.ui.tableWidget.item(i,1).text())
i += 1
Config.dynamic_tags = json
Config.save_config()
QtGui.QMessageBox.warning(self,"Message","The settings have been saved")
self.close()
def removeItem(self):
row = self.ui.tableWidget.currentRow()
if row == -1:
return
self.ui.tableWidget.removeRow(row)
    def init_table(self):
        for _ in range(len(Config.dynamic_tags)):
            self.ui.tableWidget.insertRow(0)
        i = 0
for k,v in Config.dynamic_tags.items():
item = QtGui.QTableWidgetItem(k)
self.ui.tableWidget.setItem(i,0,item)
            item1 = QtGui.QTableWidgetItem("%g" % v)  # values are stored as floats; "%d" would truncate them
self.ui.tableWidget.setItem(i,1,item1)
i = i + 1
|
elliott-wen/LocalizationSystemGUI
|
dynamicswindow.py
|
Python
|
apache-2.0
| 1,526
|
# coding: utf-8
import base64
import flynn.decoder
import flynn.encoder
import flynn.data
__all__ = [
"decoder",
"encoder",
"dump",
"dumps",
"dumph",
"load",
"loads",
"loadh",
"Tagging",
"Undefined"
]
dump = flynn.encoder.dump
dumps = flynn.encoder.dumps
load = flynn.decoder.load
loads = flynn.decoder.loads
Tagging = flynn.data.Tagging
Undefined = flynn.data.Undefined
def dumph(*args, **kwargs):
return base64.b16encode(dumps(*args, **kwargs)).decode("utf-8")
def loadh(data, *args, **kwargs):
return loads(base64.b16decode(data), *args, **kwargs)
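# Illustrative sketch (not part of the original module): dumph/loadh give a
# hex-string round trip, e.g., assuming standard CBOR list semantics,
#   loadh(dumph([1, 2, 3])) == [1, 2, 3]
# since dumph is simply dumps() run through base16 encoding.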
|
fritz0705/flynn
|
flynn/__init__.py
|
Python
|
mit
| 572
|
import logging
from ..topology import TopologyChangeError
log = logging.getLogger(__name__)
def set_pos(eptm, geom, pos):
"""Updates the vertex position of the :class:`Epithelium` object.
Assumes that pos is passed as a 1D array to be reshaped as (eptm.Nv, eptm.dim)
"""
log.debug("set pos")
if eptm.topo_changed:
        # reset the switch and interrupt what we were doing
eptm.topo_changed = False
raise TopologyChangeError
eptm.vert_df.loc[eptm.vert_df.is_active.astype(bool), eptm.coords] = pos.reshape(
(-1, eptm.dim)
)
geom.update_all(eptm)
|
CellModels/tyssue
|
tyssue/solvers/base.py
|
Python
|
gpl-2.0
| 607
|
#!/usr/bin/env python
"""
Script for generating distributables based on platform skeletons.
User supplies path for pyfa code base, root skeleton directory, and where the
builds go. The builds are automatically named depending on the pyfa config
values of `version` and `tag`. If it's a Stable release, the naming
convention is:
pyfa-pyfaversion-expansion-expversion-platform
If it is not Stable (tag=git), we determine if the pyfa code base includes
the git repo to use as an ID. If not, uses randomly generated 6-character ID.
The unstable naming convention:
    pyfa-YYYYMMDD-id-platform
dist.py can also build the Windows installer provided that it has a path to
Inno Setup (and, for generating on non-Windows platforms, that WINE is
installed). To build the EXE file, `win` must be included in the platforms to
be built.
"""
# TODO: ensure build directory can be written to
# TODO: default build and dist directories
from optparse import OptionParser
import os.path
import shutil
import sys
import tarfile
import datetime
import random
import string
import zipfile
import errno
from subprocess import call
class FileStub():
def write(self, *args):
pass
def flush(self, *args):
pass
i = 0
def loginfo(path, names):
# Print out a "progress" and return directories / files to ignore
global i
i += 1
if i % 10 == 0:
sys.stdout.write(".")
sys.stdout.flush()
return ()
def copyanything(src, dst):
    try:
        shutil.copytree(src, dst, ignore=loginfo)
    except OSError:  # src is a plain file rather than a directory (python >2.5)
        shutil.copy(src, dst)
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def zipdir(path, zip):
for root, dirs, files in os.walk(path):
for file in files:
zip.write(os.path.join(root, file))
skels = ['win', 'src', 'mac', 'mac-deprecated']
iscc = r"C:\Program Files (x86)\Inno Setup 5\ISCC.exe"  # Inno Setup compiler location (run via WINE on non-Windows)
if __name__ == "__main__":
oldstd = sys.stdout
parser = OptionParser()
parser.add_option("-s", "--skeleton", dest="skeleton", help="Location of Pyfa-skel directory")
parser.add_option("-b", "--base", dest="base", help="Location of cleaned read-only base directory")
parser.add_option("-d", "--destination", dest="destination", help="Where to copy our distributable")
parser.add_option("-p", "--platforms", dest="platforms", help="Comma-separated list of platforms to build", default=','.join(skels))
parser.add_option("-q", "--quiet", dest="silent", action="store_true")
parser.add_option("-w", "--winexe", dest="winexe", action="store_true", help="Build the Windows installer file (needs Inno Setup). Must include 'win' in platform options")
parser.add_option("-z", "--zip", dest="zip", action="store_true", help="zip archive instead of tar")
options, args = parser.parse_args()
if options.skeleton is None or options.base is None or options.destination is None:
print "Need --skeleton argument as well as --base and --destination argument"
parser.print_help()
sys.exit()
if options.silent:
sys.stdout = FileStub()
options.platforms = options.platforms.split(",")
for skel in skels:
if skel not in options.platforms:
continue
print "\n======== %s ========"%skel
info = {}
config = {}
setup = {}
skeleton = os.path.expanduser(os.path.join(options.skeleton, skel))
execfile(os.path.join(options.base, "config.py"), config)
execfile(os.path.join(skeleton, "info.py"), info)
execfile(os.path.join(options.base, "setup.py"), setup)
destination = os.path.expanduser(options.destination)
if not os.path.isdir(destination) or not os.access(destination, os.W_OK | os.X_OK):
print "Destination directory does not exist or is not writable: {}".format(destination)
sys.exit()
dirName = info["arcname"]
nowdt = datetime.datetime.now()
now = "%04d%02d%02d" % (nowdt.year, nowdt.month, nowdt.day)
git = False
if config['tag'].lower() == "git":
try: # if there is a git repo associated with base, use master commit
with open(os.path.join(options.base, ".git", "refs", "heads", "master"), 'r') as f:
id = f.readline()[0:6]
git = True
            except IOError: # no git repo available, use a randomly generated ID
id = id_generator()
fileName = "pyfa-{}-{}-{}".format(now, id, info["os"])
else:
fileName = "pyfa-{}-{}-{}-{}".format(
config['version'],
config['expansionName'].lower(),
config['expansionVersion'],
info["os"]
)
archiveName = "{}.{}".format(fileName, "zip" if options.zip else "tar.bz2")
tmpDir = os.path.join(os.getcwd(), dirName) # tmp directory where files are copied
tmpFile = os.path.join(os.getcwd(), archiveName)
try:
print "Copying skeleton to ", tmpDir
shutil.copytree(skeleton, tmpDir, ignore=loginfo)
print
source = os.path.expanduser(options.base)
root = os.path.join(tmpDir, info["base"])
# it is easier to work from the source directory
oldcwd = os.getcwd()
os.chdir(source)
if info["library"]:
print "Injecting files into", info["library"]
libraryFile = os.path.join(root, info["library"])
with zipfile.ZipFile(libraryFile, 'a') as library:
for dir in setup['packages']:
zipdir(dir, library)
library.write('pyfa.py', 'pyfa__main__.py')
library.write('config.py')
else: # platforms where we don't have a packaged library
print "Copying modules into", root
for dir in setup['packages']:
copyanything(dir, os.path.join(root, dir))
# add some additional files to root dir for these platforms
# (hopefully can figure out a way later for OS X to use the one in
# it's library)
if skel == 'mac':
setup['include_files'] += ['pyfa.py']
if skel in ('src', 'mac-deprecated'):
setup['include_files'] += ['pyfa.py', 'config.py']
print
print "Copying included files:",
for file in setup['include_files']:
if isinstance(file, basestring):
print file,
copyanything(file, os.path.join(root, file))
print
print "Creating images zipfile:",
os.chdir('imgs')
imagesFile = os.path.join(root, "imgs.zip")
with zipfile.ZipFile(imagesFile, 'w') as images:
for dir in setup['icon_dirs']:
print dir,
zipdir(dir, images)
os.chdir(oldcwd)
print
print "Creating archive"
if options.zip:
archive = zipfile.ZipFile(tmpFile, 'w', compression=zipfile.ZIP_DEFLATED)
zipdir(dirName, archive)
archive.close()
else:
archive = tarfile.open(tmpFile, "w:bz2")
archive.add(tmpDir, arcname=info["arcname"])
archive.close()
print "Moving archive to ", destination
shutil.move(tmpFile, destination)
if "win" in skel and options.winexe:
print "Compiling EXE"
if config['tag'].lower() == "git":
if git: # if git repo info available, use git commit
expansion = "git-%s"%(id)
else: # if there is no git repo, use timestamp
expansion = now
else: # if code is Stable, use expansion name
expansion = "%s %s"%(config['expansionName'], config['expansionVersion']),
calllist = ["wine"] if 'win' not in sys.platform else []
call(calllist + [
iscc,
"pyfa-setup.iss",
"/dMyAppVersion=%s"%(config['version']),
"/dMyAppExpansion=%s"%(expansion),
"/dMyAppDir=pyfa",
"/dMyOutputDir=%s"%destination,
"/dMyOutputFile=%s"%fileName]) #stdout=devnull, stderr=devnull
print "EXE completed"
except Exception as e:
print "Encountered an error: \n\t", e
raise
finally:
print "Deleting tmp files\n"
try:
shutil.rmtree("dist") # Inno dir
except:
pass
try:
shutil.rmtree(tmpDir)
except:
pass
try:
os.unlink(tmpFile)
except:
pass
sys.stdout = oldstd
if os.path.isdir(destination):
print os.path.join(destination, os.path.split(tmpFile)[1])
else:
print destination
|
Ebag333/Pyfa
|
scripts/dist.py
|
Python
|
gpl-3.0
| 9,587
|
# Find Eulerian Tour
#
# Write a function that takes in a graph
# represented as a list of tuples
# and return a list of nodes that
# you would follow on an Eulerian Tour
#
# For example, if the input graph was
# [(1, 2), (2, 3), (3, 1)]
# A possible Eulerian tour would be [1, 2, 3, 1]
from collections import defaultdict
def find_bridges(adj_matrix):
timer = 0
covered = {}
fup = {}
tin = {}
bridges = defaultdict(list)
    for node in adj_matrix:
        if node not in covered:
            bridge_dfs(node, None, timer, tin, fup, adj_matrix, covered, bridges)
return bridges
def bridge_dfs(node, from_node, timer, tin, fup, adj_matrix, covered, bridges):
covered[node] = True
timer += 1
# Note total time through all paths and time to reach IN node.
fup[node] = tin[node] = timer
for v2 in adj_matrix[node]:
# For all nodes not parent node from_node.
if v2 != from_node:
if v2 in covered:
fup[node] = min(tin[node], fup[v2])
else:
bridge_dfs(v2, node, timer, tin, fup, adj_matrix, covered, bridges)
fup[node] = min(fup[node], fup[v2])
                # If v2's subtree can only be reached through the edge
                # node -> v2 (i.e. tin[node] < fup[v2]), that edge is a bridge.
if tin[node] < fup[v2]:
bridges[node].append(v2)
bridges[v2].append(node)
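# Illustrative sketch: for the path graph [(1, 2), (2, 3)] both edges are
# bridges, so find_bridges maps each endpoint to its bridged neighbours,
# e.g. bridges[2] contains both 1 and 3 (ordering depends on traversal).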
def get_edges_cached_decorate(func):
if not hasattr(func, 'cache'):
setattr(func, 'cache', {})
def get_non_bridges(*args, **kw):
hashed = hash((args, kw))
if hashed not in func.cache:
res = func.cache[hashed] = func(*args, **kw)
else:
res = func.cache[hashed]
return res
return get_non_bridges
# @get_edges_cached_decorate
def filter_non_bridges(node_list, bridges):
return [node for node in node_list if node not in bridges]
def pick_node(from_node, to_nodes, edges_covered):
for to_node in to_nodes:
# print 'inside pick_node. from_node=%s to_node=%s' %(from_node, to_node)
# print 'edges_covered', edges_covered
if (from_node, to_node) not in edges_covered[from_node] and (
to_node, from_node) not in edges_covered[to_node]:
return to_node
return None
def find_eulerian_tour(graph):
adj_matrix = defaultdict(list)
odd_nodes = []
edges_covered = defaultdict(list)
edges = []
bridge_nodes = defaultdict(set)
# Create adjency matrix.
for edge in graph:
adj_matrix[edge[0]].append(edge[1])
adj_matrix[edge[1]].append(edge[0])
# print 'adj_matrix', adj_matrix
# Get a list of all nodes.
all_nodes = adj_matrix.keys()
# print 'all nodes are', all_nodes
# Find all nodes with bridge edges.
bridges = find_bridges(adj_matrix)
# print 'bridge nodes >>>', bridges
    for node in adj_matrix:
        degree = len(adj_matrix[node])
        if degree % 2 == 1:
            odd_nodes.append(node)
    # print 'odd_nodes', odd_nodes
    # Choose nodes with odd degree if available (an Eulerian path must start
    # at an odd-degree node whenever any exist).
    node_list = odd_nodes if odd_nodes else all_nodes
# Filter Nodes that have non-bridge edges.
# print 'node_list before', node_list
node_list = filter_non_bridges(node_list, bridges) or node_list
# print 'node_list after', node_list
# Pick any random node.
from_node = node_list[0]
# print '1st node picked', from_node
while True:
to_nodes = adj_matrix[from_node]
# print 'to_nodes', to_nodes
# Cached function. pick a non bridge edge if available.
to_nodes = filter_non_bridges(to_nodes, bridges) or to_nodes
# Add the bridge nodes.
bridge_nodes[from_node].update(set(adj_matrix[from_node]) - set(to_nodes))
# Try to pick a new node each time. Prefer a non bridge node over a bridged one.
to_node = pick_node(from_node, to_nodes, edges_covered) or pick_node(
from_node, bridge_nodes[from_node], edges_covered)
# print 'from node %s to_node picked %s' %(from_node, to_node)
if to_node is not None:
edges_covered[from_node].append((from_node, to_node))
edges.append((from_node, to_node))
# Traverse the other node on the edge.
from_node = to_node
else:
break
# print 'edges_covered', edges_covered
# print 'edges_covered', edges_covered
# print edges
result = []
for edge in edges:
if result and edge[0] != result[-1]:
result.append(edge[0])
if result and edge[1] != result[-1]:
result.append(edge[1])
else:
result.extend(edge)
del bridge_nodes, edges_covered, adj_matrix, node_list
return result
assert find_eulerian_tour([(1, 2), (2, 3), (3, 1)]) == [1,2,3,1]
assert find_eulerian_tour([(1, 2), (2, 4), (3, 1), (3, 4)]) == [1, 2, 4, 3, 1]
assert find_eulerian_tour(
[
(0, 1), (1, 5), (1, 7), (4, 5), (4, 8), (1, 6), (3, 7), (5, 9),
(2, 4), (0, 4), (2, 5), (3, 6), (8, 9)
]) == [2, 4, 8, 9, 5, 4, 0, 1, 7, 3, 6, 1, 5, 2]
assert find_eulerian_tour([(8, 16), (8, 18), (16, 17), (18, 19),
(3, 17), (13, 17), (5, 13),(3, 4), (0, 18), (3, 14), (11, 14),
(1, 8), (1, 9), (4, 12), (2, 19),(1, 10), (7, 9), (13, 15),
(6, 12), (0, 1), (2, 11), (3, 18), (5, 6), (7, 15), (8, 13), (10, 17)]
)
|
codecakes/algorithms_monk
|
graphs/find_eulerian_path.py
|
Python
|
mit
| 5,531
|
import py
import pycmd
pytest_plugins = "pytest_pytester"
def pytest_generate_tests(metafunc):
multi = getattr(metafunc.function, 'multi', None)
if multi is not None:
assert len(multi.kwargs) == 1
for name, l in multi.kwargs.items():
for val in l:
metafunc.addcall(funcargs={name: val})
@py.test.mark.multi(name=[x for x in dir(pycmd) if x[0] != "_"])
def test_cmdmain(name, pytestconfig):
main = getattr(pycmd, name)
assert py.builtin.callable(main)
assert name[:2] == "py"
if not pytestconfig.getvalue("notoolsonpath"):
scriptname = "py." + name[2:]
assert py.path.local.sysfind(scriptname), scriptname
class TestPyLookup:
def test_basic(self, testdir):
p = testdir.makepyfile(hello="def x(): pass")
result = testdir.run("py.lookup", "pass")
result.stdout.fnmatch_lines(
['%s:*def x(): pass' %(p.basename)]
)
def test_basic_ignore_dirs(self, testdir):
testdir.tmpdir.join("x.py", "hello.py").ensure().write("hello")
result = testdir.run("py.lookup", "hello")
assert result.ret == 0
result.stdout.fnmatch_lines(
'*hello.py:*hello*'
)
def test_search_in_filename(self, testdir):
p = testdir.makepyfile(hello="def x(): pass")
result = testdir.run("py.lookup", "hello")
result.stdout.fnmatch_lines(
['*%s:*' %(p.basename)]
)
def test_glob(self, testdir):
p = testdir.maketxtfile(hello="world")
result = testdir.run("py.lookup", "-g*.txt", "world")
result.stdout.fnmatch_lines(
['*%s:*' %(p.basename)]
)
def test_with_explicit_path(self, testdir):
sub1 = testdir.mkdir("things")
sub2 = testdir.mkdir("foo")
sub1.join("pyfile.py").write("def stuff(): pass")
searched = sub2.join("other.py")
searched.write("stuff = x")
result = testdir.run("py.lookup", sub2.basename, "stuff")
result.stdout.fnmatch_lines(
["%s:1: stuff = x" % (searched.basename,)]
)
class TestPyCleanup:
def test_basic(self, testdir, tmpdir):
p = tmpdir.ensure("hello.py")
result = testdir.run("py.cleanup", tmpdir)
assert result.ret == 0
assert p.check()
pyc = p.new(ext='pyc')
pyc.ensure()
pyclass = p.new(basename=p.basename + '$py.class')
result = testdir.run("py.cleanup", tmpdir)
assert not pyc.check()
assert not pyclass.check()
def test_dir_remove__pycache__(self, testdir, tmpdir):
subdir = tmpdir.mkdir("subdir")
p = subdir.ensure("file")
w = subdir.ensure("__pycache__", "whatever")
result = testdir.run("py.cleanup", tmpdir)
assert result.ret == 0
assert subdir.check()
assert w.check()
result = testdir.run("py.cleanup", "-p", tmpdir)
assert not w.check()
assert not w.dirpath().check()
assert subdir.check()
w.ensure()
result = testdir.run("py.cleanup", "-a", tmpdir)
assert not w.dirpath().check()
def test_dir_remove_simple(self, testdir, tmpdir):
subdir = tmpdir.mkdir("subdir")
p = subdir.ensure("file")
result = testdir.run("py.cleanup", "-d", tmpdir)
assert result.ret == 0
assert subdir.check()
p.remove()
p = tmpdir.mkdir("hello")
result = testdir.run("py.cleanup", tmpdir, '-d')
assert result.ret == 0
assert not subdir.check()
@py.test.mark.multi(opt=["-s"])
def test_remove_setup_simple(self, testdir, tmpdir, opt):
subdir = tmpdir.mkdir("subdir")
p = subdir.ensure("setup.py")
subdir.mkdir("build").ensure("hello", "world.py")
egg1 = subdir.mkdir("something.egg-info")
egg1.mkdir("whatever")
okbuild = subdir.mkdir("preserved1").mkdir("build")
egg2 = subdir.mkdir("preserved2").mkdir("other.egg-info")
subdir.mkdir("dist")
result = testdir.run("py.cleanup", opt, subdir)
assert result.ret == 0
assert okbuild.check()
assert egg1.check()
assert egg2.check()
assert subdir.join("preserved1").check()
assert subdir.join("preserved2").check()
assert not subdir.join("build").check()
assert not subdir.join("dist").check()
def test_remove_all(self, testdir, tmpdir):
tmpdir.ensure("setup.py")
tmpdir.ensure("build", "xyz.py")
tmpdir.ensure("dist", "abc.py")
piplog = tmpdir.ensure("preserved2", "pip-log.txt")
tmpdir.ensure("hello.egg-info")
setup = tmpdir.ensure("setup.py")
tmpdir.ensure("src/a/b")
x = tmpdir.ensure("src/x.py")
x2 = tmpdir.ensure("src/x.pyc")
x3 = tmpdir.ensure("src/x$py.class")
result = testdir.run("py.cleanup", "-a", tmpdir)
assert result.ret == 0
assert len(tmpdir.listdir()) == 3
assert setup.check()
assert x.check()
assert not x2.check()
assert not x3.check()
assert not piplog.check()
|
blindroot/pycmd
|
test_pycmd.py
|
Python
|
mit
| 5,157
|
#!/usr/bin/env python
# encoding: utf-8
"""
redis_utils.py
"""
import inspect
import os
import redis
import redis.sentinel
import redis_lock
import time
import traceback
from redis.exceptions import ConnectionError, RedisError
class StoneRedis(redis.client.Redis):
def __init__(self, *args, **kwargs):
        ''' Original method. Called through *args/**kwargs to keep compatibility with future versions
        of redis-py. If we need to pass non-existing arguments they would have to be treated here:
        self.myparam = kwargs.pop(myparam)
        If new arguments are added to this class they must also be added to the pipeline method and be treated in the StonePipeline class.
'''
# Save them with re connection purposes
self.args = args
self.kwargs = kwargs
# conn_retries is the number of times that reconnect will try to connect
if 'conn_retries' in kwargs:
self.conn_retries = kwargs.pop('conn_retries')
else:
self.conn_retries = 1
        # max_sleep is the maximum amount of time between reconnection attempts used by safe_reconnect
if 'max_sleep' in kwargs:
self.max_sleep = kwargs.pop('max_sleep')
else:
self.max_sleep = 30
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = None
super(redis.client.Redis, self).__init__(*args, **kwargs)
def ping(self):
try:
super(StoneRedis, self).ping()
        except Exception:
return False
return True
def connect(self, conn_retries=None):
        ''' Connects to Redis with exponential backoff (3**n) '''
return self.reconnect(conn_retries=conn_retries)
def reconnect(self, conn_retries=None):
        ''' Connects to Redis with exponential backoff (3**n) '''
if conn_retries is None:
conn_retries = self.conn_retries
count = 0
if self.logger:
self.logger.info('Connecting to Redis..')
while count < conn_retries:
super(redis.client.Redis, self).__init__(*self.args, **self.kwargs)
if self.ping():
if self.logger:
self.logger.info('Connected to Redis!')
return True
else:
sl = min(3 ** count, self.max_sleep)
if self.logger:
self.logger.info('Connecting failed, retrying in {0} seconds'.format(sl))
time.sleep(sl)
count += 1
raise ConnectionError
def safe_reconnect(self):
        ''' Connects to Redis with exponential backoff (3**n); won't return until successfully connected '''
count = 0
if self.logger:
self.logger.info('Connecting to Redis..')
while True:
super(redis.client.Redis, self).__init__(*self.args, **self.kwargs)
if self.ping():
if self.logger:
self.logger.info('Connected to Redis!')
return True
else:
sl = min(3 ** count, self.max_sleep)
if self.logger:
self.logger.info('Connecting failed, retrying in {0} seconds'.format(sl))
time.sleep(sl)
count += 1
def _multi_lpop_pipeline(self, pipe, queue, number):
''' Pops multiple elements from a list in a given pipeline'''
pipe.lrange(queue, 0, number - 1)
pipe.ltrim(queue, number, -1)
def multi_lpop(self, queue, number, transaction=False):
''' Pops multiple elements from a list
This operation will be atomic if transaction=True is passed
'''
        try:
            pipe = self.pipeline(transaction=transaction)
            pipe.multi()
            self._multi_lpop_pipeline(pipe, queue, number)
            return pipe.execute()[0]
        except IndexError:
            return []
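    # Illustrative usage (hedged): items = conn.multi_lpop('jobs', 100) pops
    # up to 100 items in a single round trip; pass transaction=True when the
    # pop and trim must happen atomically.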
def _multi_rpush_pipeline(self, pipe, queue, values, bulk_size=0):
''' Pushes multiple elements to a list in a given pipeline
If bulk_size is set it will execute the pipeline every bulk_size elements
'''
        cont = 0
        for value in values:
            pipe.rpush(queue, value)
            cont += 1
            if bulk_size != 0 and cont % bulk_size == 0:
                pipe.execute()
def multi_rpush(self, queue, values, bulk_size=0, transaction=False):
''' Pushes multiple elements to a list
If bulk_size is set it will execute the pipeline every bulk_size elements
This operation will be atomic if transaction=True is passed
'''
# Check that what we receive is iterable
if hasattr(values, '__iter__'):
pipe = self.pipeline(transaction=transaction)
pipe.multi()
self._multi_rpush_pipeline(pipe, queue, values, bulk_size)
pipe.execute()
else:
raise ValueError('Expected an iterable')
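    # Illustrative usage (hedged): conn.multi_rpush('jobs', payloads,
    # bulk_size=1000) flushes the pipeline every 1000 pushes to bound the
    # pipeline's memory use.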
def multi_rpush_limit(self, queue, values, limit=100000):
        ''' Pushes multiple elements to a list in an atomic way until it reaches a certain size
        Once the limit is reached, the function trims the oldest elements
        This operation runs in Lua, so it is always atomic
'''
lua = '''
local queue = KEYS[1]
local max_size = tonumber(KEYS[2])
local table_len = tonumber(table.getn(ARGV))
local redis_queue_len = tonumber(redis.call('LLEN', queue))
local total_size = redis_queue_len + table_len
local from = 0
if total_size >= max_size then
-- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size
redis.call('PUBLISH', 'DEBUG', 'trim')
if redis_queue_len - max_size + table_len > 0 then
from = redis_queue_len - max_size + table_len
else
from = 0
end
redis.call('LTRIM', queue, from, redis_queue_len)
end
for _,key in ipairs(ARGV) do
redis.call('RPUSH', queue, key)
end
return 1
'''
# Check that what we receive is iterable
if hasattr(values, '__iter__'):
if len(values) > limit:
raise ValueError('The iterable size is bigger than the allowed limit ({1}): {0}'.format(len(values), limit))
try:
self.multi_rpush_limit_script([queue, limit], values)
except AttributeError:
if self.logger:
self.logger.info('Script not registered... registering')
# If the script is not registered, register it
self.multi_rpush_limit_script = self.register_script(lua)
self.multi_rpush_limit_script([queue, limit], values)
else:
raise ValueError('Expected an iterable')
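    # Illustrative usage (hedged): conn.multi_rpush_limit('events', batch,
    # limit=50000) appends the batch while the Lua script trims the oldest
    # entries so the list never exceeds 50000 elements.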
def rpush_limit(self, queue, value, limit=100000):
        ''' Pushes an element to a list in an atomic way until it reaches a certain size
        Once the limit is reached, the function trims the oldest elements
        This operation runs in Lua, so it is always atomic
'''
lua = '''
local queue = KEYS[1]
local max_size = tonumber(KEYS[2])
local table_len = 1
local redis_queue_len = tonumber(redis.call('LLEN', queue))
local total_size = redis_queue_len + table_len
local from = 0
if total_size >= max_size then
-- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size
redis.call('PUBLISH', 'DEBUG', 'trim')
if redis_queue_len - max_size + table_len > 0 then
from = redis_queue_len - max_size + table_len
else
from = 0
end
redis.call('LTRIM', queue, from, redis_queue_len)
end
redis.call('RPUSH', queue, ARGV[1])
return 1
'''
try:
self.rpush_limit_script([queue, limit], [value])
except AttributeError:
if self.logger:
self.logger.info('Script not registered... registering')
# If the script is not registered, register it
self.rpush_limit_script = self.register_script(lua)
self.rpush_limit_script([queue, limit], [value])
def get_lock(self, lockname, locktime=60, auto_renewal=False):
        ''' Gets a lock and returns it if it can be established. Returns False otherwise '''
pid = os.getpid()
        caller = inspect.stack()[1][3]  # name of the calling function
try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
except:
if self.logger:
self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
return False
try:
lock = rl.acquire(blocking=False)
except RedisError:
return False
if not lock:
return False
else:
return rl
def wait_for_lock(self, lockname, locktime=60, auto_renewal=False):
''' Gets a lock or waits until it is able to get it '''
pid = os.getpid()
        caller = inspect.stack()[1][3]  # name of the calling function
try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
except AssertionError:
if self.logger:
self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
return False
cont = 1
t0 = time.time()
lock = None
while not lock:
time.sleep(.05)
cont += 1
if cont % 20 == 0:
if self.logger:
self.logger.debug('Process {0} ({1}) waiting for lock {2}. {3} seconds elapsed.'.format(pid, caller, lockname, time.time() - t0))
# lock = rl.lock(lockname, locktime_ms)
try:
lock = rl.acquire()
except RedisError:
pass
if self.logger:
self.logger.debug('Process {0} ({1}) got lock {2} for {3} seconds'.format(pid, caller, lockname, locktime))
return rl
def release_lock(self, lock, force=False):
''' Frees a lock '''
pid = os.getpid()
        caller = inspect.stack()[1][3]  # name of the calling function
# try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
# except:
# logger.error('Process {0} ({1}) could not release lock {2}'.format(pid, caller, lock.resource))
# return False
if lock and lock._held:
lock.release()
if self.logger:
self.logger.debug('Process {0} ({1}) released lock'.format(pid, caller))
def pipeline(self, transaction=True, shard_hint=None):
''' Return a pipeline that support StoneRedis custom methods '''
args_dict = {
'connection_pool': self.connection_pool,
'response_callbacks': self.response_callbacks,
'transaction': transaction,
'shard_hint': shard_hint,
'logger': self.logger,
}
return StonePipeline(**args_dict)
class StonePipeline(redis.client.BasePipeline, StoneRedis):
''' Pipeline for the StoneRedis class.
        If we need to pass non-existing arguments they would have to be removed:
kwargs.pop(myparam)
'''
def __init__(self, *args, **kwargs):
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = None
super(StonePipeline, self).__init__(*args, **kwargs)
def multi_lpop(self, queue, number, transaction=False):
''' Pops multiple elements from a list '''
        self._multi_lpop_pipeline(self, queue, number)
def multi_rpush(self, queue, values, bulk_size=0, transaction=False):
''' Pushes multiple elements to a list '''
# Check that what we receive is iterable
if hasattr(values, '__iter__'):
self._multi_rpush_pipeline(self, queue, values, 0)
else:
raise ValueError('Expected an iterable')
|
stoneworksolutions/stoneredis
|
stoneredis/client.py
|
Python
|
mit
| 13,287
|
import bucket
import unittest
class BucketTestCase(unittest.TestCase):
def setUp(self):
self.app = bucket.app.test_client()
def test_set(self):
response = self.app.post('/set/foobar', data={'foobar': 'wangskata'})
assert response.status_code == 200
assert response.data == 'OK'
def test_get(self):
response = self.app.get('/get/foobar')
assert response.status_code == 200
assert response.data == 'wangskata'
if __name__ == '__main__':
unittest.main()
|
marconi/blog-post-bucket
|
bucket_test.py
|
Python
|
mit
| 529
|
import sys
sys.path.append("../naive_bayes/")
from kl_distance import KLDistanceEvaluator
INPUT = {
"word_threshold": 10,
"tag_threshold": 5,
"base_path": "../../data/stat/",
}
CLASSIFIER = {
"retrain_model": False,
"beta" : 0.5,
"train_count": 300000,
"sample_count": 50
}
|
StackResys/Stack-Resys
|
src/evaluation/config.py
|
Python
|
bsd-3-clause
| 304
|
import espressomd
import espressomd.checkpointing
import espressomd.electrostatics
import espressomd.virtual_sites
import espressomd.accumulators
import espressomd.observables
checkpoint = espressomd.checkpointing.Checkpointing(checkpoint_id="mycheckpoint", checkpoint_path="@CMAKE_CURRENT_BINARY_DIR@")
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.cell_system.skin = 0.4
system.time_step = 0.01
system.min_global_cut = 2.0
system.part.add(pos=[1.0]*3)
system.part.add(pos=[1.0, 1.0, 2.0])
if espressomd.has_features('ELECTROSTATICS'):
system.part[0].q = 1
system.part[1].q = -1
p3m = espressomd.electrostatics.P3M(prefactor=1.0, accuracy=0.1, mesh=10, cao=1, alpha=1.0, r_cut=1.0, tune=False)
system.actors.add(p3m)
obs = espressomd.observables.ParticlePositions(ids=[0,1])
acc = espressomd.accumulators.MeanVarianceCalculator(obs=obs)
acc.update()
system.part[0].pos = [1.0, 2.0, 3.0]
acc.update()
system.thermostat.set_langevin(kT=1.0, gamma=2.0)
if espressomd.has_features(['VIRTUAL_SITES', 'VIRTUAL_SITES_RELATIVE']):
    system.virtual_sites = espressomd.virtual_sites.VirtualSitesRelative(
        have_velocity=True, have_quaternion=True)
system.part[1].vs_auto_relate_to(0)
if espressomd.has_features(['LENNARD_JONES']):
system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=1.2, sigma=1.3, cutoff=2.0, shift=0.1)
checkpoint.register("system")
checkpoint.register("acc")
checkpoint.save(0)
|
KonradBreitsprecher/espresso
|
testsuite/save_checkpoint.py
|
Python
|
gpl-3.0
| 1,491
|
#!/usr/bin/env python
import os
import re
import sys
import glob
import gzip
import argparse
from copy import copy
from decimal import Decimal, InvalidOperation
number_pattern = re.compile(r"(-?\d+\.?\d*(e[+-]?\d+)?)", re.IGNORECASE)
# Search an input value for a number
def findNumber(value):
try:
return Decimal(value)
except InvalidOperation:
try:
return Decimal(number_pattern.search(value.replace(',', '')).group())
except AttributeError:
raise Exception('Value "{0}" does not contain a number'.format(value))
def concatFiles(files, opts='r'):
for f in files:
for line in openFile(f, opts):
yield line
def fileRange(startFile, endFile):
startDir, startFile = os.path.split(startFile)
_, endFile = os.path.split(endFile)
if startDir == '':
files = glob.iglob('*');
else:
files = glob.iglob(os.path.expanduser(startDir) + '/*');
ret = []
for fn in files:
if startFile <= os.path.basename(fn) <= endFile:
ret.append(fn)
return sorted(ret)
def openFile(filename, opts):
if type(filename) is str:
if filename == '-':
return sys.stdin if opts == 'r' else sys.stdout
else:
return gzip.open(os.path.expanduser(filename), opts+'b') if filename.endswith('.gz') else open(os.path.expanduser(filename), opts)
    elif hasattr(filename, 'read') or hasattr(filename, 'write'):
        # Already an open file-like object (the py2 builtin `file` type is gone in py3)
        return filename
else:
raise IOError('Unknown input type: %s' % type(filename))
class Header:
def __init__(self, columns = []):
self.columns = columns
def __len__(self):
return len(self.columns)
def __iter__(self):
return self.columns.__iter__()
def setCol(self, colName, index):
while len(self.columns) <= index:
self.columns.append(str(len(self.columns)))
self.columns[index] = colName
def addCol(self, colName):
col = colName
i = 1
while col in self.columns:
col = colName+str(i)
i += 1
self.columns.append(col)
return len(self.columns) - 1
def addCols(self, colNames):
return [self.addCol(colName) for colName in colNames]
def extend(self, header):
self.addCols(header.columns)
def index(self, colName):
if colName is None:
return colName
elif colName in self.columns:
return self.columns.index(colName)
else:
try:
return int(colName)
except ValueError as e:
raise ValueError('Invalid column %s specified' % colName)
def indexes(self, colNames):
return [self.index(colName) for colName in colNames]
def name(self, index):
try:
return self.columns[int(index)]
except ValueError:
return str(index)
except IndexError:
return 'col_'+str(index)
def names(self, indexes):
return [self.name(index) for index in indexes]
def copy(self):
return Header(copy(self.columns))
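# Illustrative sketch (hedged): given h = Header(['time', 'value']),
# h.index('value') == 1, h.index('3') falls back to integer column 3, and
# h.name(9) returns 'col_9' for an out-of-range index.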
class FileWriter:
def __init__(self, outputStream, reader, args, opts = 'w'):
self._outputStream = openFile(outputStream, opts)
self._delimiter = args.delimiter if args.delimiter else os.environ.get('TOOLBOX_DELIMITER', ' ')
self.write = self._firstwrite
self._header = Header()
if reader and reader.hasHeader:
if hasattr(args, 'append') and args.append:
self._header = reader.header.copy()
else:
if hasattr(args, 'group'):
self._header.addCols(reader.header.names(args.group))
if hasattr(args, 'labels'):
self._header.addCols(args.labels)
@property
def header(self):
return self._header
@property
def hasHeader(self):
return len(self._header.columns) > 0
def _firstwrite(self, chunks):
self.write = self._write
if self.hasHeader:
self.write(self._header.columns)
if len(self._header) != len(chunks):
                sys.stderr.write('Warning: number of columns in output does not match number of columns in header\n')
self.write(chunks)
def _write(self, chunks):
self._outputStream.write(self._delimiter.join(map(str, chunks))+'\n')
class FileReader:
def __init__(self, inputStream, args):
self._inputStream = openFile(inputStream, 'r')
self._delimiter = args.delimiter if args.delimiter else os.environ.get('TOOLBOX_DELIMITER', None)
header = args.header or os.environ.get('TOOLBOX_HEADER', '').lower() == 'true'
if header:
self._header = self._readHeader()
self.next = self._firstnext
else:
self._header = Header()
self.next = self._next
@property
def delimiter(self):
return self._delimiter
@property
def header(self):
return self._header
@property
def hasHeader(self):
return len(self._header.columns) > 0
def _readHeader(self):
preamble = next(self._inputStream)
return Header(preamble.strip().split(self._delimiter))
def __iter__(self):
return self
def __next__(self):
return self.next()
def _firstnext(self):
self.next = self._next
row = self.next()
if len(row) != len(self._header):
            sys.stderr.write('Warning: number of columns in input does not match number of columns in header\n')
return row
def _next(self):
return next(self._inputStream).strip().split(self._delimiter)
def readline(self):
try:
return self.next()
except StopIteration:
return None
def close(self):
self._inputStream.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class ParameterParser:
def __init__(self, descrip, infiles = 1, outfile = True, group = True, columns = 1, append = True, labels = None, ordered = True):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=descrip)
if infiles == 0:
pass
elif infiles == 1:
self.parser.add_argument('infile', nargs='?', default='-', help='use - for stdin')
else:
self.parser.add_argument('infiles', nargs='*', default=['-'], help='use - for stdin')
if outfile:
self.parser.add_argument('outfile', nargs='?', default='-', help='use - for stdout')
if group:
self.parser.add_argument('-g', '--group', nargs='+', default=[], help='column(s) to group input by')
if columns == 1:
self.parser.add_argument('-c', '--column', default=0, help='column to manipulate')
elif columns != 0:
self.parser.add_argument('-c', '--columns', nargs='+', default=[0], help='column(s) to manipulate')
if labels:
self.parser.add_argument('-l', '--labels', nargs='+', default=labels, help='labels for the column(s)')
if append:
self.parser.add_argument('--append', action='store_true', default=False, help='keep original columns in output')
if ordered:
self.parser.add_argument('--ordered', action='store_true', default=False, help='input is sorted by group')
self.parser.add_argument('--delimiter', default=None)
self.parser.add_argument('--header', action='store_true', default=False)
def parseArgs(self):
args = self.parser.parse_args()
if hasattr(args, 'infile'):
args.infile = FileReader(args.infile, args)
elif hasattr(args, 'infiles'):
args.infiles = [FileReader(infile, args) for infile in args.infiles]
args.infile = args.infiles[0]
if hasattr(args, 'group'):
args.group_names = args.infile.header.names(args.group)
args.group = args.infile.header.indexes(args.group)
if hasattr(args, 'columns'):
args.columns_names = args.infile.header.names(args.columns)
args.columns = args.infile.header.indexes(args.columns)
if hasattr(args, 'column'):
args.column_name = args.infile.header.name(args.column)
args.column = args.infile.header.index(args.column)
return args
def getArgs(self, args):
if hasattr(args, 'outfile'):
if hasattr(args, 'infile'):
args.outfile = FileWriter(args.outfile, args.infile, args)
else:
args.outfile = FileWriter(args.outfile, None, args)
return args
|
scoky/pytools
|
data_tools/files.py
|
Python
|
mit
| 8,781
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
# ######################################################################
#
# odoo-italia-bot: #odoo-it IRC BOT
#
# Copyright 2014 Francesco OpenCode Apruzzese <cescoap@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# ######################################################################
# -------
# IMPORTS
# -------
from os import path
import re
from random import randint
from datetime import datetime
import lib.botlib as botlib
from sentence.insult import INSULTS
from sentence.answer import ANSWERS
# ---------
# CONSTANTS
# ---------
BOT_NAME = 'Ticelli'
PROJECT_PATH = path.dirname(path.realpath(__file__))
# ----- Create a new class for our bot extending the Bot class from botlib
class OdooItaliaBotIRC(botlib.Bot):
def __init__(self, server, channel, nick, password=None):
botlib.Bot.__init__(self, server, 6667, channel, nick)
def __actions__(self):
botlib.Bot.__actions__(self)
# ----- Update log file
with open('%s/logs/%s' % (PROJECT_PATH,
datetime.today().strftime('%Y_%m_%d')),
'ab') as log_file:
log_file.write(self.data)
# ----- Get the senders username
username = self.get_username()
        # ----- With this if we exclude standard channel messages
        if self.nick.lower() != username.lower() and '\n' not in username \
                and ' ' not in username:
message_yet = False
chat_message = self.data.lower()
# ----- Check if there is some valid answer
for regex_answer in ANSWERS:
if message_yet:
break
formatted_regex_answer = regex_answer.format(bot=BOT_NAME)
formatted_regex_answer = formatted_regex_answer.lower()
#print '[MSG]', chat_message
#print '[RGX]', formatted_regex_answer
# ----- If valid answer exists, use it
if re.search(formatted_regex_answer, chat_message):
self.protocol.privmsg(
self.channel,
ANSWERS[regex_answer].format(
username=username,
bot=BOT_NAME,
insult=self.get_random_insult(chat_message),))
message_yet = True
def get_random_insult(self, chat_message):
insulted = chat_message.split()[-1]
random_index = randint(0, len(INSULTS)-1)
return INSULTS[random_index].format(insulted=insulted)
if __name__ == "__main__":
# ----- New instance of our bot and run it
OdooItaliaBotIRC("irc.freenode.net", "#odoo-it", BOT_NAME).run()
|
OpenCode/odoo-italia-bot-irc
|
bot.py
|
Python
|
gpl-3.0
| 3,461
|
from NodeDefender.mqtt.message.respond.icpe import sys, zwave
def event(topic, payload):
if topic['node'] == '0':
zwave.event(topic, payload)
elif topic['node'] == 'sys':
sys.event(topic, payload)
|
CTSNE/NodeDefender
|
NodeDefender/mqtt/message/respond/icpe/__init__.py
|
Python
|
mit
| 222
|
__author__ = "Zhenzhou Wu"
__copyright__ = "Copyright 2012, Zhenzhou Wu"
__credits__ = ["Zhenzhou Wu"]
__license__ = "3-clause BSD"
__email__ = "hyciswu@gmail.com"
__maintainer__ = "Zhenzhou Wu"
"""
Functionality for preprocessing Datasets, with Preprocessor, GCN and Standardize adapted from pylearn2
"""
import sys
import copy
import logging
import time
import warnings
import numpy as np
try:
from scipy import linalg
except ImportError:
warnings.warn("Could not import scipy.linalg")
from theano import function
import theano.tensor as T
log = logging.getLogger(__name__)
class Preprocessor(object):
"""
Adapted from pylearn2
Abstract class.
An object that can preprocess a dataset.
Preprocessing a dataset implies changing the data that
a dataset actually stores. This can be useful to save
memory--if you know you are always going to access only
the same processed version of the dataset, it is better
to process it once and discard the original.
Preprocessors are capable of modifying many aspects of
a dataset. For example, they can change the way that it
converts between different formats of data. They can
change the number of examples that a dataset stores.
In other words, preprocessors can do a lot more than
just example-wise transformations of the examples stored
in the dataset.
"""
def apply(self, X):
"""
dataset: The dataset to act on.
can_fit: If True, the Preprocessor can adapt internal parameters
based on the contents of dataset. Otherwise it must not
fit any parameters, or must re-use old ones.
Typical usage:
# Learn PCA preprocessing and apply it to the training set
my_pca_preprocessor.apply(training_set, can_fit = True)
# Now apply the same transformation to the test set
my_pca_preprocessor.apply(test_set, can_fit = False)
Note: this method must take a dataset, rather than a numpy ndarray,
for a variety of reasons:
1) Preprocessors should work on any dataset, and not all
datasets will store their data as ndarrays.
2) Preprocessors often need to change a dataset's metadata.
For example, suppose you have a DenseDesignMatrix dataset
of images. If you implement a fovea Preprocessor that
reduces the dimensionality of images by sampling them finely
near the center and coarsely with blurring at the edges,
then your preprocessor will need to change the way that the
dataset converts example vectors to images for visualization.
"""
raise NotImplementedError(str(type(self))+" does not implement an apply method.")
def invert(self, X):
"""
Do any necessary prep work to be able to support the "inverse" method
later. Default implementation is no-op.
"""
raise NotImplementedError(str(type(self))+" does not implement an invert method.")
class ExamplewisePreprocessor(Preprocessor):
"""
Abstract class.
A Preprocessor that restricts the actions it can do in its
apply method so that it could be implemented as a Block's
perform method.
In other words, this Preprocessor can't modify the Dataset's
metadata, etc.
TODO: can these things fit themselves in their apply method?
That seems like a difference from Block.
"""
def as_block(self):
raise NotImplementedError(str(type(self))+" does not implement as_block.")
class Standardize(ExamplewisePreprocessor):
"""
Adapted from pylearn2
Subtracts the mean and divides by the standard deviation.
"""
def __init__(self, global_mean=False, global_std=False, std_eps=1e-4, can_fit=True):
"""
Initialize a Standardize preprocessor.
Parameters
----------
global_mean : bool
If `True`, subtract the (scalar) mean over every element
in the design matrix. If `False`, subtract the mean from
each column (feature) separately. Default is `False`.
global_std : bool
If `True`, after centering, divide by the (scalar) standard
deviation of every element in the design matrix. If `False`,
divide by the column-wise (per-feature) standard deviation.
Default is `False`.
std_eps : float
Stabilization factor added to the standard deviations before
dividing, to prevent standard deviations very close to zero
from causing the feature values to blow up too much.
            Default is `1e-4`.
        can_fit : bool
            If `True`, `apply` estimates the mean and standard deviation from
            the data it receives; if `False`, previously fitted statistics
            are reused. Default is `True`.
        """
self._global_mean = global_mean
self._global_std = global_std
self._std_eps = std_eps
self._mean = None
self._std = None
self.can_fit = can_fit
def apply(self, X):
if self.can_fit:
self._mean = X.mean() if self._global_mean else X.mean(axis=0)
self._std = X.std() if self._global_std else X.std(axis=0)
else:
if self._mean is None or self._std is None:
raise ValueError("can_fit is False, but Standardize object "
"has no stored mean or standard deviation")
X = (X - self._mean) / (self._std_eps + self._std)
return X
def invert(self, X):
return X * (self._std_eps + self._std) + self._mean
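# Illustrative sketch (hedged): fit on training data, then freeze the
# statistics for the test set, e.g.
#   std = Standardize()
#   train = std.apply(train)   # estimates mean/std from the data
#   std.can_fit = False
#   test = std.apply(test)     # reuses the training statistics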
class GCN(Preprocessor):
"""
Adapted from pylearn2
Global contrast normalizes by (optionally) subtracting the mean
across features and then normalizes by either the vector norm
or the standard deviation (across features, for each example).
Parameters
----------
X : ndarray, 2-dimensional
Design matrix with examples indexed on the first axis and
features indexed on the second.
scale : float, optional
Multiply features by this const.
subtract_mean : bool, optional
Remove the mean across features/pixels before normalizing.
Defaults to `False`.
use_std : bool, optional
Normalize by the per-example standard deviation across features
instead of the vector norm.
sqrt_bias : float, optional
Fudge factor added inside the square root. Defaults to 0.
min_divisor : float, optional
If the divisor for an example is less than this value,
do not apply it. Defaults to `1e-8`.
"""
def __init__(self, scale=1., subtract_mean=False, use_std=False,
sqrt_bias=0., min_divisor=1e-8):
self.scale = scale
self.subtract_mean = subtract_mean
self.use_std = use_std
self.sqrt_bias = sqrt_bias
self.min_divisor = min_divisor
def apply(self, X):
"""
Returns
-------
Xp : ndarray, 2-dimensional
The contrast-normalized features.
Notes
-----
`sqrt_bias` = 10 and `use_std = True` (and defaults for all other
parameters) corresponds to the preprocessing used in [1].
.. [1] A. Coates, H. Lee and A. Ng. "An Analysis of Single-Layer
Networks in Unsupervised Feature Learning". AISTATS 14, 2011.
http://www.stanford.edu/~acoates/papers/coatesleeng_aistats_2011.pdf
"""
assert X.ndim == 2, "X.ndim must be 2"
scale = float(self.scale)
# Note: this is per-example mean across pixels, not the
# per-pixel mean across examples. So it is perfectly fine
# to subtract this without worrying about whether the current
# object is the train, valid, or test set.
if self.subtract_mean:
self.mean = X.mean(axis=1)[:, np.newaxis]
X = X - self.mean # Makes a copy.
else:
X = X.copy()
if self.use_std:
# ddof=1 simulates MATLAB's var() behaviour, which is what Adam
# Coates' code does.
self.normalizers = np.sqrt(self.sqrt_bias + X.var(axis=1, ddof=1)) / scale
else:
self.normalizers = np.sqrt(self.sqrt_bias + (X ** 2).sum(axis=1)) / scale
# Don't normalize by anything too small.
self.normalizers[self.normalizers < self.min_divisor] = 1.
X /= self.normalizers[:, np.newaxis] # Does not make a copy.
return X
def invert(self, X):
try:
if self.subtract_mean:
X = X + self.mean
rval = X * self.normalizers[:, np.newaxis]
return rval
except AttributeError:
print 'apply() needs to be used before invert()'
        except:
            print "Unexpected error:", sys.exc_info()[0]
            raise
class LogGCN(GCN):
def __init__(self, positive_values=True, **kwarg):
'''
        positive_values: bool
indicates whether the output of the processor should be scaled to be positive
'''
self.positive_values = positive_values
super(LogGCN, self).__init__(**kwarg)
def apply(self, X):
if self.positive_values:
rval = X + 1
rval = np.log(rval)
return super(LogGCN, self).apply(rval)
def invert(self, X):
X = super(LogGCN, self).invert(X)
if self.positive_values:
return np.exp(X) - 1
else:
return np.exp(X)
class Log(Preprocessor):
def __init__(self, positive_values=False, **kwarg):
'''
        positive_values: bool
indicates whether the output of the processor should be scaled to be positive
'''
self.positive_values = positive_values
def apply(self, X):
if self.positive_values:
X = X + 1
return np.log(X)
def invert(self, X):
if self.positive_values:
return np.exp(X) - 1
else:
return np.exp(X)
class Scale(Preprocessor):
"""
Scale the input into a range
Parameters
----------
X : ndarray, 2-dimensional
numpy matrix with examples indexed on the first axis and
features indexed on the second.
global_max : real
the maximum value of the whole dataset. If not provided, global_max is set to X.max()
global_min : real
the minimum value of the whole dataset. If not provided, global_min is set to X.min()
scale_range : size 2 list
set the upper bound and lower bound after scaling
buffer : float
the buffer on the upper lower bound such that [L+buffer, U-buffer]
"""
def __init__(self, global_max=89, global_min=-23, scale_range=[-1,1], buffer=0.5):
self.scale_range = scale_range
self.buffer = buffer
self.max = global_max
self.min = global_min
assert scale_range[0] + buffer < scale_range[1] - buffer, \
'the lower bound is larger than the upper bound'
def apply(self, X):
self.max = self.max if self.max else X.max()
self.min = self.min if self.min else X.min()
width = self.max - self.min
assert width > 0, 'the max is not bigger than the min'
scale = (self.scale_range[1] - self.scale_range[0] - 2 * self.buffer) / width
X = scale * (X - self.min)
X = X + self.scale_range[0] + self.buffer
return X
def invert(self, X):
        if self.max is None or self.min is None:
            raise ValueError('to use invert, either global_max and global_min '
                             'are provided or apply(X) is used before')
width = self.max - self.min
assert width > 0, 'the max is not bigger than the min'
scale = width / (self.scale_range[1] - self.scale_range[0] - 2 * self.buffer)
X = scale * (X - self.scale_range[0] - self.buffer)
X = X + self.min
return X
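# Usage sketch for Scale (hypothetical data): with the defaults above, values
# in [-23, 89] are mapped linearly onto [-0.5, 0.5], i.e.
# [scale_range[0] + buffer, scale_range[1] - buffer], and invert() undoes it:
#   sc = Scale()
#   X = np.random.uniform(-23, 89, size=(10, 5))
#   assert np.allclose(sc.invert(sc.apply(X)), X)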
|
hycis/Pynet
|
pynet/datasets/preprocessor.py
|
Python
|
apache-2.0
| 12,105
|
#!/usr/bin/env python
"""
A proxy server which enables multiple interactive wiring sessions to interact
with the same SpiNNaker machine.
"""
import argparse
import logging
from spinner.scripts import arguments
from spinner.probe import WiringProbe
from spinner.proxy import ProxyServer, DEFAULT_PORT
from rig.machine_control import BMPController
def main(args=None):
parser = argparse.ArgumentParser(
description="Start a proxy server to enable multiple interactive wiring "
"sessions to interact with the same SpiNNaker machine.")
arguments.add_version_args(parser)
parser.add_argument("--host", "-H", type=str, default="",
help="Host interface to listen on (default: any)")
parser.add_argument("--port", "-p", type=int, default=DEFAULT_PORT,
help="Port listen on (default: %(default)d)")
parser.add_argument("--verbose", "-v", action="count", default=0,
help="Increase verbosity.")
arguments.add_topology_args(parser)
arguments.add_cabinet_args(parser)
arguments.add_bmp_args(parser)
# Process command-line arguments
args = parser.parse_args(args)
(w, h), transformation, uncrinkle_direction, folds =\
arguments.get_topology_from_args(parser, args)
cabinet, num_frames = arguments.get_cabinets_from_args(parser, args)
bmp_ips = arguments.get_bmps_from_args(parser, args,
cabinet.num_cabinets,
num_frames)
if cabinet.num_cabinets == num_frames == 1:
num_boards = 3 * w * h
else:
num_boards = cabinet.boards_per_frame
# Set verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
# Create a BMP connection
if len(bmp_ips) == 0:
parser.error("All BMPs must be supplied using --bmp")
bmp_controller = BMPController(bmp_ips)
# Create a wiring probe
wiring_probe = WiringProbe(bmp_controller,
cabinet.num_cabinets,
num_frames,
num_boards)
proxy_server = ProxyServer(bmp_controller, wiring_probe,
args.host, args.port)
print("Proxy server starting...")
proxy_server.main()
return 0
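# Illustrative invocation (the topology, cabinet and BMP options are defined
# in spinner.scripts.arguments and are not shown here):
#   python proxy_server.py --port 6512 -vv <topology/cabinet/BMP options>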
if __name__=="__main__": # pragma: no cover
import sys
sys.exit(main())
|
SpiNNakerManchester/SpiNNer
|
spinner/scripts/proxy_server.py
|
Python
|
gpl-2.0
| 2,389
|
#! /usr/bin/python3
def main():
try:
while True:
line1 = input().strip().split(' ')
n = int(line1[0])
name_list = []
num_list = [0]
for i in range(1, len(line1)):
if i % 2 == 1:
name_list.append(line1[i])
else:
num_list.append(int(line1[i]))
ans = [0 for _ in range(len(num_list))]
m = int(input())
for i in range(len(num_list) - 1, 0, -1):
ans[i] = m % num_list[i]
                m = m // num_list[i]  # floor division; int(m / x) can lose precision for large m
ans[0] = m
add = 0
if ans[1] * 2 >= num_list[1]:
add = 1
print("{} {}".format(ans[0] + add, name_list[0]))
add = 0
if n > 2 and ans[2] * 2 >= num_list[2]:
add = 1
if ans[1] + add >= num_list[1]:
print("{} {} {} {}".format(ans[0] + 1, name_list[0], 0,
name_list[1]))
else:
print("{} {} {} {}".format(ans[0], name_list[0], ans[1] +
add, name_list[1]))
except EOFError:
pass
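# Worked example (illustrative): for the input line "2 year 12 month" the
# parser yields name_list == ['year', 'month'] and num_list == [0, 12]; with
# m == 30 the mixed-radix decode gives ans == [2, 6], printed as "3 year"
# (6 * 2 >= 12 rounds up) and "2 year 6 month".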
if __name__ == '__main__':
main()
|
zyoohv/zyoohv.github.io
|
code_repository/tencent_ad_contest/tencent_contest/model/main.py
|
Python
|
mit
| 1,329
|
#!/usr/bin/env python
'''
'''
import unittest
from testRoot import RootClass
from noink.user_db import UserDB
from noink.role_db import RoleDB
from noink.activity_table import get_activity_dict
class AssignRole(RootClass):
def test_AssignRole(self):
user_db = UserDB()
role_db = RoleDB()
u = user_db.add("jontest", "pass", "Jon Q. Testuser")
g = user_db.add_group('test_group')
user_db.add_to_group(u, g)
r = role_db.add_role('test_role', 'test role', get_activity_dict(True))
role_db.assign_role(u, g, r)
all_roles_1st = set(rm.role for rm in role_db.get_roles(u))
was_in_before = r in all_roles_1st
role_db.revoke_role(u, g, r)
all_roles_2nd = set(rm.role for rm in role_db.get_roles(u))
not_in_after = r not in all_roles_2nd
self.assertTrue(was_in_before and not_in_after)
if __name__ == '__main__':
unittest.main()
|
criswell/noink
|
src/tests/test_RevokeRole.py
|
Python
|
agpl-3.0
| 939
|
#!/usr/bin/env python
"""
Common utility functions
"""
import os
import re
import sys
import gzip
import bz2
import numpy
def init_gene_DE():
"""
Initializing the gene structure for DE
"""
gene_det = [('id', 'f8'),
('chr', 'S15'),
('chr_num', 'f8'),
('exons', numpy.dtype),
('gene_info', numpy.dtype),
('is_alt_spliced', 'f8'),
('name', 'S25'),
('source', 'S25'),
('start', 'f8'),
('stop', 'f8'),
('strand', 'S2'),
('transcripts', numpy.dtype)]
return gene_det
def _open_file(fname):
"""
Open the file (supports .gz .bz2) and returns the handler
"""
try:
if os.path.splitext(fname)[1] == ".gz":
FH = gzip.open(fname, 'rb')
elif os.path.splitext(fname)[1] == ".bz2":
FH = bz2.BZ2File(fname, 'rb')
else:
FH = open(fname, 'rU')
except Exception as error:
sys.exit(error)
return FH
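# Usage sketch (hypothetical filename); compression is inferred from the extension:
#   FH = _open_file('annotation.gff.gz')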
def make_Exon_cod(strand_p, five_p_utr, cds_cod, three_p_utr):
"""
Create exon cordinates from UTR's and CDS region
"""
exon_pos = []
if strand_p == '+':
utr5_start, utr5_end = 0, 0
if five_p_utr != []:
utr5_start, utr5_end = five_p_utr[-1][0], five_p_utr[-1][1]
cds_5start, cds_5end = cds_cod[0][0], cds_cod[0][1]
jun_exon = []
if cds_5start-utr5_end == 0 or cds_5start-utr5_end == 1:
jun_exon = [utr5_start, cds_5end]
if len(cds_cod) == 1:
five_prime_flag = 0
if jun_exon != []:
five_p_utr = five_p_utr[:-1]
five_prime_flag = 1
for utr5 in five_p_utr:
exon_pos.append(utr5)
jun_exon = []
utr3_start, utr3_end = 0, 0
if three_p_utr != []:
utr3_start = three_p_utr[0][0]
utr3_end = three_p_utr[0][1]
if utr3_start-cds_5end == 0 or utr3_start-cds_5end == 1:
jun_exon = [cds_5start, utr3_end]
three_prime_flag = 0
if jun_exon != []:
cds_cod = cds_cod[:-1]
three_p_utr = three_p_utr[1:]
three_prime_flag = 1
if five_prime_flag == 1 and three_prime_flag == 1:
exon_pos.append([utr5_start, utr3_end])
if five_prime_flag == 1 and three_prime_flag == 0:
exon_pos.append([utr5_start, cds_5end])
cds_cod = cds_cod[:-1]
if five_prime_flag == 0 and three_prime_flag == 1:
exon_pos.append([cds_5start, utr3_end])
for cds in cds_cod:
exon_pos.append(cds)
for utr3 in three_p_utr:
exon_pos.append(utr3)
else:
if jun_exon != []:
five_p_utr = five_p_utr[:-1]
cds_cod = cds_cod[1:]
for utr5 in five_p_utr:
exon_pos.append(utr5)
exon_pos.append(jun_exon) if jun_exon != [] else ''
jun_exon = []
utr3_start, utr3_end = 0, 0
if three_p_utr != []:
utr3_start = three_p_utr[0][0]
utr3_end = three_p_utr[0][1]
cds_3start = cds_cod[-1][0]
cds_3end = cds_cod[-1][1]
if utr3_start-cds_3end == 0 or utr3_start-cds_3end == 1:
jun_exon = [cds_3start, utr3_end]
if jun_exon != []:
cds_cod = cds_cod[:-1]
three_p_utr = three_p_utr[1:]
for cds in cds_cod:
exon_pos.append(cds)
exon_pos.append(jun_exon) if jun_exon != [] else ''
for utr3 in three_p_utr:
exon_pos.append(utr3)
elif strand_p == '-':
utr3_start, utr3_end = 0, 0
if three_p_utr != []:
utr3_start = three_p_utr[-1][0]
utr3_end = three_p_utr[-1][1]
cds_3start = cds_cod[0][0]
cds_3end = cds_cod[0][1]
jun_exon = []
if cds_3start-utr3_end == 0 or cds_3start-utr3_end == 1:
jun_exon = [utr3_start, cds_3end]
if len(cds_cod) == 1:
three_prime_flag = 0
if jun_exon != []:
three_p_utr = three_p_utr[:-1]
three_prime_flag = 1
for utr3 in three_p_utr:
exon_pos.append(utr3)
jun_exon = []
(utr5_start, utr5_end) = (0, 0)
if five_p_utr != []:
utr5_start = five_p_utr[0][0]
utr5_end = five_p_utr[0][1]
if utr5_start-cds_3end == 0 or utr5_start-cds_3end == 1:
jun_exon = [cds_3start, utr5_end]
five_prime_flag = 0
if jun_exon != []:
cds_cod = cds_cod[:-1]
five_p_utr = five_p_utr[1:]
five_prime_flag = 1
if three_prime_flag == 1 and five_prime_flag == 1:
exon_pos.append([utr3_start, utr5_end])
if three_prime_flag == 1 and five_prime_flag == 0:
exon_pos.append([utr3_start, cds_3end])
cds_cod = cds_cod[:-1]
if three_prime_flag == 0 and five_prime_flag == 1:
exon_pos.append([cds_3start, utr5_end])
for cds in cds_cod:
exon_pos.append(cds)
for utr5 in five_p_utr:
exon_pos.append(utr5)
else:
if jun_exon != []:
three_p_utr = three_p_utr[:-1]
cds_cod = cds_cod[1:]
for utr3 in three_p_utr:
exon_pos.append(utr3)
if jun_exon != []:
exon_pos.append(jun_exon)
jun_exon = []
(utr5_start, utr5_end) = (0, 0)
if five_p_utr != []:
utr5_start = five_p_utr[0][0]
utr5_end = five_p_utr[0][1]
cds_5start = cds_cod[-1][0]
cds_5end = cds_cod[-1][1]
if utr5_start-cds_5end == 0 or utr5_start-cds_5end == 1:
jun_exon = [cds_5start, utr5_end]
if jun_exon != []:
cds_cod = cds_cod[:-1]
five_p_utr = five_p_utr[1:]
for cds in cds_cod:
exon_pos.append(cds)
if jun_exon != []:
exon_pos.append(jun_exon)
for utr5 in five_p_utr:
exon_pos.append(utr5)
return exon_pos
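# Worked example ('+' strand, hypothetical coordinates):
#   make_Exon_cod('+', [[1, 99]], [[100, 200], [300, 400]], [[401, 500]])
# merges the adjacent 5' UTR into the first CDS exon and the 3' UTR into the
# last one, returning [[1, 200], [300, 500]].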
|
ratschlab/oqtans_tools
|
mTIM/0.2/tools/helper.py
|
Python
|
mit
| 6,635
|
from django.conf.urls import patterns, url
from ..core import TOKEN_PATTERN
from . import views
urlpatterns = patterns(
'',
url(r'^%s/$' % (TOKEN_PATTERN,), views.details, name='details'),
url(r'^%s/payment/(?P<variant>[-\w]+)/$' % (TOKEN_PATTERN,),
views.start_payment, name='payment'),
url(r'^%s/cancel-payment/$' % (TOKEN_PATTERN,), views.cancel_payment,
name='cancel-payment'))
|
hongquan/saleor
|
saleor/order/urls.py
|
Python
|
bsd-3-clause
| 417
|
import yaml
import sys
from os import path
from pylaas_core.interface.core.service_interface import ServiceInterface
from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface
from pylaas_core.interface.technical.container_interface import ContainerInterface
class Container(ContainerInterface):
"""Container to handle DI
Attributes:
_definitions (dict) : list of container definitions
_singletons (dict) : list of class singletons
"""
def __init__(self) -> None:
self._definitions = {'configurations': {}, 'services': {}}
self._singletons = {}
def add_definitions(self, definitions):
"""Add definition to container
Args:
definitions (dict|string): list of definitions
Returns:
ContainerInterface
"""
if type(definitions) is not dict:
if path.exists(definitions):
                with open(definitions, 'r') as f:
                    # safe_load avoids executing arbitrary tags from the YAML file
                    definitions = yaml.safe_load(f)
            else:
                raise FileExistsError("Container definitions file '{}' does not exist".format(definitions))
if 'configurations' in definitions and definitions.get('configurations') is not None:
self._definitions['configurations'].update(definitions['configurations'])
if 'services' in definitions and definitions.get('services') is not None:
self._definitions['services'].update(definitions['services'])
return self
def get_definitions(self):
"""Get container definitions
Returns:
definitions (dict): list of definitions
"""
return self._definitions
def has(self, def_id):
"""Returns true if the container can return an entry for the given identifier.
Args:
def_id: Identifier of the entry to look for
Returns:
bool
"""
return def_id in self._definitions['services']
def get(self, def_id):
"""Finds an entry of the container by its identifier and returns it.
Args:
def_id: Identifier of the entry to look for.
Returns:
ServiceInterface: singleton services
"""
if not self.has(def_id):
raise RuntimeError("service id '{}' does not exists".format(def_id))
# check if service has been already created as a singleton
if def_id not in self._singletons:
parts = self._definitions['services'][def_id].split(".")
module_name = ".".join(parts[:-1])
class_name = parts[-1]
__import__(module_name)
service = getattr(sys.modules[module_name], class_name)
service = service()
if isinstance(service, ContainerConfigurableAwareInterface):
service.set_configs(self._definitions['configurations'][def_id])
self._singletons[def_id] = service
return self._singletons[def_id]
def clear(self, def_id) -> None:
"""Clear an entry of the container by its identifier
Args:
def_id: Identifier of the entry to look for.
Returns:
None
"""
if def_id not in self._singletons:
return
del self._singletons[def_id]
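# Minimal usage sketch (hypothetical module path and service id):
#   container = Container()
#   container.add_definitions({
#       'configurations': {'mailer': {'host': 'localhost'}},
#       'services': {'mailer': 'myapp.technical.mailer.Mailer'},
#   })
#   mailer = container.get('mailer')  # built once, then served as a singleton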
|
Agi-dev/pylaas_core
|
pylaas_core/technical/container.py
|
Python
|
mit
| 3,339
|
"""
You can use TeX to render all of your matplotlib text if the rc
parameter text.usetex is set. This works currently on the agg and ps
backends, and requires that you have tex and the other dependencies
described at http://matplotlib.org/users/usetex.html
properly installed on your system. The first time you run a script
you will see a lot of output from tex and associated tools. The next
time, the run may be silent, as a lot of the information is cached in
~/.tex.cache
"""
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.figure(1, figsize=(6, 4))
ax = plt.axes([0.1, 0.1, 0.8, 0.7])
t = np.linspace(0.0, 1.0, 100)
s = np.cos(4 * np.pi * t) + 2
plt.plot(t, s)
plt.xlabel(r'\textbf{time (s)}')
plt.ylabel(r'\textit{voltage (mV)}', fontsize=16)
plt.title(r"\TeX\ is Number $\displaystyle\sum_{n=1}^\infty"
r"\frac{-e^{i\pi}}{2^n}$!", fontsize=16, color='r')
plt.grid(True)
plt.savefig('tex_demo')
plt.show()
|
bundgus/python-playground
|
matplotlib-playground/examples/pylab_examples/tex_demo.py
|
Python
|
mit
| 996
|
import numpy as np
from scipy.optimize import minimize
from scipy import optimize
# array operations
class OrthAE(object):
def __init__(self, views, latent_spaces, x = None, knob = 0):
# x: input, column-wise
# y: output, column-wise
# h: hidden layer
# views and latent_spaces: specify number of neurons in each view and latent space respectively
# params: initial weights of the orthogonal autoencoders
self.x = x
self.knob = knob
self.views = views
self.latent_spaces = latent_spaces
# sets of weights
self.w1 = np.zeros((np.sum(latent_spaces), np.sum(views) + 1))
self.w2 = np.zeros((np.sum(views), np.sum(latent_spaces) + 1))
# add orthogonal constraints
start_idx_hidden = np.cumsum(self.latent_spaces)
start_idx_input = np.cumsum(np.append([0],self.views))
# public latent space has fully connection
self.w1[0:start_idx_hidden[0],:] = 1
# private latent spaces only connect to respective views
for i in range(latent_spaces.size-1):
self.w1[start_idx_hidden[i]: start_idx_hidden[i+1], start_idx_input[i]: start_idx_input[i+1]] = 1
self.w2[:, :-1] = np.transpose(self.w1[:, :-1])
self.w2[:, -1] = 1
        # index of effective weights
self.index1 = np.where( self.w1 != 0 )
self.index2 = np.where( self.w2 != 0 )
# # randomly initialize weights
# self.w1[self.index1] = randInitWeights(self.w1.shape)[self.index1]
# self.w2[self.index2] = randInitWeights(self.w2.shape)[self.index2]
# print self.w1
# print self.w2
def feedforward(self):
instance_count = self.x.shape[1]
bias = np.ones((1, instance_count))
# a's are with bias
self.a1 = np.concatenate(( self.x, bias ))
# before activation
self.h = np.dot(self.w1, self.a1)
self.a2 = np.concatenate(( activate(self.h), bias ))
self.y = np.dot(self.w2, self.a2)
self.a3 = activate(self.y)
return self.a3
def backpropogateT(self, weights, y):
self.weightSplit(weights)
self.feedforward()
return self.backpropogate(y)
def backpropogate(self, y):
# knob is to prevent over-fitting
instance_count = self.x.shape[1]
delta = (self.a3 - y)/instance_count * activateGradient(self.y)
self.w2_gradient = np.dot(delta, np.transpose(self.a2))
# regularization
self.w2_gradient[:, :-1] += np.dot(self.knob, self.w2[:, :-1])/instance_count
delta = np.dot(np.transpose(self.w2[:, :-1]), delta) * activateGradient(self.h)
self.w1_gradient = np.dot(delta, np.transpose(self.a1))
# regularization
self.w1_gradient[:, :-1] += np.dot(self.knob, self.w1[:, :-1])/instance_count
        return np.concatenate(( self.w1_gradient[self.index1].flatten(), self.w2_gradient[self.index2].flatten() ), axis=0)
def costT(self, weights, y):
self.weightSplit(weights)
self.feedforward()
return self.cost(y)
def cost(self, y):
instance_count = self.x.shape[1]
result = np.sum(np.square(self.w1)) + np.sum(np.square(self.w2))
result = np.sum(np.square(self.a3 - y)) + np.dot(self.knob, result)
return result/2/instance_count
def tuneT(self, y = None):
if y is None:
y=self.x
# randomly initialize weights
        w1 = randInitWeights(*self.w1.shape)[self.index1].flatten()
        w2 = randInitWeights(*self.w2.shape)[self.index2].flatten()
        res = minimize(self.costT, np.concatenate((w1, w2), axis=0), args=(y,), method='CG',\
            jac=self.backpropogateT, options={'disp': True, 'gtol': 1e-10, 'maxiter': 1e+1})
return res.x
# res = optimize.fmin_cg(self.cost, np.concatenate((w1, w2)), fprime=self.backpropogate,\
# args=self.x, gtol = 1e-10, disp = True)
# minimize(self.cost,np.concatenate((self.w1, self.w2), axis=1),args=(self.x),method='CG',jac=self.backpropogate)
def tune(self, y = None):
if y is None:
y=self.x
# randomly initialize weights
        w1 = randInitWeights(*self.w1.shape)[self.index1].flatten()
        w2 = randInitWeights(*self.w2.shape)[self.index2].flatten()
# set parameters for stochastic gradient descent
x = self.x
gtol = 1e-7
maxiter = 2100
        w = np.concatenate((w1, w2), axis=0)
step = 1.5e-8
# stochastic sampling
stochast_sample_count = 500
sample_index = np.arange(stochast_sample_count)
self.x = x[:, sample_index]
stoch_y = y[:, sample_index]
sample_index += stochast_sample_count
# start gradient descent
self.weightSplit(w)
self.feedforward()
iter = 0
while iter < maxiter:
weights_gradient = self.backpropogate(stoch_y)
if np.max(weights_gradient) < gtol:
break
w -= weights_gradient * step
self.x = x[:, sample_index]
stoch_y = y[:, sample_index]
sample_index = (sample_index + stochast_sample_count)%x.shape[1]
self.weightSplit(w)
self.feedforward()
iter += 1
print('iteration times:', iter)
return w
def weightSplit(self, weights):
# weights is expected to be a row vector(narray)
# weights = np.squeeze(weights)
split = self.index1[0].size
self.w1[self.index1] = weights[:split]
self.w2[self.index2] = weights[split:]
#
class OrthdAE(OrthAE):
def __init__(self, views, latent_spaces, x = None, knob = 0):
m, n = x.shape
###################################################
# trivial denoising
x = np.tile(x, [1, m])
self.denoise_x = x
for i in range(m):
x[i, i*n : i*n+n] = 0
###################################################
# evenly denoising
# x = np.tile(x, [1, 10])
# self.denoise_x = x
#
# step = int(m/10)
# for i in range(9):
# x[i*step : i*step+step, i*n : i*n+n] = 0
#
# x[9*step-m : , -n] = 0
###################################################
super(OrthdAE, self).__init__(views, latent_spaces, x, knob)
def tune(self, y = None):
if y is None:
y=self.denoise_x
return super(OrthdAE, self).tune(y)
# random initialize weights for AE
def randInitWeights(L_in, L_out):
epsilon = np.sqrt(6.0 / (L_in + L_out))
return np.random.rand(L_in, L_out) * 2 * epsilon - epsilon
def sig(x):
return 1.0 / (1.0 + np.exp(-x))
def sigGradient(x):
g = sig(x)
return g * (1.0 - g)
def tanh(x):
return 2.0 /(1.0 + np.exp(-2*x)) - 1
def tanhGradient(x):
g = tanh(x)
return 1.0 - np.square(g)
def MSGD(x, y, f):
pass
def activate(x):
return sig(x)
def activateGradient(x):
return sigGradient(x)
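# Minimal construction sketch (hypothetical sizes): two views of 5 and 3
# features, a shared latent space of 4 units plus a 2-unit private space per
# view, and 1000 column-wise training instances:
#   net = OrthAE(views=np.array([5, 3]), latent_spaces=np.array([4, 2, 2]),
#                x=np.random.rand(8, 1000), knob=0.1)
#   weights = net.tune()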
|
tengerye/orthogonal-denoising-autoencoder
|
python/orthAE.py
|
Python
|
apache-2.0
| 7,509
|
#!/usr/bin/env python
########################################
# Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 charactors
DIR='/Users/tang/climate/CMIP5/hist/AFRICA/'
VARIABLE='clt'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
EXPERIMENT='hist'
TIME='195001-200512'
#OBS='CRU'
OBS='MODIS'
K=0
NonData=['EC-EARTH-XXXX','CSIRO-Mk3-6-0-XXXXXX']
GCMs=['CanESM2',\
'CNRM-CM5',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'EC-EARTH',\
'EC-EARTH',\
'EC-EARTH',\
'EC-EARTH',\
'IPSL-CM5A-MR',\
'MIROC5',\
'HadGEM2-ES',\
'HadGEM2-ES',\
'MPI-ESM-LR',\
'MPI-ESM-LR',\
'NorESM1-M',\
'GFDL-ESM2M']
ENSEMBLE=['r1i1p1',\
'r3i1p1',\
'r3i1p1',\
'r2i1p1',\
'r2i1p1',\
'r2i1p1',\
'r2i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1']
COLOR=['darkred','darkblue','darkgreen','deeppink',\
'black','orangered','cyan','magenta']
# read CRU data:
if OBS == 'CRU':
oVar='cld'
obs1='~/climate/GLOBALDATA/OBSDATA/CRU/3.22/cru_ts3.22.2001.2005.cld.summer.mean.AFR.nc'
else:
# read MODIS data:
oVar='clt'
obs1='/Users/tang/climate/GLOBALDATA/OBSDATA/MODIS/clt_MODIS_L3_C5_200101-200512.ymonmean.NDJFMA.AFR.nc'
print obs1
obsfile1=IO.NetCDFFile(obs1,'r')
ObsVar=obsfile1.variables[oVar][0][:][:].copy()
for idx,Model in enumerate(GCMs):
if OBS == 'CRU':
infile1=DIR+EXPERIMENT+'/'+Model+'/'\
'clt_Amon_'+Model+'_historical_'+ENSEMBLE[idx]+\
'_200101-200512.nc.summer.mean.nc.remap.nc'
#GFDL-ESM2M/clt_Amon_GFDL-ESM2M_historical_r1i1p1_200101-200512.nc.summer.mean.nc.remap.nc
else:
infile1=DIR+\
'clt_Amon_'+Model+'_historical_'+ENSEMBLE[idx]+\
'_200101-200512.summer.mean.remap.AFR.nc'
print infile1
if Model in NonData:
infile1=obsfile1
VAR=infile1.variables[oVar][0,:,:].copy()
else:
infile1=IO.NetCDFFile(infile1,'r')
VAR=infile1.variables[VARIABLE][0,:,:].copy()
    print 'the variable clt ===============: '
print VAR
#open input files
# read the variables:
lat = infile1.variables['lat'][:].copy()
lon = infile1.variables['lon'][:].copy()
print np.shape(VAR)
print np.shape(ObsVar)
Bias=VAR-ObsVar
print np.shape(Bias)
#quit()
CoLev=10 #number of levels of colorbar
#=================================================== to plot
fig=plt.subplot(4,4,idx+1,aspect='equal')
print "============="
print idx; print Model
map=Basemap(projection='cyl',llcrnrlat=np.min(lat),urcrnrlat=np.max(lat),\
llcrnrlon=np.min(lon),urcrnrlon=np.max(lon),resolution='l')
map.drawcoastlines(linewidth=0.35)
map.drawparallels(np.arange(-90.,91.,15.),labels=[1,0,0,0],linewidth=0.35)
map.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],linewidth=0.35)
map.drawmapboundary()
x,y=map(lon,lat)
cmap=plt.get_cmap('bwr')
#cmap=plt.get_cmap('RdBu_r')
pic=map.pcolormesh(x,y,Bias,cmap=cmap)
plt.title(GCMs[idx])
#plt.figtext(0.68,0.73,timestamp, size="small")
#set the same colorbar range
pic.set_clim(vmin=-50,vmax=50)
plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
cax = plt.axes([0.85, 0.1, 0.01, 0.8])
plt.colorbar(cax=cax)
#if idx > 11:
#plt.colorbar(orientation='horizontal') # draw colorbar
#plt.legend(loc=2)
plt.suptitle('seasonal mean bias of Total Cloud Cover (%) vs MODIS',fontsize=18)
plt.show()
quit()
|
CopyChat/Plotting
|
Downscaling/bias.TCC.GCMs.py
|
Python
|
gpl-3.0
| 4,182
|
def extractShurimtranslationWordpressCom(item):
'''
Parser for 'shurimtranslation.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if '(manga)' in item['title'].lower():
return None
tagmap = [
("A Wild Last Boss Appeared", "A Wild Last Boss Appeared", "translated"),
("Tensei Shitara Slime Datta Ken", "Tensei Shitara Slime Datta Ken", "translated"),
("Konjiki no Moji Tsukai", "Konjiki no Moji Tsukai", "translated"),
("Owarimonogatari", "Owarimonogatari", "translated"),
("Monogatari Series", "Monogatari Series", "translated"),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('A Wild Last Boss Appeared: Chapter', 'A Wild Last Boss Appeared', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['title'].startswith('Owarimonogatari') and 'Completed' in item['title']:
return buildReleaseMessageWithType(item, "Owarimonogatari", vol, chp, frag=frag, postfix=postfix)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractShurimtranslationWordpressCom.py
|
Python
|
bsd-3-clause
| 1,576
|
#!/usr/bin/env python
"""
This is the main function to call for disambiguating between a human and
mouse BAM files that have alignments from the same source of fastq files.
It is part of the explant RNA/DNA-Seq workflow where an informatics
approach is used to distinguish between human and mouse RNA/DNA reads.
For reads that have aligned to both organisms, the functionality is based on
comparing quality scores from either Tophat or BWA. Read
name is used to collect all alignments for both mates (_1 and _2) and
compared between human and mouse alignments.
For tophat (default, can be changed using option -a), the sum of the flags XO,
NM and NH is evaluated and the lowest sum wins the paired end reads. For equal
scores, the reads are assigned as ambiguous.
The alternative algorithm (bwa) disambiguates (for aligned reads) by tags AS
(alignment score, higher better), NM (edit distance, lower better) and XS
(suboptimal alignment score, higher better), by first looking at AS, then
NM and finally XS.
Code by Miika Ahdesmaki July-August 2013, based on original Perl implementation
for Tophat by Zhongwu Lai.
"""
from __future__ import print_function
import sys, getopt, re, time, pysam
from array import array
from os import path, makedirs
from argparse import ArgumentParser
# "natural comparison" for strings
def nat_cmp(a, b):
convert = lambda text: int(text) if text.isdigit() else text # lambda function to convert text to int if number present
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] # split string to piecewise strings and string numbers
#return cmp(alphanum_key(a), alphanum_key(b)) # use internal cmp to compare piecewise strings and numbers
return (alphanum_key(a) > alphanum_key(b))-(alphanum_key(a) < alphanum_key(b))
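# Illustrative comparison: nat_cmp("read_2", "read_10") < 0 because the numeric
# chunks 2 and 10 are compared as integers rather than character by character.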
# read reads into a list object for as long as the read qname is constant (sorted file). Return the first read with new qname or None
def read_next_reads(fileobject, listobject):
qnamediff = False
while not qnamediff:
try:
myRead=fileobject.next()
except StopIteration:
#print("5")
return None # return None as the name of the new reads (i.e. no more new reads)
if nat_cmp(myRead.qname, listobject[0].qname)==0:
listobject.append(myRead)
else:
qnamediff = True
return myRead # this is the first read with a new qname
# disambiguate between two lists of reads
def disambiguate(humanlist, mouselist, disambalgo):
if disambalgo == 'tophat':
dv = 2**13 # a high quality score to replace missing quality scores (no real quality score should be this high)
sa = array('i',(dv for i in range(0,4))) # score array, with [human_1_QS, human_2_QS, mouse_1_QS, mouse_2_QS]
for read in humanlist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
QScore = read.opt('XO') + read.opt('NM') + read.opt('NH')
# directionality (_1 or _2)
d12 = 0 if 0x40&read.flag else 1
if sa[d12]>QScore:
sa[d12]=QScore # update to lowest (i.e. 'best') quality score
for read in mouselist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
QScore = read.opt('XO') + read.opt('NM') + read.opt('NH')
# directionality (_1 or _2)
d12 = 2 if 0x40&read.flag else 3
if sa[d12]>QScore:
sa[d12]=QScore # update to lowest (i.e. 'best') quality score
if min(sa[0:2])==min(sa[2:4]) and max(sa[0:2])==max(sa[2:4]): # ambiguous
return 0
elif min(sa[0:2]) < min(sa[2:4]) or min(sa[0:2]) == min(sa[2:4]) and max(sa[0:2]) < max(sa[2:4]):
# assign to human
return 1
else:
# assign to mouse
return -1
elif disambalgo == 'bwa':
        dv = -2**13 # default value, low (** is exponentiation; the original ^ is bitwise XOR in Python)
bwatags = ['AS','NM','XS'] # in order of importance (compared sequentially, not as a sum as for tophat)
bwatagsigns = [1,-1,1] # for AS and XS higher is better. for NM lower is better, thus multiply by -1
AS = list()
for x in range(0, len(bwatagsigns)):
AS.append(array('i',(dv for i in range(0,4)))) # alignment score array, with [human_1_Score, human_2_Score, mouse_1_Score, mouse_2_Score]
#
for read in humanlist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
# directionality (_1 or _2)
d12 = 0 if 0x40&read.flag else 1
for x in range(0, len(bwatagsigns)):
QScore = bwatagsigns[x]*read.opt(bwatags[x])
if AS[x][d12]<QScore:
AS[x][d12]=QScore # update to highest (i.e. 'best') quality score
#
for read in mouselist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
# directionality (_1 or _2)
d12 = 2 if 0x40&read.flag else 3
for x in range(0, len(bwatagsigns)):
QScore = bwatagsigns[x]*read.opt(bwatags[x])
if AS[x][d12]<QScore:
AS[x][d12]=QScore # update to highest (i.e. 'best') quality score
#
for x in range(0, len(bwatagsigns)):
if max(AS[x][0:2]) > max(AS[x][2:4]) or max(AS[x][0:2]) == max(AS[x][2:4]) and min(AS[x][0:2]) > min(AS[x][2:4]):
# assign to human
return 1
elif max(AS[x][0:2]) < max(AS[x][2:4]) or max(AS[x][0:2]) == max(AS[x][2:4]) and min(AS[x][0:2]) < min(AS[x][2:4]):
# assign to mouse
return -1
return 0 # ambiguous
else:
print("Not implemented yet")
sys.exit(2)
#code
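# Worked example (tophat mode, illustrative): with per-mate best score sums
# sa == [3, 5, 4, 5] (human_1, human_2, mouse_1, mouse_2; lower is better),
# the human minimum (3) beats the mouse minimum (4), so disambiguate()
# returns 1 and the read pair is assigned to human.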
def main(argv):
numhum = nummou = numamb = 0
starttime = time.clock()
# parse inputs
humanfilename = args.A
mousefilename = args.B
samplenameprefix = args.prefix
outputdir = args.output_dir
intermdir = args.intermediate_dir
disablesort = args.no_sort
disambalgo = args.aligner
supportedalgorithms = set(['tophat', 'bwa'])
# check existence of input BAM files
if not (file_exists(humanfilename) and file_exists(mousefilename)):
        sys.stderr.write("\nERROR in disambiguate.py: Two existing input BAM files "
                         "must be supplied as the positional arguments A and B\n")
sys.exit(2)
if len(samplenameprefix) < 1:
humanprefix = path.basename(humanfilename.replace(".bam",""))
mouseprefix = path.basename(mousefilename.replace(".bam",""))
else:
if samplenameprefix.endswith(".bam"):
            samplenameprefix = samplenameprefix[0:samplenameprefix.rfind(".bam")] # the above if is not strictly necessary for this to work
humanprefix = samplenameprefix
mouseprefix = samplenameprefix
samplenameprefix = None # clear variable
if disambalgo not in supportedalgorithms:
print(disambalgo+" is not a supported disambiguation scheme at the moment.")
sys.exit(2)
if disablesort:
humanfilenamesorted = humanfilename # assumed to be sorted externally...
mousefilenamesorted = mousefilename # assumed to be sorted externally...
else:
if not path.isdir(intermdir):
makedirs(intermdir)
humanfilenamesorted = path.join(intermdir,humanprefix+".human.namesorted.bam")
mousefilenamesorted = path.join(intermdir,mouseprefix+".mouse.namesorted.bam")
if not path.isfile(humanfilenamesorted):
pysam.sort("-n","-m","2000000000",humanfilename,humanfilenamesorted.replace(".bam",""))
if not path.isfile(mousefilenamesorted):
pysam.sort("-n","-m","2000000000",mousefilename,mousefilenamesorted.replace(".bam",""))
# read in human reads and form a dictionary
myHumanFile = pysam.Samfile(humanfilenamesorted, "rb" )
myMouseFile = pysam.Samfile(mousefilenamesorted, "rb" )
if not path.isdir(outputdir):
makedirs(outputdir)
myHumanUniqueFile = pysam.Samfile(path.join(outputdir, humanprefix+".human.bam"), "wb", template=myHumanFile)
myHumanAmbiguousFile = pysam.Samfile(path.join(outputdir, humanprefix+".ambiguousHuman.bam"), "wb", template=myHumanFile)
myMouseUniqueFile = pysam.Samfile(path.join(outputdir, mouseprefix+".mouse.bam"), "wb", template=myMouseFile)
myMouseAmbiguousFile = pysam.Samfile(path.join(outputdir, mouseprefix+".ambiguousMouse.bam"), "wb", template=myMouseFile)
summaryFile = open(path.join(outputdir,humanprefix+'_summary.txt'),'w')
#initialise
try:
nexthumread=myHumanFile.next()
nextmouread=myMouseFile.next()
except StopIteration:
print("No reads in one or either of the input files")
sys.exit(2)
EOFmouse = EOFhuman = False
prevHumID = '-+=RANDOMSTRING=+-'
prevMouID = '-+=RANDOMSTRING=+-'
while not EOFmouse&EOFhuman:
while not (nat_cmp(nexthumread.qname,nextmouread.qname) == 0):
# check order between current human and mouse qname (find a point where they're identical, i.e. in sync)
while nat_cmp(nexthumread.qname,nextmouread.qname) > 0 and not EOFmouse: # mouse is "behind" human, output to mouse disambiguous
myMouseUniqueFile.write(nextmouread)
if not nextmouread.qname == prevMouID:
nummou+=1 # increment mouse counter for unique only
prevMouID = nextmouread.qname
try:
nextmouread=myMouseFile.next()
except StopIteration:
EOFmouse=True
while nat_cmp(nexthumread.qname,nextmouread.qname) < 0 and not EOFhuman: # human is "behind" mouse, output to human disambiguous
myHumanUniqueFile.write(nexthumread)
if not nexthumread.qname == prevHumID:
numhum+=1 # increment human counter for unique only
prevHumID = nexthumread.qname
try:
nexthumread=myHumanFile.next()
except StopIteration:
EOFhuman=True
if EOFhuman or EOFmouse:
break
# at this point the read qnames are identical and/or we've reached EOF
humlist = list()
moulist = list()
if nat_cmp(nexthumread.qname,nextmouread.qname) == 0:
humlist.append(nexthumread)
nexthumread = read_next_reads(myHumanFile, humlist) # read more reads with same qname (the function modifies humlist directly)
if nexthumread == None:
EOFhuman = True
moulist.append(nextmouread)
nextmouread = read_next_reads(myMouseFile, moulist) # read more reads with same qname (the function modifies moulist directly)
if nextmouread == None:
EOFmouse = True
# perform comparison to check mouse, human or ambiguous
if len(moulist) > 0 and len(humlist) > 0:
myAmbiguousness = disambiguate(humlist, moulist, disambalgo)
if myAmbiguousness < 0: # mouse
nummou+=1 # increment mouse counter
for myRead in moulist:
myMouseUniqueFile.write(myRead)
elif myAmbiguousness > 0: # human
numhum+=1 # increment human counter
for myRead in humlist:
myHumanUniqueFile.write(myRead)
else: # ambiguous
numamb+=1 # increment ambiguous counter
for myRead in moulist:
myMouseAmbiguousFile.write(myRead)
for myRead in humlist:
myHumanAmbiguousFile.write(myRead)
if EOFhuman:
#flush the rest of the mouse reads
while not EOFmouse:
myMouseUniqueFile.write(nextmouread)
if not nextmouread.qname == prevMouID:
nummou+=1 # increment mouse counter for unique only
prevMouID = nextmouread.qname
try:
nextmouread=myMouseFile.next()
except StopIteration:
#print("3")
EOFmouse=True
if EOFmouse:
#flush the rest of the human reads
while not EOFhuman:
myHumanUniqueFile.write(nexthumread)
if not nexthumread.qname == prevHumID:
numhum+=1 # increment human counter for unique only
prevHumID = nexthumread.qname
try:
nexthumread=myHumanFile.next()
except StopIteration:
EOFhuman=True
summaryFile.write("sample\tunique human pairs\tunique mouse pairs\tambiguous pairs\n")
summaryFile.write(humanprefix+"\t"+str(numhum)+"\t"+str(nummou)+"\t"+str(numamb)+"\n")
summaryFile.close()
myHumanFile.close()
myMouseFile.close()
myHumanUniqueFile.close()
myHumanAmbiguousFile.close()
myMouseUniqueFile.close()
myMouseAmbiguousFile.close()
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
return path.exists(fname) and path.getsize(fname) > 0
if __name__ == "__main__":
description = """
disambiguate.py disambiguates between two organisms that have alignments
from the same source of fastq files. An example where this might be
useful is as part of an explant RNA/DNA-Seq workflow where an informatics
approach is used to distinguish between human and mouse RNA/DNA reads.
For reads that have aligned to both organisms, the functionality is based on
    comparing quality scores from either Tophat or BWA. Read
name is used to collect all alignments for both mates (_1 and _2) and
compared between human and mouse alignments.
For tophat (default, can be changed using option -a), the sum of the flags XO,
NM and NH is evaluated and the lowest sum wins the paired end reads. For equal
scores, the reads are assigned as ambiguous.
The alternative algorithm (bwa) disambiguates (for aligned reads) by tags AS
(alignment score, higher better), NM (edit distance, lower better) and XS
(suboptimal alignment score, higher better), by first looking at AS, then
NM and finally XS.
"""
parser = ArgumentParser(description=description)
parser.add_argument('A', help='Input BAM file A.')
parser.add_argument('B', help='Input BAM file B.')
parser.add_argument('-o', '--output-dir', default="disambres",
help='Output directory.')
parser.add_argument('-i', '--intermediate-dir', default="intermfiles",
help='Location to store intermediate files')
parser.add_argument('-d', '--no-sort', action='store_true', default=False,
help='Disable BAM file sorting. Use this option if the '
'files have already been name sorted.')
parser.add_argument('-s', '--prefix', default='',
help='A prefix (e.g. sample name) to use for the output '
'BAM files. If not provided, the first BAM file prefix '
'will be used. Do not include .bam in the prefix.')
parser.add_argument('-a', '--aligner', default='tophat',
choices=('tophat', 'bwa'),
help='The aligner used to generate these reads. Some '
'aligners set different flags.')
args = parser.parse_args()
main(args)
|
roryk/disambiguate
|
disambiguate.py
|
Python
|
mit
| 15,496
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.utils.datetime_safe import date
from django.db.models import Q
from open_municipio.people.models import *
from open_municipio.votations.admin import VotationsInline
from open_municipio.acts.models import Speech
from open_municipio.widgets import SortWidget
from sorl.thumbnail.admin import AdminImageMixin
from django.contrib.admin.util import unquote
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.functional import update_wrapper
from django.utils.html import strip_spaces_between_tags as short
from django.forms import ModelForm
from open_municipio.people.forms import SittingItemFormSet, SpeechInlineForm
from open_municipio.om.admin import LinkedTabularInline
from .filters import FilterActiveCharge
class PersonResourceInline(admin.TabularInline):
model = PersonResource
extra = 0
class PersonAdminWithResources(AdminImageMixin, admin.ModelAdmin):
list_display = ('id', '__unicode__', 'has_current_charges', 'birth_date', 'birth_location' )
list_display_links = ('__unicode__',)
search_fields = ['^first_name', '^last_name']
prepopulated_fields = {"slug": ("first_name","last_name","birth_date", "birth_location",)}
inlines = [PersonResourceInline, ]
class GroupResourceInline(admin.TabularInline):
model = GroupResource
extra = 0
class GroupChargeInline(admin.TabularInline):
model = GroupCharge
raw_id_fields = ('charge', )
extra = 1
class GroupIsMajorityInline(admin.TabularInline):
model = GroupIsMajority
extra = 0
class InCouncilNow(admin.SimpleListFilter):
title = _("In council now")
parameter_name = "in_council_now"
def lookups(self, request, model_admin):
return(
('1', _('Yes')),
('0', _('No')),
)
def queryset(self, request, queryset):
val = self.value()
today = date.today()
# note: groups with no related item (groupismajority) will be considered
# as not in council
if val == '1':
queryset = queryset.exclude(groupismajority__isnull=True).filter(Q(groupismajority__end_date__gt=today) | Q(groupismajority__end_date__isnull=True))
elif val == '0':
# the check for groups NOT in majority is more complex because
# we have to check that ALL related objects (groupismajority)
# have an end_date previous the current date
groups_in_council = Group.objects.exclude(groupismajority__isnull=True).filter(Q(groupismajority__end_date__gt=today) | Q(groupismajority__end_date__isnull=True))
queryset = queryset.exclude(pk__in=groups_in_council)
return queryset
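# Illustrative behaviour of InCouncilNow (hypothetical data): a Group whose only
# GroupIsMajority row has an end_date in the past is excluded by the '1' branch
# and kept by the '0' branch; a Group with no GroupIsMajority rows at all is
# likewise treated as not in council, matching the note above.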
class GroupAdminWithCharges(AdminImageMixin, admin.ModelAdmin):
prepopulated_fields = {"slug": ("name","start_date")}
list_display = ('name', 'acronym', 'is_majority_now', 'start_date', 'end_date', 'in_council_now')
inlines = [GroupResourceInline, GroupIsMajorityInline, GroupChargeInline]
search_fields = [ 'name', 'acronym', 'slug', 'charge_set__person__first_name', 'charge_set__person__last_name', ]
list_filter = [ InCouncilNow, 'groupismajority__is_majority' ]
ordering = [ 'name', 'acronym' ]
def is_majority_now(self, obj):
return obj.is_majority_now
is_majority_now.short_description = _("Is majority now")
def in_council_now(self, obj):
return obj.in_council_now
in_council_now.short_description = _("In council now")
class ChargeInline(admin.StackedInline):
raw_id_fields = ('person', )
fieldsets = (
(None, {
'fields': (('person', 'start_date', 'end_date'), )
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('description', 'end_reason')
})
)
extra = 1
class CompanyChargeInline(ChargeInline):
model = CompanyCharge
class AdministrationChargeInline(ChargeInline):
model = AdministrationCharge
class InstitutionResourceInline(admin.TabularInline):
model = InstitutionResource
extra = 0
class InstitutionChargeInline(ChargeInline):
model = InstitutionCharge
raw_id_fields = ('person', 'substitutes', 'substituted_by')
fieldsets = (
(None, {
'fields': (('person', 'op_charge_id', 'start_date', 'end_date'), )
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('description', 'end_reason', ('substitutes', 'substituted_by'))
})
)
class ResponsabilityInline(admin.TabularInline):
raw_id_fields = ('charge',)
extra = 0
class InstitutionResponsabilityInline(ResponsabilityInline):
model = InstitutionResponsability
fields = ('charge', 'charge_type', 'start_date', 'end_date', 'description')
class GroupResponsabilityInline(admin.TabularInline):
model = GroupResponsability
raw_id_fields = ('charge',)
extra = 0
fields = ('charge', 'charge_type', 'start_date', 'end_date', 'description')
class ChargeAdmin(admin.ModelAdmin):
pass
class CompanyChargeAdmin(ChargeAdmin):
model = CompanyCharge
raw_id_fields = ('person', 'company')
fieldsets = (
(None, {
'fields': (('person', 'company'),
('start_date', 'end_date', 'end_reason'),
'description')
}),
)
class AdministrationChargeAdmin(ChargeAdmin):
model = AdministrationCharge
raw_id_fields = ('person', 'office')
fieldsets = (
(None, {
'fields': (('person', 'office','charge_type',),
('start_date', 'end_date', 'end_reason'),
'description')
}),
)
class InstitutionChargeAdmin(ChargeAdmin):
model = InstitutionCharge
raw_id_fields = ('person', 'substitutes', 'substituted_by', 'original_charge')
search_fields = ['^person__first_name', '^person__last_name']
fieldsets = (
(None, {
'fields': (('person', 'op_charge_id', 'institution', 'original_charge'),
('start_date', 'end_date', 'end_reason'),
'description',
('substitutes', 'substituted_by'),
'can_vote')
}),
(_("Presences"), {
'fields': (('n_present_votations', 'n_absent_votations'), ('n_present_attendances', 'n_absent_attendances'))
}),
)
list_display = ('__unicode__', 'institution', 'start_date', 'end_date')
list_select_related = True
list_filter = ['institution__name', FilterActiveCharge, ]
inlines = [InstitutionResponsabilityInline]
class GroupChargeAdmin(admin.ModelAdmin):
raw_id_fields = ('charge', )
list_display = ('__unicode__', 'start_date', 'end_date')
list_select_related = True
list_filter = ['group']
inlines = [GroupResponsabilityInline]
search_fields = [ 'charge__person__first_name', 'charge__person__last_name', 'group__name', 'group__acronym', ]
class BodyAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class CompanyAdmin(BodyAdmin):
inlines = [CompanyChargeInline]
class OfficeAdmin(BodyAdmin):
inlines = [AdministrationChargeInline]
class InstitutionAdmin(BodyAdmin):
list_filter = ("institution_type", )
def get_urls(self):
from django.conf.urls.defaults import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
return patterns('',
url(r'^(.+)/move-(up)/$',
wrap(self.move_view),
name='%s_%s_move_up' % info),
url(r'^(.+)/move-(down)/$',
wrap(self.move_view),
name='%s_%s_move_down' % info),
) + super(InstitutionAdmin, self).get_urls()
def move_view(self, request, object_id, direction):
obj = get_object_or_404(self.model, pk=unquote(object_id))
if direction == 'up':
obj.move_up()
else:
obj.move_down()
return HttpResponseRedirect('../../')
link_html = short("""
<a href="../../%(app_label)s/%(module_name)s/%(object_id)s/move-up/">UP</a> |
<a href="../../%(app_label)s/%(module_name)s/%(object_id)s/move-down/">DOWN</a> (%(position)s)
""")
def move_up_down_links(self, obj):
return self.link_html % {
'app_label': self.model._meta.app_label,
'module_name': self.model._meta.module_name,
'object_id': obj.id,
'position': obj.position,
}
move_up_down_links.allow_tags = True
move_up_down_links.short_description = _(u'Move')
inlines = [InstitutionResourceInline, InstitutionChargeInline]
list_display = ('name', 'institution_type', 'move_up_down_links',)
class SpeechInline(LinkedTabularInline):
model = Speech
fields = ("author", "author_name_when_external", "title", \
"seq_order", "admin_link", )
raw_id_fields = ["author", ]
extra = 0
form = SpeechInlineForm
class SittingItemInline(LinkedTabularInline):
model = SittingItem
fields = ('title', 'related_act_set', 'item_type', 'seq_order','admin_link',)
raw_id_fields = ('related_act_set',)
extra = 0
form = SittingItemFormSet
class SittingItemAdmin(admin.ModelAdmin):
list_display = ( 'title','sitting', 'seq_order','item_type','num_related_acts')
ordering = ('-sitting__date','seq_order')
search_fields = ['^title', ]
list_filter = ['sitting__institution','item_type',]
raw_id_fields = ( 'sitting', 'related_act_set', )
inlines = [SpeechInline, ]
class SittingAdmin(admin.ModelAdmin):
inlines = [SittingItemInline, VotationsInline]
raw_id_fields = [ "minute", ]
admin.site.register(SittingItem, SittingItemAdmin)
admin.site.register(Sitting, SittingAdmin)
admin.site.register(Person, PersonAdminWithResources)
admin.site.register(Group, GroupAdminWithCharges)
admin.site.register(GroupCharge, GroupChargeAdmin)
admin.site.register(InstitutionCharge, InstitutionChargeAdmin)
admin.site.register(CompanyCharge, CompanyChargeAdmin)
admin.site.register(AdministrationCharge, AdministrationChargeAdmin)
admin.site.register(Institution, InstitutionAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Office, OfficeAdmin)
|
openpolis/open_municipio
|
open_municipio/people/admin.py
|
Python
|
agpl-3.0
| 10,689
|
# Generated by Django 3.1.1 on 2020-11-26 12:00
import uuid
import django.core.validators
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
import grandchallenge.core.validators
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="LookUpTable",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(max_length=255, verbose_name="title"),
),
(
"description",
models.TextField(
blank=True, null=True, verbose_name="description"
),
),
(
"slug",
django_extensions.db.fields.AutoSlugField(
blank=True,
editable=False,
populate_from="title",
verbose_name="slug",
),
),
(
"color",
models.TextField(
validators=[
django.core.validators.RegexValidator(
regex="^\\[(?:((?: ?-?\\d*(?:\\.\\d+)? ){3}(?:-?\\d*(?:\\.\\d+)?)) ?, ?)+((?:-?\\d*(?:\\.\\d+)? ){3}(?:\\d*(:?\\.\\d+)? ?))\\]$"
)
]
),
),
(
"alpha",
models.TextField(
validators=[
django.core.validators.RegexValidator(
regex="^\\[(?:((?: ?-?\\d*(?:\\.\\d+)? ){1}(?:-?\\d*(?:\\.\\d+)?)) ?, ?)+((?:-?\\d*(?:\\.\\d+)? ){1}(?:\\d*(:?\\.\\d+)? ?))\\]$"
)
]
),
),
(
"color_invert",
models.TextField(
blank=True,
validators=[
django.core.validators.RegexValidator(
regex="^\\[(?:((?: ?-?\\d*(?:\\.\\d+)? ){3}(?:-?\\d*(?:\\.\\d+)?)) ?, ?)+((?:-?\\d*(?:\\.\\d+)? ){3}(?:\\d*(:?\\.\\d+)? ?))\\]$"
)
],
),
),
(
"alpha_invert",
models.TextField(
blank=True,
validators=[
django.core.validators.RegexValidator(
regex="^\\[(?:((?: ?-?\\d*(?:\\.\\d+)? ){1}(?:-?\\d*(?:\\.\\d+)?)) ?, ?)+((?:-?\\d*(?:\\.\\d+)? ){1}(?:\\d*(:?\\.\\d+)? ?))\\]$"
)
],
),
),
("range_min", models.SmallIntegerField(default=0)),
("range_max", models.SmallIntegerField(default=4095)),
("relative", models.BooleanField(default=False)),
(
"color_interpolation",
models.CharField(
choices=[
("RGB", "RGB"),
("HLS", "HLS"),
("HLSpos", "HLS Positive"),
("HLSneg", "HLS Negative"),
("Constant", "Constant"),
],
default="RGB",
max_length=8,
),
),
(
"color_interpolation_invert",
models.CharField(
choices=[
("RGB", "RGB"),
("HLS", "HLS"),
("HLSpos", "HLS Positive"),
("HLSneg", "HLS Negative"),
("Constant", "Constant"),
],
default="RGB",
max_length=8,
),
),
],
options={"ordering": ("title",), "abstract": False},
),
migrations.CreateModel(
name="WindowPreset",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(max_length=255, verbose_name="title"),
),
(
"description",
models.TextField(
blank=True, null=True, verbose_name="description"
),
),
(
"slug",
django_extensions.db.fields.AutoSlugField(
blank=True,
editable=False,
populate_from="title",
verbose_name="slug",
),
),
(
"width",
models.PositiveIntegerField(
validators=[
django.core.validators.MinValueValidator(
limit_value=1
)
]
),
),
("center", models.IntegerField()),
],
options={"ordering": ("title",), "abstract": False},
),
migrations.CreateModel(
name="WorkstationConfig",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
(
"title",
models.CharField(max_length=255, verbose_name="title"),
),
(
"description",
models.TextField(
blank=True, null=True, verbose_name="description"
),
),
(
"slug",
django_extensions.db.fields.AutoSlugField(
blank=True,
editable=False,
populate_from="title",
verbose_name="slug",
),
),
(
"default_slab_thickness_mm",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(
limit_value=0.01
)
],
),
),
(
"default_slab_render_method",
models.CharField(
blank=True,
choices=[
("MAX", "Maximum"),
("MIN", "Minimum"),
("AVG", "Average"),
],
max_length=3,
),
),
(
"default_orientation",
models.CharField(
blank=True,
choices=[
("A", "Axial"),
("C", "Coronal"),
("S", "Sagittal"),
],
max_length=1,
),
),
(
"default_overlay_interpolation",
models.CharField(
blank=True,
choices=[
("NN", "NearestNeighbor"),
("TL", "Trilinear"),
],
default="NN",
max_length=2,
),
),
("show_image_info_plugin", models.BooleanField(default=True)),
("show_display_plugin", models.BooleanField(default=True)),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"default_overlay_lut",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="workstation_configs.lookuptable",
),
),
(
"default_window_preset",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="workstation_default_window_presets",
to="workstation_configs.windowpreset",
),
),
(
"window_presets",
models.ManyToManyField(
blank=True,
related_name="workstation_window_presets",
to="workstation_configs.WindowPreset",
),
),
(
"default_overlay_alpha",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=3,
null=True,
validators=[
django.core.validators.MinValueValidator(
limit_value=0.0
),
django.core.validators.MaxValueValidator(
limit_value=1.0
),
],
),
),
(
"default_zoom_scale",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(
limit_value=0.01
)
],
),
),
("show_flip_tool", models.BooleanField(default=True)),
("show_invert_tool", models.BooleanField(default=True)),
("show_reset_tool", models.BooleanField(default=True)),
("show_window_level_tool", models.BooleanField(default=True)),
(
"overlay_segments",
models.JSONField(
blank=True,
default=list,
validators=[
grandchallenge.core.validators.JSONValidator(
schema={
"$id": "http://example.com/example.json",
"$schema": "http://json-schema.org/draft-06/schema",
"description": "Define the overlay segments for the LUT.",
"items": {
"$id": "#/items",
"additionalProperties": False,
"default": {},
"description": "Defines what each segment of the LUT represents.",
"examples": [
{
"metric_template": "{{metrics.volumes[0]}} mm³",
"name": "Metastasis",
"visible": True,
"voxel_value": 1,
}
],
"properties": {
"metric_template": {
"$id": "#/items/properties/metric_template",
"default": "",
"description": "The jinja template to determine which property from the results.json should be used as the label text.",
"examples": [
"{{metrics.volumes[0]}} mm³"
],
"title": "The Metric Template Schema",
"type": "string",
},
"name": {
"$id": "#/items/properties/name",
"default": "",
"description": "What this segment should be called.",
"examples": ["Metastasis"],
"title": "The Name Schema",
"type": "string",
},
"visible": {
"$id": "#/items/properties/visible",
"default": True,
"description": "Whether this segment is visible by default.",
"examples": [True],
"title": "The Visible Schema",
"type": "boolean",
},
"voxel_value": {
"$id": "#/items/properties/voxel_value",
"default": 0,
"description": "The value of the LUT for this segment.",
"examples": [1],
"title": "The Voxel Value Schema",
"type": "integer",
},
},
"required": [
"voxel_value",
"name",
"visible",
],
"title": "The Segment Schema",
"type": "object",
},
"title": "The Overlay Segments Schema",
"type": "array",
}
)
],
),
),
(
"key_bindings",
models.JSONField(
blank=True,
default=list,
validators=[
grandchallenge.core.validators.JSONValidator(
schema={
"$id": "http://example.com/example.json",
"$schema": "http://json-schema.org/draft-06/schema",
"description": "Define the key bindings for the workstation.",
"items": {
"$id": "#/items",
"additionalProperties": False,
"default": {},
"description": "Defines a key binding for a command.",
"examples": [
{
"command": "editor.action.deleteLines",
"key": "ctrl+shift+k",
"when": "editorTextFocus",
}
],
"properties": {
"command": {
"$id": "#/items/properties/command",
"default": "",
"description": "The command called by this binding.",
"examples": [
"editor.action.deleteLines"
],
"title": "The Command Schema",
"type": "string",
},
"key": {
"$id": "#/items/properties/key",
"default": "",
"description": "The keys used for this binding.",
"examples": ["ctrl+shift+k"],
"title": "The Key Schema",
"type": "string",
},
"when": {
"$id": "#/items/properties/when",
"default": "",
"description": "The condition that must be met for this command to be called.",
"examples": [
"editorTextFocus"
],
"title": "The When Schema",
"type": "string",
},
},
"required": ["key", "command"],
"title": "The Key Binding Schema",
"type": "object",
},
"title": "The Key Bindings Schema",
"type": "array",
}
)
],
),
),
],
options={"ordering": ("created", "creator"), "abstract": False},
),
]
|
comic/comic-django
|
app/grandchallenge/workstation_configs/migrations/0001_squashed_0008_auto_20201001_0758.py
|
Python
|
apache-2.0
| 20,933
|
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url('^login/cancelled/$', views.login_cancelled,
name='socialaccount_login_cancelled'),
url('^login/error/$', views.login_error, name='socialaccount_login_error'),
url('^(?P<provider>.*)/signup/$', views.social_login, name='socialaccount_signup'),
url('^(?P<provider>.*)/callback/$', views.social_callback, name='socialaccount_callback'),
url('^connections/$', views.connections, name='socialaccount_connections'),
)
|
houssemFat/bloodOn
|
bloodon/accounts/social/urls.py
|
Python
|
mit
| 643
|
"""
Django settings for odyseja project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# _*_ coding: utf-8 _*_
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')zmc27xj3ta&b)0@hj#ye=z#7viacll@rciwply&pdz4$h#@8f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
DEFAULT_CHARSET = 'utf-8'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'oceny',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'odyseja.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [(os.path.join(os.path.dirname(__file__),'templates'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'odyseja.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'odyseja',
'USER': 'dawid',
'PASSWORD': 'placuszki',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = r'D:\Studia\Automatyka i Robotyka\Bazy Danych\projekt\django\odyseja\oceny\media'
|
superdyzio/PWR-Stuff
|
AIR-ARR/Bazy Danych/odyseja/odyseja/settings.py
|
Python
|
mit
| 2,937
|
# coding: utf-8
from sqlalchemy import Column, DateTime, ForeignKey, Integer, SmallInteger, String, text, Enum
from sqlalchemy.orm import relationship
from Houdini.Data import Base
metadata = Base.metadata
class RedemptionAward(Base):
__tablename__ = 'redemption_award'
CodeID = Column(ForeignKey(u'redemption_code.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True,
nullable=False, server_default=text("0"))
Award = Column(SmallInteger, primary_key=True, nullable=False, server_default=text("1"))
redemption_code = relationship(u'RedemptionCode')
class RedemptionCode(Base):
__tablename__ = 'redemption_code'
ID = Column(Integer, primary_key=True, unique=True)
Code = Column(String(16), nullable=False, unique=True, server_default=text("''"))
Type = Column(Enum(u'DS', u'BLANKET', u'CARD', u'GOLDEN', u'CAMPAIGN'), nullable=False,
server_default=text("'BLANKET'"))
Coins = Column(Integer, nullable=False, server_default=text("0"))
Expires = Column(DateTime)
penguin = relationship(u'Penguin', secondary='penguin_redemption')
class PenguinRedemption(Base):
__tablename__ = 'penguin_redemption'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True,
nullable=False, server_default=text("0"))
CodeID = Column(ForeignKey(u'redemption_code.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True,
nullable=False, index=True, server_default=text("0"))
|
TunnelBlanket/Houdini
|
Houdini/Data/Redemption.py
|
Python
|
mit
| 1,567
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matrix functions contains iterative methods for M^p."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
def matrix_square_root(mat_a, mat_a_size, iter_count=100, ridge_epsilon=1e-4):
"""Iterative method to get matrix square root.
Stable iterations for the matrix square root, Nicholas J. Higham
Page 231, Eq 2.6b
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.6.8799&rep=rep1&type=pdf
Args:
    mat_a: the symmetric PSD matrix whose matrix square root is to be computed
mat_a_size: size of mat_a.
iter_count: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
Returns:
mat_a^0.5
"""
def _iter_condition(i, unused_mat_y, unused_old_mat_y, unused_mat_z,
unused_old_mat_z, err, old_err):
    # This method requires that we check for divergence at every step.
return math_ops.logical_and(i < iter_count, err < old_err)
def _iter_body(i, mat_y, unused_old_mat_y, mat_z, unused_old_mat_z, err,
unused_old_err):
current_iterate = 0.5 * (3.0 * identity - math_ops.matmul(mat_z, mat_y))
current_mat_y = math_ops.matmul(mat_y, current_iterate)
current_mat_z = math_ops.matmul(current_iterate, mat_z)
# Compute the error in approximation.
mat_sqrt_a = current_mat_y * math_ops.sqrt(norm)
mat_a_approx = math_ops.matmul(mat_sqrt_a, mat_sqrt_a)
residual = mat_a - mat_a_approx
current_err = math_ops.sqrt(math_ops.reduce_sum(residual * residual)) / norm
return i + 1, current_mat_y, mat_y, current_mat_z, mat_z, current_err, err
identity = linalg_ops.eye(math_ops.to_int32(mat_a_size))
mat_a = mat_a + ridge_epsilon * identity
norm = math_ops.sqrt(math_ops.reduce_sum(mat_a * mat_a))
mat_init_y = mat_a / norm
mat_init_z = identity
init_err = norm
_, _, prev_mat_y, _, _, _, _ = control_flow_ops.while_loop(
_iter_condition, _iter_body, [
0, mat_init_y, mat_init_y, mat_init_z, mat_init_z, init_err,
init_err + 1.0
])
return prev_mat_y * math_ops.sqrt(norm)
def matrix_inverse_pth_root(mat_g,
mat_g_size,
alpha,
iter_count=100,
epsilon=1e-6,
ridge_epsilon=1e-6):
"""Computes mat_g^alpha, where alpha = -1/p, p a positive integer.
We use an iterative Schur-Newton method from equation 3.2 on page 9 of:
A Schur-Newton Method for the Matrix p-th Root and its Inverse
by Chun-Hua Guo and Nicholas J. Higham
SIAM Journal on Matrix Analysis and Applications,
2006, Vol. 28, No. 3 : pp. 788-804
https://pdfs.semanticscholar.org/0abe/7f77433cf5908bfe2b79aa91af881da83858.pdf
Args:
    mat_g: the symmetric PSD matrix whose power is to be computed
mat_g_size: size of mat_g.
alpha: exponent, must be -1/p for p a positive integer.
iter_count: Maximum number of iterations.
epsilon: accuracy indicator, useful for early termination.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
Returns:
mat_g^alpha
"""
identity = linalg_ops.eye(math_ops.to_int32(mat_g_size))
def mat_power(mat_m, p):
"""Computes mat_m^p, for p a positive integer.
Power p is known at graph compile time, so no need for loop and cond.
Args:
mat_m: a square matrix
p: a positive integer
Returns:
mat_m^p
"""
assert p == int(p) and p > 0
power = None
while p > 0:
if p % 2 == 1:
power = math_ops.matmul(mat_m, power) if power is not None else mat_m
p //= 2
mat_m = math_ops.matmul(mat_m, mat_m)
return power
def _iter_condition(i, mat_m, _):
return math_ops.logical_and(
i < iter_count,
math_ops.reduce_max(math_ops.abs(mat_m - identity)) > epsilon)
def _iter_body(i, mat_m, mat_x):
mat_m_i = (1 - alpha) * identity + alpha * mat_m
return (i + 1, math_ops.matmul(mat_power(mat_m_i, -1.0 / alpha), mat_m),
math_ops.matmul(mat_x, mat_m_i))
if mat_g_size == 1:
mat_h = math_ops.pow(mat_g + ridge_epsilon, alpha)
else:
damped_mat_g = mat_g + ridge_epsilon * identity
z = (1 - 1 / alpha) / (2 * linalg_ops.norm(damped_mat_g))
# The best value for z is
# (1 - 1/alpha) * (c_max^{-alpha} - c_min^{-alpha}) /
# (c_max^{1-alpha} - c_min^{1-alpha})
# where c_max and c_min are the largest and smallest singular values of
# damped_mat_g.
# The above estimate assumes that c_max > c_min * 2^p. (p = -1/alpha)
# Can replace above line by the one below, but it is less accurate,
# hence needs more iterations to converge.
# z = (1 - 1/alpha) / math_ops.trace(damped_mat_g)
# If we want the method to always converge, use z = 1 / norm(damped_mat_g)
# or z = 1 / math_ops.trace(damped_mat_g), but these can result in many
# extra iterations.
_, _, mat_h = control_flow_ops.while_loop(
_iter_condition, _iter_body,
[0, damped_mat_g * z, identity * math_ops.pow(z, -alpha)])
return mat_h
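# Hedged usage sketch (added for illustration; not part of the original file).
# It assumes a TensorFlow 1.x graph/session environment, matching the
# contrib-era imports above; the matrix and exponent are arbitrary demo values.
if __name__ == "__main__":
  import tensorflow as tf
  mat = tf.constant([[4.0, 0.0], [0.0, 9.0]])
  # alpha = -1/2, so this computes mat^{-1/2}; expect roughly diag(0.5, 1/3).
  inv_sqrt = matrix_inverse_pth_root(mat, mat_g_size=2, alpha=-0.5)
  with tf.Session() as sess:
    print(sess.run(inv_sqrt))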
|
kobejean/tensorflow
|
tensorflow/contrib/opt/python/training/matrix_functions.py
|
Python
|
apache-2.0
| 5,984
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-17 20:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0013_payment_failed'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='failed',
),
migrations.AddField(
model_name='payment',
name='status',
field=models.CharField(choices=[(b'INIT', b'Initiated'), (b'PAID', b'Paid'), (b'FAIL', b'Failed')], default=b'INIT', max_length=4),
),
]
|
katyaeka2710/python2017005
|
payment/migrations/0014_auto_20161117_2011.py
|
Python
|
mit
| 643
|
#!/usr/bin/env python3
from source.modules._generic_module import *
class Module(GenericModule):
def __init__(self):
self.authors = [
Author(name='Vitezslav Grygar', email='vitezslav.grygar@gmail.com', web='https://badsulog.blogspot.com'),
]
self.name = 'crypto.language'
self.short_description = 'Attempts to reveal string\'s language.'
self.references = [
]
self.date = '2016-02-03'
self.license = 'GNU GPLv2'
self.version = '2.0'
self.tags = [
'language',
]
self.description = """
 This module compares strings located in the Temporary table of the Dictionary database.
If no language is specified, all languages will be tested.
You can upload words into the database using:
1. db['dict'].add_tmp_words(self, tag, words) method
2. crypto.words.upload module
"""
self.dependencies = {
}
self.changelog = """
2.0: Words analyzed in database, any character except space is valid
Threading support removed
1.1: Threading support (not quite efficient)
Word splitting by punct, numbers and whitespace
1.0: Language recognition
Words A-Za-z'0-9
"""
self.reset_parameters()
def reset_parameters(self):
self.parameters = {
'SILENT': Parameter(value='no', mandatory=True, description='Suppress the output'),
'TAGS': Parameter(mandatory=False, description='Space-separated tags to analyze (empty = all)'),
'DICTS': Parameter(mandatory=False, description='Space-separated dictionary names (empty = all)'),
'THRESHOLD': Parameter(value='0.4', mandatory=True, description='Threshold value'),
'MATCHONLY': Parameter(value='no', mandatory=True, description='Print only positive matches'),
}
def check(self, silent=None):
if silent is None:
silent = positive(self.parameters['SILENT'].value)
dicts = self.parameters['DICTS'].value.split()
tags = self.parameters['TAGS'].value.split()
result = CHECK_SUCCESS
# DICTS EXIST?
dicts_present = db['dict'].get_dictionaries()
if dicts_present == DB_ERROR:
if not silent:
log.err('Cannot get list of dictionaries.')
result = CHECK_FAILURE
for d in dicts:
if d not in dicts_present:
if not silent:
log.err('\'%s\' dictionary is not in the database.' % (d))
result = CHECK_FAILURE
# TAGS EXIST?
tags_present = db['dict'].get_tmp_tags()
if tags_present == DB_ERROR:
if not silent:
log.err('Cannot get list of tags from the database.')
result = CHECK_FAILURE
for t in tags:
if t not in tags_present:
if not silent:
log.err('\'%s\' tag is not in the database.' % (t))
result = CHECK_FAILURE
return result
def run(self):
silent = positive(self.parameters['SILENT'].value)
        tags = self.parameters['TAGS'].value.split() if self.parameters['TAGS'].value != '' \
               else db['dict'].get_tmp_tags()
dicts = self.parameters['DICTS'].value.split()
matchonly = positive(self.parameters['MATCHONLY'].value)
if len(dicts) == 0:
dicts = db['dict'].get_dictionaries()
if dicts == DB_ERROR:
log.err('Cannot get list of dictionaries.')
return None
for d in dicts:
matches = db['dict'].get_match_percent(d, tags)
if matches == DB_ERROR:
log.err('Cannot get results for \'%s\' dictionary' % (d))
continue
for match in matches:
if not silent:
if match[2]<float(self.parameters['THRESHOLD'].value):
if not matchonly:
log.info('%s analysis of %s: %.2f %%' % (match[1], match[0], match[2]*100))
else:
log.ok('%s analysis of %s: %.2f %%' % (match[1], match[0], match[2]*100))
return None
lib.module_objects.append(Module())
|
lightfaith/locasploit
|
source/modules/crypto_language.py
|
Python
|
gpl-2.0
| 4,349
|
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
class DatosPyThemePlugin(plugins.SingletonPlugin):
'''An example theme plugin.
'''
# Declare that this class implements IConfigurer.
plugins.implements(plugins.IConfigurer)
def update_config(self, config):
# Add this plugin's templates dir to CKAN's extra_template_paths, so
# that CKAN will use this plugin's custom templates.
# 'templates' is the path to the templates dir, relative to this
# plugin.py file.
toolkit.add_template_directory(config, 'templates')
toolkit.add_public_directory(config, 'public')
toolkit.add_resource('fanstatic', 'datospy_theme')
|
datospy/ckanext-datospy_theme
|
ckanext/datospy_theme/plugin.py
|
Python
|
mit
| 700
|
__author__ = 'mcharbit'
import pickle
import os
import sys
import subprocess
import urllib2
import logging
from time import sleep, time
from datetime import date
auto_run_config_file_name = "auto_run_config.txt"
auto_run_config = os.path.join(os.path.dirname(sys.argv[0]), auto_run_config_file_name)
last_auto_run_execution = "last_auto_run_execution.txt"
last_auto_run_execution = os.path.join(os.path.dirname(sys.argv[0]), last_auto_run_execution)
script_file = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), os.path.pardir, 'parser.py'))
default_script_log = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), os.path.pardir, 'parser_log.txt'))
run_config = dict()
def internet_on():
    try:
        urllib2.urlopen('http://www.google.com', timeout=1)
        return True
    except urllib2.URLError:
        return False
def main():
if os.path.exists(auto_run_config):
try:
with open(auto_run_config, 'rb') as fd:
run_config = pickle.load(fd)
except:
logging.error("problem while opening {}".format(auto_run_config))
sys.exit()
execute_script = False
if run_config.has_key('mode') and run_config['mode'] in ['daily', 'weekly']:
if os.path.exists(last_auto_run_execution):
with open(last_auto_run_execution, 'r') as fd:
try:
last_execution = date.fromtimestamp(float(fd.read()))
except:
# Assume not executed before
execute_script = True
else:
today = date.today()
difference = (today - last_execution).days
if (difference >= 7 and run_config['mode'] == 'weekly') \
or (difference >= 1 and run_config['mode'] == 'daily'):
execute_script = True
else:
# Assume not executed before
execute_script = True
else:
# Assume at every system_startup. Continue execution
execute_script = True
if execute_script:
python_bin = 'python'
if run_config.has_key('virtual_env'):
vpython = run_config['virtual_env']
if os.path.exists(vpython):
python_bin = vpython
if run_config.has_key('log_file'):
script_log = os.path.abspath(run_config['log_file'])
else:
script_log = default_script_log
if run_config.has_key('file_config'):
config_file = run_config['file_config']
if os.path.exists(config_file) and os.path.exists(script_file):
network_ok = False
for i in range(10):
network_ok = internet_on()
if network_ok:
break
else:
sleep(3)
if network_ok:
subprocess.call([python_bin, script_file, config_file, '-l', script_log, '-b'])
with open(last_auto_run_execution, 'w') as fd:
fd.write(str(time()))
else:
logging.error("Network not available after max number of attempts")
sys.exit()
else:
logging.error("{} or {} are missing".format(config_file, script_file))
sys.exit()
else:
logging.error("{} has an invalid format".format(auto_run_config))
sys.exit()
else:
sys.exit()
else:
logging.error("{} is not a valid path".format(auto_run_config))
sys.exit()
if __name__ == "__main__":
main()
|
atadlate/movie_torrent_parser
|
torrent_parser/scripts/auto_run.py
|
Python
|
gpl-3.0
| 4,041
|
# -*- coding: utf-8 -*-
__doc__ = """
WebSocket within CherryPy is a tricky bit since CherryPy is
a threaded server which would choke quickly if each thread
of the server were kept attached to a long living connection
that WebSocket expects.
In order to work around this constraint, we take some advantage
of some internals of CherryPy as well as the introspection
Python provides.
Basically, when the WebSocket handshake is complete, we take over
the socket and let CherryPy take back the thread that was
associated with the upgrade request.
These operations require a bit of work at various levels of
the CherryPy framework but this module takes care of them
and from your application's perspective, this is abstracted.
Here are the various utilities provided by this module:
* WebSocketTool: The tool is in charge to perform the
HTTP upgrade and detach the socket from
CherryPy. It runs at various hook points of the
request's processing. Enable that tool at
any path you wish to handle as a WebSocket
handler.
 * WebSocketPlugin: The plugin tracks the instantiated web socket handlers.
                    It also cleans out websocket handlers whose connections
have been closed down. The websocket connection then
runs in its own thread that this plugin manages.
Simple usage example:
.. code-block:: python
:linenos:
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import EchoWebSocket
cherrypy.config.update({'server.socket_port': 9000})
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
class Root(object):
@cherrypy.expose
def index(self):
return 'some HTML with a websocket javascript connection'
@cherrypy.expose
def ws(self):
pass
cherrypy.quickstart(Root(), '/', config={'/ws': {'tools.websocket.on': True,
'tools.websocket.handler_cls': EchoWebSocket}})
Note that you can set the handler class on a per-path basis,
meaning you could also dynamically change the class based
on other environmental settings (e.g. whether the user is authenticated).
"""
import base64
from hashlib import sha1
import inspect
import threading
import cherrypy
from cherrypy import Tool
from cherrypy.process import plugins
from cherrypy.wsgiserver import HTTPConnection, HTTPRequest, KnownLengthRFile
from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError
from ws4py.websocket import WebSocket
from ws4py.compat import py3k, get_connection, detach_connection
from ws4py.manager import WebSocketManager
__all__ = ['WebSocketTool', 'WebSocketPlugin']
class WebSocketTool(Tool):
def __init__(self):
Tool.__init__(self, 'before_request_body', self.upgrade)
def _setup(self):
conf = self._merged_args()
hooks = cherrypy.serving.request.hooks
p = conf.pop("priority", getattr(self.callable, "priority",
self._priority))
hooks.attach(self._point, self.callable, priority=p, **conf)
hooks.attach('before_finalize', self.complete,
priority=p)
hooks.attach('on_end_resource', self.cleanup_headers,
priority=70)
hooks.attach('on_end_request', self.start_handler,
priority=70)
def upgrade(self, protocols=None, extensions=None, version=WS_VERSION,
handler_cls=WebSocket, heartbeat_freq=None):
"""
Performs the upgrade of the connection to the WebSocket
protocol.
The provided protocols may be a list of WebSocket
protocols supported by the instance of the tool.
When no list is provided and no protocol is either
during the upgrade, then the protocol parameter is
not taken into account. On the other hand,
if the protocol from the handshake isn't part
        of the provided list, the upgrade fails immediately.
"""
request = cherrypy.serving.request
request.process_request_body = False
ws_protocols = None
ws_location = None
ws_version = version
ws_key = None
ws_extensions = []
if request.method != 'GET':
raise HandshakeError('HTTP method must be a GET')
for key, expected_value in [('Upgrade', 'websocket'),
('Connection', 'upgrade')]:
actual_value = request.headers.get(key, '').lower()
if not actual_value:
raise HandshakeError('Header %s is not defined' % key)
if expected_value not in actual_value:
raise HandshakeError('Illegal value for header %s: %s' %
(key, actual_value))
version = request.headers.get('Sec-WebSocket-Version')
supported_versions = ', '.join([str(v) for v in ws_version])
version_is_valid = False
if version:
try: version = int(version)
            except ValueError: pass
else: version_is_valid = version in ws_version
if not version_is_valid:
cherrypy.response.headers['Sec-WebSocket-Version'] = supported_versions
raise HandshakeError('Unhandled or missing WebSocket version')
key = request.headers.get('Sec-WebSocket-Key')
if key:
ws_key = base64.b64decode(key.encode('utf-8'))
if len(ws_key) != 16:
raise HandshakeError("WebSocket key's length is invalid")
protocols = protocols or []
subprotocols = request.headers.get('Sec-WebSocket-Protocol')
if subprotocols:
ws_protocols = []
for s in subprotocols.split(','):
s = s.strip()
if s in protocols:
ws_protocols.append(s)
exts = extensions or []
extensions = request.headers.get('Sec-WebSocket-Extensions')
if extensions:
for ext in extensions.split(','):
ext = ext.strip()
if ext in exts:
ws_extensions.append(ext)
location = []
include_port = False
if request.scheme == "https":
location.append("wss://")
include_port = request.local.port != 443
else:
location.append("ws://")
include_port = request.local.port != 80
location.append('localhost')
if include_port:
location.append(":%d" % request.local.port)
location.append(request.path_info)
if request.query_string != "":
location.append("?%s" % request.query_string)
ws_location = ''.join(location)
response = cherrypy.serving.response
response.stream = True
response.status = '101 Switching Protocols'
response.headers['Content-Type'] = 'text/plain'
response.headers['Upgrade'] = 'websocket'
response.headers['Connection'] = 'Upgrade'
response.headers['Sec-WebSocket-Version'] = str(version)
response.headers['Sec-WebSocket-Accept'] = base64.b64encode(sha1(key.encode('utf-8') + WS_KEY).digest())
if ws_protocols:
response.headers['Sec-WebSocket-Protocol'] = ', '.join(ws_protocols)
if ws_extensions:
response.headers['Sec-WebSocket-Extensions'] = ','.join(ws_extensions)
addr = (request.remote.ip, request.remote.port)
rfile = request.rfile.rfile
if isinstance(rfile, KnownLengthRFile):
rfile = rfile.rfile
ws_conn = get_connection(rfile)
request.ws_handler = handler_cls(ws_conn, ws_protocols, ws_extensions,
request.wsgi_environ.copy(),
heartbeat_freq=heartbeat_freq)
def complete(self):
"""
Sets some internal flags of CherryPy so that it
doesn't close the socket down.
"""
self._set_internal_flags()
def cleanup_headers(self):
"""
Some clients aren't that smart when it comes to
headers lookup.
"""
response = cherrypy.response
if not response.header_list:
return
headers = response.header_list[:]
for (k, v) in headers:
if k[:7] == 'Sec-Web':
response.header_list.remove((k, v))
response.header_list.append((k.replace('Sec-Websocket', 'Sec-WebSocket'), v))
def start_handler(self):
"""
Runs at the end of the request processing by calling
the opened method of the handler.
"""
request = cherrypy.request
if not hasattr(request, 'ws_handler'):
return
addr = (request.remote.ip, request.remote.port)
ws_handler = request.ws_handler
request.ws_handler = None
delattr(request, 'ws_handler')
# By doing this we detach the socket from
# the CherryPy stack avoiding memory leaks
detach_connection(request.rfile.rfile)
cherrypy.engine.publish('handle-websocket', ws_handler, addr)
def _set_internal_flags(self):
"""
CherryPy has two internal flags that we are interested in
to enable WebSocket within the server. They can't be set via
a public API and considering I'd want to make this extension
as compatible as possible whilst refraining in exposing more
than should be within CherryPy, I prefer performing a bit
of introspection to set those flags. Even by Python standards
such introspection isn't the cleanest but it works well
enough in this case.
This also means that we do that only on WebSocket
connections rather than globally and therefore we do not
harm the rest of the HTTP server.
"""
current = inspect.currentframe()
while True:
if not current:
break
_locals = current.f_locals
if 'self' in _locals:
if type(_locals['self']) == HTTPRequest:
_locals['self'].close_connection = True
if type(_locals['self']) == HTTPConnection:
_locals['self'].linger = True
# HTTPConnection is more inner than
# HTTPRequest so we can leave once
# we're done here
return
_locals = None
current = current.f_back
class WebSocketPlugin(plugins.SimplePlugin):
def __init__(self, bus):
plugins.SimplePlugin.__init__(self, bus)
self.manager = WebSocketManager()
def start(self):
self.bus.log("Starting WebSocket processing")
self.bus.subscribe('stop', self.cleanup)
self.bus.subscribe('handle-websocket', self.handle)
self.bus.subscribe('websocket-broadcast', self.broadcast)
self.manager.start()
def stop(self):
self.bus.log("Terminating WebSocket processing")
self.bus.unsubscribe('stop', self.cleanup)
self.bus.unsubscribe('handle-websocket', self.handle)
self.bus.unsubscribe('websocket-broadcast', self.broadcast)
def handle(self, ws_handler, peer_addr):
"""
Tracks the provided handler.
:param ws_handler: websocket handler instance
:param peer_addr: remote peer address for tracing purpose
"""
self.manager.add(ws_handler)
def cleanup(self):
"""
Terminate all connections and clear the pool. Executed when the engine stops.
"""
self.manager.close_all()
self.manager.stop()
self.manager.join()
def broadcast(self, message, binary=False):
"""
Broadcasts a message to all connected clients known to
the server.
:param message: a message suitable to pass to the send() method
of the connected handler.
:param binary: whether or not the message is a binary one
"""
self.manager.broadcast(message, binary)
if __name__ == '__main__':
    import random
    from ws4py.websocket import EchoWebSocket
cherrypy.config.update({'server.socket_host': '127.0.0.1',
'server.socket_port': 9000})
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
class Root(object):
@cherrypy.expose
@cherrypy.tools.websocket(on=False)
def ws(self):
return """<html>
<head>
<script type='application/javascript' src='https://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js'> </script>
<script type='application/javascript'>
$(document).ready(function() {
                    var ws = new WebSocket('ws://127.0.0.1:9000/');
ws.onmessage = function (evt) {
$('#chat').val($('#chat').val() + evt.data + '\\n');
};
ws.onopen = function() {
ws.send("Hello there");
};
ws.onclose = function(evt) {
$('#chat').val($('#chat').val() + 'Connection closed by server: ' + evt.code + ' \"' + evt.reason + '\"\\n');
};
$('#chatform').submit(function() {
ws.send('%(username)s: ' + $('#message').val());
$('#message').val("");
return false;
});
});
</script>
</head>
<body>
<form action='/echo' id='chatform' method='get'>
<textarea id='chat' cols='35' rows='10'></textarea>
<br />
<label for='message'>%(username)s: </label><input type='text' id='message' />
<input type='submit' value='Send' />
</form>
</body>
</html>
""" % {'username': "User%d" % random.randint(0, 100)}
@cherrypy.expose
def index(self):
cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler))
cherrypy.quickstart(Root(), '/', config={'/': {'tools.websocket.on': True,
                                   'tools.websocket.handler_cls': EchoWebSocket}})
|
xuhdev/WebSocket-for-Python
|
ws4py/server/cherrypyserver.py
|
Python
|
bsd-3-clause
| 14,368
|
# -*- coding: utf-8 -*-
# Copyright © 2007-2013, All rights reserved. GoodData® Corporation, http://gooddata.com
__author__ = "miroslav.hedl@gooddata.com"
__maintainer__ = __author__
'''
Primary goal of this module is function `plugins_to_xml` that solves issue PCI-1385.
This module converts plugins dictionary from `Smoker.py` and produces generic list
of named tuple based on configuration template (described inside).
Also implements a simple but flexible :py:class:`XmlBuilder('node_name').sub_node(attr1='val1', attr2='$GenericValue').innerText("Inside text...")`
Jenkins doesn't take structure of xUnit/jUnit XML file into account.
So sorting or structuring testsuites/testcases is futile effort.
Important is to properly set 'classname' and 'name' attributes for testcase elements.
'''
import yaml
import collections
from cgi import escape
import rows
from xml_builder import XmlBuilder
import default_config
def plugins_to_xml(dict_data,
yaml_filename=None,
yaml_data=default_config.YAML_CONFIG,
dict_templates=['All'],
additional_fields='AdditionalFields',
ts_attr='HtmlTestSuiteAttr',
tc_attr='HtmlTestCaseAttr',
tc_elem='HtmlTestCaseElem'):
'''
Provided data (from plugins dictionary) and walking template, get all valid items and convert it to jUnit xml representation.
    The function has sane defaults (depending on the caller's opinion):
:param dict dict_data: datastructure taken as result from running smoke tests (Smoker's output)
:param str yaml_data: yaml string that will be taken as configuration; does have precedence before `yaml_filename`
:param str yaml_filename: if yaml_data is None, tries to read config from file specified as path (relative from cwd)
:param list dict_templates: TODO: probably remove this parameter
:param dict additional_fields: get Yaml name for additional fields for Row namedtuple
:param dict ts_attr: get Yaml name for configured testsuite xml attributes
:param dict tc_attr: get Yaml name for configured testcase xml attributes
:param dict tc_elem: get Yaml name for configured testcase xml subelements
    :rtype: string
:return: returns xml structure (testsuites corresponds to nodes, testcases to plugin)
'''
def apply(inst, custom_dict=None, **kwargs):
'''
Dynamically applies value of value as new value.
>>> inst
Row(node='stg-c3', plugin='alog', status='UNKNOWN')
>>> inst.ClassName
'stg-c3.alog'
>>> custom_dict
{ 'name': 'node', 'classname': 'ClassName'}
>>> apply(inst, custom_dict=custom_dict)
{ 'name': 'sgt-c3', 'classname': 'stg-c3.alog'}
'''
applied_args = {}
if custom_dict:
for k, v in custom_dict.iteritems():
applied_args[k] = getattr(inst, v)
for k, v in kwargs.iteritems():
applied_args[k] = getattr(inst, v)
return applied_args
if yaml_filename:
with open(yaml_filename) as f:
C = yaml.safe_load(f)
else:
C = yaml.safe_load(yaml_data)
results = {}
for template in dict_templates:
results[template] = rows.create(data=dict_data,
template=C[template],
additional_fields=C[additional_fields])
ts_data = {}
for res in results.iterkeys():
ts_data[res] = collections.defaultdict(list)
ts_res = ts_data[res]
for row in results[res]:
ts_res[row.Node].append(row)
junit_xml = XmlBuilder()
for template_name, ts in sorted(ts_data.iteritems()):
with junit_xml.testsuites as html_tss:
html_tss(name=template_name)
for node, tcs in sorted(ts.iteritems()):
with html_tss.testsuite as html_ts:
first = tcs[0] if tcs else None
if first:
html_ts(custom_dict=apply(first, custom_dict=C[ts_attr]))
for tc in sorted(tcs):
html_tc = html_ts.testcase(custom_dict=apply(tc, custom_dict=C[tc_attr]))
if tc.MsgWarn:
html_tc <= (XmlBuilder('system-out') <= escape('\n'.join(tc.MsgWarn), quote=1))
if tc.MsgError:
getattr(html_tc, 'system-err') <= escape('\n'.join(tc.MsgError), quote=1)
html_tc.error(message=tc.ErrorMessage)
return junit_xml.dump()
# def test_onepass():
# fixture_filename = 'tests/input_data_fixture'
#
# with open('%s.yaml' % (fixture_filename,)) as f_in:
# xml_txt = plugins_to_xml(yaml.safe_load(f_in))
#
# with open('%s.testresult' % (fixture_filename,), 'w') as f_out:
# f_out.write(xml_txt)
|
pbenas/smoker
|
smoker/client/out_junit/__init__.py
|
Python
|
bsd-3-clause
| 4,926
|
#
# Evy - a concurrent networking library for Python
#
# Unless otherwise noted, the files in Evy are under the following MIT license:
#
# Copyright (c) 2012, Alvaro Saurin
# Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
# Copyright (c) 2007-2010, Linden Research, Inc.
# Copyright (c) 2005-2006, Bob Ippolito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import errno
import new
import evy
from evy.io.pipes import GreenPipe
from evy import patcher
from evy.patched import os
from evy.patched import select
patcher.inject('subprocess', globals(), ('select', select))
subprocess_orig = __import__("subprocess")
# This is the meat of this module, the green version of Popen.
class Popen(subprocess_orig.Popen):
"""
evy-friendly version of subprocess.Popen
"""
# We do not believe that Windows pipes support non-blocking I/O. At least,
# the Python file objects stored on our base-class object have no
# setblocking() method, and the Python fcntl module doesn't exist on
# Windows. (see evy.io.sockets.set_nonblocking()) As the sole purpose of
# this __init__() override is to wrap the pipes for evy-friendly
# non-blocking I/O, don't even bother overriding it on Windows.
if not subprocess_orig.mswindows:
def __init__ (self, args, bufsize = 0, *argss, **kwds):
# Forward the call to base-class constructor
subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
# Now wrap the pipes, if any. This logic is loosely borrowed from
# evy.processes.Process.run() method.
for attr in "stdin", "stdout", "stderr":
pipe = getattr(self, attr)
if pipe is not None and not type(pipe) == GreenPipe:
wrapped_pipe = GreenPipe(pipe, pipe.mode, bufsize)
setattr(self, attr, wrapped_pipe)
__init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
def wait (self, check_interval = 0.01):
# Instead of a blocking OS call, this version of wait() uses logic
# borrowed from the evy 0.2 processes.Process.wait() method.
try:
while True:
status = self.poll()
if status is not None:
return status
evy.sleep(check_interval)
except OSError, e:
if e.errno == errno.ECHILD:
# no child process, this happens if the child process
# already died and has been cleaned up
return -1
else:
raise
wait.__doc__ = subprocess_orig.Popen.wait.__doc__
if not subprocess_orig.mswindows:
# don't want to rewrite the original _communicate() method, we
# just want a version that uses evy.patched.select.select()
# instead of select.select().
try:
_communicate = new.function(subprocess_orig.Popen._communicate.im_func.func_code,
globals())
except AttributeError:
# 2.4 only has communicate
_communicate = new.function(subprocess_orig.Popen.communicate.im_func.func_code,
globals())
def communicate (self, input = None):
return self._communicate(input)
# Borrow subprocess.call() and check_call(), but patch them so they reference
# OUR Popen class rather than subprocess.Popen.
call = new.function(subprocess_orig.call.func_code, globals())
try:
check_call = new.function(subprocess_orig.check_call.func_code, globals())
except AttributeError:
pass # check_call added in 2.5
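# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). The green Popen mirrors the stdlib subprocess API, so plain
# stdlib-style calls should work; this assumes evy is installed and a POSIX
# 'echo' binary is on PATH (Python 2 only, like the module itself).
if __name__ == '__main__':
    proc = Popen(['echo', 'hello'], stdout=PIPE)  # PIPE was injected into globals() by patcher.inject above
    out, _ = proc.communicate()
    print(out)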
|
inercia/evy
|
evy/patched/subprocess.py
|
Python
|
mit
| 4,674
|
softwareName = 'PyBitmessage'
softwareVersion = '0.6.3.2'
|
PeterSurda/PyBitmessage
|
src/version.py
|
Python
|
mit
| 58
|
################################################################################
# Name: PyZenity.py
# Author: Brian Ramos
# Created: 10/17/2005
# Revision Information:
# $Date: $
# $Revision: $
# $Author: bramos $
#
# Licence: MIT Licence
#
# Copyright (c) 2010 Brian Ramos
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
################################################################################
from datetime import date
from subprocess import Popen, PIPE
from itertools import chain
from os import path
__all__ = ['GetDate', 'GetFilename', 'GetDirectory', 'GetSavename', 'GetText',
'InfoMessage', 'Question', 'Warning', 'ErrorMessage',
'Notification', 'TextInfo', 'Progress','List' ]
__doc__ = """PyZenity is an easy to use interface to Zenity for Python.
Zenity is normally called from scripts by invoking it with a multitude of
command line parameters that it uses to construct its interfaces. This
module hides the details of invoking the command and presents simple API
functions like:
cancel = Question('Should I cancel the operation?')
Each function takes optional kwargs parameters. This is to allow the use of
general Zenity parameters such as:
title - Set the dialog title
window_icon - Set the window icon
ok_label - Set the text for the Ok label
cancel_label - Set the text for the Cancel label
height - Set the height
width - Set the width
timeout - Set the dialog timeout in seconds"""
zen_exec = 'zenity'
def run_zenity(type, *args):
return Popen([zen_exec, type] + list(args), stdin=PIPE, stdout=PIPE)
# This is a dictionary of optional parameters that would create
# syntax errors in python if they were passed in as kwargs.
kw_subst = {
'window_icon': 'window-icon',
'ok_label': 'ok-label',
'cancel_label': 'cancel-label'
}
def kwargs_helper(kwargs):
"""This function preprocesses the kwargs dictionary to sanitize it."""
args = []
for param, value in kwargs.items():
param = kw_subst.get(param, param)
args.append((param, value))
return args
def GetDate(text=None, selected=None, **kwargs):
"""Prompt the user for a date.
This will raise a Zenity Calendar Dialog for the user to pick a date.
It will return a datetime.date object with the date or None if the
user hit cancel.
text - Text to be displayed in the calendar dialog.
selected - A datetime.date object that will be the pre-selected date.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--date-format=%d/%m/%Y']
if text:
args.append('--text=%s' % text)
if selected:
args.append('--day=%d' % selected.day)
args.append('--month=%d' % selected.month)
args.append('--year=%d' % selected.year)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--calendar', *args)
if p.wait() == 0:
retval = p.stdout.read().strip()
day, month, year = [int(x) for x in retval.split('/')]
return date(year, month, day)
def GetFilename(multiple=False, sep='|', **kwargs):
"""Prompt the user for a filename.
This will raise a Zenity File Selection Dialog. It will return a list with
the selected files or None if the user hit cancel.
multiple - True to allow the user to select multiple files.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if multiple:
args.append('--multiple')
if sep != '|':
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
        return p.stdout.read()[:-1].split(sep)
def GetDirectory(multiple=False, selected=None, sep=None, **kwargs):
"""Prompt the user for a directory.
This will raise a Zenity Directory Selection Dialog. It will return a
list with the selected directories or None if the user hit cancel.
multiple - True to allow the user to select multiple directories.
selected - Path to the directory to be selected on startup.
sep - Token to use as the path separator when parsing Zenity's return
string.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--directory']
if multiple:
args.append('--multiple')
if selected:
if not path.lexists(selected):
raise ValueError("File %s does not exist!" % selected)
args.append('--filename=%s' % selected)
if sep:
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
        return p.stdout.read().strip().split(sep or '|')
def GetSavename(default=None, **kwargs):
"""Prompt the user for a filename to save as.
This will raise a Zenity Save As Dialog. It will return the name to save
a file as or None if the user hit cancel.
default - The default name that should appear in the save as dialog.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--save']
if default:
args.append('--filename=%s' % default)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--file-selection', *args)
if p.wait() == 0:
        return p.stdout.read().strip()
def Notification(text=None, window_icon=None, **kwargs):
"""Put an icon in the notification area.
This will put an icon in the notification area and return when the user
clicks on it.
text - The tooltip that will show when the user hovers over it.
window_icon - The stock icon ("question", "info", "warning", "error") or
path to the icon to show.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if window_icon:
args.append('--window-icon=%s' % window_icon)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--notification', *args)
p.wait()
def List(column_names, title=None, boolstyle=None, editable=False,
select_col=None, sep='|', data=[], **kwargs):
"""Present a list of items to select.
This will raise a Zenity List Dialog populated with the colomns and rows
specified and return either the cell or row that was selected or None if
the user hit cancel.
column_names - A tuple or list containing the names of the columns.
title - The title of the dialog box.
boolstyle - Whether the first columns should be a bool option ("checklist",
"radiolist") or None if it should be a text field.
editable - True if the user can edit the cells.
select_col - The column number of the selected cell to return or "ALL" to
return the entire row.
sep - Token to use as the row separator when parsing Zenity's return.
Cells should not contain this token.
data - A list or tuple of tuples that contain the cells in the row. The
size of the row's tuple must be equal to the number of columns.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
for column in column_names:
args.append('--column=%s' % column)
if title:
args.append('--title=%s' % title)
if boolstyle:
if not (boolstyle == 'checklist' or boolstyle == 'radiolist'):
raise ValueError('"%s" is not a proper boolean column style.'
% boolstyle)
args.append('--' + boolstyle)
if editable:
args.append('--editable')
if select_col:
args.append('--print-column=%s' % select_col)
if sep != '|':
args.append('--separator=%s' % sep)
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
for datum in chain(*data):
args.append(str(datum))
p = run_zenity('--list', *args)
if p.wait() == 0:
return p.stdout.read().strip().split(sep)
def ErrorMessage(text, **kwargs):
"""Show an error message dialog to the user.
This will raise a Zenity Error Dialog with a description of the error.
text - A description of the error.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
run_zenity('--error', *args).wait()
def InfoMessage(text, **kwargs):
"""Show an info message dialog to the user.
This will raise a Zenity Info Dialog displaying some information.
text - The information to present to the user.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
run_zenity('--info', *args).wait()
def Question(text, **kwargs):
"""Ask the user a question.
This will raise a Zenity Question Dialog that will present the user with an
OK/Cancel dialog box. It returns True if the user clicked OK; False on
Cancel.
text - The question to ask.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
return run_zenity('--question', *args).wait() == 0
def Warning(text, **kwargs):
"""Show a warning message dialog to the user.
This will raise a Zenity Warning Dialog with a description of the warning.
It returns True if the user clicked OK; False on cancel.
text - A description of the warning.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = ['--text=%s' % text]
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
return run_zenity('--warning', *args).wait() == 0
def Progress(text='', percentage=0, auto_close=False, pulsate=False, no_cancel=False, **kwargs):
"""Show a progress dialog to the user.
This will raise a Zenity Progress Dialog. It returns a callback that
accepts two arguments. The first is a numeric value of the percent
complete. The second is a message about the progress.
NOTE: This function sends the SIGHUP signal if the user hits the cancel
button. You must connect to this signal if you do not want your
application to exit.
text - The initial message about the progress.
percentage - The initial percentage to set the progress bar to.
auto_close - True if the dialog should close automatically if it reaches
100%.
    pulsate - True if the status should pulsate instead of progress.
    no_cancel - True if the cancel button should be hidden.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if percentage:
args.append('--percentage=%s' % percentage)
    # --auto-close, --no-cancel and --pulsate are value-less zenity flags
    if auto_close:
        args.append('--auto-close')
    if no_cancel:
        args.append('--no-cancel')
    if pulsate:
        args.append('--pulsate')
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = Popen([zen_exec, '--progress'] + args, stdin=PIPE, stdout=PIPE)
def update(percent, message=''):
if type(percent) == float:
percent = int(percent * 100)
p.stdin.write(str(percent) + '\n')
if message:
p.stdin.write('# %s\n' % message)
return p.returncode
return update
def GetText(text='', entry_text='', password=False, **kwargs):
"""Get some text from the user.
This will raise a Zenity Text Entry Dialog. It returns the text the user
entered or None if the user hit cancel.
text - A description of the text to enter.
entry_text - The initial value of the text entry box.
password - True if text entered should be hidden by stars.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if text:
args.append('--text=%s' % text)
if entry_text:
args.append('--entry-text=%s' % entry_text)
if password:
args.append('--hide-text')
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--entry', *args)
if p.wait() == 0:
return p.stdout.read()[:-1]
def TextInfo(filename=None, editable=False, html_support=False, **kwargs):
"""Show the text of a file to the user.
This will raise a Zenity Text Information Dialog presenting the user with
the contents of a file. It returns the contents of the text box.
filename - The path to the file to show.
editable - True if the text should be editable.
kwargs - Optional command line parameters for Zenity such as height,
width, etc."""
args = []
if filename:
args.append('--filename=%s' % filename)
if editable:
args.append('--editable')
if html_support is True:
args.append('--html')
for generic_args in kwargs_helper(kwargs):
args.append('--%s=%s' % generic_args)
p = run_zenity('--text-info', *args)
if p.wait() == 0:
return p.stdout.read()
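################################################################################
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the zenity binary is installed and on PATH; the dialog
# texts and the title below are arbitrary demo values.
################################################################################
if __name__ == '__main__':
    if Question('Run the PyZenity demo?', title='PyZenity demo'):
        name = GetText('What is your name?')
        if name:
            InfoMessage('Hello, %s!' % name)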
|
dleicht/PSB
|
PyZenity.py
|
Python
|
mit
| 15,175
|
# -*- coding: utf-8 -*-
"""Run test to import camt.053 import."""
##############################################################################
#
# Copyright (C) 2015 Therp BV <http://therp.nl>.
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
from openerp.modules.module import get_module_resource
class TestStatementFile(TransactionCase):
"""Run test to import camt.053 import."""
def test_statement_import(self):
"""Test correct creation of single statement."""
import_model = self.registry('account.bank.statement.import')
statement_model = self.registry('account.bank.statement')
cr, uid = self.cr, self.uid
statement_path = get_module_resource(
'bank_statement_parse_camt',
'test_files',
'test-camt053.xml'
)
        with open(statement_path, 'rb') as statement_fobj:
            statement_file = statement_fobj.read().encode('base64')
bank_statement_id = import_model.create(
cr, uid,
dict(
data_file=statement_file,
)
)
import_model.import_file(cr, uid, [bank_statement_id])
ids = statement_model.search(
cr, uid, [('name', '=', '1234Test/1')])
self.assertTrue(ids, 'Statement not found after parse.')
statement_id = ids[0]
statement_obj = statement_model.browse(
cr, uid, statement_id)
self.assertTrue(
abs(statement_obj.balance_start - 15568.27) < 0.00001,
'Start balance %f not equal to 15568.27' %
statement_obj.balance_start
)
self.assertTrue(
abs(statement_obj.balance_end_real - 15121.12) < 0.00001,
'Real end balance %f not equal to 15121.12' %
statement_obj.balance_end_real
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
acsone/bank-statement-import-camt
|
bank_statement_import_camt/tests/test_import_bank_statement.py
|
Python
|
agpl-3.0
| 2,725
|
import click
import os
import penguin.pdf as pdf
import penguin.utils as utils
def check_src(src):
if not all((map(utils.is_valid_source, src))):
raise click.BadParameter("src arguments must be either a valid directory"
" or pdf file.")
@click.group()
def penguin():
pass
@penguin.command()
@click.argument('src', nargs=-1)
@click.argument('dst')
@click.option('--bookmark', 'bookmark', flag_value='include-bookmarks',
default=True)
@click.option('--remove-blank-pages', 'rmblanks', flag_value='remove-blanks-pages',
default=False)
def combine(src, dst, bookmark, rmblanks):
"""Combine Pdf files from the source provided into the destination file.
:param src: The source Pdf file(s). src can either be a list of individual
files or directories containing Pdf files.
:param dst: The output file destination.
:param bookmark: True if the combined Pdf should include bookmarks.
:param rmblanks: True if blank pages should be removed from the combined Pdf.
"""
check_src(src)
combined_pdf = pdf.combine(src, bookmark, rmblanks)
with open(dst, 'wb') as f:
combined_pdf.write(f)
@penguin.command()
@click.argument('src')
@click.argument('pages', nargs=-1)
@click.argument('dst')
def split(src, pages, dst):
"""Split the specified pages from src into the the dst.
:param src: The source Pdf file (directory).
:param pages: The page number(s) to extract from each file.
:param dst: The output file destination.
"""
    check_src([src])  # src is a single path here; check_src expects an iterable of paths
combined_pdf = pdf.split(src, pages)
with open(dst, 'wb') as f:
combined_pdf.write(f)
if __name__ == '__main__':
penguin()
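# Hedged CLI usage sketch (added for illustration; the file names below are
# made up, not taken from the project's docs):
#   python penguin_cli.py combine report1.pdf scans/ merged.pdf
#   python penguin_cli.py split merged.pdf 1 3 5 excerpt.pdf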
|
zrluety/penguin
|
penguin/scripts/penguin_cli.py
|
Python
|
mit
| 1,738
|
# -*- coding: utf-8 -*-
"""
Script: GotoLineCol.py
Utility: 1. Moves the cursor position to the specified line and column for a file in Notepad++.
Especially useful for inspecting data files in fixed-width record formats.
2. Also, displays the character code (SBCS & LTR) in decimal and hex at the specified position.
Requires: Python Script plugin in Notepad++
Customizable parameters for the goToLineCol function call in main():
bRepeatPrompt: Whether to repeat prompting when the specified number value is out of range
iEdgeBuffer: Ensures that the caret will be that many characters inside the left and right edges of the editor viewing area, when possible
iCaretHiliteDuration: Caret will be in Block mode for specified seconds
bCallTipAutoHide: Whether to hide the call tip automatically in sync when caret highlighting is turned off
bBraceHilite: Whether to use brace highlighting style for the character at the specified position. Automatically turns off when current line changes.
Known Issues: 1. Character code display in the call tip is functional with SBCS (Single-Byte Character Sets) and LTR (left-to-right) direction.
                  With MBCS (Multi-Byte Character Sets) or RTL (right-to-left) direction, results will not be reliable.
2. If iCaretHiliteDuration is set to a high value (>3 seconds), and the user tries to rerun the script
while the previous execution is still running, the Python Script plugin will display an error message:
"Another script is still running..." So set this parameter to 3 seconds or lower.
Author: Shridhar Kumar
Date: 2019-08-15
"""
def main():
goToLineCol(bRepeatPrompt = True,
iEdgeBuffer = 5,
iCaretHiliteDuration = 5,
bCallTipAutoHide = False,
bBraceHilite = True)
def getDisplayLineCol():
iCurrLine = editor.lineFromPosition(editor.getCurrentPos())
iCurrCol = editor.getCurrentPos() - editor.positionFromLine(iCurrLine)
return str(iCurrLine + 1), str(iCurrCol + 1)
def promptValue(sInfoText, sTitleText, sDefaultVal, iMinVal, iMaxVal, sRangeError, bRepeatPrompt):
while True:
sNewVal = notepad.prompt(sInfoText, sTitleText, sDefaultVal)
if sNewVal == None:
return None
try:
iNewVal = int(sNewVal)
if iMinVal <= iNewVal <= iMaxVal:
return iNewVal
else:
                raise ValueError('Value out of range')
except:
notepad.messageBox(sRangeError + '.\n\nYou specified: ' + sNewVal +
'\n\nPlease specify a number between ' + str(iMinVal) + ' and ' + str(iMaxVal) + '.',
'Specified value is out of range')
if not bRepeatPrompt:
return None
def goToLineCol(bRepeatPrompt, iEdgeBuffer, iCaretHiliteDuration, bCallTipAutoHide, bBraceHilite):
import time
sCurrLine, sCurrCol = getDisplayLineCol()
iMaxLines = editor.getLineCount()
iNewLine = promptValue(sInfoText = 'Line number (between 1 and ' + str(iMaxLines) + '):',
sTitleText = 'Specify line number',
sDefaultVal = sCurrLine,
iMinVal = 1,
iMaxVal = iMaxLines,
sRangeError = 'File line count is only ' + str(iMaxLines),
bRepeatPrompt = bRepeatPrompt)
if iNewLine == None:
return
# Get the character count plus 1 for the specified line
# Plus 1 is to account for the caret position at the end of the line, past all characters but before EOL/EOF
# Since lineLength already includes EOL, we just need to subtract 1 only when EOL is 2 chars. i.e., CRLF
# For the last line in file, there is no 2-character CRLF EOL; only a single character EOF.
iMaxCols = max(1, editor.lineLength(iNewLine - 1))
if (editor.getEOLMode() == ENDOFLINE.CRLF) and (iNewLine < iMaxLines):
iMaxCols -= 1
iNewCol = promptValue(sInfoText = 'Column position (between 1 and ' + str(iMaxCols) + ') for line ' + str(iNewLine) + ':',
sTitleText = 'Specify column position',
sDefaultVal = sCurrCol,
iMinVal = 1,
iMaxVal = iMaxCols,
sRangeError = 'There are only ' + str(iMaxCols) + ' characters in line ' + str(iNewLine),
                          bRepeatPrompt = bRepeatPrompt)
    if iNewCol == None:
        return
# Navigate to the specified position in the document
iLineStartPos = editor.positionFromLine(iNewLine - 1)
iNewPos = iLineStartPos + iNewCol - 1
editor.ensureVisible(iNewLine - 1)
editor.gotoPos( min(iLineStartPos + iMaxCols, iNewPos + iEdgeBuffer) ) # Ensure that caret is 'iEdgeBuffer' characters inside right edge when possible
editor.gotoPos( max(iLineStartPos, iNewPos - iEdgeBuffer) ) # Ensure that caret is 'iEdgeBuffer' characters inside left edge when possible
editor.gotoPos(iNewPos) # Finally, move caret to the specified position
# Obtain current caret style to restore it later on
currCS = editor.getCaretStyle()
# Set the caret to block style to highlight the new position
editor.setCaretStyle(CARETSTYLE.BLOCK)
    # Display a call tip with the new line and column numbers, re-read
    # from the editor as verification; also display the character code
    # in decimal and hex
sCurrLine, sCurrCol = getDisplayLineCol()
editor.callTipShow(iNewPos, ' Line: ' + sCurrLine +
'\n Column: ' + sCurrCol +
'\nChar Code: ' + str(editor.getCharAt(iNewPos)) + ' [' + hex(editor.getCharAt(iNewPos)) + ']')
if iCaretHiliteDuration > 0:
time.sleep(iCaretHiliteDuration)
# Reset the caret style
editor.setCaretStyle(currCS)
if bCallTipAutoHide:
editor.callTipCancel()
if bBraceHilite:
editor.braceHighlight(iNewPos, iNewPos)
main()
|
bruderstein/PythonScript
|
scripts/Samples/GotoLineCol.py
|
Python
|
gpl-2.0
| 6,088
|
########################################################################
#
# File Name: NodeFilter.py
#
# Documentation: http://docs.4suite.com/4DOM/NodeFilter.py.html
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
class NodeFilter:
"""
This class is really just an abstract base.
All implementation must be provided in a derived class
"""
FILTER_ACCEPT = 1
FILTER_REJECT = 2
FILTER_SKIP = 3
SHOW_ALL = 0xFFFFFFFF
SHOW_ELEMENT = 0x00000001
SHOW_ATTRIBUTE = 0x00000002
SHOW_TEXT = 0x00000004
SHOW_CDATA_SECTION = 0x00000008
SHOW_ENTITY_REFERENCE = 0x00000010
SHOW_ENTITY = 0x00000020
SHOW_PROCESSING_INSTRUCTION = 0x00000040
SHOW_COMMENT = 0x00000080
SHOW_DOCUMENT = 0x00000100
SHOW_DOCUMENT_TYPE = 0x00000200
SHOW_DOCUMENT_FRAGMENT = 0x00000400
SHOW_NOTATION = 0x00000800
def acceptNode(self, node):
raise TypeError("Please define and use a subclass.")
|
iCarto/siga
|
extScripting/scripts/jython/Lib/xml/dom/NodeFilter.py
|
Python
|
gpl-3.0
| 1,284
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
import decimal_precision as dp
class product_product(osv.osv):
def _get_product_samples(self, cr, uid, ids, field_name, arg, context):
"""
        Gets the remaining samples for each product: the quantity
        available in the dedicated samples stock location.
        """
        res = {}
        c = context.copy()
        action_model, samples_location = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'eln_product_samples', "stock_physical_location_samples2")
        c.update({'location': samples_location})
        for product in self.pool.get('product.product').browse(cr, uid, ids, context=c):
            # Quantity available in the samples location, per product
            res[product.id] = round(product.qty_available, 2)
return res
_inherit = 'product.product'
_columns = {
'remaining_samples':fields.function(_get_product_samples, method=True, string='Samples', type='float', digits_compute=dp.get_precision('Account'), help="Given Samples (in UoM)", readonly=True),
}
product_product()
|
Comunitea/alimentacion
|
eln_product_samples/product_product.py
|
Python
|
agpl-3.0
| 2,066
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
def __plugMenu( title, plugs ) :
chosenPlugs = []
def choosePlug( plug ) :
chosenPlugs.append( plug )
menuDefinition = IECore.MenuDefinition()
for plug in plugs :
menuDefinition.append(
"/" + ( Gaffer.Metadata.value( plug, "noduleLayout:label" ) or plug.getName() ),
{
"command" : functools.partial( choosePlug, plug )
}
)
menu = GafferUI.Menu( menuDefinition, title = title )
menu.popup( modal = True )
return chosenPlugs[0] if chosenPlugs else None
GafferUI.PlugAdder.plugMenuSignal().connect( __plugMenu, scoped = False )
def __menu( title, names ) :
from uuid import uuid4
chosenNames = []
def chooseName( name ) :
chosenNames.append( name )
menuDefinition = IECore.MenuDefinition()
for name in names :
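		# A name whose last path segment is empty (i.e. it ends with "/")
		# is treated as a divider entry rather than a command.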
if not name.split('/')[-1] :
menuDefinition.append(
"/" + name + uuid4().hex,
{
"divider" : True
}
)
else :
menuDefinition.append(
"/" + name,
{
"command" : functools.partial( chooseName, name )
}
)
menu = GafferUI.Menu( menuDefinition, title = title )
menu.popup( modal = True )
return chosenNames[0] if chosenNames else ""
GafferUI.PlugAdder.menuSignal().connect( __menu, scoped = False )
|
hradec/gaffer
|
python/GafferUI/_PlugAdder.py
|
Python
|
bsd-3-clause
| 3,063
|
import pytest
from bluesky import Msg
from bluesky.plans import fly, count
from bluesky.run_engine import IllegalMessageSequence
from bluesky.tests import requires_ophyd
from ophyd import Component as Cpt, Device
from ophyd.sim import NullStatus, TrivialFlyer
@requires_ophyd
def test_flyer_with_collect_asset_documents(RE):
from ophyd.sim import det, new_trivial_flyer, trivial_flyer
from bluesky.preprocessors import fly_during_wrapper
assert hasattr(new_trivial_flyer, "collect_asset_docs")
assert not hasattr(trivial_flyer, "collect_asset_docs")
RE(fly_during_wrapper(count([det], num=5), [new_trivial_flyer, trivial_flyer]))
@requires_ophyd
def test_collect_uncollected_and_log_any_errors(RE):
    # test that if collecting from one flyer raises an error, we can carry on
collected = {}
from ophyd.sim import TrivialFlyer
class DummyFlyerWithFlag(TrivialFlyer):
def collect(self):
collected[self.name] = True
super().collect()
class BrokenDummyFlyerWithFlag(DummyFlyerWithFlag):
def collect(self):
super().collect()
raise Exception
flyer1 = DummyFlyerWithFlag()
flyer1.name = "flyer1"
flyer2 = BrokenDummyFlyerWithFlag()
flyer2.name = "flyer2"
collected.clear()
RE([Msg("open_run"), Msg("kickoff", flyer1), Msg("kickoff", flyer2)])
assert "flyer1" in collected
assert "flyer2" in collected
collected.clear()
RE([Msg("open_run"), Msg("kickoff", flyer2), Msg("kickoff", flyer1)])
assert "flyer1" in collected
assert "flyer2" in collected
@requires_ophyd
def test_flying_outside_a_run_is_illegal(RE, hw):
flyer = hw.trivial_flyer
# This is normal, legal usage.
RE(
[
Msg("open_run"),
Msg("kickoff", flyer, group="foo"),
Msg("wait", group="foo"),
Msg("complete", flyer, group="bar"),
Msg("wait", group="bar"),
Msg("collect", flyer),
Msg("close_run"),
]
)
# This is normal, legal usage (partial collection).
RE(
[
Msg("open_run"),
Msg("kickoff", flyer, group="foo"),
Msg("wait", group="foo"),
Msg("collect", flyer),
Msg("collect", flyer),
Msg("collect", flyer),
Msg("complete", flyer, group="bar"),
Msg("wait", group="bar"),
Msg("collect", flyer),
Msg("collect", flyer),
Msg("collect", flyer),
Msg("close_run"),
]
)
# It is not legal to kickoff outside of a run.
with pytest.raises(IllegalMessageSequence):
RE([Msg("kickoff", flyer)])
@requires_ophyd
def test_flyer_descriptor(RE, hw):
class Flyer(TrivialFlyer):
def __init__(self, name):
self.name = name
self.detector = FlyerDetector(name="flyer-detector")
def read_configuration(self):
return self.detector.read_configuration()
def describe_configuration(self):
return self.detector.describe_configuration()
def describe_collect(self):
return {
"primary": {
"data_key_1": {
"dims": [],
"dtype": "string",
"shape": [],
"source": "",
},
"data_key_2": {
"dims": [],
"dtype": "number",
"shape": [],
"source": "",
},
},
"secondary": {
"data_key_3": {
"dims": [],
"dtype": "string",
"shape": [],
"source": "",
},
"data_key_4": {
"dims": [],
"dtype": "number",
"shape": [],
"source": "",
},
},
}
def collect(self):
yield {
"data": {"data_key_1": "1", "data_key_2": 2},
"timestamps": {"data_key_1": 0, "data_key_2": 0},
"time": 0,
}
yield {
"data": {"data_key_3": "3", "data_key_4": 4},
"timestamps": {"data_key_3": 0, "data_key_4": 0},
"time": 0,
}
flyers = [Flyer(name="flyer"), TrivialFlyer()]
descriptors = dict()
RE(
fly(flyers),
{"descriptor": lambda name, doc: descriptors.update({doc["name"]: doc})},
)
primary_descriptor = descriptors["primary"]
assert primary_descriptor["configuration"]["flyer"] == {
"data": {
"config_key_1": "1",
"config_key_2": 2,
"config_key_3": "3",
"config_key_4": 4,
},
"timestamps": {
"config_key_1": 1,
"config_key_2": 2,
"config_key_3": 3,
"config_key_4": 4,
},
"data_keys": {
"config_key_1": {"dtype": "string", "shape": [], "source": "PV:Config:1"},
"config_key_2": {"dtype": "number", "shape": [], "source": "PV:Config:2"},
"config_key_3": {"dtype": "string", "shape": [], "source": "PV:Config:3"},
"config_key_4": {"dtype": "number", "shape": [], "source": "PV:Config:4"},
},
}
assert "flyer" in primary_descriptor["object_keys"]
secondary_descriptor = descriptors["secondary"]
assert len(secondary_descriptor["configuration"]["flyer"]["data"]) == 4
assert secondary_descriptor["configuration"] == primary_descriptor["configuration"]
assert "flyer" in secondary_descriptor["object_keys"]
trivial_flyer_descriptor = descriptors["stream_name"]
print(f"trivial flyer descriptor: {trivial_flyer_descriptor}")
assert len(trivial_flyer_descriptor["configuration"]) == 1
assert "trivial_flyer" in trivial_flyer_descriptor["object_keys"]
@requires_ophyd
def test_device_flyer_descriptor(RE, hw):
# TrivialFlyer is not a Device
flyers = [FlyerDevice(name="flyer-detector"), TrivialFlyer()]
descriptors = dict()
RE(
fly(flyers),
{"descriptor": lambda name, doc: descriptors.update({doc["name"]: doc})},
)
primary_descriptor = descriptors["primary"]
print(f"primary descriptor: {primary_descriptor}")
assert len(primary_descriptor["configuration"]) == 1
assert primary_descriptor["configuration"]["flyer-detector"] == {
"data": {
"config_key_1": "1",
"config_key_2": 2,
"config_key_3": "3",
"config_key_4": 4,
},
"timestamps": {
"config_key_1": 1,
"config_key_2": 2,
"config_key_3": 3,
"config_key_4": 4,
},
"data_keys": {
"config_key_1": {"dtype": "string", "shape": [], "source": "PV:Config:1"},
"config_key_2": {"dtype": "number", "shape": [], "source": "PV:Config:2"},
"config_key_3": {"dtype": "string", "shape": [], "source": "PV:Config:3"},
"config_key_4": {"dtype": "number", "shape": [], "source": "PV:Config:4"},
},
}
secondary_descriptor = descriptors["secondary"]
print(f"secondary_descriptor: {secondary_descriptor}")
assert len(secondary_descriptor["configuration"]["flyer-detector"]["data"]) == 4
assert secondary_descriptor["configuration"] == primary_descriptor["configuration"]
trivial_flyer_descriptor = descriptors["stream_name"]
print(f"trivial flyer descriptor: {trivial_flyer_descriptor}")
assert len(trivial_flyer_descriptor["configuration"]) == 1
assert "trivial_flyer" in trivial_flyer_descriptor["object_keys"]
class FlyerDetector(Device):
def __init__(self, name, *args, **kwargs):
super().__init__(name=name, *args, **kwargs)
def describe_configuration(self):
return {
"config_key_1": {"dtype": "string", "shape": [], "source": "PV:Config:1"},
"config_key_2": {"dtype": "number", "shape": [], "source": "PV:Config:2"},
"config_key_3": {"dtype": "string", "shape": [], "source": "PV:Config:3"},
"config_key_4": {"dtype": "number", "shape": [], "source": "PV:Config:4"},
}
def read_configuration(self):
return {
"config_key_1": {"value": "1", "timestamp": 1},
"config_key_2": {"value": 2, "timestamp": 2},
"config_key_3": {"value": "3", "timestamp": 3},
"config_key_4": {"value": 4, "timestamp": 4},
}
class FlyerDevice(Device):
detector = Cpt(FlyerDetector, name="flyer-detector")
def kickoff(self):
return NullStatus()
def complete(self):
return NullStatus()
def stop(self, *, success=False):
pass
def describe_collect(self):
return {
"primary": {
"data_key_1": {
"dims": [],
"dtype": "string",
"shape": [],
"source": "",
},
"data_key_2": {
"dims": [],
"dtype": "number",
"shape": [],
"source": "",
},
},
"secondary": {
"data_key_3": {
"dims": [],
"dtype": "string",
"shape": [],
"source": "",
},
"data_key_4": {
"dims": [],
"dtype": "number",
"shape": [],
"source": "",
},
},
}
def collect(self):
yield {
"data": {"data_key_1": "", "data_key_2": 0},
"timestamps": {"data_key_1": 0, "data_key_2": 0},
"time": 0,
}
yield {
"data": {"data_key_3": "", "data_key_4": 0},
"timestamps": {"data_key_3": 0, "data_key_4": 0},
"time": 0,
}
|
ericdill/bluesky
|
bluesky/tests/test_flyer.py
|
Python
|
bsd-3-clause
| 10,262
|
#!/usr/bin/env python
#
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2011 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ ["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"] ])
    print(table.draw() + "\\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print(table.draw())
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'LGPL'
__version__ = '0.8.1'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
"""
import sys
import string
from functools import reduce
try:
    import textwrap
except ImportError:
    sys.stderr.write("Can't import textwrap module!\n")
    raise
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if not isinstance(iterable, str):
return iterable.__len__()
try:
return len(str(iterable, 'utf'))
except:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
if max_width <= 0:
max_width = False
self._max_width = max_width
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise(ArraySizeError, "array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
def set_deco(self, deco):
"""Set the table decoration
        - 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either "a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
        try:
            array = list(map(int, array))
            if min(array) <= 0:
                raise ValueError
        except ValueError:
            sys.stderr.write("Wrong argument in column width specification\n")
            raise
        self._width = array
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
        self._header = list(map(str, array))
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i,x in enumerate(array):
cells.append(self._str(i,x))
self._rows.append(cells)
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
        - The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        # nb: don't use 'iter' on two-dimensional arrays, so that plain
        #     lists keep working unchanged
        if header:
            if hasattr(rows, '__iter__') and hasattr(rows, '__next__'):
                self.header(next(rows))
            else:
                self.header(rows[0])
                rows = rows[1:]
for row in rows:
self.add_row(row)
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
try:
f = float(x)
except:
return str(x)
n = self._precision
dtype = self._dtype[i]
if dtype == 'i':
return str(int(round(f)))
elif dtype == 'f':
return '%.*f' % (n, f)
elif dtype == 'e':
return '%.*e' % (n, f)
elif dtype == 't':
return str(x)
else:
if f - round(f) == 0:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return str(int(round(f)))
else:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return '%.*f' % (n, f)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
        elif self._row_size != len(array):
            raise ArraySizeError("array should contain %d elements"
                                 % self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
        l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
        cell, such as newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, range(1, len(parts) + 1)):
length = length + len(part)
if i < len(parts):
                    length = (length // 8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
        If a specific width has been specified, return early. If the total of
        the columns' widths exceeds the desired table width, another width
        will be computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, range(len(row))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
items = len(maxi)
length = reduce(lambda x,y: x+y, maxi)
if self._max_width and length + items * 3 + 1 > self._max_width:
            maxi = [(self._max_width - items * 3 - 1) // items
                    for n in range(items)]
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = "c"
if align == "r":
out += "%s " % (fill * space + cell_line)
elif align == "c":
out += "%s " % (fill/2 * space + cell_line \
+ (fill/2 + fill%2) * space)
else:
out += "%s " % (cell_line + fill * space)
if length < len(line):
out += "%s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
            for c in cell.split('\n'):
                # Cells produced by _str() are already str; only bytes
                # input still needs decoding here.
                if isinstance(c, bytes):
                    try:
                        c = c.decode('utf-8')
                    except UnicodeDecodeError as strerror:
                        sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
                        c = c.decode('utf-8', 'replace')
                array.extend(textwrap.wrap(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, map(len, line_wrapped))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * (missing / 2)
cell.extend([""] * (missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ ["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"] ])
print(table.draw() + "\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
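    # Extra illustrative demo (a sketch added alongside the original
    # examples): custom separator characters via set_chars() and fixed
    # column widths via set_cols_width().
    table = Texttable()
    table.set_chars(['.', ':', '+', '='])
    table.set_cols_width([8, 6, 10])
    table.add_rows([["Key", "Val", "Note"],
                    ["alpha", 1, "first"],
                    ["beta", 22, "second"]])
    print(table.draw())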
|
bossiernesto/uLisp
|
texttable/texttable.py
|
Python
|
bsd-3-clause
| 18,426
|
'''
Functions for comparing basis sets and pieces of basis sets
'''
import operator
from ..sort import sort_shell
def _reldiff(a, b):
"""
Computes the relative difference of two floating-point numbers
rel = abs(a-b)/min(abs(a), abs(b))
If a == 0 and b == 0, then 0.0 is returned
Otherwise if a or b is 0.0, inf is returned.
"""
a = float(a)
b = float(b)
aa = abs(a)
ba = abs(b)
if a == 0.0 and b == 0.0:
return 0.0
    elif a == 0.0 or b == 0.0:
return float('inf')
return abs(a - b) / min(aa, ba)
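# Worked example (illustrative): _reldiff(100.0, 101.0)
# = abs(100.0 - 101.0) / min(100.0, 101.0) = 1.0 / 100.0 = 0.01,
# i.e. the values differ by 1% relative to the smaller magnitude.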
def _compare_keys(element1, element2, key, compare_func, *args):
"""
Compares a specific key between two elements of a basis set
If the key exists in one element but not the other, False is returned.
If the key exists in neither element, True is returned.
Parameters
----------
element1 : dict
Basis info for an element
element2 : dict
Basis info for another element
key : string
Key to compare in the two elements
compare_func : function
Function that returns True if the data under the key is equivalent
in both elements
args
        Additional arguments to be passed to compare_func
"""
if key in element1 and key in element2:
if not compare_func(element1[key], element2[key], *args):
return False
elif key in element1 or key in element2:
return False
return True
def _compare_vector(arr1, arr2, rel_tol):
"""
Compares two vectors (python lists) for approximate equality.
Each array contains floats or strings convertible to floats
This function returns True if both arrays are of the same length
and each value is within the given relative tolerance.
"""
length = len(arr1)
if len(arr2) != length:
return False
for i in range(length):
element_1 = float(arr1[i])
element_2 = float(arr2[i])
diff = abs(abs(element_1) - abs(element_2))
if diff != 0.0:
rel = _reldiff(element_1, element_2)
# For a basis set, a relatively coarse comparison
# should be acceptable
if rel > rel_tol:
return False
return True
def _compare_matrix(mat1, mat2, rel_tol):
"""
Compares two matrices (nested python lists) for approximate equality.
Each matrix contains floats or strings convertible to floats
This function returns True if both matrices are of the same dimensions
and each value is within the given relative tolerance.
"""
length = len(mat1)
if len(mat2) != length:
return False
for i in range(length):
if not _compare_vector(mat1[i], mat2[i], rel_tol):
return False
return True
def compare_electron_shells(shell1, shell2, compare_meta=False, rel_tol=0.0):
'''
Compare two electron shells for approximate equality
(exponents/coefficients are within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
if shell1['angular_momentum'] != shell2['angular_momentum']:
return False
# Sort into some canonical order
shell1 = sort_shell(shell1)
shell2 = sort_shell(shell2)
# Zip together exponents and coeffs
# This basically creates the typical matrix with exponents
# being in the first column
tmp1 = list(zip(shell1['exponents'], *shell1['coefficients']))
tmp2 = list(zip(shell2['exponents'], *shell2['coefficients']))
if not _compare_matrix(tmp1, tmp2, rel_tol):
return False
if compare_meta:
if shell1['region'] != shell2['region']:
return False
if shell1['function_type'] != shell2['function_type']:
return False
return True
else:
return True
def electron_shells_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
'''
Determine if a list of electron shells is a subset of another
If 'subset' is a subset of the 'superset', True is returned.
The shells are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
for item1 in subset:
for item2 in superset:
if compare_electron_shells(item1, item2, compare_meta, rel_tol):
break
else:
return False
return True
def electron_shells_are_equal(shells1, shells2, compare_meta=False, rel_tol=0.0):
'''
Determine if a list of electron shells is the same as another
The shells are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
if len(shells1) != len(shells2):
return False
# Lists are equal if each is a subset of the other
# Slow but effective
return electron_shells_are_subset(shells1, shells2, compare_meta, rel_tol) and electron_shells_are_subset(
shells2, shells1, compare_meta, rel_tol)
def compare_ecp_pots(potential1, potential2, compare_meta=False, rel_tol=0.0):
'''
Compare two ecp potentials for approximate equality
(exponents/coefficients are within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
if potential1['angular_momentum'] != potential2['angular_momentum']:
return False
rexponents1 = potential1['r_exponents']
rexponents2 = potential2['r_exponents']
gexponents1 = potential1['gaussian_exponents']
gexponents2 = potential2['gaussian_exponents']
coefficients1 = potential1['coefficients']
coefficients2 = potential2['coefficients']
# integer comparison
if rexponents1 != rexponents2:
return False
if not _compare_vector(gexponents1, gexponents2, rel_tol):
return False
if not _compare_matrix(coefficients1, coefficients2, rel_tol):
return False
if compare_meta:
if potential1['ecp_type'] != potential2['ecp_type']:
return False
return True
else:
return True
def ecp_pots_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
'''
Determine if a list of ecp potentials is a subset of another
If 'subset' is a subset of the 'superset', True is returned.
The potentials are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
for item1 in subset:
for item2 in superset:
if compare_ecp_pots(item1, item2, compare_meta, rel_tol):
break
else:
return False
return True
def ecp_pots_are_equal(pots1, pots2, compare_meta=False, rel_tol=0.0):
'''
    Determine if a list of ECP potentials is the same as another
The potentials are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
# Lists are equal if each is a subset of the other
# Slow but effective
    return ecp_pots_are_subset(pots1, pots2, compare_meta, rel_tol) and \
        ecp_pots_are_subset(pots2, pots1, compare_meta, rel_tol)
def compare_elements(element1,
element2,
compare_electron_shells_meta=False,
compare_ecp_pots_meta=False,
compare_meta=False,
rel_tol=0.0):
'''
Determine if the basis information for two elements is the same as another
Exponents/coefficients are compared using a tolerance.
Parameters
----------
element1 : dict
Basis information for an element
element2 : dict
Basis information for another element
compare_electron_shells_meta : bool
Compare the metadata of electron shells
compare_ecp_pots_meta : bool
Compare the metadata of ECP potentials
compare_meta : bool
Compare the overall element metadata
rel_tol : float
Maximum relative error that is considered equal
'''
if not _compare_keys(element1, element2, 'electron_shells', electron_shells_are_equal,
compare_electron_shells_meta, rel_tol):
return False
if not _compare_keys(element1, element2, 'ecp_potentials', ecp_pots_are_equal, compare_ecp_pots_meta, rel_tol):
return False
if not _compare_keys(element1, element2, 'ecp_electrons', operator.eq):
return False
if compare_meta:
if not _compare_keys(element1, element2, 'references', operator.eq):
return False
return True
def compare_basis(bs1,
bs2,
compare_electron_shells_meta=False,
compare_ecp_pots_meta=False,
compare_elements_meta=False,
compare_meta=False,
rel_tol=0.0):
'''
Determine if two basis set dictionaries are the same
bs1 : dict
Full basis information
bs2 : dict
Full basis information
compare_electron_shells_meta : bool
Compare the metadata of electron shells
compare_ecp_pots_meta : bool
Compare the metadata of ECP potentials
compare_elements_meta : bool
Compare the overall element metadata
compare_meta: bool
Compare the metadata for the basis set (name, description, etc)
rel_tol : float
Maximum relative error that is considered equal
'''
els1 = sorted(bs1['elements'].keys())
els2 = sorted(bs2['elements'].keys())
if not els1 == els2:
return False
for el in els1:
if not compare_elements(bs1['elements'][el],
bs2['elements'][el],
compare_electron_shells_meta=compare_electron_shells_meta,
compare_ecp_pots_meta=compare_ecp_pots_meta,
compare_meta=compare_elements_meta,
rel_tol=rel_tol):
print("Element failed:", el)
return False
if compare_meta:
for k in ['name', 'family', 'description', 'revision_description', 'role', 'auxiliaries']:
if not _compare_keys(bs1, bs2, k, operator.eq):
return False
return True
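# Illustrative usage sketch (hypothetical shell data, not shipped with
# the package): two shells whose exponents differ by a relative error
# of ~8e-7 compare equal under a loose tolerance and unequal under an
# exact one.
#   shell_a = {'angular_momentum': [0],
#              'function_type': 'gto',
#              'region': '',
#              'exponents': ['13.0100000'],
#              'coefficients': [['0.0196850']]}
#   shell_b = dict(shell_a, exponents=['13.0100100'])
#   compare_electron_shells(shell_a, shell_b, rel_tol=1e-4)  # -> True
#   compare_electron_shells(shell_a, shell_b, rel_tol=0.0)   # -> False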
|
MOLSSI-BSE/basis_set_exchange
|
basis_set_exchange/curate/compare.py
|
Python
|
bsd-3-clause
| 10,431
|
# Copyright (c) 2020 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from blazar.manager import exceptions as manager_exceptions
from blazar.utils.openstack import base
from oslo_log import log as logging
from zunclient import client as zun_client
from zunclient import exceptions as zun_exception
zun_opts = [
cfg.StrOpt(
'zun_api_version',
default='1',
help='Zun API version'),
cfg.StrOpt(
'zun_api_microversion',
default='1.22',
help='Zun API microversion'),
cfg.StrOpt(
'endpoint_override',
help='Zun endpoint URL to use')
]
CONF = cfg.CONF
CONF.register_opts(zun_opts, group='zun')
LOG = logging.getLogger(__name__)
class BlazarZunClient(object):
"""Client class for Zun service."""
def __init__(self, **kwargs):
client_kwargs = base.client_kwargs(**kwargs)
client_kwargs.setdefault('os_zun_api_version',
CONF.zun.zun_api_microversion)
self.zun = zun_client.Client(
CONF.zun.zun_api_version, **client_kwargs)
def __getattr__(self, attr):
return getattr(self.zun, attr)
class ZunClientWrapper(object):
@property
def zun(self):
zun = BlazarZunClient(endpoint_override=CONF.zun.endpoint_override)
return zun
class ZunInventory(BlazarZunClient):
def get_host_details(self, host):
"""Get Zun capabilities of a single host
:param host: UUID or name of zun compute node
:return: Dict of capabilities or raise HostNotFound
"""
try:
host = self.zun.hosts.get(host)
except (zun_exception.NotFound, zun_exception.BadRequest):
host_ids = []
for h in self.zun.hosts.list():
if h.hostname == host:
host_ids.append(h.uuid)
if len(host_ids) == 0:
raise manager_exceptions.HostNotFound(host=host)
elif len(host_ids) > 1:
raise manager_exceptions.MultipleHostsFound(host=host)
else:
host = self.zun.hosts.get(host_ids[0])
return {'id': host.uuid,
'name': host.hostname,
'containers': self.zun.containers.list(host=host.hostname)
}
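# Illustrative usage sketch (the hostname is a placeholder; a working
# deployment needs valid credentials and, optionally,
# ``[zun] endpoint_override`` in the service configuration):
#   inventory = ZunInventory()
#   details = inventory.get_host_details('compute-01')
#   # -> {'id': <uuid>, 'name': 'compute-01', 'containers': [...]}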
|
ChameleonCloud/blazar
|
blazar/utils/openstack/zun.py
|
Python
|
apache-2.0
| 2,836
|
from collections import OrderedDict
from datetime import timedelta
from django import forms
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.forms.utils import pretty_name
from django.utils.itercompat import is_iterable
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from .conf import settings
from .constants import EMPTY_VALUES
from .fields import (
BaseCSVField,
BaseRangeField,
ChoiceField,
DateRangeField,
DateTimeRangeField,
IsoDateTimeField,
IsoDateTimeRangeField,
LookupChoiceField,
ModelChoiceField,
ModelMultipleChoiceField,
MultipleChoiceField,
RangeField,
TimeRangeField
)
from .utils import get_model_field, label_for_filter
__all__ = [
'AllValuesFilter',
'AllValuesMultipleFilter',
'BaseCSVFilter',
'BaseInFilter',
'BaseRangeFilter',
'BooleanFilter',
'CharFilter',
'ChoiceFilter',
'DateFilter',
'DateFromToRangeFilter',
'DateRangeFilter',
'DateTimeFilter',
'DateTimeFromToRangeFilter',
'DurationFilter',
'Filter',
'IsoDateTimeFilter',
'IsoDateTimeFromToRangeFilter',
'LookupChoiceFilter',
'ModelChoiceFilter',
'ModelMultipleChoiceFilter',
'MultipleChoiceFilter',
'NumberFilter',
'NumericRangeFilter',
'OrderingFilter',
'RangeFilter',
'TimeFilter',
'TimeRangeFilter',
'TypedChoiceFilter',
'TypedMultipleChoiceFilter',
'UUIDFilter',
]
class Filter(object):
creation_counter = 0
field_class = forms.Field
def __init__(self, field_name=None, lookup_expr='exact', *, label=None,
method=None, distinct=False, exclude=False, **kwargs):
self.field_name = field_name
self.lookup_expr = lookup_expr
self.label = label
self.method = method
self.distinct = distinct
self.exclude = exclude
self.extra = kwargs
self.extra.setdefault('required', False)
self.creation_counter = Filter.creation_counter
Filter.creation_counter += 1
# TODO: remove assertion in 2.1
assert not isinstance(self.lookup_expr, (type(None), list)), \
"The `lookup_expr` argument no longer accepts `None` or a list of " \
"expressions. Use the `LookupChoiceFilter` instead. See: " \
"https://django-filter.readthedocs.io/en/master/guide/migration.html"
def get_method(self, qs):
"""Return filter method based on whether we're excluding
or simply filtering.
"""
return qs.exclude if self.exclude else qs.filter
def method():
"""
Filter method needs to be lazily resolved, as it may be dependent on
the 'parent' FilterSet.
"""
def fget(self):
return self._method
def fset(self, value):
self._method = value
# clear existing FilterMethod
if isinstance(self.filter, FilterMethod):
del self.filter
# override filter w/ FilterMethod.
if value is not None:
self.filter = FilterMethod(self)
return locals()
method = property(**method())
def label():
def fget(self):
if self._label is None and hasattr(self, 'model'):
self._label = label_for_filter(
self.model, self.field_name, self.lookup_expr, self.exclude
)
return self._label
def fset(self, value):
self._label = value
return locals()
label = property(**label())
@property
def field(self):
if not hasattr(self, '_field'):
field_kwargs = self.extra.copy()
if settings.DISABLE_HELP_TEXT:
field_kwargs.pop('help_text', None)
self._field = self.field_class(label=self.label, **field_kwargs)
return self._field
def filter(self, qs, value):
if value in EMPTY_VALUES:
return qs
if self.distinct:
qs = qs.distinct()
lookup = '%s__%s' % (self.field_name, self.lookup_expr)
qs = self.get_method(qs)(**{lookup: value})
return qs
class CharFilter(Filter):
field_class = forms.CharField
class BooleanFilter(Filter):
field_class = forms.NullBooleanField
class ChoiceFilter(Filter):
field_class = ChoiceField
def __init__(self, *args, **kwargs):
self.null_value = kwargs.get('null_value', settings.NULL_CHOICE_VALUE)
super().__init__(*args, **kwargs)
def filter(self, qs, value):
if value != self.null_value:
return super().filter(qs, value)
qs = self.get_method(qs)(**{'%s__%s' % (self.field_name, self.lookup_expr): None})
return qs.distinct() if self.distinct else qs
class TypedChoiceFilter(Filter):
field_class = forms.TypedChoiceField
class UUIDFilter(Filter):
field_class = forms.UUIDField
class MultipleChoiceFilter(Filter):
"""
    This filter performs an OR (by default) or AND (using conjoined=True)
    query on the selected options.
Advanced usage
--------------
Depending on your application logic, when all or no choices are selected,
filtering may be a no-operation. In this case you may wish to avoid the
filtering overhead, particularly if using a `distinct` call.
You can override `get_filter_predicate` to use a custom filter.
By default it will use the filter's name for the key, and the value will
be the model object - or in case of passing in `to_field_name` the
value of that attribute on the model.
Set `always_filter` to `False` after instantiation to enable the default
`is_noop` test. You can override `is_noop` if you need a different test
for your application.
`distinct` defaults to `True` as to-many relationships will generally
require this.
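    Example (illustrative; ``CATEGORY_CHOICES`` and ``TAG_CHOICES`` are
    placeholder choice lists)::
        class ProductFilter(filters.FilterSet):
            # OR across the selected categories (the default)
            category = filters.MultipleChoiceFilter(choices=CATEGORY_CHOICES)
            # AND across the selected tags
            tags = filters.MultipleChoiceFilter(
                field_name='tags__name',
                choices=TAG_CHOICES,
                conjoined=True,
            )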
"""
field_class = MultipleChoiceField
always_filter = True
def __init__(self, *args, **kwargs):
kwargs.setdefault('distinct', True)
self.conjoined = kwargs.pop('conjoined', False)
self.null_value = kwargs.get('null_value', settings.NULL_CHOICE_VALUE)
super().__init__(*args, **kwargs)
def is_noop(self, qs, value):
"""
Return `True` to short-circuit unnecessary and potentially slow
filtering.
"""
if self.always_filter:
return False
# A reasonable default for being a noop...
if self.extra.get('required') and len(value) == len(self.field.choices):
return True
return False
def filter(self, qs, value):
if not value:
# Even though not a noop, no point filtering if empty.
return qs
if self.is_noop(qs, value):
return qs
if not self.conjoined:
q = Q()
for v in set(value):
if v == self.null_value:
v = None
predicate = self.get_filter_predicate(v)
if self.conjoined:
qs = self.get_method(qs)(**predicate)
else:
q |= Q(**predicate)
if not self.conjoined:
qs = self.get_method(qs)(q)
return qs.distinct() if self.distinct else qs
def get_filter_predicate(self, v):
name = self.field_name
if name and self.lookup_expr != 'exact':
name = LOOKUP_SEP.join([name, self.lookup_expr])
try:
return {name: getattr(v, self.field.to_field_name)}
except (AttributeError, TypeError):
return {name: v}
class TypedMultipleChoiceFilter(MultipleChoiceFilter):
field_class = forms.TypedMultipleChoiceField
class DateFilter(Filter):
field_class = forms.DateField
class DateTimeFilter(Filter):
field_class = forms.DateTimeField
class IsoDateTimeFilter(DateTimeFilter):
"""
Uses IsoDateTimeField to support filtering on ISO 8601 formatted datetimes.
For context see:
* https://code.djangoproject.com/ticket/23448
* https://github.com/tomchristie/django-rest-framework/issues/1338
* https://github.com/alex/django-filter/pull/264
"""
field_class = IsoDateTimeField
class TimeFilter(Filter):
field_class = forms.TimeField
class DurationFilter(Filter):
field_class = forms.DurationField
class QuerySetRequestMixin(object):
"""
Add callable functionality to filters that support the ``queryset``
argument. If the ``queryset`` is callable, then it **must** accept the
``request`` object as a single argument.
This is useful for filtering querysets by properties on the ``request``
object, such as the user.
Example::
def departments(request):
company = request.user.company
return company.department_set.all()
class EmployeeFilter(filters.FilterSet):
department = filters.ModelChoiceFilter(queryset=departments)
...
The above example restricts the set of departments to those in the logged-in
user's associated company.
"""
def __init__(self, *args, **kwargs):
self.queryset = kwargs.get('queryset')
super().__init__(*args, **kwargs)
def get_request(self):
try:
return self.parent.request
except AttributeError:
return None
def get_queryset(self, request):
queryset = self.queryset
if callable(queryset):
return queryset(request)
return queryset
@property
def field(self):
request = self.get_request()
queryset = self.get_queryset(request)
if queryset is not None:
self.extra['queryset'] = queryset
return super().field
class ModelChoiceFilter(QuerySetRequestMixin, ChoiceFilter):
field_class = ModelChoiceField
def __init__(self, *args, **kwargs):
kwargs.setdefault('empty_label', settings.EMPTY_CHOICE_LABEL)
super().__init__(*args, **kwargs)
class ModelMultipleChoiceFilter(QuerySetRequestMixin, MultipleChoiceFilter):
field_class = ModelMultipleChoiceField
class NumberFilter(Filter):
field_class = forms.DecimalField
class NumericRangeFilter(Filter):
field_class = RangeField
def filter(self, qs, value):
if value:
if value.start is not None and value.stop is not None:
value = (value.start, value.stop)
elif value.start is not None:
self.lookup_expr = 'startswith'
value = value.start
elif value.stop is not None:
self.lookup_expr = 'endswith'
value = value.stop
return super().filter(qs, value)
class RangeFilter(Filter):
field_class = RangeField
def filter(self, qs, value):
if value:
if value.start is not None and value.stop is not None:
self.lookup_expr = 'range'
value = (value.start, value.stop)
elif value.start is not None:
self.lookup_expr = 'gte'
value = value.start
elif value.stop is not None:
self.lookup_expr = 'lte'
value = value.stop
return super().filter(qs, value)
def _truncate(dt):
return dt.date()
class DateRangeFilter(ChoiceFilter):
choices = [
('today', _('Today')),
('yesterday', _('Yesterday')),
('week', _('Past 7 days')),
('month', _('This month')),
('year', _('This year')),
]
filters = {
'today': lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
'%s__month' % name: now().month,
'%s__day' % name: now().day
}),
'yesterday': lambda qs, name: qs.filter(**{
'%s__year' % name: (now() - timedelta(days=1)).year,
'%s__month' % name: (now() - timedelta(days=1)).month,
'%s__day' % name: (now() - timedelta(days=1)).day,
}),
'week': lambda qs, name: qs.filter(**{
'%s__gte' % name: _truncate(now() - timedelta(days=7)),
'%s__lt' % name: _truncate(now() + timedelta(days=1)),
}),
'month': lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
'%s__month' % name: now().month
}),
'year': lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
}),
}
def __init__(self, choices=None, filters=None, *args, **kwargs):
if choices is not None:
self.choices = choices
if filters is not None:
self.filters = filters
unique = set([x[0] for x in self.choices]) ^ set(self.filters)
assert not unique, \
"Keys must be present in both 'choices' and 'filters'. Missing keys: " \
"'%s'" % ', '.join(sorted(unique))
# TODO: remove assertion in 2.1
assert not hasattr(self, 'options'), \
"The 'options' attribute has been replaced by 'choices' and 'filters'. " \
"See: https://django-filter.readthedocs.io/en/master/guide/migration.html"
# null choice not relevant
kwargs.setdefault('null_label', None)
super().__init__(choices=self.choices, *args, **kwargs)
def filter(self, qs, value):
if not value:
return qs
assert value in self.filters
qs = self.filters[value](qs, self.field_name)
return qs.distinct() if self.distinct else qs
class DateFromToRangeFilter(RangeFilter):
field_class = DateRangeField
class DateTimeFromToRangeFilter(RangeFilter):
field_class = DateTimeRangeField
class IsoDateTimeFromToRangeFilter(RangeFilter):
field_class = IsoDateTimeRangeField
class TimeRangeFilter(RangeFilter):
field_class = TimeRangeField
class AllValuesFilter(ChoiceFilter):
@property
def field(self):
qs = self.model._default_manager.distinct()
qs = qs.order_by(self.field_name).values_list(self.field_name, flat=True)
self.extra['choices'] = [(o, o) for o in qs]
return super().field
class AllValuesMultipleFilter(MultipleChoiceFilter):
@property
def field(self):
qs = self.model._default_manager.distinct()
qs = qs.order_by(self.field_name).values_list(self.field_name, flat=True)
self.extra['choices'] = [(o, o) for o in qs]
return super().field
class BaseCSVFilter(Filter):
"""
Base class for CSV type filters, such as IN and RANGE.
"""
base_field_class = BaseCSVField
def __init__(self, *args, **kwargs):
kwargs.setdefault('help_text', _('Multiple values may be separated by commas.'))
super().__init__(*args, **kwargs)
class ConcreteCSVField(self.base_field_class, self.field_class):
pass
ConcreteCSVField.__name__ = self._field_class_name(
self.field_class, self.lookup_expr
)
self.field_class = ConcreteCSVField
@classmethod
def _field_class_name(cls, field_class, lookup_expr):
"""
Generate a suitable class name for the concrete field class. This is not
completely reliable, as not all field class names are of the format
<Type>Field.
ex::
BaseCSVFilter._field_class_name(DateTimeField, 'year__in')
returns 'DateTimeYearInField'
"""
# DateTimeField => DateTime
type_name = field_class.__name__
if type_name.endswith('Field'):
type_name = type_name[:-5]
# year__in => YearIn
parts = lookup_expr.split(LOOKUP_SEP)
expression_name = ''.join(p.capitalize() for p in parts)
# DateTimeYearInField
return str('%s%sField' % (type_name, expression_name))
class BaseInFilter(BaseCSVFilter):
def __init__(self, *args, **kwargs):
kwargs.setdefault('lookup_expr', 'in')
super().__init__(*args, **kwargs)
class BaseRangeFilter(BaseCSVFilter):
base_field_class = BaseRangeField
def __init__(self, *args, **kwargs):
kwargs.setdefault('lookup_expr', 'range')
super().__init__(*args, **kwargs)
class LookupChoiceFilter(Filter):
"""
A combined filter that allows users to select the lookup expression from a dropdown.
* ``lookup_choices`` is an optional argument that accepts multiple input
      formats, and is ultimately normalized as the choices used in the lookup
dropdown. See ``.get_lookup_choices()`` for more information.
* ``field_class`` is an optional argument that allows you to set the inner
form field class used to validate the value. Default: ``forms.CharField``
ex::
price = django_filters.LookupChoiceFilter(
field_class=forms.DecimalField,
lookup_choices=[
('exact', 'Equals'),
('gt', 'Greater than'),
('lt', 'Less than'),
]
)
"""
field_class = forms.CharField
outer_class = LookupChoiceField
def __init__(self, field_name=None, lookup_choices=None, field_class=None, **kwargs):
self.empty_label = kwargs.pop('empty_label', settings.EMPTY_CHOICE_LABEL)
super(LookupChoiceFilter, self).__init__(field_name=field_name, **kwargs)
self.lookup_choices = lookup_choices
if field_class is not None:
self.field_class = field_class
@classmethod
def normalize_lookup(cls, lookup):
"""
Normalize the lookup into a tuple of ``(lookup expression, display value)``
If the ``lookup`` is already a tuple, the tuple is not altered.
If the ``lookup`` is a string, a tuple is returned with the lookup
expression used as the basis for the display value.
ex::
>>> LookupChoiceFilter.normalize_lookup(('exact', 'Equals'))
('exact', 'Equals')
>>> LookupChoiceFilter.normalize_lookup('has_key')
('has_key', 'Has key')
"""
if isinstance(lookup, str):
return (lookup, pretty_name(lookup))
return (lookup[0], lookup[1])
def get_lookup_choices(self):
"""
Get the lookup choices in a format suitable for ``django.forms.ChoiceField``.
If the filter is initialized with ``lookup_choices``, this value is normalized
and passed to the underlying ``LookupChoiceField``. If no choices are provided,
they are generated from the corresponding model field's registered lookups.
"""
lookups = self.lookup_choices
if lookups is None:
field = get_model_field(self.model, self.field_name)
lookups = field.get_lookups()
return [self.normalize_lookup(l) for l in lookups]
@property
def field(self):
if not hasattr(self, '_field'):
inner_field = super().field
lookups = self.get_lookup_choices()
self._field = self.outer_class(
inner_field, lookups,
label=self.label,
empty_label=self.empty_label,
required=self.extra['required'],
)
return self._field
def filter(self, qs, lookup):
if not lookup:
return super(LookupChoiceFilter, self).filter(qs, None)
self.lookup_expr = lookup.lookup_expr
return super(LookupChoiceFilter, self).filter(qs, lookup.value)
class OrderingFilter(BaseCSVFilter, ChoiceFilter):
"""
Enable queryset ordering. As an extension of ``ChoiceFilter`` it accepts
two additional arguments that are used to build the ordering choices.
* ``fields`` is a mapping of {model field name: parameter name}. The
parameter names are exposed in the choices and mask/alias the field
names used in the ``order_by()`` call. Similar to field ``choices``,
``fields`` accepts the 'list of two-tuples' syntax that retains order.
``fields`` may also just be an iterable of strings. In this case, the
field names simply double as the exposed parameter names.
* ``field_labels`` is an optional argument that allows you to customize
the display label for the corresponding parameter. It accepts a mapping
of {field name: human readable label}. Keep in mind that the key is the
field name, and not the exposed parameter name.
Additionally, you can just provide your own ``choices`` if you require
explicit control over the exposed options. For example, when you might
want to disable descending sort options.
This filter is also CSV-based, and accepts multiple ordering params. The
default select widget does not enable the use of this, but it is useful
for APIs.
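    ex (field and parameter names here are illustrative)::

        ordering = OrderingFilter(
            fields=(
                ('username', 'account'),
                ('first_name', 'name'),
            ),
            field_labels={
                'username': 'Account name',
            },
        )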
"""
descending_fmt = _('%s (descending)')
def __init__(self, *args, **kwargs):
"""
``fields`` may be either a mapping or an iterable.
``field_labels`` must be a map of field names to display labels
"""
fields = kwargs.pop('fields', {})
fields = self.normalize_fields(fields)
field_labels = kwargs.pop('field_labels', {})
self.param_map = {v: k for k, v in fields.items()}
if 'choices' not in kwargs:
kwargs['choices'] = self.build_choices(fields, field_labels)
kwargs.setdefault('label', _('Ordering'))
kwargs.setdefault('help_text', '')
kwargs.setdefault('null_label', None)
super().__init__(*args, **kwargs)
def get_ordering_value(self, param):
descending = param.startswith('-')
param = param[1:] if descending else param
field_name = self.param_map.get(param, param)
return "-%s" % field_name if descending else field_name
def filter(self, qs, value):
if value in EMPTY_VALUES:
return qs
ordering = [self.get_ordering_value(param) for param in value]
return qs.order_by(*ordering)
@classmethod
def normalize_fields(cls, fields):
"""
Normalize the fields into an ordered map of {field name: param name}
"""
# fields is a mapping, copy into new OrderedDict
if isinstance(fields, dict):
return OrderedDict(fields)
# convert iterable of values => iterable of pairs (field name, param name)
assert is_iterable(fields), \
"'fields' must be an iterable (e.g., a list, tuple, or mapping)."
# fields is an iterable of field names
assert all(isinstance(field, str) or
is_iterable(field) and len(field) == 2 # may need to be wrapped in parens
for field in fields), \
"'fields' must contain strings or (field name, param name) pairs."
return OrderedDict([
(f, f) if isinstance(f, str) else f for f in fields
])
def build_choices(self, fields, labels):
ascending = [
(param, labels.get(field, _(pretty_name(param))))
for field, param in fields.items()
]
descending = [
('-%s' % param, labels.get('-%s' % param, self.descending_fmt % label))
for param, label in ascending
]
# interleave the ascending and descending choices
return [val for pair in zip(ascending, descending) for val in pair]
class FilterMethod(object):
"""
This helper is used to override Filter.filter() when a 'method' argument
is passed. It proxies the call to the actual method on the filter's parent.
"""
def __init__(self, filter_instance):
self.f = filter_instance
def __call__(self, qs, value):
if value in EMPTY_VALUES:
return qs
return self.method(qs, self.f.field_name, value)
@property
def method(self):
"""
Resolve the method on the parent filterset.
"""
instance = self.f
# noop if 'method' is a function
if callable(instance.method):
return instance.method
# otherwise, method is the name of a method on the parent FilterSet.
assert hasattr(instance, 'parent'), \
"Filter '%s' must have a parent FilterSet to find '.%s()'" % \
(instance.field_name, instance.method)
parent = instance.parent
method = getattr(parent, instance.method, None)
assert callable(method), \
"Expected parent FilterSet '%s.%s' to have a '.%s()' method." % \
(parent.__class__.__module__, parent.__class__.__name__, instance.method)
return method
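# Illustrative ``method`` usage on a FilterSet (a sketch; ``FilterSet`` lives
# in ``django_filters.filterset`` and the names below are examples only):
#
#     class ProductFilter(FilterSet):
#         published = BooleanFilter(field_name='published_on', method='filter_published')
#
#         def filter_published(self, queryset, name, value):
#             lookup = '%s__isnull' % name
#             return queryset.filter(**{lookup: not value})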
|
alex/django-filter
|
django_filters/filters.py
|
Python
|
bsd-3-clause
| 24,652
|
from django.contrib.auth.models import User
from django.conf import settings
from timestack import facebook
from timestack.models import *
class FacebookBackend:
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, token=None):
try:
try:
#not first time login, assume not deauth
u = Person.objects.get(access_token=token)
except Person.DoesNotExist:
profile = facebook.GraphAPI(token).get_object("me")
uid = profile['id']
try:
#login first time but face already exist
u = Person.objects.get(uid=uid)
u.access_token=token
#u.user.email=profile['email']
u.user.save()
u.save()
except Person.DoesNotExist:
#login first time and face does not exist
u = Person(uid=uid,access_token=token,profile_url=profile['link'])
user = User(username=uid,first_name=profile['first_name'],last_name=profile['last_name'])
user.set_unusable_password()
user.save()
u.user=user
u.save()
return u.user
except:
return None
#todo handle deauth callback
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
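# Typical wiring in settings.py (an assumption -- the exact backend order
# depends on the project):
#
# AUTHENTICATION_BACKENDS = (
#     'timestack.backends.FacebookBackend',
#     'django.contrib.auth.backends.ModelBackend',
# )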
|
flashycud/timestack
|
timestack/backends.py
|
Python
|
mit
| 1,755
|
from random import randrange
from time import sleep
class Game:
def __init__(self,cash=100):
self.p = Player(cash)
self.dealer = Dealer()
self.beginning = True
self.is_bj = False
def deal(self):
print "Cash:",self.p.cash
self.p.hit()
self.p.hit()
def display(self):
print 'You: {0}'.format(self.p.current_hand.cards)
def ask_bet(self):
while True:
try:
bet_amount = int(raw_input("Enter bet amount: "))
break
except ValueError:
print "Enter a valid amount!"
while bet_amount > self.p.cash:
print "You don't have that much money. Try again."
bet_amount = int(raw_input("Enter bet amount: "))
while bet_amount <= 0:
print "Are you kidding me? (-_-)"
bet_amount = int(raw_input("Enter bet amount: "))
print "Bet amount:",bet_amount
return bet_amount
def ask_action(self):
actions = ['h','s','d','p']
act = raw_input("What will you do? (h: Hit | s: Stand | d: Double down | p: Split): ")
while act not in actions:
print "Invalid response! Try again."
            act = raw_input("What will you do? (h: Hit | s: Stand | d: Double down | p: Split): ")
return act
def is_valid_split(self):
if self.beginning:
if self.p.current_hand.cards[0] == self.p.current_hand.cards[1]:
return True
else:
return False
else:
return False
class Player:
def __init__(self,cash):
self.hand_list = [Hand()]
self.cash = cash
self.is_stand = False
self.current_hand = self.hand_list[0]
self.busted = False
def hit(self):
new_card = Card()
self.current_hand.cards.append(new_card.value)
def stand(self):
self.is_stand = True
def double(self):
self.current_hand.bet *= 2
self.hit()
self.is_stand = True
def split(self):
new_hand = Hand()
new_hand.cards.append(self.current_hand.cards.pop())
self.hand_list.append(new_hand)
def send(self,action):
action_dict = {'h':self.hit,'s':self.stand,'d':self.double,'p':self.split}
action_dict[action]()
def reset(self):
self.is_stand = False
self.busted = False
class Dealer(Player):
def __init__(self):
self.hand_list = [Hand()]
self.is_stand = False
self.current_hand = self.hand_list[0]
self.busted = False
def play(self):
while (self.current_hand.score() < 17):
self.hit()
self.display()
return self.current_hand.score()
def display(self):
print 'Dealer: {0}'.format(self.current_hand.cards)
sleep(1)
class Hand:
def __init__(self):
self.bet = 0
self.cards = []
def score(self):
s = 0
for card in self.cards:
s += card
return s
class Card:
suites = ['Spades','Hearts','Clubs','Diamonds']
names = ['Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten','Jack','Queen','King','Ace']
def __init__(self):
rand_suite = randrange(0,4)
rand_val = randrange(0,13)
self.name = "{0} of {1}".format(Card.names[rand_val],Card.suites[rand_suite])
if rand_val <= 8:
self.value = rand_val + 2
else:
self.value = 10
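# Minimal usage sketch (assumes a surrounding game loop drives these calls):
#
# g = Game(cash=100)
# g.deal()
# g.display()
# g.p.current_hand.bet = g.ask_bet()
# g.p.send(g.ask_action())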
|
jobini/blackjack
|
classes.py
|
Python
|
mit
| 3,464
|
'''
-------------------------------------------------------------------------------
This function simply converts a file to UTF-8 from UTF-16. It's needed for
Solarwinds integration
-------------------------------------------------------------------------------
'''
def conv(filename):
"""Takes a file name string as input, converts to UTF-8"""
target_file = input('What is the name of the customer? \n') + ".csv"
with open(filename, 'rb') as source_file:
with open(target_file, 'w+b') as dest_file:
contents = source_file.read()
dest_file.write(contents.decode('utf-16').encode('utf-8'))
return target_file
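# Example (file name is illustrative):
# conv('export_utf16.csv') # prompts for a customer name, writes '<name>.csv'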
|
admiralspark/NetSpark-Scripts
|
Example_Scripts/Utilities/convencoding.py
|
Python
|
gpl-3.0
| 665
|
# Python module for control of VELMEX stepper motor
from serial import Serial
import time
import sys
import re
def Clear(port):
""" Clear current program from VELMEX memory."""
port.write("C")
def Run(port):
""" Run current program in VELMEX memory."""
port.write("R");
def JogMode(port):
""" Put VELMEX controller offline and leave in jog mode."""
port.write("Q")
def Online(port):
""" Put VELMEX controller online and do not echo commands.."""
port.write("F")
def Index(port,x,y):
""" Instruct controller to individually move motor x by y and Run"""
commandString = "C,"+"I"+str(x)+"M"+str(y)+",R"
port.write(commandString)
def IndexS(port,x,y):
""" Instruct controller to individually move motor x by y and Run, then sleep for 10 seconds"""
commandString = "C,"+"I"+str(x)+"M"+str(y)+",R"
port.write(commandString)
time.sleep(5)
def TestSlip(port,N):
""" Drive motor back and forth across marker source for N interations. Assumes marker is primed BEHIND the drive roller"""
for its in range(N):
print "forward iteration " + str(its)
for i in range(20):
IndexS(port,2,-400)
print "backward iteration " + str(its)
for i in range(20):
IndexS(port,2,400)
def TestSlipDual(port,N):
""" Drive motor back and forth across marker source for N interations. Assumes marker is primed BEHIND the drive roller"""
for its in range(N):
print "forward iteration " + str(its)
for i in range(10):
IndexS(port,1,133)
IndexS(port,2,-400)
print "backward iteration " + str(its)
for i in range(10):
IndexS(port,1,-133)
IndexS(port,2,400)
def TestSlipDualReverse(port,N):
""" Drive motor back and forth across marker source for N interations. Assumes marker is primed BEHIND the drive roller"""
for its in range(N):
print "backward iteration " + str(its)
for i in range(10):
IndexS(port,1,-133)
IndexS(port,2,400)
print "forward iteration " + str(its)
for i in range(10):
IndexS(port,1,133)
IndexS(port,2,-400)
def WaitUntilReady(port):
port.setTimeout(1)
port.write("V")
resp=port.read(100)
while( not( ('^' in resp) or ('R' in resp) ) ):
port.write("V")
resp = port.read(1000)
return resp
def ConvertStep(step):
step = step.lstrip('^XZ,')
step = re.sub('(?<=^[\+\-])0+','',step)
step = step.strip('\r')
if(step=='+' or step=='-' or step==''):
step = 0
else:
try:
step = int(step)
except (ValueError, RuntimeError, TypeError, NameError):
print "Not valid for integer conversion: ", step
return step
def GetXZ(portVXM):
""" Return tuple of X and Z axis positions """
portVXM.flush()
portVXM.write("X ")
posX = portVXM.read(9)
posX = ConvertStep(posX)
portVXM.write("Z ")
posZ = portVXM.read(9)
posZ = ConvertStep(posZ)
return posX, posZ
def TestResponse(port):
""" Make sure '^' is returned."""
commandString = "F"
port.write(commandString)
commandString = "PM3,C,I1M500,I3M-500,I3M500,I1M-500,R"
port.write(commandString)
WaitUntilReady(port)
port.write("R")
resp=WaitUntilReady(port)
count=0
print("starting loop:")
while('^' in resp):
port.write("X")
xpos=port.read(9)
print(xpos)
port.write("R")
time.sleep(5)
resp=WaitUntilReady(port)
count = count+1
print(count)
def Setup(portVXM):
""" Setup VXM, and make sure status 'R' is returned."""
commandString = "F"
portVXM.write(commandString)
commandString = "setMA1M5,setMA3M4 "
#Set Motor 1 Absolute index
#Set program to stop and send 'O' when limit reached
portVXM.write(commandString)
WaitUntilReady(portVXM)
portVXM.write("Q")
def TestDeploy(portVXM,portArd):
commandString = "F"
portVXM.write(commandString)
Xpos,Zpos = GetXZ(portVXM)
commandString = "PM-1,C,SA1M400,SA3M400,SA1M400,SA3M400,LM0,I1M50,P5,I3M-400,P5,L0,R,"
portVXM.write(commandString)
t0 = time.time()
t = time.time()
resp=''
while(not('21,0,1\n\r21,1,2\n\r21,0,0\n\r' in resp) and Xpos < 5500):
resp = portArd.read(1000)
t = time.time()
Xpos,Zpos = GetXZ(portVXM)
if((t-t0)%100==0):
sys.stdout.write('\r')
sys.stdout.write(str(Zpos))
sys.stdout.flush()
if(Xpos >= 5450):
print '\n',"Garage Reel Approaching Limit", '\n'
portVXM.write("D,")
resp = portVXM.read(1000)
resp = (portVXM.read(1000)).rstrip()
portVXM.write("Q,")
Xpos, Zpos = GetXZ(portVXM)
localtime = time.asctime(time.localtime(time.time()))
print 'Source Fully Deployed at (X,Y) : ',Xpos, '\t', Zpos ,'\t', 'at localtime: ', localtime
print '\t', abs(t-t0) , '\t', 'Seconds \r'
portArd.flush()
def TestRetract(portVXM,portArd):
""" Garage source, and make sure '^' is returned."""
commandString = "F"
portVXM.write(commandString)
commandString = "PM-3,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L3,R "
portVXM.write(commandString)
resp='abs'
while( '^' not in resp ):
resp = portVXM.read(1000)
print "Moving to Standard Operation."
commandString = "PM-2,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L0,R "
portVXM.write(commandString)
t0 = time.time()
t = time.time()
portArd.flushInput()
resp='abs'
#while( ('21,1,1' not in resp) and ((t-t0)>30.0)):
while( not('21,0,1\n\r21,1,1\n\r21,0,0\n\r' in resp) ):
resp = portArd.read(10000)
t = time.time()
#print "CONDITION: \t" ,('21,1,1' not in resp),'\t'
portVXM.write("D,")
resp = portVXM.read(1000)
Xpos, Zpos = GetXZ(portVXM)
localtime = time.asctime(time.localtime(time.time()))
print "Source Fully Retracted at (X,Y) : ",Xpos, "\t", Zpos ,"\t", "at localtime: ", localtime
print abs(t-t0) , "\t Seconds \r"
WaitUntilReady(portVXM)
portVXM.write("C,IA1M-0,IA3M-0,R ")
portVXM.write("Q, ")
portArd.flush()
def Stress(portVXM,portArd):
cycles = 0;
while(1):
TestDeploy(portVXM,portArd)
TestRetract(portVXM,portArd)
cycles = cycles + 1
print "Source deployed and retracted" , cycles
|
goett/MJDCalibrationPack
|
GLITCH/GLITCHv1.3.py
|
Python
|
mit
| 6,053
|
############################################################################
# Joshua R. Boverhof, LBNL
# See Copyright for copyright notice!
# $Id: $
###########################################################################
import os, sys, types, inspect
from StringIO import StringIO
# twisted & related imports
from zope.interface import classProvides, implements, Interface
# ZSI imports
from pyremotevbox.ZSI import _get_element_nsuri_name, EvaluateException, ParseException,\
fault, ParsedSoap, SoapWriter
from pyremotevbox.ZSI.twisted.reverse import DataHandler, ReverseHandlerChain,\
HandlerChainInterface
"""
EXAMPLES:
See zsi/samples/WSGI
"""
def soapmethod(requesttypecode, responsetypecode, soapaction='',
operation=None, **kw):
"""@soapmethod
decorator function for soap methods
"""
def _closure(func_cb):
func_cb.root = (requesttypecode.nspname,requesttypecode.pname)
func_cb.action = soapaction
func_cb.requesttypecode = requesttypecode
func_cb.responsetypecode = responsetypecode
func_cb.soapmethod = True
func_cb.operation = None
return func_cb
return _closure
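# Illustrative use of @soapmethod (a sketch; EchoRequestTC/EchoResponseTC are
# placeholder typecodes, not defined in this module):
#
#     class EchoResource:
#         @soapmethod(EchoRequestTC, EchoResponseTC, soapaction='urn:echo')
#         def soap_echo(self, request, response):
#             response.Value = request.Value
#             return request, response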
class SOAPCallbackHandler:
""" ps --> pyobj, pyobj --> sw
class variables:
writerClass -- ElementProxy implementation to use for SoapWriter instances.
"""
classProvides(HandlerChainInterface)
writerClass = None
@classmethod
def processRequest(cls, ps, **kw):
"""invokes callback that should return a (request,response) tuple.
representing the SOAP request and response respectively.
ps -- ParsedSoap instance representing HTTP Body.
request -- twisted.web.server.Request
"""
resource = kw['resource']
request = kw['request']
root = _get_element_nsuri_name(ps.body_root)
for key,method in inspect.getmembers(resource, inspect.ismethod):
if (getattr(method, 'soapmethod', False) and method.root == root):
break
else:
raise RuntimeError, 'Missing soap callback method for root "%s"' %root
try:
req = ps.Parse(method.requesttypecode)
except Exception, ex:
raise
try:
rsp = method.responsetypecode.pyclass()
except Exception, ex:
raise
try:
req,rsp = method(req, rsp)
except Exception, ex:
raise
return rsp
@classmethod
def processResponse(cls, output, **kw):
sw = SoapWriter(outputclass=cls.writerClass)
sw.serialize(output)
return sw
class SOAPHandlerChainFactory:
protocol = ReverseHandlerChain
@classmethod
def newInstance(cls):
return cls.protocol(DataHandler, SOAPCallbackHandler)
class WSGIApplication(dict):
encoding = "UTF-8"
def __call__(self, env, start_response):
"""do dispatching, else process
"""
script = env['SCRIPT_NAME'] # consumed
ipath = os.path.split(env['PATH_INFO'])[1:]
for i in range(1, len(ipath)+1):
path = os.path.join(*ipath[:i])
print "PATH: ", path
application = self.get(path)
if application is not None:
env['SCRIPT_NAME'] = script + path
env['PATH_INFO'] = ''
print "SCRIPT: ", env['SCRIPT_NAME']
return application(env, start_response)
return self._request_cb(env, start_response)
def _request_cb(self, env, start_response):
"""callback method, override
"""
start_response("404 ERROR", [('Content-Type','text/plain')])
        return ['Move along people, there is nothing to see here']
def putChild(self, path, resource):
"""
"""
path = path.split('/')
lp = len(path)
if lp == 0:
raise RuntimeError, 'bad path "%s"' %path
if lp == 1:
self[path[0]] = resource
for i in range(len(path)):
if not path[i]: continue
break
next = self.get(path[i], None)
if next is None:
next = self[path[i]] = WSGIApplication()
next.putChild('/'.join(path[-1:]), resource)
class SOAPApplication(WSGIApplication):
"""
"""
factory = SOAPHandlerChainFactory
def __init__(self, **kw):
dict.__init__(self, **kw)
self.delegate = None
def _request_cb(self, env, start_response):
"""process request,
"""
if env['REQUEST_METHOD'] == 'GET':
return self._handle_GET(env, start_response)
if env['REQUEST_METHOD'] == 'POST':
return self._handle_POST(env, start_response)
start_response("500 ERROR", [('Content-Type','text/plain')])
s = StringIO()
h = env.items(); h.sort()
for k,v in h:
print >>s, k,'=',`v`
return [s.getvalue()]
def _handle_GET(self, env, start_response):
if env['QUERY_STRING'].lower() == 'wsdl':
start_response("200 OK", [('Content-Type','text/plain')])
r = self.delegate or self
return _resourceToWSDL(r)
start_response("404 ERROR", [('Content-Type','text/plain')])
return ['NO RESOURCE FOR GET']
def _handle_POST(self, env, start_response):
"""Dispatch Method called by twisted render, creates a
request/response handler chain.
request -- twisted.web.server.Request
"""
input = env['wsgi.input']
data = input.read( int(env['CONTENT_LENGTH']) )
mimeType = "text/xml"
if self.encoding is not None:
mimeType = 'text/xml; charset="%s"' % self.encoding
request = None
resource = self.delegate or self
chain = self.factory.newInstance()
try:
pyobj = chain.processRequest(data, request=request, resource=resource)
except Exception, ex:
start_response("500 ERROR", [('Content-Type',mimeType)])
return [fault.FaultFromException(ex, False, sys.exc_info()[2]).AsSOAP()]
try:
soap = chain.processResponse(pyobj, request=request, resource=resource)
except Exception, ex:
start_response("500 ERROR", [('Content-Type',mimeType)])
return [fault.FaultFromException(ex, False, sys.exc_info()[2]).AsSOAP()]
start_response("200 OK", [('Content-Type',mimeType)])
return [soap]
def test(app, port=8080, host="localhost"):
"""
"""
from twisted.internet import reactor
from twisted.python import log
from twisted.web2.channel import HTTPFactory
from twisted.web2.server import Site
from twisted.web2.wsgi import WSGIResource
log.startLogging(sys.stdout)
reactor.listenTCP(port,
HTTPFactory( Site(WSGIResource(app)) ),
interface=host,
)
reactor.run()
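# Hypothetical wiring (names are examples only):
#
# root = WSGIApplication()
# root.putChild('echo', SOAPApplication())
# test(root, port=8080)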
def _issoapmethod(f):
return type(f) is types.MethodType and getattr(f, 'soapmethod', False)
def _resourceToWSDL(resource):
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, QName
from pyremotevbox.ZSI.wstools.Namespaces import WSDL
r = resource
methods = filter(_issoapmethod, map(lambda i: getattr(r, i), dir(r)))
tns = ''
#tree = ElementTree()
defs = Element("{%s}definitions" %WSDL.BASE)
defs.attrib['name'] = 'SampleDefs'
defs.attrib['targetNamespace'] = tns
#tree.append(defs)
    porttype = Element("{%s}portType" %WSDL.BASE)
porttype.attrib['name'] = QName("{%s}SamplePortType" %tns)
    binding = Element("{%s}binding" %WSDL.BASE)
defs.append(binding)
binding.attrib['name'] = QName("{%s}SampleBinding" %tns)
binding.attrib['type'] = porttype.get('name')
for m in methods:
m.action
service = Element("{%s}service" %WSDL.BASE)
defs.append(service)
service.attrib['name'] = 'SampleService'
port = Element("{%s}port" %WSDL.BASE)
service.append(port)
port.attrib['name'] = "SamplePort"
port.attrib['binding'] = binding.get('name')
soapaddress = Element("{%s}address" %WSDL.BIND_SOAP)
soapaddress.attrib['location'] = 'http://localhost/bla'
port.append(soapaddress)
return [ElementTree.tostring(defs)]
"""
<?xml version="1.0" encoding="UTF-8"?>
<wsdl:definitions name="Counter" targetNamespace="http://counter.com/bindings" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:porttype="http://counter.com" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/">
<wsdl:import namespace="http://counter.com" location="counter_flattened.wsdl"/>
<wsdl:binding name="CounterPortTypeSOAPBinding" type="porttype:CounterPortType">
<soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/>
<wsdl:operation name="createCounter">
<soap:operation soapAction="http://counter.com/CounterPortType/createCounterRequest"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:definitions name="Counter" targetNamespace="http://counter.com/service"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns:binding="http://counter.com/bindings">
<wsdl:import namespace="http://counter.com/bindings" location="counter_bindings.wsdl"/>
<wsdl:service name="CounterService">
<wsdl:port name="CounterPortTypePort" binding="binding:CounterPortTypeSOAPBinding">
<soap:address location="http://localhost:8080/wsrf/services/"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>
"""
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/twisted/wsgi.py
|
Python
|
apache-2.0
| 9,882
|
# -*- coding: utf-8 -*-
from shoop.xtheme.layout import LayoutCell
from shoop.xtheme.views.forms import (
LayoutCellFormGroup, LayoutCellGeneralInfoForm
)
from shoop_tests.xtheme.utils import plugin_override
def test_pluginless_lcfg():
with plugin_override():
cell = LayoutCell(None)
assert not cell.instantiate_plugin()
lcfg = LayoutCellFormGroup(layout_cell=cell)
assert "plugin" not in lcfg.forms
def test_formless_plugin_in_lcfg():
two_thirds = int(LayoutCellGeneralInfoForm.CELL_FULL_WIDTH * 2 / 3)
with plugin_override():
cell = LayoutCell("inject")
assert cell.instantiate_plugin()
lcfg = LayoutCellFormGroup(data={"general-cell_width": "%d" % two_thirds}, layout_cell=cell)
assert "plugin" not in lcfg.forms
assert lcfg.is_valid()
lcfg.save()
assert cell.sizes["md"] == two_thirds # Something got saved even if the plugin doesn't need config
def test_lcfg():
two_thirds = int(LayoutCellGeneralInfoForm.CELL_FULL_WIDTH * 2 / 3)
with plugin_override():
cell = LayoutCell("text", sizes={"md": two_thirds, "sm": two_thirds})
lcfg = LayoutCellFormGroup(layout_cell=cell)
assert "general" in lcfg.forms
assert "plugin" in lcfg.forms
assert not lcfg.is_valid() # Oh, we must've forgotten the text...
lcfg = LayoutCellFormGroup(data={
"general-cell_width": "%d" % two_thirds,
"plugin-text": "Hello, world!"
}, layout_cell=cell)
assert lcfg.is_valid() # Let's see now!
lcfg.save()
assert cell.sizes["md"] == two_thirds
assert cell.config["text"] == "Hello, world!"
|
taedori81/shoop
|
shoop_tests/xtheme/test_editor_forms.py
|
Python
|
agpl-3.0
| 1,697
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# vim: foldlevel=0
# Copyright (C) 2016, Art SoftWare
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# ********************************************************************
# For any questions, feature requests or bug reports please contact me
# at support@art-software.fr
from sys import path
import os
curdir=os.path.dirname(__file__)
if curdir=="":
curdir=os.getcwd()
curdir=os.path.realpath(curdir)
path.insert(1, curdir)
os.chdir(curdir)
import asyncio
import discord
from util.modules import Modules
import util.cfg
class Bot:
def __init__(self):
self.usernameDB = {}
self.modules = Modules(self)
self.client = discord.Client()
self.hooks = {'MESSAGE':[]}
self.cfg = util.cfg.load("cfg/bot.json")
def start(self):
@self.client.event
async def on_ready():
print('Logged in as {} ({})'.format(self.client.user.name, self.client.user.id))
geek = None
general = None
server = list(self.client.servers)[0]
channelIdCache = {}
def findChannel(name):
for c in server.channels:
if name[0] == "#":
name = name[1:]
if c.name == name:
return c.id
while True:
m = self.modules['modules.talk.ircBridge']
print("Started discord>irc pool")
while True:
message = m.getMessage()
if message != None:
if not message['channel'] in channelIdCache:
channelIdCache[message['channel']] = findChannel(message['channel'])
if message['channel'] in channelIdCache:
if message['author']==self.cfg["nick"]:
await self.client.send_message(server.get_channel(channelIdCache[message['channel']]), '{m}'.format(m=message['message']))
else:
await self.client.send_message(server.get_channel(channelIdCache[message['channel']]), '<{a}> {m}'.format(a=message['author'], m=message['message']))
await asyncio.sleep(0.5)
@self.client.event
async def on_message(message):
for func in self.hooks['MESSAGE']:
print("{}".format(func.__module__ + "." + func.__name__))
await func(message)
with open("credentials.txt", "r") as f:
login = f.readline().replace("\n", "")
password = f.readline().replace("\n", "")
self.modules.loadAll('modules')
self.client.run(login, password)
def getUserName(self, uid):
found = 0
for server in self.client.servers:
clients = {c.id:c for c in server.members}
if uid in clients:
found += 1
if clients[uid].name not in ["", " ", " "]:
self.usernameDB[uid] = clients[uid].name
return clients[uid].name
break
if found == 0 and uid in self.usernameDB:
return self.usernameDB[uid]
return ""
async def send(self, target, message):
self.modules['modules.talk.ircBridge'].poolAdd(target, self.cfg["nick"], message)
await self.client.send_message(target, message)
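# Simple supervisor loop: relaunch the bot whenever discord.py dies with a
# RuntimeError; Ctrl-C exits cleanly.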
while True:
try:
instance = Bot()
instance.start()
except KeyboardInterrupt:
exit(0)
except RuntimeError:
print("Discord.py died, relaunching…")
|
Art-SoftWare/discordBot
|
bot.py
|
Python
|
gpl-3.0
| 4,234
|
# Copyright 2014 Rackspace, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.dhcp import base
class NoneDHCPApi(base.BaseDHCP):
"""No-op DHCP API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
pass
def update_dhcp_opts(self, task, options, vifs=None):
pass
def update_port_address(self, port_id, address, token=None):
pass
def get_ip_addresses(self, task):
return []
|
supermari0/ironic
|
ironic/dhcp/none.py
|
Python
|
apache-2.0
| 1,015
|
import random
from django.conf import settings
def inject_settings(request):
return {
'DEBUG': settings.DEBUG,
'MIN_DONATION': settings.MIN_DONATION
}
info_tips = (
# RECAP
'<a href="http://www.recapthelaw.org" target="_blank">RECAP</a> is our browser extension that saves you money whenever you use PACER.',
'Using the <a href="http://www.recapthelaw.org">RECAP project</a> means never paying for the same PACER document twice.',
# Juriscraper
'CourtListener is powered by <a href="https://github.com/freelawproject/juriscraper" target="_blank">more than 200 screen scrapers</a>.',
# Seal Rookery
'We are collecting <a href="https://github.com/freelawproject/seal-rookery" target="_blank">all of the court seals in the U.S.</a> You can help contribute seals.',
# History
'CourtListener was started in 2009 to create alerts for the Federal Appeals Courts. It has since grown into the <a href="/donate/?referrer=tip">user-supported</a> non-profit Free Law Project.',
# Non-profit
'Free Law Project is a 501(c)(3) non-profit that relies on your support to operate. Please <a href="/donate/?referrer=tip" target="_blank">donate</a> to support this site.',
'CourtListener gets more than two million visits per year, but has a lean staff of only a few developers. Please <a href="/donate/?referrer=tip" target="_blank">donate</a> to support this site.',
'CourtListener is supported by <a href="/donate/?referrer=tip">user donations</a> and small grants. More donations result in less time spent seeking grants and more time adding features.',
'Free Law Project is a member of the <a href="http://www.falm.info/" target="_blank">Free Access to Law Movement</a> and relies heavily on <a href="/donate/?referrer=tip">your donations</a>.',
# Recognition
'Free Law Project\'s founders were <a href="http://freelawproject.org/2014/07/14/free-law-project-co-founders-named-to-fastcase-50-for-2014/" target="_blank">selected as FastCase 50 winners in 2014</a>.',
'Oral Arguments were <a href="http://freelawproject.org/2014/12/04/free-law-project-recognized-in-two-of-top-ten-legal-hacks-of-2014-by-dc-legal-hackers/" target="_blank">selected as a Top Ten Legal Hack of 2014</a>.',
# Open source
'All of code powering CourtListener is <a href="https://github.com/freelawproject/courtlistener" target="_blank">open source</a> and can be copied, shared, and contributed to.',
'We need volunteers to help us with coding, design and legal research. <a href="/contact/" target="_blank">Contact us for more info</a> or check out our <a href="https://trello.com/b/l0qS4yhd/assistance-needed" target="_blank">help wanted board</a> to get started.',
'The current design of CourtListener was <a href="http://freelawproject.org/2014/11/13/check-out-courtlisteners-new-paint-and-features/" target="_blank">created by a volunteer</a>.',
# Neutral Citations
'WestLaw currently has a monopoly on citations. This hinders legal innovation but few courts have adopted <a href="/faq/#explain-neutral-citations">neutral citations</a>.',
# Alerts, RSS & Podcasts, API, Search
'Create alerts for any query to receive an email if the query has new results.',
'There is an <a href="/feeds/">RSS feed</a> for every query so you can easily stay up to date.',
'A podcast is created for every oral argument query that you make.',
'CourtListener has an <a href="/api/">API</a> so anybody can easily use our data.',
'Oral Arguments are available in <a href="http://freelawproject.org/2014/11/09/more-oral-argument-news/">Stitcher Radio</a>.',
    'Search Relevancy on CourtListener is <a href="http://freelawproject.org/2013/11/12/courtlistener-improves-search-results-thanks-to-volunteer-contributor/" target="_blank">powered by the citation network between cases</a>.',
'You can make sophisticated queries using a number of <a href="/search/advanced-techniques/">advanced search features</a>.',
)
def inject_random_tip(request):
return {'TIP': random.choice(info_tips)}
|
shashi792/courtlistener
|
alert/lib/context_processors.py
|
Python
|
agpl-3.0
| 4,085
|
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import api
class commission_line(osv.osv):
@api.cr_uid_context
def _update_bonus(self, cr, uid, salesman_ids, period_ids, context=None):
if not salesman_ids or not period_ids:
return
period_id = None
partner_id = None
team_id = None
team = None
team_obj = self.pool.get("crm.case.section")
commission_line_obj = self.pool.get("commission.line")
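        # Aggregate commissionable turnover (price_sub) per (team, period,
        # partner) so each salesman's total can be matched to a bonus tier.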
cr.execute("SELECT s.id team_id, cl.period_id, cl.partner_id, SUM(cl.price_sub) "
" FROM commission_line cl "
" INNER JOIN res_users u ON u.id = cl.salesman_id "
" INNER JOIN crm_case_section s ON s.id = u.default_section_id "
" WHERE cl.salesman_id IN %s "
" AND cl.period_id IN %s "
" GROUP BY 1,2,3 "
" ORDER BY 1,2,3 "
,(tuple(salesman_ids),tuple(period_ids)))
for row in cr.fetchall():
# grouped by section
if team_id != row[0]:
team_id = row[0]
team = team_obj.browse(cr,uid,team_id,context)
period_id = row[1]
partner_id = row[2]
total = row[3]
#
sale_bonus = team.sales_bonus_id
if not sale_bonus:
continue
#
bonus = None
for bonus_line in sale_bonus.line_ids:
if total >= bonus_line.volume_of_sales:
bonus = bonus_line
else:
break
if not bonus:
line_ids = commission_line_obj.search(cr,uid,[("period_id","=",period_id),
("partner_id","=",partner_id),
("invoiced_id","=",False),
("sales_bonus_line_id","!=",False)])
for cl in commission_line_obj.browse(cr,uid,line_ids,context):
amount = cl.price_sub * (cl.base_commission / 100.0)*-1.0
commission_line_obj.write(cr,uid,[cl.id], {
"total_commission" : cl.base_commission,
"amount" : amount,
"sales_bonus_line_id" : None
},context)
elif bonus:
line_ids = commission_line_obj.search(cr,uid,[("period_id","=",period_id),
("partner_id","=",partner_id),
("invoiced_id","=",False),
'|',
("sales_bonus_line_id","!=",bonus.id),
("sales_bonus_line_id","=",False)])
for cl in commission_line_obj.browse(cr,uid,line_ids,context):
total_commission=cl.base_commission+bonus.bonus
amount = cl.price_sub * (total_commission / 100.0)*-1.0
commission_line_obj.write(cr,uid,[cl.id], {
"total_commission" : total_commission,
"amount" : amount,
"sales_bonus_line_id" : bonus.id
},context)
return True
@api.cr_uid_context
def _validate_sale_commission(self, cr, uid, values, obj=None, company=None, context=None):
return values
@api.cr_uid_context
def _get_sale_commission(self, cr, uid, name, user, customer, product, qty, netto, date, pricelist=None, defaults=None, obj=None, company=None, period=None, commission_custom=None, context=None):
res = []
# exclude delivery cost
delivery_cost = product.delivery_cost_co
if delivery_cost:
netto -= (qty*delivery_cost)
period_obj = self.pool["account.period"]
pricelist_obj = self.pool.get("product.pricelist")
pricelist_item_obj = self.pool.get("product.pricelist.item")
rule_obj = self.pool.get("commission_sale.rule")
team = user.default_section_id
partner = user.partner_id
#check partner and team
if not partner or not team:
return res
prov_prod = None
percent = 0.0
        # determine percent if no custom commission was passed
if commission_custom is None:
percent = product.commission_percent
if not percent:
percent = product.categ_id.commission_percent
if not percent:
percent = team.sales_commission
# provision product
prov_prod = product.commission_prod_id
if not prov_prod:
prov_prod = product.categ_id.commission_prod_id
# search for rule
rule = rule_obj._get_rule(cr, uid, team, product, context=context)
if rule:
percent = rule.get("commission",0.0) or 0.0
elif pricelist:
# search for pricelist rule
item_id = pricelist_obj.price_rule_get(cr, uid, [pricelist.id], product.id, qty,
partner=customer.id,context=context)[pricelist.id][1]
if item_id:
prule = pricelist_item_obj.read(cr, uid, item_id, ["commission_active","commission"], context=context)
if prule.get("commission_active"):
percent = prule.get("commission",0.0) or 0.0
# otherwise use custom commission
else:
percent = commission_custom
if percent:
factor = (percent / 100.0)*-1
period_id = period and period.id or None
if not period_id:
period_id = period_obj.find(cr, uid, dt=date, context=context)[0]
commission_product = team.property_commission_product
journal = team.property_analytic_journal
entry = {}
if defaults:
entry.update(defaults)
entry.update({
"date": date,
"name": _("Sales Commission: %s") % self._short_name(name),
"unit_amount": qty,
"amount": netto*factor,
"base_commission" : percent,
"total_commission" : percent,
"product_id": commission_product.id,
"product_uom_id": commission_product.uom_id.id,
"general_account_id": commission_product.account_income_standard_id.id,
"journal_id": journal.id,
"partner_id" : partner.id,
"user_id" : uid,
"period_id" : period_id,
"price_sub" : netto,
"salesman_id" : user.id,
"sale_partner_id" : customer.id,
"sale_product_id" : product.id
})
entry = self._validate_sale_commission(cr, uid, entry, obj=obj, company=company, context=context)
if entry:
res.append(entry)
if prov_prod:
period_id = period and period.id or None
if not period_id:
period_id = period_obj.find(cr, uid, dt=date, context=context)[0]
journal = team.property_analytic_journal
pricelist = partner.property_product_pricelist
price = prov_prod.lst_price
if pricelist:
price = pricelist_obj.price_get(cr, uid, [pricelist.id], prov_prod.id, qty, partner=partner, context=context)[pricelist.id]
# amount with correct sign
amount = price*qty*-1
percent = 0.0
if amount:
percent = abs((100.0/netto)*amount)
            # if customer refund then flip the sign
if netto < 0:
amount *= -1
entry = {}
if defaults:
entry.update(defaults)
entry.update({
"date": date,
"name": _("Sales Commission: %s") % self._short_name(name),
"unit_amount": qty,
"amount": amount,
"base_commission" : percent,
"total_commission" : percent,
"product_id": prov_prod.id,
"product_uom_id": prov_prod.uom_id.id,
"general_account_id": prov_prod.account_income_standard_id.id,
"journal_id": journal.id,
"partner_id" : partner.id,
"user_id" : uid,
"period_id" : period_id,
"price_sub" : netto,
"salesman_id" : user.id,
"sale_partner_id" : customer.id,
"sale_product_id" : product.id,
"val_based" : True
})
entry = self._validate_sale_commission(cr, uid, entry, obj=obj, company=company, context=context)
if entry:
res.append(entry)
return res
_columns = {
"salesman_id" : fields.many2one("res.users","Salesman",ondelete="restrict"),
"sales_bonus_line_id" : fields.many2one("commission_sale.bonus_line","Sales Bonus",ondelete="restrict")
}
_inherit = "commission.line"
|
funkring/fdoo
|
addons-funkring/commission_sale/commission.py
|
Python
|
agpl-3.0
| 11,057
|
import sys
import getopt
import random
import numpy as np
import utils as utils
import scipy.stats as stats
def expectation(K, means, points, stddev):
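    # E step: for each point, compute the responsibility of each of the K
    # components, i.e. the posterior p(component j | point i) under equal priors.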
points_size = len(points)
expectations = np.zeros((points_size, K))
for i in range(points_size):
total = 0
current_point = points[i]
for j in range(K):
total += stats.norm(means[j], stddev).pdf(current_point)
for j in range(K):
expectations[i][j] = stats.norm(means[j], stddev).pdf(current_point) / total
return expectations
def maximization(K, expectations, points):
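    # M step: update each mean as the responsibility-weighted average of the points.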
points_size = len(points)
means = []
for j in range(K):
m_step_numerator = 0
m_step_denominator = 0
for i in range(points_size):
m_step_numerator += expectations[i][j] * points[i]
m_step_denominator += expectations[i][j]
means.append(m_step_numerator / m_step_denominator)
return means
def q_function(K, stddev, points, centroids, expectations):
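    # Expected complete-data log-likelihood (up to additive constants) for a
    # mixture of K equal-weight Gaussians sharing a fixed standard deviation.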
q = 0.0
for i in range(len(points)):
q += -np.log(K) + np.log(1.0 / np.sqrt(2 * np.pi * (stddev ** 2)))
for j in range(K):
q += -1.0 / (2 * stddev ** 2) * expectations[i][j] * ((points[i] - centroids[j]) ** 2)
return q
def expectation_maximization(points, K, stddev, means, threshold):
old_means = np.zeros(means.shape)
expectations = None
clusters = None
while not utils.convergence(means, old_means, threshold):
old_means = means
# the E step
expectations = expectation(K, means, points, stddev)
# the M step
means = np.array(maximization(K, expectations, points))
clusters = assign_points_to_clusters(points, expectations, K)
return means, expectations, clusters # returns a tuple of them
def assign_points_to_clusters(points, expectations, K):
clusters = {}
for i in range(K):
clusters[i] = []
for i in range(len(points)):
best_cluster_key = max([(j, expectations[i][j]) for j in range(K)], key = lambda t: t[1])[0]
clusters[best_cluster_key].append(points[i])
return clusters
error_msg = 'em1d.py -i <inputfile> -k <number of clusters> -m <comma-separated initial K means values> ' \
'-s <stddev> -t <threshold> -o <outputfile>'
try:
opts, args = getopt.getopt(sys.argv[1:], "i:k:m:s:t:o:", ["inputfile=", "means=", "stddev=", "threshold=", "outputfile="])
except getopt.GetoptError:
print error_msg
sys.exit(2)
input_filename = None
K = 0
means = None
stddev = None
threshold = None
output_filename = None
for opt, arg in opts:
if opt in ('-i', '--inputfile'):
input_filename = arg
elif opt in ('-o', '--outputfile'):
output_filename = arg
elif opt == '-k':
K = int(arg)
elif opt in ('-s', '--stddev'):
stddev = float(arg)
elif opt in ('-t', '--threshold'):
threshold = float(arg)
elif opt in ('-m', '--means'):
means_string = arg.split(',')
means = np.array([float(m) for m in means_string])
if input_filename is None or K == 0:
print error_msg
sys.exit(2)
if threshold is None:
threshold = 0.01
if output_filename is None:
output_filename = "em.out"
output_file = open(output_filename, 'w')
input_points = utils.read_points(input_filename)
if stddev is None:
stddev = np.std(input_points)
if means is None:
means = np.array(random.sample(input_points, K))
# writing the standard deviation to file
output_file.write(str(stddev))
output_file.write('\n')
centroids, expectations, clusters = expectation_maximization(input_points, K, stddev, means, threshold)
print "centroids:\n {} \n expectations:\n {}".format(centroids, expectations)
# outputting q function to file
output_file.write(str(q_function(K, stddev, input_points, centroids, expectations)))
output_file.write('\n')
# outputting centroids to file
utils.print_array_to_file(centroids, output_file)
# outputting expectations to file
utils.print_matrix_to_file(expectations, output_file)
output_file.close()
utils.plot_clusters_1d(centroids, clusters)
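# Example invocation (file names and parameter values are illustrative):
# python em1d.py -i points.txt -k 2 -m 0.5,5.0 -s 1.0 -t 0.001 -o em.out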
|
anamariad/ML
|
Clusterization/clusterization/em1d.py
|
Python
|
apache-2.0
| 4,140
|
__author__ = 'bromix'
from resources.lib import nightcrawler
from resources.lib import content
nightcrawler.run(content.Provider())
|
azumimuo/family-xbmc-addon
|
plugin.audio.soundcloud/addon.py
|
Python
|
gpl-2.0
| 134
|
# package
from pulpy.tests.integration.base import IntegrationTestBase
from pulpy.tests.integration.base import _initTestingDB
from pulpy.tests.integration.basic import IntegrationBasicViews
from pulpy.tests.integration.auth import IntegrationAuthViews
from pulpy.tests.integration.note import IntegrationNoteViews
from pulpy.tests.integration.account import IntegrationAccountViews
|
plastboks/Pulpy
|
pulpy/tests/integration/__init__.py
|
Python
|
mit
| 385
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from werkzeug import urls
from odoo import _, api, models
from odoo.exceptions import ValidationError
from odoo.tools.float_utils import float_compare
from odoo.addons.payment_alipay.controllers.main import AlipayController
_logger = logging.getLogger(__name__)
class PaymentTransaction(models.Model):
_inherit = 'payment.transaction'
def _get_specific_rendering_values(self, processing_values):
""" Override of payment to return Alipay-specific rendering values.
Note: self.ensure_one() from `_get_processing_values`
:param dict processing_values: The generic and specific processing values of the transaction
:return: The dict of acquirer-specific processing values
:rtype: dict
"""
res = super()._get_specific_rendering_values(processing_values)
if self.provider != 'alipay':
return res
base_url = self.acquirer_id.get_base_url()
if self.fees:
# Similarly to what is done in `payment::payment.transaction.create`, we need to round
# the sum of the amount and of the fees to avoid inconsistent string representations.
# E.g., str(1111.11 + 7.09) == '1118.1999999999998'
total_fee = self.currency_id.round(self.amount + self.fees)
else:
total_fee = self.amount
rendering_values = {
'_input_charset': 'utf-8',
'notify_url': urls.url_join(base_url, AlipayController._notify_url),
'out_trade_no': self.reference,
'partner': self.acquirer_id.alipay_merchant_partner_id,
'return_url': urls.url_join(base_url, AlipayController._return_url),
'subject': self.reference,
'total_fee': total_fee,
}
if self.acquirer_id.alipay_payment_method == 'standard_checkout':
# https://global.alipay.com/docs/ac/global/create_forex_trade
rendering_values.update({
'service': 'create_forex_trade',
'product_code': 'NEW_OVERSEAS_SELLER',
'currency': self.currency_id.name,
})
else:
rendering_values.update({
'service': 'create_direct_pay_by_user',
'payment_type': 1,
'seller_email': self.acquirer_id.alipay_seller_email,
})
sign = self.acquirer_id._alipay_build_sign(rendering_values)
rendering_values.update({
'sign_type': 'MD5',
'sign': sign,
'api_url': self.acquirer_id._alipay_get_api_url(),
})
return rendering_values
@api.model
def _get_tx_from_feedback_data(self, provider, data):
""" Override of payment to find the transaction based on Alipay data.
:param str provider: The provider of the acquirer that handled the transaction
:param dict data: The feedback data sent by the provider
:return: The transaction if found
:rtype: recordset of `payment.transaction`
:raise: ValidationError if inconsistent data were received
:raise: ValidationError if the data match no transaction
"""
tx = super()._get_tx_from_feedback_data(provider, data)
if provider != 'alipay':
return tx
reference = data.get('reference') or data.get('out_trade_no')
txn_id = data.get('trade_no')
if not reference or not txn_id:
raise ValidationError(
"Alipay: " + _(
"Received data with missing reference %(r)s or txn_id %(t)s.",
r=reference, t=txn_id
)
)
tx = self.search([('reference', '=', reference), ('provider', '=', 'alipay')])
if not tx:
raise ValidationError(
"Alipay: " + _("No transaction found matching reference %s.", reference)
)
# Verify signature (done here because we need the reference to get the acquirer)
sign_check = tx.acquirer_id._alipay_build_sign(data)
sign = data.get('sign')
if sign != sign_check:
raise ValidationError(
"Alipay: " + _(
"Expected signature %(sc) but received %(sign)s.", sc=sign_check, sign=sign
)
)
return tx
def _process_feedback_data(self, data):
""" Override of payment to process the transaction based on Alipay data.
Note: self.ensure_one()
:param dict data: The feedback data sent by the provider
:return: None
:raise: ValidationError if inconsistent data were received
"""
super()._process_feedback_data(data)
if self.provider != 'alipay':
return
if float_compare(float(data.get('total_fee', '0.0')), (self.amount + self.fees), 2) != 0:
# mc_gross is amount + fees
_logger.error(
"the paid amount (%(amount)s) does not match the total + fees (%(total)s + "
"%(fees)s) for transaction with reference %(ref)s",
{
'amount': data.get('total_fee', '0.0'),
'total': self.amount,
'fees': self.fees,
'ref': self.reference,
}
)
raise ValidationError("Alipay: " + _("The amount does not match the total + fees."))
if self.acquirer_id.alipay_payment_method == 'standard_checkout':
if data.get('currency') != self.currency_id.name:
raise ValidationError(
"Alipay: " + _(
"The currency returned by Alipay %(rc)s does not match the transaction "
"currency %(tc)s.", rc=data.get('currency'), tc=self.currency_id.name
)
)
elif data.get('seller_email') != self.acquirer_id.alipay_seller_email:
_logger.error(
"the seller email (%(email)s) does not match the configured Alipay account "
"(%(acc_email)s) for transaction with reference %(ref)s",
{
'email': data.get('seller_email'),
                    'acc_email': self.acquirer_id.alipay_seller_email,
'ref': self.reference,
},
)
raise ValidationError(
"Alipay: " + _("The seller email does not match the configured Alipay account.")
)
self.acquirer_reference = data.get('trade_no')
status = data.get('trade_status')
if status in ['TRADE_FINISHED', 'TRADE_SUCCESS']:
self._set_done()
elif status == 'TRADE_CLOSED':
self._set_canceled()
else:
_logger.info(
"received data with invalid payment status (%s) for transaction with reference %s",
status, self.reference,
)
self._set_error("Alipay: " + _("received invalid transaction status: %s", status))
|
jeremiahyan/odoo
|
addons/payment_alipay/models/payment_transaction.py
|
Python
|
gpl-3.0
| 7,132
|
# flake8: noqa
from __future__ import absolute_import
# This will make sure the celery app is always imported when
# Django starts so that tasks can use this celery app.
# Without this Django wouldn't know which celery app to use.
# See http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
from .celery import app as celery_app
|
aksh1/wagtail-cookiecutter-foundation
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/__init__.py
|
Python
|
mit
| 352
|
import sys,numpy,matplotlib
import matplotlib.pyplot, scipy.stats
import library
def colorDefiner(epoch):
if epoch == '0':
theColor='blue'
elif epoch == '0.5':
theColor='red'
elif epoch == '1':
theColor='green'
elif epoch == '1.5':
theColor='orange'
else:
print 'error from colorDefiner. exiting...'
sys.exit()
return theColor
def dataGrapherEpochs(dataStructure,figureLabel):
resolution=1000
figureFile='results/figure_%s.pdf'%figureLabel
for epochLabel in dataStructure:
epoch=epochLabel.split('_')[0]
localTime=numpy.array(dataStructure[epochLabel][0])
shiftedTime=localTime-min(localTime)
localCells=dataStructure[epochLabel][1]
highResolutionTime=numpy.linspace(min(shiftedTime),max(shiftedTime),resolution)
epochColor=colorDefiner(epoch)
# plotting the data
if len(localCells) > 1:
matplotlib.pyplot.plot(localTime,localCells,'o',color=epochColor,markeredgecolor='None',ms=4)
# plotting the model if there is growth, otherwise plot a best model straight line
if len(localCells) <= 2:
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[localCells[0],localCells[-1]],'-',color=epochColor)
elif localCells[0] > localCells[-1]:
slope, intercept, temp0, temp1, temp2 = scipy.stats.linregress(shiftedTime,localCells)
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[intercept,slope*shiftedTime[-1]+intercept],'-',color=epochColor)
else:
fittedTrajectory=library.dataFitter(shiftedTime,localCells)
b=library.peval(highResolutionTime,fittedTrajectory[0])
matplotlib.pyplot.plot(highResolutionTime+min(localTime),b,'-',color=epochColor)
matplotlib.pyplot.xlim([-0.5,20])
matplotlib.pyplot.ylim([-0.5e5,18e5])
matplotlib.pyplot.xlabel('time (days)')
matplotlib.pyplot.ylabel('number of cells (x 1e5)')
matplotlib.pyplot.title('%s ppm'%figureLabel)
matplotlib.pyplot.yticks((0,2e5,4e5,6e5,8e5,10e5,12e5,14e5,16e5,18e5),('0','2','4','6','8','10','12','14','16','18'))
matplotlib.pyplot.savefig(figureFile)
matplotlib.pyplot.clf()
return None
### MAIN
# 1. data reading
data300=library.dataReader('data/300ppmSetsLight.v2.txt')
data1000=library.dataReader('data/1000ppmSetsLight.v2.txt')
# 2. fitting the data to sigmoidal function
print 'fitting data for 300 ppm...'
dataGrapherEpochs(data300,'300')
print
print 'fitting data for 1000 ppm...'
dataGrapherEpochs(data1000,'1000')
print '... graphs completed.'
|
adelomana/viridis
|
growthAnalysis/epochGrapher.py
|
Python
|
gpl-2.0
| 2,642
|
from .base import YarhBase
class DTD(YarhBase):
def __init__(self, content, **kwargs):
super().__init__(None)
self.content = content
def html(self):
return "<!DOCTYPE %s>\n" % self.content
def yarh(self):
for k, v in doctypes.items():
if self.content == v.content:
return "!%s\n" % k
return "!%s\n" % self.content
html4strict = DTD('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"')
html4transitional = DTD('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"')
html4frameset = DTD('HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd"')
xhtml11 = DTD('html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"')
html5 = DTD("html")
doctypes = {
"strict": html4strict,
"transitional": html4transitional,
"frameset": html4frameset,
"xhtml": xhtml11,
"html": html5,
}
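# Quick sketch of the two render paths (outputs shown as comments):
# doctypes["html"].html()   # -> '<!DOCTYPE html>\n'
# doctypes["xhtml"].yarh()  # -> '!xhtml\n'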
|
minacle/yarh
|
yarh/dtd.py
|
Python
|
bsd-2-clause
| 1,012
|