repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
qgis/QGIS-Django | qgis-app/layerdefinitions/admin.py | Python | gpl-2.0 | 618 | 0 | from django.contrib import admin
from layerdefinitions.models import LayerDefinition, Review
class LayerDefinitionInline(admin.TabularInline):
model = Review
list_display = ('review_date', 'comment', 'reviewer')
@admin.register(LayerDefinition)
class LayerDefinitionAdmin(admin.ModelAdmin):
inlines = [L | ayerDefinitionInline | , ]
list_display = ('name', 'description', 'creator', 'upload_date',)
search_fields = ('name', 'description', 'provider')
@admin.register(Review)
class LayerDefinitionReviewAdmin(admin.ModelAdmin):
list_display = ('resource', 'reviewer', 'comment', 'review_date',)
|
hexforge/pulp_db | src/pulp_db/kds/kds_pyapi.py | Python | apache-2.0 | 7,543 | 0.005568 | import os
from cffi import FFI
ffi = FFI()
# This is just a placeholder for the c implementation.
# Some of it is thinking of the c and is not very pythony.
# We are thinkling alot about chunking the data.
# Stored separately to be more continuous. Meta gets queried.
# overall_meta
# meta1
# meta2
# meta3
# meta4
# meta5
# encoded_numbers1 #<--- Might be quite expensive to decode.
# encoded_numbers2 #<--- Continous
# encoded_numbers3
# encoded_numbers4
# encoded_numbers5
#meta_data["min"]
#meta_data["max"]
#meta_data["len"]
#meta_data["start_i"]
# Should never need to worry about Order of expression
# auto optimise gallops symmetrically.
# Say results are [1,2,3,5,......] vs [99999999].
# Just need to merge galop through the longer.
# Data is sorted can therefor do a bit of jump predictability :)
############################################################################################
###############################################################################################
KDS_STRUCTS_CDEF = """
struct keyds
{
char state;
char mode;
char mapping_type;
unsigned long long total_messages;
unsigned long long num_keys;
char *field_name;
char *folder_path;
char *meta_file_path;
char *dpref_file_path;
char *cpref_file_path;
char *ttrie_file_path;
...;
};
struct query_context
{
signed int refs_type;
void *self; //<--- arbitrary data probably cpref
struct keyds *kds_data;
signed long long (*len)(struct query_context*);
signed long long (*geti)(struct query_context*, signed long long);
signed long long (*next)(struct query_context*);
signed long long (*prev)(struct query_context*);
signed long long (*ge)(struct query_context*, signed long long);
signed long long (*le)(struct query_context*, signed long long);
};
"""
KDS_API_CDEF = """
int kds__open(struct keyds *kds, char *folder_path, char *fieldname, char mode);
int kds__close(struct keyds *kds);
int kds__append(struct keyds *kds, void *value, int val_len, unsigned long long msg_num);
int kds__optimize_write(struct keyds *kds);
unsigned long long kds__len(struct keyds *k); // This should be a passthrough to rtrie in w mode, in read mode can just return this. OR should we just pass through as well?
unsigned long long kds__total_refs(struct keyds *k); // This should be a passthrough to dref in w mode. in read mode can just return the value. OR shold we just pass through as well?
int kds__get_key_i(struct keyds *k, unsigned long long i, void *key, int*key_len);
int kds__contains(struct keyds *k, void *key, int key_len);
void kds__setup_query(struct query_context *q, struct keyds *k);
int kds__lookup(struct query_context *q, char *key, int n);
void kds__teardown_query(struct query_context *q);
"""
ffi.cdef(KDS_STRUCTS_CDEF)
ffi.cdef(KDS_API_CDEF)
folder_path = os.path.dirname(__file__)
if not folder_path:
folder_path = os.getcwd()
common_path = os.path.abspath(os.path.join(folder_path, "../common"))
KDS_SO = ffi.verify("""#include "kds.h" """,
libraries=["kds"],
library_dirs=[folder_path],
runtime_library_dirs=[folder_path],
include_dirs=[folder_path, common_path],
extra_compile_args=["-std=c99"])
class KeyTable(object):
def __init__(self, dirname, fieldname, mode, dumper=None, loader=None):
self.dirname = dirname
self.fieldname = fieldname
#self.path = os.path.join(dirname, '{}.db'.format(self.field))
self.mode = mode
foo = ffi.new("struct keyds *k")
self.c_kds_data = ffi.gc(foo, KDS_SO.kds__close)
self.flushed = False
self.dumper = dumper
self.loader = loader
def __enter__(self):
path = self.dirname.encode('ascii')
mode = self.mode.encode('ascii')
fieldname = self.fieldname.encode('ascii')
if self.mode == 'w':
rc = KDS_SO.kds__open(self.c_kds_data, path, fieldname, mode)
assert rc == 0
elif self.mode == 'r':
rc = KDS_SO.kds__open(self.c_kds_data, path, fieldname, mode)
assert rc == 0
else:
raise NotImplementedError("Unknown mode {}".format(self.mode))
return self
def __exit__(self, exc_type, exc_value, traceback):
self.flush()
def flush(self):
if self.flushed == False:
self.flushed = True
if self.mode == 'w':
KDS_SO.kds__optimize_write(self.c_kds_data)
def append(self, key, msg_num):
if self.dumper is not None:
key = self.dumper(key)
assert(isinstance(key, type("".encode('ascii'))))
KDS_SO.kds__append(self.c_kds_data, key, len(key), msg_num);
def __iter__(self):
i = 0
while i<len(self):
#print("yileding a key out of iter")
yield self.getkeyi(i)
i += 1
def __len__(self):
return int(KDS_SO.kds__len(self.c_kds_data))
def __contains__(self, key):
if self.dumper is not None:
key = self.dumper(key)
assert(isinstance(key, type("".encode('ascii'))))
return KDS_SO.kds__contains(self.c_kds_data, key, len(key)) == 1;
def __getitem__(self, key):
#import pdb
#pdb.set_trace()
if self.dumper is not None:
key = self.dumper(key)
assert(isinstance(key, type("".encode('ascii'))))
return Query(self.c_kds_data, key)
def getkeyi(self, i):
raw = ffi.new("unsigned char [256]")
raw_len = ffi.new("int *")
rc = KDS_SO.kds__get_key_i(self.c_kds_data, i, raw, raw_len)
if rc != 0:
raise IndexError("Key at i={} not found".format(i))
key = ffi.buffer(raw, raw_len[0])[:]
if self.loader is not None:
key = self.loader(key)
return key
def keys(self):
return (k for k in self)
class Query(object):
def __init__(self, kds_data, key):
self.key = key
self.c_kds_data = kds_data
self.query_context = None
tmp = ffi.new("struct query_context *")
self.query_context = ffi.gc(tmp, KDS_SO.kds__teardown_query)
KDS_SO.kds__setup_query(self.query_context, self.c_kds_data)
KDS_SO.kds__lookup(self.query_context, self.key, len(self.key))
def __repr__(self):
return "<Query object for key={}>".format(self.key)
def __len__(self):
assert(self.query_context)
return int(self.query_context.len(self.query_context))
def __getitem__(self, i):
assert(self.query_context)
return int(self.query_context.geti(self.query_context, i))
def next(self):
assert(self.query_context)
#print("What am I")
return int(self.query_context.next(self.query_context))
def prev(self):
assert(self.query_context)
return int(self.query_context.prev(self.query_context))
def ge(self, ref):
assert(self.query_context)
return int(self.query_context.ge(self.query_context, | ref))
def le(self, ref):
assert(self.query_context)
return int(self.query_context.le(self.query_context, ref))
# Really this is all just number series.
# Comparisons and joins etc.
# Do do this better than conventional db we ne | ed meta.
# Can do lots here have consitancy.
|
waxkinetic/fabcloudkit | fabcloudkit/tool/nginx.py | Python | bsd-3-clause | 6,961 | 0.003735 | """
fabcloudkit
Functions for managing Nginx.
This module provides functions that check for installation, install, and manage an
installation of, Nginx.
/etc/init.d/nginx:
The "init-script" that allows Nginx to be run automatically at system startup.
The existence of this file is verified, but it's assumed that the script is
installed by the package manager that installed Nginx.
/etc/nginx/nginx.conf:
The main or root Nginx configuration file. This file is loaded by Nginx when
it launches. The file contains an include directive that tells Nginx to
load additional configurations from a different directory.
Currently, this code writes a very basic nginx.conf file.
/etc/nginx/conf.d/:
The directory marked by the include directive in the nginx root configuration
file. Individual server configurations are stored in files in this folder.
/etc/nginx/conf.g/*.conf:
Individual server configuration files.
<deploy_root>/<name>/logs/ngaccess.log, ngerror.log:
Default location of the access (ngaccess.log) and error (ngerror.log) log files
for a specific server configuration. This location can be overridden in the call
to write_server_config().
For more information on Nginx check out: http://nginx.org, http://wiki.nginx.org
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# standard
import posixpath as path
# pypi
from fabric.operations import run, sudo
# package
from fabcloudkit import cfg, put_string
from ..internal import *
from ..toolbase import Tool, SimpleTool
class NginxTool(Tool):
def __init__(self):
super(NginxTool,self).__init__()
self._simple = SimpleTool.create('nginx')
def check(self, **kwargs):
return self._simple.check()
def install(self, **kwargs):
# install Nginx using the package manager.
self._simple.install()
start_msg('----- Configuring "Nginx":')
# verify that there's an init-script.
result = run('test -f /etc/init.d/nginx')
if result.failed:
raise HaltError('Uh oh. Package manager did not install an Nginx init-script.')
# write nginx.conf file.
dest = path.join(cfg().nginx_conf, 'nginx.conf')
message('Writing "nginx.conf"')
put_string(_NGINX_CONF, dest, use_sudo=True)
# the Amazon Linux AMI uses chkconfig; the init.d script won't do the job by itself.
# set Nginx so it can be managed by chkconfig; and turn on boot startup.
result = run('which chkconfig')
if result.succeeded:
message('System has chkconfig; configuring.')
result = sudo('chkconfig --add nginx')
if result.failed:
raise HaltError('"chkconfig --add nginx" failed.')
result = sudo('chkconfig nginx on')
if result.failed:
raise HaltError('"chkconfig nginx on" failed.')
succeed_msg('Successfully installed and configured "Nginx".')
return self
def write_config(self, name, server_names, proxy_pass, static_locations='', log_root=None, listen=80):
"""
Writes an Nginx server configuration file.
This function writes a specific style of configuration, that seems to be somewhat common, where
Nginx is used as a reverse-proxy for a locally-running (e.g., WSGI) server.
:param name: identifies the server name; used to name the configuration file.
:param server_names:
:param proxy_pass: identifies the local proxy to which Nginx will pass requests.
"""
start_msg('----- Writing Nginx server configuration for "{0}":'.format(name))
# be sure the log directory exists.
if log_root is None:
log_root = path.join(cfg().deploy_root, name, 'logs')
result = sudo('mkdir -p {0}'.format(log_root))
if result.failed:
raise HaltError('Unable to create log directory: "{0}"'.format(log_root))
# generate and write the configuration file.
server_config = _NGINX_SERVER_CONF.format(**locals())
dest = path.join(cfg().nginx_include_conf, '{name}.conf'.format(**locals()))
message('Writing to file: "{0}"'.format(dest))
put_string(server_config, dest, use_sudo=True)
succeed_msg('Wrote conf file for "{0}".'.format(name))
return self
def delete_config(self, name):
start_msg('----- Deleting server configuration for "{0}":'.format(name))
# delete the file, but ignore any errors.
config_name = '{name}.conf'.format(**locals())
result = sudo('rm -f {0}'.format(path.join(cfg().nginx_include_conf, config_name)))
if result.failed:
failed_msg('Ignoring failed attempt to delete configuration "{0}"'.format(config_name))
else:
succeed_msg('Successfully deleted configuration "{0}".'.format(config_name))
return self
def reload(self):
start_msg('----- Telling "Nginx" to reload configuration:')
result = sudo('/etc/init.d/nginx reload')
if result.failed:
raise HaltError('"Nginx" configuration reload failed ({0})'.format(result))
succeed_msg('Successfully reloaded.')
return self
# register.
Tool.__tools__['nginx'] = NginxTool
_NGINX_SERVER_CONF = """
server {{
listen {listen};
server_name {server_names};
access_log {log_root}/ngaccess.log;
error_log | {log_ro | ot}/ngerror.log;
location / {{
proxy_pass {proxy_pass};
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
}}
{static_locations}
}}
""".lstrip()
_NGINX_CONF = """
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}
""".lstrip()
|
juswilliams/bioscripts | CpG_by_feature/cpg_gene.py | Python | gpl-3.0 | 3,970 | 0.004282 | #!/usr/bin/env python
'''
Purpose:
This script, using default values, determines and plots the CpG islands in
relation to a given feature "type" (e.g. "gene" or "mRNA") from a GFF file
which corresponds to the user-provided fasta file.
Note:
CpG Islands are determined by ObEx = (Observed CpG) / (Expected CpG) ,
default threshold > 1.
Where Expected CpG = (count(C) * count(G)) / WindowSize
Usage:
python cpg_gene.py FastaFile Gff_File OutFile.png
Default optional parameters:
-s, Step Size, default = 50
-w, Window Size, default = 200
-oe, Minimum Observed Expected CpG, default = 1
-gc, Minimum GC, default = .5
-r Range from ATG, or provided feature, default = 5000
-f, GFF Feature, default = "gene"
-i, Gene ID from GFF, default = ""
'''
import sys
import os
import argparse
from collections import Counter
from Bio import SeqIO
import cpgmod
import gffutils
import pandas as pd
import numpy as np
from ggplot import *
# Capture command line args, with or without defaults
if __name__ == '__main__':
# Parse the arguments
LineArgs = cpgmod.parseArguments()
# Populate vars with args
FastaFile = LineArgs.FastaFile
GffFile = LineArgs.GffFile
OutFile = LineArgs.FileOut
Step = LineArgs.s
WinSize = LineArgs.w
ObExthresh = LineArgs.oe
GCthresh = LineArgs.gc
StartRange = LineArgs.r
FeatGFF = LineArgs.f
ID_Feat = LineArgs.i
# Gather all possible CpG islands
MergedRecs = []
print "Parsing sequences...\n"
for SeqRecord in SeqIO.parse(FastaFile, "fasta"):
print SeqRecord.id
# Determine if sequences and args are acceptable
cpgmod.arg_seqcheck(SeqRecord, WinSize, Step)
# Pre-determine number of islands
NumOfChunks = cpgmod.chunks(SeqRecord, WinSize, Step)
# Return array of SeqRec class | (potential CpG island) instances
SeqRecList = cpgmod.compute(SeqRecord, Step, NumOfChunks, WinSize)
MergedRecs = MergedRecs + SeqRecList
# Create GFF DB
GffDb = gffutils.create_db(GffFile, dbfn='GFF.db', force=True, keep_order=True,
merge_strategy='merge', sort_attribute_values=True,
disable_infer_transcripts=Tr | ue,
disable_infer_genes=True)
print "\nGFF Database Created...\n"
# Filter out SeqRec below threshold
DistArr = []
for Rec in MergedRecs:
Cond1 = Rec.expect() > 0
if Cond1 == True:
ObEx = (Rec.observ() / Rec.expect())
Cond2 = ObEx > ObExthresh
Cond3 = Rec.gc_cont() > GCthresh
if Cond2 and Cond3:
# Query GFF DB for closest gene feature *or provided feature*
Arr = cpgmod.get_closest(Rec, GffDb, StartRange, FeatGFF, ID_Feat)
if Arr <> False:
Arr.append(ObEx)
DistArr.append(Arr)
print "CpG Islands predicted...\n"
print "Generating Figure...\n"
# Releasing SeqRecs
MergedRecs = None
SeqRecList = None
# Pre-check DistArr Results
if len(DistArr) < 2:
print "WARNING, "+ str(len(DistArr)) + " sites were found."
print "Consider changing parameters.\n"
# Generate Figure:
ObExRes = pd.DataFrame({
'gene' : [],
'xval': [],
'yval': []})
try:
Cnt = 0
for Dist in DistArr:
Cnt += 1
print "PROGRESS: "+str(Cnt) +" of "+ str(len(DistArr))
ObExdf = pd.DataFrame({
'gene': [Dist[2]],
'xval': [Dist[1]],
'yval': [Dist[3]]})
ObExFram = [ObExRes, ObExdf]
ObExRes = pd.concat(ObExFram, ignore_index=True)
p = ggplot(aes(x='xval', y='yval'), data=ObExRes) \
+ geom_point() \
+ ylab("Observed/Expected CpG") \
+ xlab("Position (bp) Relative to (ATG = 0)") \
+ ggtitle("Predicted CpG Island Position Relative to ATG")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
print p
# Remove GFF DB
os.remove('GFF.db')
|
EOSIO/eos | tests/restart-scenarios-test.py | Python | mit | 5,013 | 0.011969 | #!/usr/bin/env python3
from testUtils import Utils
from Cluster import Cluster
from WalletMgr import WalletMgr
from TestHelper import TestHelper
import random
###############################################################
# restart-scenarios-test
#
# Tests restart scenarios for nodeos. Uses "-c" flag to indicate "replay" (--replay-blockchain), "resync"
# (--delete-all-blocks), "hardReplay"(--hard-replay-blockchain), and "none" to indicate what kind of restart flag should
# be used. This is one of the only test that actually verify that nodeos terminates with a good exit status.
#
###############################################################
Print=Utils.Print
errorExit=Utils.errorExit
args=TestHelper.parse_args({"-p","-d","-s","-c","--kill-sig","--kill-count","--keep-logs"
,"--dump-error-details","-v","--leave-running","--clean-run"})
pnodes=args.p
topo=args.s
delay=args.d
chainSyncStrategyStr=args.c
debug=args.v
total_nodes = pnodes
killCount=args.kill_count if args.kill_count > 0 else 1
killSignal=args.kill_sig
killEosInstances= not args.leave_running
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
killAll=args.clean_run
seed=1
Utils.Debug=debug
testSuccessful=Fal | se
random.seed(seed) # Use a fixed seed for repeatability.
cluster=Cluster(walletd=True)
walletMgr=WalletMgr(True)
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster | .setChainStrategy(chainSyncStrategyStr)
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
walletMgr.killall(allInstances=killAll)
walletMgr.cleanup()
Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % (
pnodes, topo, delay, chainSyncStrategyStr))
Print("Stand up cluster")
if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False:
errorExit("Failed to stand up eos cluster.")
Print ("Wait for Cluster stabilization")
# wait for cluster to start producing blocks
if not cluster.waitOnClusterBlockNumSync(3):
errorExit("Cluster never stabilized")
Print("Stand up EOS wallet keosd")
accountsCount=total_nodes
walletName="MyWallet"
Print("Creating wallet %s if one doesn't already exist." % walletName)
wallet=walletMgr.create(walletName, [cluster.eosioAccount,cluster.defproduceraAccount,cluster.defproducerbAccount])
Print ("Populate wallet with %d accounts." % (accountsCount))
if not cluster.populateWallet(accountsCount, wallet):
errorExit("Wallet initialization failed.")
defproduceraAccount=cluster.defproduceraAccount
eosioAccount=cluster.eosioAccount
Print("Importing keys for account %s into wallet %s." % (defproduceraAccount.name, wallet.name))
if not walletMgr.importKey(defproduceraAccount, wallet):
errorExit("Failed to import key for account %s" % (defproduceraAccount.name))
Print("Create accounts.")
if not cluster.createAccounts(eosioAccount):
errorExit("Accounts creation failed.")
Print("Wait on cluster sync.")
if not cluster.waitOnClusterSync():
errorExit("Cluster sync wait failed.")
Print("Kill %d cluster node instances." % (killCount))
if cluster.killSomeEosInstances(killCount, killSignal) is False:
errorExit("Failed to kill Eos instances")
Print("nodeos instances killed.")
Print("Spread funds and validate")
if not cluster.spreadFundsAndValidate(10):
errorExit("Failed to spread and validate funds.")
Print("Wait on cluster sync.")
if not cluster.waitOnClusterSync():
errorExit("Cluster sync wait failed.")
Print ("Relaunch dead cluster nodes instances.")
if cluster.relaunchEosInstances(cachePopen=True) is False:
errorExit("Failed to relaunch Eos instances")
Print("nodeos instances relaunched.")
Print ("Resyncing cluster nodes.")
if not cluster.waitOnClusterSync():
errorExit("Cluster never synchronized")
Print ("Cluster synched")
Print("Spread funds and validate")
if not cluster.spreadFundsAndValidate(10):
errorExit("Failed to spread and validate funds.")
Print("Wait on cluster sync.")
if not cluster.waitOnClusterSync():
errorExit("Cluster sync wait failed.")
if killEosInstances:
atLeastOne=False
for node in cluster.getNodes():
if node.popenProc is not None:
atLeastOne=True
node.interruptAndVerifyExitStatus()
assert atLeastOne, "Test is setup to verify that a cleanly interrupted nodeos exits with an exit status of 0, but this test may no longer be setup to do that"
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killEosInstances, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
exit(0)
|
rustychris/freebird | software/freebird/datafile.py | Python | gpl-2.0 | 12,288 | 0.018066 | """
Parse binary output files from freebird logger
"""
import os
import numpy as np
import re
from numpy.lib import recfunctions
import datetime
from matplotlib.dates import date2num
from contextlib import contextmanager
from collections import namedtuple
import array_append
import derived
import netCDF4
def freebird_file_factory(filename):
""" figures out the version of the given file, and returns
an object ready to parse it
"""
return FreebirdFile0001(filename)
class FreebirdFile(object):
""" [will eventually] define the interface for
freebird files
"""
pass
class FreebirdFile0001(FreebirdFile):
block_Nbytes = 512 # matches SD block size
sample_interval_us=2000
def __init__(self,filename):
self.filename=filename
self.fp=None
self.data_block_start=None
self.Block=namedtuple('Block',['header','data','text'])
self.serials={}
# for long running operations, the method can be run in a thread,
# and will attempt to update progress with a fractional estimate of
# it's progress.
self.progress=0.0
self.data=None
@property
def nblocks(self):
return os.stat(self.filename).st_size // self.block_Nbytes
@property
def nbytes(self):
return os.stat(self.filename).st_size
@contextmanager
def opened(self):
| """ Because the data files are often on removeable media, and
keeping a file open might be a headache when trying to remove
the media, extra care is | taken to only have the file open while
it is actively read.
Any access to self.fp should be through the opened context manager,
which can be nested if necessary
"""
if self.fp is None:
self.fp=open(self.filename,'rb')
yield self.fp
self.fp.close()
self.fp=None
else:
yield self.fp
def read_block(self,block_num):
""" bytes for a block
"""
with self.opened() as fp:
return self.open_read_block(block_num)
def open_read_block(self,block_num):
self.fp.seek(block_num*self.block_Nbytes)
buff=self.fp.read(self.block_Nbytes)
if len(buff)!=self.block_Nbytes:
return None
return self.parse_block(buff)
FLAG_TYPE_MASK=1
FLAG_TYPE_DATA=0
FLAG_TYPE_TEXT=1
FLAG_OVERRUN=2
header_dtype=np.dtype([('unixtime','<u4'),
('ticks','<u2'),
('frame_count','u1'),
('flags','u1')])
frame_dtype=None
frame_bytes=None
def parse_block(self,buff):
header_bytes=self.header_dtype.itemsize
header=np.fromstring(buff[:header_bytes],dtype=self.header_dtype)[0]
flag_type=header['flags']&self.FLAG_TYPE_MASK
if flag_type==self.FLAG_TYPE_TEXT:
data=None
#
rest=buff[header_bytes:]
eos=rest.find("\0")
if eos<0:
print "Warning - text block was not properly terminated"
text=rest
else:
text=rest[:eos]
elif flag_type==self.FLAG_TYPE_DATA:
text=None
#
if self.frame_dtype is not None:
frame_count=header['frame_count']
trimmed=buff[header_bytes:header_bytes+frame_count*self.frame_bytes]
data=np.fromstring(trimmed,dtype=self.frame_dtype)
else:
data="frame dtype unavailable"
else:
raise Exception("Not a valid type for block")
return self.Block(header=header,text=text,data=data)
def read_header(self):
with self.opened():
return self.open_read_header()
def open_read_header(self):
blk_i=0
header_texts=[]
for blk_i in range(self.nblocks):
blk=self.read_block(blk_i)
if blk is None:
break
if (blk.header['flags']&self.FLAG_TYPE_MASK)==self.FLAG_TYPE_TEXT:
header_texts.append(blk.text)
else:
self.data_block_start=blk_i
break
blk_i+=1
return "".join(header_texts)
def read_all(self):
""" populate header text, parse that to header data,
process the raw data, and apply any postprocess steps necessary
"""
with self.opened():
self.header_text=self.open_read_header()
self.header_data=self.parse_header(self.header_text)
return self.open_read_data()
@classmethod
def parse_header(klass,header_text):
lines=re.split(r'[\r\n]+',header_text)
kvs=[]
for line in lines:
if line=="":
continue
keyval=re.split(r':\s*',line,maxsplit=1)
if keyval and len(keyval)==2:
key,val = keyval
kvs.append( (key,val) )
else:
print "Skipping line:",line
return dict(kvs)
def read_data(self):
with self.opened():
return self.open_read_data()
def open_read_data(self):
self.frame_format=eval(self.header_data['frame_format'])
self.frame_dtype=np.dtype(self.frame_format)
self.frame_bytes=self.frame_dtype.itemsize
if 'ticks_per_second' in self.header_data:
ticks_per_second=self.header_data['ticks_per_second']
elif 'rtc_timer_freq_hz' in self.header_data:
ticks_per_second=self.header_data['rtc_timer_freq_hz']
else:
ticks_per_second='n/a'
try:
ticks_per_second=float(ticks_per_second)
except ValueError:
ticks_per_second=None
self.frames=[]
# np.datetime64 is less lossy, but dnums have better support in matplotlib
# and readily convertible to matlab (offset of 366, not sure which way)
self.timestamps=[]
self.unixtimes=[] #DBG
self.microsecs=[] #DBG
for blk_i in range(self.data_block_start,self.nblocks):
# 0.9, because the stitching together at the end takes some time
# but doesn't update progress
self.progress=0.9*float(blk_i)/self.nblocks
blk=self.open_read_block(blk_i)
if blk.data is not None:
self.frames.append(blk.data)
else:
print "Skipping a text frame, I think"
hdr=blk.header
unixtime=hdr['unixtime']
microsecs=int( float(hdr['ticks']) * 1e6 / ticks_per_second )
self.unixtimes.append(unixtime)#DBG
self.microsecs.append(microsecs)#DBG
# constructed as a UTC time, though printing often converts to local
# time.
# this will upcast to int64, with plenty of room. note that the
# argument to datetime64 must be an integer, no floating point.
# this works in numpy 1.8, but not 1.7:
# dt64=np.datetime64(1000000*hdr['unixtime'] + microsecs,'us')
# in 1.7, it's pretty picky about casts, but this appears to work:
dt64=np.datetime64('1970-01-01 00:00:00')
dt64=dt64+np.timedelta64(int(hdr['unixtime']),'s')
dt64=dt64+np.timedelta64(microsecs,'us')
self.timestamps.append( dt64 )
basic_data = np.concatenate(self.frames)
data_and_time=self.add_timestamps(basic_data)
self.data=self.add_derived_data(data_and_time)
return self.data
def add_timestamps(self,basic_data):
expanded=[]
dt_us=1000000/float(self.header_data['sample_rate_hz'])
for timestamp,frame in zip(self.timestamps,self.frames):
# make sure timestamp is in microseconds, so we can add more microseconds
expanded.append(timestamp.astype('<M8[us]') + (np.arange(len(frame))*dt_us).astype(np.int64))
full_datetimes=np.concatenate(expanded)
|
eli261/jumpserver | apps/authentication/backends/openid/urls.py | Python | gpl-2.0 | 278 | 0 | # -*- coding: utf-8 -*-
#
from django.urls import | path
from . import views
urlpatterns = [
path( | 'login/', views.OpenIDLoginView.as_view(), name='openid-login'),
path('login/complete/', views.OpenIDLoginCompleteView.as_view(),
name='openid-login-complete'),
]
|
takumak/tuna | src/sessionfilemanager.py | Python | mit | 192 | 0.015625 | import umsgpack
from configfilemanager im | port ConfigFileManagerBase
class SessionFileManager(ConfigFileManagerBase) | :
@classmethod
def convertVersion(cls, obj, version):
return obj
|
open-risk/portfolio_analytics_library | portfolioAnalytics/utils/portfolio.py | Python | gpl-2.0 | 2,867 | 0.004534 | # encoding: utf-8
# (c) 2019 Open Risk (https://www.openriskmanagement.com)
#
# portfolioAnalytics is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of correlationMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" This module provides simple functionality for holding portfolio data for calculation purposes.
* Portfolio_ implements a simple portfolio data container
"""
import numpy as np
class Portfolio(object):
""" The _`Portfolio` object implements a simple portfolio data structure. See `loan tape <https://www.openriskmanual.org/wiki/Loan_Tape>`_ for more general structures.
"""
def __init__(self, psize=0, rating=[], exposure=[], factor=[]):
"""Initialize portfolio.
| :param psize: initialization values
:param rating: list of default proba | bilities
:param exposure: list of exposures (numerical values, e.g. `Exposure At Default <https://www.openriskmanual.org/wiki/Exposure_At_Default>`_
:param factor: list of factor indices (those should match the factors used e.g. in a correlation matrix
:type psize: int
:type rating: list of floats
:type exposure: list of floats
:type factor: list of int
:returns: returns a Portfolio object
:rtype: object
.. note:: The initialization in itself does not validate if the provided values form indeed valid portfolio data
"""
self.psize = psize
self.exposure = exposure
self.rating = rating
self.factor = factor
def loadjson(self, data):
"""Load portfolio data from JSON object.
The data format for the input json object is a list of dictionaries as follows
[{"ID":"1","PD":"0.015","EAD":"40","FACTOR":0},
...
{"ID":"2","PD":"0.286","EAD":"20","FACTOR":0}]
"""
self.psize = len(data)
for x in data:
self.exposure.append(float(x['EAD']))
self.rating.append(float(x['PD']))
self.factor.append(x['FACTOR'])
def preprocess_portfolio(self):
"""
Produce some portfolio statistics like total number of entities and exposure weighted average probability of default
:return:
"""
N = self.psize
Total_Exposure = np.sum(self.exposure)
p = np.inner(self.rating, self.exposure) / Total_Exposure
return N, p
|
NeCTAR-RC/horizon | openstack_dashboard/dashboards/identity/users/tables.py | Python | apache-2.0 | 7,853 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import forms
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
# Indices into the (enable, disable) label tuples returned by
# ToggleEnabled.action_present / action_past below.
ENABLE = 0
DISABLE = 1
# True when the active Keystone API version predates v3.
KEYSTONE_V2_ENABLED = api.keystone.VERSIONS.active < 3
class CreateUserLink(tables.LinkAction):
    """Table action opening the "Create User" modal form."""

    name = "create"
    verbose_name = _("Create User")
    url = "horizon:identity:users:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (
        ("identity", "identity:create_grant"),
        ("identity", "identity:create_user"),
        ("identity", "identity:list_roles"),
        ("identity", "identity:list_projects"),
    )

    def allowed(self, request, user):
        # Only offered when the Keystone backend permits user management.
        return api.keystone.keystone_can_edit_user()
class EditUserLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the modal form for editing a user."""

    name = "edit"
    verbose_name = _("Edit")
    url = "horizon:identity:users:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (
        ("identity", "identity:update_user"),
        ("identity", "identity:list_projects"),
    )
    policy_target_attrs = (
        ("user_id", "id"),
        ("target.user.domain_id", "domain_id"),
    )

    def allowed(self, request, user):
        # Hidden when the Keystone backend is read-only.
        return api.keystone.keystone_can_edit_user()
class ChangePasswordLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the modal dialog for setting a new password."""

    name = "change_password"
    verbose_name = _("Change Password")
    url = "horizon:identity:users:change_password"
    classes = ("ajax-modal",)
    icon = "key"
    policy_rules = (("identity", "identity:update_user"),)
    policy_target_attrs = (
        ("user_id", "id"),
        ("target.user.domain_id", "domain_id"),
    )

    def allowed(self, request, user):
        # Hidden when the Keystone backend is read-only.
        return api.keystone.keystone_can_edit_user()
class ToggleEnabled(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action flipping each selected user between enabled/disabled."""
    name = "toggle"
    @staticmethod
    def action_present(count):
        # Pre-action label pair; index ENABLE/DISABLE is chosen via
        # self.current_present_action, set per-row in allowed().
        return (
            ungettext_lazy(
                u"Enable User",
                u"Enable Users",
                count
            ),
            ungettext_lazy(
                u"Disable User",
                u"Disable Users",
                count
            ),
        )
    @staticmethod
    def action_past(count):
        # Post-action label pair, selected via self.current_past_action.
        return (
            ungettext_lazy(
                u"Enabled User",
                u"Enabled Users",
                count
            ),
            ungettext_lazy(
                u"Disabled User",
                u"Disabled Users",
                count
            ),
        )
    classes = ("btn-toggle",)
    policy_rules = (("identity", "identity:update_user"),)
    policy_target_attrs = (("user_id", "id"),
                           ("target.user.domain_id", "domain_id"))
    def allowed(self, request, user=None):
        # Reject when the backend is read-only or the row is the logged-in
        # user (users cannot toggle their own account).
        if (not api.keystone.keystone_can_edit_user() or
                user.id == request.user.id):
            return False
        # Record the row's current state so action() knows which direction
        # to toggle, and pick the matching button label.
        self.enabled = True
        if not user:
            return self.enabled
        self.enabled = user.enabled
        if self.enabled:
            self.current_present_action = DISABLE
        else:
            self.current_present_action = ENABLE
        return True
    def action(self, request, obj_id):
        # self.enabled was captured for this row by allowed() above.
        if self.enabled:
            api.keystone.user_update_enabled(request, obj_id, False)
            self.current_past_action = DISABLE
        else:
            api.keystone.user_update_enabled(request, obj_id, True)
            self.current_past_action = ENABLE
class DeleteUsersAction(policy.PolicyTargetMixin, tables.DeleteAction):
    """Batch action removing the selected users from Keystone."""

    policy_rules = (("identity", "identity:delete_user"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete User",
            u"Delete Users",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted User",
            u"Deleted Users",
            count
        )

    def allowed(self, request, datum):
        # The backend must be writable, and users may never delete
        # their own account.
        if not api.keystone.keystone_can_edit_user():
            return False
        if datum and datum.id == request.user.id:
            return False
        return True

    def delete(self, request, obj_id):
        api.keystone.user_delete(request, obj_id)
class UserFilterAction(tables.FilterAction):
    """Use a client-side "query" filter on Keystone v2 and a server-side
    filter (by name, id or enabled flag) on Keystone v3."""
    # Consistency: reuse the module-level KEYSTONE_V2_ENABLED constant
    # (also used by UsersTable) instead of re-evaluating the same
    # api.keystone.VERSIONS.active < 3 expression here.
    if KEYSTONE_V2_ENABLED:
        filter_type = "query"
    else:
        filter_type = "server"
        filter_choices = (("name", _("User Name ="), True),
                          ("id", _("User ID ="), True),
                          ("enabled", _("Enabled ="), True, _('e.g. Yes/No')))
class UpdateRow(tables.Row):
    """Table row refreshed asynchronously (AJAX) by user id."""

    ajax = True

    def get_data(self, request, user_id):
        # Look the user up through the admin endpoint.
        return api.keystone.user_get(request, user_id, admin=True)
class UsersTable(tables.DataTable):
    """Table listing Keystone users with edit/password/toggle/delete actions."""
    # Maps the string status values to booleans for the ``enabled`` column.
    STATUS_CHOICES = (
        ("true", True),
        ("false", False)
    )
    name = tables.WrappingColumn('name',
                                 link="horizon:identity:users:detail",
                                 verbose_name=_('User Name'),
                                 form_field=forms.CharField(required=False))
    # Hidden on Keystone v2 (see KEYSTONE_V2_ENABLED above).
    description = tables.Column(lambda obj: getattr(obj, 'description', None),
                                verbose_name=_('Description'),
                                hidden=KEYSTONE_V2_ENABLED,
                                form_field=forms.CharField(
                                    widget=forms.Textarea(attrs={'rows': 4}),
                                    required=False))
    email = tables.Column(lambda obj: getattr(obj, 'email', None),
                          verbose_name=_('Email'),
                          form_field=forms.EmailField(required=False),
                          filters=(lambda v: defaultfilters
                                   .default_if_none(v, ""),
                                   defaultfilters.escape,
                                   defaultfilters.urlize)
                          )
    # Default tenant is not returned from Keystone currently.
    # default_tenant = tables.Column('default_tenant',
    #                                verbose_name=_('Default Project'))
    id = tables.Column('id', verbose_name=_('User ID'),
                       attrs={'data-type': 'uuid'})
    enabled = tables.Column('enabled', verbose_name=_('Enabled'),
                            status=True,
                            status_choices=STATUS_CHOICES,
                            filters=(defaultfilters.yesno,
                                     defaultfilters.capfirst),
                            empty_value="False")
    # Domains only exist from Keystone v3 onwards.
    if api.keystone.VERSIONS.active >= 3:
        domain_name = tables.Column('domain_name',
                                    verbose_name=_('Domain Name'),
                                    attrs={'data-type': 'uuid'})
    class Meta(object):
        name = "users"
        verbose_name = _("Users")
        row_actions = (EditUserLink, ChangePasswordLink, ToggleEnabled,
                       DeleteUsersAction)
        table_actions = (UserFilterAction, CreateUserLink, DeleteUsersAction)
        row_class = UpdateRow
|
benjello/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/plot_legislation/plot_ticpe_accises.py | Python | agpl-3.0 | 865 | 0.003497 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 15:48:31 2015
@author: thomas.douenne
"""
# L'objectif est de décrire l'évolution des montants des accises de la TICPE depuis 1993
# Import de fonctions | spécifiques à Openfisca Indirect Taxation
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_bar_list
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_accises import \
get_accise_ticpe_majoree
# Recherche des paramètres de la législation
liste = ['ticpe_gazole', 'ticpe_super9598', 'super_plombe_ticpe']
df_accises = get_accise_ticpe_majoree()
# Réalisation des graphiques
graph_builder | _bar_list(df_accises['accise majoree sans plomb'], 1, 1)
graph_builder_bar_list(df_accises['accise majoree diesel'], 1, 1)
graph_builder_bar_list(df_accises['accise majoree super plombe'], 1, 1)
|
thauser/pnc-cli | pnc_cli/swagger_client/models/license.py | Python | apache-2.0 | 5,572 | 0.000718 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
def _license_property(attr):
    """Build a plain read/write property backed by the ``_<attr>`` slot."""
    private_name = '_' + attr

    def _get(self):
        return getattr(self, private_name)

    def _set(self, value):
        setattr(self, private_name, value)

    return property(_get, _set, doc="The %s of this License." % attr)


class License(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self):
        """
        License - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Declared Swagger type of every model attribute.
        self.swagger_types = {
            'id': 'int',
            'full_name': 'str',
            'full_content': 'str',
            'ref_url': 'str',
            'short_name': 'str',
            'field_handler': 'FieldHandler'
        }
        # JSON key used for every attribute in the API definition.
        self.attribute_map = {
            'id': 'id',
            'full_name': 'fullName',
            'full_content': 'fullContent',
            'ref_url': 'refUrl',
            'short_name': 'shortName',
            'field_handler': 'fieldHandler'
        }
        # Initialise the private backing fields of the properties below.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)

    # Simple get/set properties, one per Swagger attribute.
    id = _license_property('id')
    full_name = _license_property('full_name')
    full_content = _license_property('full_content')
    ref_url = _license_property('ref_url')
    short_name = _license_property('short_name')
    field_handler = _license_property('field_handler')

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise list elements that are models.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, datetime):
                # datetime values are rendered as "YYYY-MM-DD" strings.
                result[attr] = str(value.date())
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
|
allotria/intellij-community | python/testData/inspections/PyUnresolvedReferencesInspection/PrefixExpressionOnClassHavingSkeletons/numpy/core/multiarray.py | Python | apache-2.0 | 8,049 | 0.001242 | def array(p_object, dtype=None, copy=True, order=None, subok=False, ndmin=0): # real signature unknown; restored from __doc__
"""
array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, fill
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
"""
pass
class ndarray(object):
    """
    ndarray(shape, dtype=float, buffer=None, offset=0,
            strides=None, order=None)
    An array object represents a multidimensional, homogeneous array
    of fixed-size items. An associated data-type object describes the
    format of each element in the array (its byte-order, how many bytes it
    occupies in memory, whether it is an integer, a floating point number,
    or something else, etc.)
    Arrays should be constructed using `array`, `zeros` or `empty` (refer
    to the See Also section below). The parameters given here refer to
    a low-level method (`ndarray(...)`) for instantiating an array.
    For more information, refer to the `numpy` module and examine the
    the methods and attributes of an array.
    Parameters
    ----------
    (for the __new__ method; see Notes below)
    shape : tuple of ints
        Shape of created array.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type.
    buffer : object exposing buffer interface, optional
        Used to fill the array with data.
    offset : int, optional
        Offset of array data in buffer.
    strides : tuple of ints, optional
        Strides of data in memory.
    order : {'C', 'F'}, optional
        Row-major or column-major order.
    Attributes
    ----------
    T : ndarray
        Transpose of the array.
    data : buffer
        The array's elements, in memory.
    dtype : dtype object
        Describes the format of the elements in the array.
    flags : dict
        Dictionary containing information related to memory use, e.g.,
        'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
    flat : numpy.flatiter object
        Flattened version of the array as an iterator. The iterator
        allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
        assignment examples; TODO).
    imag : ndarray
        Imaginary part of the array.
    real : ndarray
        Real part of the array.
    size : int
        Number of elements in the array.
    itemsize : int
        The memory use of each array element in bytes.
    nbytes : int
        The total number of bytes required to store the array data,
        i.e., ``itemsize * size``.
    ndim : int
        The array's number of dimensions.
    shape : tuple of ints
        Shape of the array.
    strides : tuple of ints
        The step-size required to move from one element to the next in
        memory. For example, a contiguous ``(3, 4)`` array of type
        ``int16`` in C-order has strides ``(8, 2)``. This implies that
        to move from element to element in memory requires jumps of 2 bytes.
        To move from row-to-row, one needs to jump 8 bytes at a time
        (``2 * 4``).
    ctypes : ctypes object
        Class containing properties of the array needed for interaction
        with ctypes.
    base : ndarray
        If the array is a view into another array, that array is its `base`
        (unless that array is also a view). The `base` array is where the
        array data is actually stored.
    See Also
    --------
    array : Construct an array.
    zeros : Create an array, each element of which is zero.
    empty : Create an array, but leave its allocated memory unchanged (i.e.,
        it contains "garbage").
    dtype : Create a data-type.
    Notes
    -----
    There are two modes of creating an array using ``__new__``:
    1. If `buffer` is None, then only `shape`, `dtype`, and `order`
       are used.
    2. If `buffer` is an object exposing the buffer interface, then
       all keywords are interpreted.
    No ``__init__`` method is needed because the array is fully initialized
    after the ``__new__`` method.
    Examples
    --------
    These examples illustrate the low-level `ndarray` constructor. Refer
    to the `See Also` section above for easier ways of constructing an
    ndarray.
    First mode, `buffer` is None:
    >>> np.ndarray(shape=(2,2), dtype=float, order='F')
    array([[ -1.13698227e+002, 4.25087011e-303],
           [ 2.88528414e-306, 3.27025015e-309]]) #random
    Second mode:
    >>> np.ndarray((2,), buffer=np.array([1,2,3]),
    ...            offset=np.int_().itemsize,
    ...            dtype=int) # offset = 1*itemsize, i.e. skip first element
    array([2, 3])
    """
    # NOTE(review): this skeleton's `max` is declared without the usual
    # `self` parameter; the stub bodies are intentionally empty.
    def max():
        pass
    def __neg__(self, *args, **kwargs):
        """ -self """
        pass
csningli/MultiAgent | examples/simple_move/simple_move_sim.py | Python | apache-2.0 | 2,835 | 0.017637 |
# MultiAgent
# (c) 2017-2019, NiL, csningli@gmail.com
import sys, random, datetime, math
# Seed the PRNG from the current timestamp so every run differs.
# NOTE(review): seeding with a datetime object is deprecated since
# Python 3.9; an integer seed would be more future-proof.
random.seed(datetime.datetime.now())
# Make the `mas` package importable when run from examples/simple_move/.
sys.path.append("../..")
from mas.multiagent import *
# NOTE(review): AREA_SIZE is not referenced in this file — presumably used
# elsewhere; verify before removing.
AREA_SIZE = 200
# Distance within which the current target counts as reached.
POS_ERROR = 5
# Clamp bounds for the commanded speed (see SimpleMoveModule).
MIN_SPEED = 100
MAX_SPEED = 500
class TargetModule(Module) :
    """Feeds the agent a repeating ring of eight waypoints around the origin."""

    def __init__(self) :
        super(TargetModule, self).__init__()
        self.index = 0
        self.targets = [
            (100, 0), (100, 100),
            (0, 100), (-100, 100),
            (-100, 0), (-100, -100),
            (0, -100), (100, -100),
        ]

    def process(self) :
        position = self.mem.read("pos", None)
        goal = self.mem.read("target", None)
        if position is None:
            return
        # Advance to the next waypoint when there is none yet, or when the
        # current one has been reached (within POS_ERROR).
        if goal is None or ppdist_l2(goal, position) <= POS_ERROR:
            self.mem.reg("target", self.targets[self.index])
            self.index = (self.index + 1) % len(self.targets)
class SimpleMoveModule(ObjectModule) :
    """Steers the object toward the target currently stored in memory."""

    def act(self, resp) :
        goal = self.mem.read("target", None)
        position = self.mem.read("pos", None)
        if goal is not None and position is not None:
            offset = vec2_sub(goal, position)
            # Face the target and move toward it at a clamped speed.
            resp.add_msg(Message(key = "angle", value = vec2_angle(offset)))
            speed = vec2_min_max(vec2_scale(offset, 3), MIN_SPEED, MAX_SPEED)
            resp.add_msg(Message(key = "vel", value = speed))
        super(SimpleMoveModule, self).act(resp)
class SimpleMoveAgent(Agent) :
    """Agent wiring together the movement and target-cycling modules."""
    def __init__(self, name) :
        super(SimpleMoveAgent, self).__init__(name)
        self.mods = [SimpleMoveModule(), TargetModule()]
    def focus(self) :
        """Return the parent's focus info extended with formatted
        "target" and "pos" entries."""
        # NOTE(review): this method is named `focus` but delegates to the
        # parent's `get_focus`; confirm Agent really exposes get_focus().
        focus_info = super(SimpleMoveAgent, self).get_focus()
        target = self.mem.read("target", None)
        if target is not None :
            focus_info["target"] = "(%4.2f, %4.2f)" % (target[0], target[1])
        pos = self.mem.read("pos", None)
        if pos is not None :
            focus_info["pos"] = "(%4.2f, %4.2f)" % (pos[0], pos[1])
        return focus_info
def run_sim(filename = None) :
    '''
    >>> run_sim()
    '''
    # World state is shared through an oracle space wrapped in a context.
    oracle = OracleSpace()
    context = Context(oracle = oracle)
    # The schedule injects agents while the simulation is running.
    schedule = Schedule()
    # Populate the world: one object at the origin, driven by one agent.
    moving_object = Object(name = "0")
    moving_object.pos = (0, 0)
    context.add_obj(moving_object)
    schedule.add_agent(SimpleMoveAgent(name = "0"))
    # The driver binds context and schedule together; an
    # Inspector(delay = 10) could be attached here for debugging.
    driver = Driver(context = context, schedule = schedule)
    sim = Simulator(driver = driver)
    print("Simulating")
    sim.simulate(graphics = True, filename = filename)
if __name__ == '__main__' :
    # Optional first CLI argument is forwarded to Simulator.simulate()
    # as `filename`.
    filename = None
    if (len(sys.argv) > 1) :
        filename = sys.argv[1]
    run_sim(filename = filename)
|
slackhq/python-slackclient | integration_tests/samples/openid_connect/sanic_example.py | Python | mit | 4,583 | 0.002618 | import json
import jwt
import logging
import os
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
# Slack app credentials; the process fails fast (KeyError) when unset.
client_id = os.environ["SLACK_CLIENT_ID"]
client_secret = os.environ["SLACK_CLIENT_SECRET"]
redirect_uri = os.environ["SLACK_REDIRECT_URI"]
# OpenID Connect scopes requested during sign-in.
scopes = ["openid", "email", "profile"]
from slack_sdk.web.async_client import AsyncWebClient
from slack_sdk.oauth import OpenIDConnectAuthorizeUrlGenerator, RedirectUriPageRenderer
from slack_sdk.oauth.state_store import FileOAuthStateStore
# OAuth `state` values persisted on disk, valid for 5 minutes.
state_store = FileOAuthStateStore(expiration_seconds=300)
authorization_url_generator = OpenIDConnectAuthorizeUrlGenerator(
    client_id=client_id,
    scopes=scopes,
    redirect_uri=redirect_uri,
)
# Renders the standard failure pages for the OAuth redirect endpoint.
redirect_page_renderer = RedirectUriPageRenderer(
    install_path="/slack/install",
    redirect_uri_path="/slack/oauth_redirect",
)
# https://sanicframework.org/
from sanic import Sanic
from sanic.request import Request
from sanic.response import HTTPResponse
app = Sanic("my-awesome-slack-app")
@app.get("/slack/install")
async def oauth_start(req: Request):
state = state_store.issue()
url = authorization_url_generator.generate(state)
response_body = (
'<html><head><link rel="icon" href="data:,"></head><body>'
f'<a href="{url}">'
f'<img alt=""Add to Slack"" height="40" width="139" src="https://platform.slack-edge.com/img/add_to_slack.png" srcset="https://platform.slack-edge.com/img/add_to_slack.png 1x, https://platform.slack-edge.com/img/add_to_slack@2x.png 2x" /></a>'
"</body></html>"
)
return HTTPResponse(
status=200,
body=response_body,
)
@app.get("/slack/oauth_redirect")
async def oauth_callback(req: Request):
# Retrieve the auth code and state from the request params
if "code" in req.args:
state = r | eq.args.get("state")
if state_store.consume(state):
| code = req.args.get("code")
try:
token_response = await AsyncWebClient().openid_connect_token(
client_id=client_id, client_secret=client_secret, code=code
)
logger.info(f"openid.connect.token response: {token_response}")
id_token = token_response.get("id_token")
claims = jwt.decode(
id_token, options={"verify_signature": False}, algorithms=["RS256"]
)
logger.info(f"claims (decoded id_token): {claims}")
user_token = token_response.get("access_token")
user_info_response = await AsyncWebClient(
token=user_token
).openid_connect_userInfo()
logger.info(f"openid.connect.userInfo response: {user_info_response}")
html = f"""
<html>
<head>
<style>
body h2 {{
padding: 10px 15px;
font-family: verdana;
text-align: center;
}}
</style>
</head>
<body>
<h2>OpenID Connect Claims</h2>
<pre>{json.dumps(claims, indent=2)}</pre>
<h2>openid.connect.userInfo response</h2>
<pre>{json.dumps(user_info_response.data, indent=2)}</pre>
</body>
</html>
"""
return HTTPResponse(
status=200,
headers={
"Content-Type": "text/html; charset=utf-8",
},
body=html,
)
except Exception:
logger.exception("Failed to perform openid.connect.token API call")
return redirect_page_renderer.render_failure_page(
"Failed to perform openid.connect.token API call"
)
else:
html = redirect_page_renderer.render_failure_page(
"The state value is already expired"
)
return HTTPResponse(
status=400,
headers={
"Content-Type": "text/html; charset=utf-8",
},
body=html,
)
error = req.args.get("error") if "error" in req.args else ""
return HTTPResponse(
status=400, body=f"Something is wrong with the installation (error: {error})"
)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=3000)
# python3 integration_tests/samples/openid_connect/sanic_example.py
# ngrok http 3000
# https://{yours}.ngrok.io/slack/install
|
Distrotech/bzr | bzrlib/tests/commands/test_update.py | Python | gpl-2.0 | 1,571 | 0.000637 | # Copyright (C) 2007, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from bzrlib import (
branch,
builtins,
tests,
)
from bzrlib.tests import transport_util
class TestUpdate(transport_util.TestCaseWithConnectionHookedTransport):
    """Check that ``bzr update`` opens exactly one transport connection."""
    def test_update(self):
        # Build a local branch bound to a remote branch that has one
        # commit the local tree does not.
        remote_wt = self.make_branch_and_tree('remote')
        local_wt = self.make_branch_and_tree('local')
        remote_branch = branch.Branch.open(self.get_url('remote'))
        local_wt.branch.bind(remote_branch)
        remote_wt.commit('empty commit')
        self.start_logging_connections()
        update = builtins.cmd_update()
        # update needs the encoding from outf to print URLs
        update.outf = tests.StringIOWrapper()
        # update calls it 'dir' where other commands call it 'directory'
        update.run(dir='local')
        # The whole update must have used a single connection.
        self.assertEquals(1, len(self.connections))
|
simplegeo/sqlalchemy | lib/sqlalchemy/orm/interfaces.py | Python | mit | 37,723 | 0.003181 | # interfaces.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer
# mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Semi-private module containing various base classes used throughout the ORM.
Defines the extension classes :class:`MapperExtension`,
:class:`SessionExtension`, and :class:`AttributeExtension` as
well as other user-subclassable extension objects.
"""
from itertools import chain
import sqlalchemy.exceptions as sa_exc
from sqlalchemy import log, util
from sqlalchemy.sql import expression
# Placeholders rebound later by the ORM package at runtime.
# NOTE(review): presumably deferred to avoid a circular import — confirm.
class_mapper = None
collections = None
__all__ = (
    'AttributeExtension',
    'EXT_CONTINUE',
    'EXT_STOP',
    'ExtensionOption',
    'InstrumentationManager',
    'LoaderStrategy',
    'MapperExtension',
    'MapperOption',
    'MapperProperty',
    'PropComparator',
    'PropertyOption',
    'SessionExtension',
    'StrategizedOption',
    'StrategizedProperty',
    'build_path',
    )
# Sentinel returned by extension hooks meaning "proceed to the next
# extension / default behavior".
EXT_CONTINUE = util.symbol('EXT_CONTINUE')
# Sentinel meaning "stop processing further extensions in the chain".
EXT_STOP = util.symbol('EXT_STOP')
# Relationship direction symbols.
ONETOMANY = util.symbol('ONETOMANY')
MANYTOONE = util.symbol('MANYTOONE')
MANYTOMANY = util.symbol('MANYTOMANY')
class MapperExtension(object):
"""Base implementation for customizing ``Mapper`` behavior.
New extension classes subclass ``MapperExtension`` and are specified
using the ``extension`` mapper() argument, which is a single
``MapperExtension`` or a list of such. A single mapper
can maintain a chain of ``MapperExtension`` objects. When a
particular mapping event occurs, the corresponding method
on each ``MapperExtension`` is invoked serially, and each method
has the ability to halt the chain from proceeding further.
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP has significance within a chain
of ``MapperExtension`` objects that the chain will be stopped
when this symbol is returned. Like EXT_CONTINUE, it also
has additional significance in some cases that a default
mapper activity will not be performed.
"""
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
This is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
``RowProxy`` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
If the ultimate return value is EXT_CONTINUE, the row
is not translated.
"""
return EXT_CONTINUE
def create_instance(self, mapper, selectcontext, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
The method can choose to create the instance itself, or it can return
EXT_CONTINUE to indicate normal object creation should take place.
mapper
The mapper doing the operation
selectcontext
The QueryContext generated from the Query.
row
The result row from the database
class\_
The class we are mapping.
return value
A new object instance, or EXT_CONTINUE
"""
return EXT_CONTINUE
def append_result(self, mapper, selectconte | xt, row, instance,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
If this method returns EXT_CONTINUE, result appending will proceed
normally. if this method returns any other value or None,
result appending will not proceed for | this instance, giving
this extension an opportunity to do the appending itself, if
desired.
mapper
The mapper doing the operation.
selectcontext
The QueryContext generated from the Query.
row
The result row from the database.
instance
The object instance to be appended to the result.
result
List to which results are being appended.
\**flags
extra information about the row, same as criterion in
``create_row_processor()`` method of
:class:`~sqlalchemy.orm.interfaces.MapperProperty`
"""
return EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row,
instance, **flags):
"""Receive an instance before that instance has
its attributes populated.
This usually corresponds to a newly loaded instance but may
also correspond to an already-loaded instance which has
unloaded attributes to be populated. The method may be called
many times for a single instance, as multiple result rows are
used to populate eagerly loaded collections.
If this method returns EXT_CONTINUE, instance population will
proceed normally. If any other value or None is returned,
instance population will not proceed, giving this extension an
opportunity to populate the instance itself, if desired.
As of 0.5, most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result r |
nkgilley/home-assistant | tests/components/airly/__init__.py | Python | apache-2.0 | 23 | 0 | "" | "Tests for Airly. | """
|
pniedzielski/fb-hackathon-2013-11-21 | src/pythonCode.py | Python | agpl-3.0 | 155 | 0.03871 | import sys
# NOTE(review): Python 2 source (print statement, raw_input); it will not
# run under Python 3 without porting.
for m in range(0, 2):
    # The first input line of each round gives how many lines follow.
    n = raw_input()
    for i in range(0, int(n)):
        # Echo each input line wrapped in DC1 (0x11) control characters.
        print "\x11" + str(m) + ": " + raw_input() + "\x11"
        #sys.stdout.flush()
resurtm/recarguide | backend/cars/search/elastic.py | Python | mit | 2,295 | 0 | import datetime
from django.conf import settings
from elasticsearch import Elasticsearch
instance = Elasticsearch([settings.ELASTICSEARCH_DSN])
index_prefix = 'recarguide_'
def indices():
    """Return the full names of every Elasticsearch index used by the app.

    Each base name is prefixed with the module-level ``index_prefix``.
    """
    base_names = ('car',)
    return ['%s%s' % (index_prefix, name) for name in base_names]
def mappings():
    """Return the type mappings for each index, keyed by full index name."""
    # All car fields are stored verbatim (not analyzed) so they can be used
    # for exact-match filtering/faceting.
    not_analyzed = {'type': 'string', 'index': 'not_analyzed'}
    per_index = {
        'car': {
            'properties': {
                'make': dict(not_analyzed),
                'model': dict(not_analyzed),
                'category': dict(not_analyzed),
                'subcategory': dict(not_analyzed),
            },
        },
    }
    return {'{}{}'.format(index_prefix, name): mapping
            for name, mapping in per_index.items()}
def ensure_indices():
    """Create any missing application indices with their mappings.

    Indices that already exist are left untouched.
    """
    index_mappings = mappings()
    for name in indices():
        if not instance.indices.exists(index=name):
            type_mapping = {'{}_type'.format(name): index_mappings[name]}
            instance.indices.create(index=name,
                                    body={'mappings': type_mapping})
def delete_indices():
    """Drop every application index that currently exists."""
    for name in indices():
        if instance.indices.exists(index=name):
            instance.indices.delete(index=name)
def encode_car(car):
    """Serialize a car model instance into an Elasticsearch document dict.

    If the car's category has a parent, the parent becomes ``category`` and
    the category itself becomes ``subcategory``; otherwise ``subcategory``
    is ``None``.
    """
    if car.category.parent_id is None:
        category = car.category.name
        subcategory = None
    else:
        category = car.category.parent.name
        subcategory = car.category.name
    return {'name': car.name,
            'slug': car.slug,
            'make': car.make.name,
            'model': car.model.name,
            'trim': car.trim_name,
            'category': category,
            'subcategory': subcategory,
            'price': car.price,
            'year': car.year,
            # NOTE(review): the 'car' key holds mileage — looks like it was
            # meant to be named 'mileage'.  Kept as-is because the index
            # schema/queries may depend on it; confirm before renaming.
            'car': car.mileage,
            'description': car.description}
def reindex_car(car):
    """(Re)index a single car document in Elasticsearch.

    The car's primary key is used as the document id, so re-running this
    for the same car overwrites the previously indexed document.  A
    ``timestamp`` field recording indexing time is added to the document.
    """
    body = encode_car(car)
    body['timestamp'] = datetime.datetime.now()
    instance.index(index='{}car'.format(index_prefix),
                   doc_type='{}car_type'.format(index_prefix),
                   id=car.id,
                   body=body)
def search_cars(query_body, query='search'):
    """Run an Elasticsearch query against the car index.

    ``query`` names the client method to invoke (``search`` by default,
    e.g. ``count`` would also work); ``query_body`` is passed as the
    request body.
    """
    index_name = '{}car'.format(index_prefix)
    type_name = '{}car_type'.format(index_prefix)
    method = getattr(instance, query)
    return method(index=index_name, doc_type=type_name, body=query_body)
|
joansalasoler/auale | src/auale/gui/widgets/board_animator.py | Python | gpl-3.0 | 7,553 | 0 | # -*- coding: utf-8 -*-
# Aualé oware graphic user interface.
# Copyright (C) 2014-2020 Joan Sala Soler <contact@joansala.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from ..animation import Capture
from ..animation import Energize
from ..animation import Pick
from ..animation import Ripe
from ..animation import Sow
class BoardAnimator(GObject.GObject):
    """Animates moves on the board canvas.

    Builds a set of transitions (pick, sow, ripe, capture, harvest) for the
    last move of a match and plays them with staggered delays.
    """

    __gtype_name__ = 'BoardAnimator'

    # Delay between consecutive animation steps, in milliseconds
    __STEP_DELAY = 250

    def __init__(self, canvas):
        GObject.GObject.__init__(self)
        self._canvas = canvas
        self._is_playing = False
        self._transitions = set()

    @GObject.Signal
    def capture_animation(self, transition: object):
        """Emitted when a capture animation starts"""

    @GObject.Signal
    def harvest_animation(self, transition: object):
        """Emitted when a harvest animation starts"""

    @GObject.Signal
    def pick_animation(self, transition: object):
        """Emitted when a pick animation starts"""

    @GObject.Signal
    def ripe_animation(self, transition: object):
        """Emitted when a ripening animation starts"""

    @GObject.Signal
    def sow_animation(self, transition: object):
        """Emitted when a sow animation starts"""

    def is_playing(self):
        """Check if the animation is playing"""
        return self._is_playing

    def animate_move(self, match):
        """Animate the move that lead to the current match position"""
        self._is_playing = True
        self._push_move_transitions(match)
        self._start_transitions()

    def stop_animation(self):
        """Stop the current animation if it is playing"""
        self._clear_transitions()
        self._is_playing = False

    def _start_transitions(self):
        """Starts the attached transitions"""
        for transition in self._transitions:
            transition.attach()

    def _clear_transitions(self):
        """Stops and detaches any pending transitions"""
        while self._transitions:
            transition = self._transitions.pop()
            transition.detach()
            transition.stop()

    def _remove_transition(self, transition):
        """Discard a completed transition; returns the remaining set"""
        transition.detach()
        self._transitions.discard(transition)
        return self._transitions

    def _push_transition(self, transition, match):
        """Attach a house transition for the given match"""
        self._transitions.add(transition)
        callback = self._on_transition_completed
        transition.connect('completed', callback, match)
        return self._transitions

    def _push_move_transitions(self, match):
        """Add animation for the last move on a match"""
        steps = self._push_pick_transitions(0, match)
        steps = self._push_sow_transitions(steps, match)
        steps = self._push_ripe_transitions(steps, match)
        steps = self._push_capture_transitions(steps, match)
        steps = self._push_harvest_transitions(steps, match)

    def _push_pick_transitions(self, steps, match):
        """Add animations to pick the house seeds"""
        for house in self._canvas.get_children('houses'):
            is_move = match.get_move() == house.get_move()
            transition = Pick(house) if is_move else Energize(house)
            transition.connect('started', self.pick_animation.emit)
            self._push_transition(transition, match)
        return steps + 1

    def _push_sow_transitions(self, steps, match):
        """Add animations to sow the picked seeds"""
        move = match.get_move()
        sowings = match.get_sowings()
        board = list(match.get_board(-2))
        for step, move in enumerate(sowings, steps):
            board[move] += 1
            seeds = board[move]
            content = self._canvas.get_seed_canvas(seeds)
            house = self._canvas.get_object(f'house-{ move }')
            state = self._canvas.get_ripening_stage(move, seeds, match)
            transition = Sow(house)
            transition.set_state(state)
            transition.set_content(content)
            transition.set_delay(step * self.__STEP_DELAY)
            transition.connect('started', self.sow_animation.emit)
            self._push_transition(transition, match)
        return steps + len(sowings)

    def _push_capture_transitions(self, steps, match):
        """Add animations to capture the house seeds"""
        move = match.get_move()
        sowings = match.get_sowings()
        current = match.get_board()
        previous = match.get_board(-2)
        # Board state after sowing but before captures
        previous = [s + sowings.count(i) for i, s in enumerate(previous)]
        moves = [i for i in range(11, -1, -1) if current[i] == 0]
        houses = [i for i in moves if i != move and previous[i] != 0]
        for step, move in enumerate(houses, steps):
            house = self._canvas.get_object(f'house-{ move }')
            transition = Capture(house)
            transition.set_delay(step * self.__STEP_DELAY)
            transition.connect('started', self.capture_animation.emit)
            self._push_transition(transition, match)
        return steps + len(houses)

    def _push_harvest_transitions(self, steps, match):
        """Add animations to harvest seeds onto the player's homes"""
        current = match.get_board()
        previous = match.get_board(-2)
        # Houses 12 and 13 are the two players' stores
        houses = [i for i in (12, 13) if current[i] != previous[i]]
        delay = (steps + .5) * self.__STEP_DELAY
        for move in houses:
            seeds = match.get_seeds(move)
            content = self._canvas.get_seed_canvas(seeds)
            house = self._canvas.get_object(f'house-{ move }')
            transition = Sow(house)
            transition.set_delay(delay)
            transition.set_content(content)
            transition.connect('started', self.harvest_animation.emit)
            self._push_transition(transition, match)
        return steps + .5

    def _push_ripe_transitions(self, steps, match):
        """Add animations to show the ripening state"""
        move = match.get_move()
        delay = steps * self.__STEP_DELAY
        for house in self._canvas.get_children('houses'):
            move = house.get_move()
            seeds = match.get_seeds(move)
            state = self._canvas.get_ripening_stage(move, seeds, match)
            transition = Ripe(house)
            transition.set_state(state)
            transition.set_delay(delay)
            transition.connect('started', self.ripe_animation.emit)
            self._push_transition(transition, match)
        return steps

    def _on_transition_completed(self, transition, match):
        """Fired when a transition for a match is completed"""
        if not self._remove_transition(transition):
            self._is_playing = False
            self._canvas.show_match(match)
            self._canvas.animation_completed.emit()
|
ajrichards/GenesDI | genesdi/qtlib/LeftDock.py | Python | gpl-3.0 | 7,251 | 0.008413 | #!/usr/bin/python
'''
Cytostream
LeftDock
Adam Richards
adamricha@gmail.com
'''
import sys,os
from PyQt4 import QtGui,QtCore
if hasattr(sys,'frozen'):
baseDir = os.path.dirname(sys.executable)
baseDir = re.sub("MacOS","Resources",baseDir)
else:
baseDir = os.path.dirname(__file__)
sys.path.append(os.path.join(baseDir,'..'))
#from FileControls import *
from gdiqt4 import get_gene_list_names
from gdiqt4.qtlib import GeneListSelector
from gdiqt4.qtlib.InitialDock1 import InitialDock1
from gdiqt4.qtlib import PipelineDock
def add_pipeline_dock(mainWindow):
    """Create the PipelineDock and store it on ``mainWindow.pDock``.

    The three callbacks move the UI to each pipeline stage; each lambda
    binds ``mainWindow`` at definition time through the ``a=mainWindow``
    default argument (avoids the late-binding closure pitfall).
    """
    # NOTE(review): each callback passes mainWindow back into its own
    # method as an argument — presumably the move_to_* methods expect it;
    # confirm against their signatures.
    btnCallBacks = [lambda a=mainWindow:mainWindow.move_to_data_processing(a),
                    lambda a=mainWindow:mainWindow.move_to_subset_finder(a),
                    lambda a=mainWindow:mainWindow.move_to_results_navigation(a)]
    mainWindow.pDock = PipelineDock(parent=mainWindow.mainDockWidget,eSize=mainWindow.eSize,btnCallBacks=btnCallBacks)
def remove_left_dock(mainWindow):
    """Detach the current main dock widget from the main window."""
    mainWindow.removeDockWidget(mainWindow.mainDockWidget)
def add_left_dock(mainWindow):
if mainWindow.dockWidget != None:
remove_left_dock(mainWindow)
if mainWindow.controller.homeDir == None:
noProject = True
mainWindow.mainDockWidget = QtGui.QDockWidget('no project loaded', mainWindow)
else:
noProject = False
allGeneLists = get_gene_list_names(mainWindow.controller.homeDir)
mainWindow.mainDockWidget = QtGui.QDockWidget(mainWindow.controller.projectID, mainWindow)
mainWindow.mainDockWidget.setObjectName("MainDockWidget")
mainWindow.mainDockWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
mainWindow.dockWidget = QtGui.QWidget(mainWindow)
palette = mainWindow.dockWidget.palette()
role = mainWindow.dockWidget.backgroundRole()
palette.setColor(role, QtGui.QColor('black'))
mainWindow.dockWidget.setPalette(palette)
mainWindow.dockWidget.setAutoFillBackground(True)
# setup alignments
masterBox = QtGui.QVBoxLayout(mainWindow.dockWidget)
vbox1 = QtGui.QVBoxLayout()
vbox1.setAlignment(QtCore.Qt.AlignTop)
hbox1 = QtGui.QHBoxLayout()
hbox1.setAlignment(QtCore.Qt.AlignCenter)
vbox2 = QtGui.QVBoxLayout()
vbox2.setAlignment(QtCore.Qt.AlignBottom)
hbox2 = QtGui.QHBoxLayout()
hbox2.setAlignment(QtCore.Qt.AlignCenter)
widgetWidth = 0.15 * mainWindow.screenWidth
mainWindow.dockWidget.setMaximumWidth(widgetWidth)
mainWindow.dockWidget.setMinimumWidth(widgetWidth)
if mainWindow.log.log['currentState'] == 'initial':
mainWindow.dock1 = InitialDock1(contBtnFn=False,addBtnFn=False,speciesFn=False,message="To begin select 'file' \nand create/load a project")
mainWindow.dock1.setAutoFillBackground(True)
hbox1.addWidget(mainWindow.dock1)
elif mainWindow.log.log['currentState'] == 'Data Processing':
mainWindow.dock1 = InitialDock1(contBtnFn=False,addBtnFn=mainWindow.add_files_to_project)
mainWindow.dock1.setAutoFillBackground(True)
hbox1.addWidget(mainWindow.dock1)
## add the pipeline dock
add_pipeline_dock(mainWindow)
hbox2.addWidget(mainWindow.pDock)
## finalize alignments
vbox1.addLayout(hbox1)
vbox2.addLayout(hbox2)
masterBox.addLayout(vbox1)
masterBox.addLayout(vbox2)
#vbox.addLayout(vbl3)
mainWindow.mainDockWidget.setWidget(mainWindow.dockWidget)
mainWindow.addDockWidget(QtCore.Qt.LeftDockWidgetArea, mainWindow.mainDockWidget)
## file selector
'''
if mainWindow.log.log['currentState'] in ['Data Processing','Quality Assurance','Model','Results Navigation']:
mainWindow.fileSelector = GeneListSele | ctor(fileList,parent=mainWindow.dockWidget,
selectionFn=mainWind | ow.set_selected_file,
fileDefault=mainWindow.log.log['selectedFile'],
showModelSelector=showModelSelector,modelsRun=modelsRun)
mainWindow.fileSelector.setAutoFillBackground(True)
subsamplingDefault = mainWindow.log.log['subsample']
vbl1.addWidget(mainWindow.fileSelector)
## data processing
if mainWindow.log.log['currentState'] == "Data Processing":
mainWindow.dock1 = DataProcessingDock1(masterChannelList,transformList,compensationList,subsetList,parent=mainWindow.dockWidget,
contBtnFn=None,subsetDefault=subsamplingDefault)
callBackfn = mainWindow.handle_data_processing_mode_callback
mainWindow.dock2 = DataProcessingDock2(callBackfn,parent=mainWindow.dockWidget,default=mainWindow.log.log['dataProcessingAction'])
mainWindow.dock1.setAutoFillBackground(True)
mainWindow.dock2.setAutoFillBackground(True)
hbl2.addWidget(mainWindow.dock2)
hbl3.addWidget(mainWindow.dock1)
## quality assurance
elif mainWindow.log.log['currentState'] == "Quality Assurance":
### check to see if fileList needs adjusting
#if type(mainWindow.log.log['excludedFiles']) == type([]) and len(mainWindow.log.log['excludedFiles']) > 0:
# for f in mainWindow.log.log['excludedFiles']:
# fileList.remove(f)
# print 'removeing file %s in leftdock'%f
mainWindow.dock = QualityAssuranceDock(fileList,masterChannelList,transformList,compensationList,subsetList,parent=mainWindow.dockWidget,
contBtnFn=None,subsetDefault=subsamplingDefault,viewAllFn=mainWindow.display_thumbnails)
vbl3.addWidget(mainWindow.dock)
mainWindow.dock.setAutoFillBackground(True)
## model
elif mainWindow.log.log['currentState'] == "Model":
modelList = ['DPMM','K-means']
mainWindow.dock = ModelDock(modelList,parent=mainWindow.dockWidget,componentsFn=mainWindow.set_num_components)
mainWindow.dock.setAutoFillBackground(True)
vbl3.addWidget(mainWindow.dock)
## results navigation
elif mainWindow.log.log['currentState'] == "Results Navigation":
mainWindow.dock = ResultsNavigationDock(mainWindow.resultsModeList,masterChannelList,parent=mainWindow.dockWidget,
resultsModeFn=mainWindow.set_selected_results_mode,
resultsModeDefault=mainWindow.log.log['resultsMode'],viewAllFn=mainWindow.display_thumbnails,
infoBtnFn=mainWindow.show_model_log_info)
mainWindow.dock.setAutoFillBackground(True)
vbl3.addWidget(mainWindow.dock)
## one dimensional viewer
if mainWindow.log.log['currentState'] == 'OneDimViewer':
mainWindow.dock = OneDimViewerDock(fileList,masterChannelList,callBack=mainWindow.odv.paint)
mainWindow.dock.setAutoFillBackground(True)
vbl1.addWidget(mainWindow.dock)
## stages with thumbnails
if mainWindow.log.log['currentState'] in ['Quality Assurance', 'Results Navigation']:
mainWindow.fileSelector.set_refresh_thumbs_fn(mainWindow.display_thumbnails)
'''
|
blueset/ehForwarderBot | ehforwarderbot/wizard.py | Python | agpl-3.0 | 28,382 | 0.002537 | # -*- coding: utf-8 -*-
"""
Interactive wizard to guide user to set up EFB and modules.
Since newer version of pip (>=9.0), which checks Python version
prior to installation, is already widespread, we are dropping
Python version check in wizard script, and assuming user is
running an appropriate Python version.
"""
import argparse
import gettext
import os
import platform
import sys
from collections import namedtuple
from contextlib import suppress
from io import StringIO
from typing import Dict, Callable, Optional
from urllib.parse import quote
import bullet.utils
import cjkwrap
import pkg_resources
from bullet import Bullet, keyhandler, colors
from bullet.charDef import NEWLINE_KEY, BACK_SPACE_KEY
from ruamel.yaml import YAML
from ehforwarderbot import coordinator, utils
Module = namedtuple("Module", ['type', 'id', 'name', 'emoji', 'wizard'])
Module.replace = Module._replace # type: ignore
gettext.translation(
'ehforwarderbot',
pkg_resources.resource_filename('ehforwarderbot', 'locale'),
fallback=True
).install(names=["ngettext"])
_: Callable
ngettext: Callable
def print_wrapped(text):
    """Print *text* paragraph by paragraph, wrapped CJK-aware via cjkwrap."""
    for paragraph in text.split("\n"):
        print("\n".join(cjkwrap.wrap(paragraph)))
class DataModel:
def __init__(self, profile):
self.profile = profile
self.yaml = YAML()
self.config = None
self.modules: Dict[str, Module] = {}
@staticmethod
def default_config():
# TRANSLATORS: This part of text must be formatted in a monospaced font, and all lines must not exceed the width of a 70-cell-wide terminal.
config = _(
"# ===================================\n"
"# EH Forwarder Bot Configuration File\n"
"# =============== | ====================\n"
"# \n"
"# This file determines what modules, including master channel, slave channels,\n"
"# and middlewares, are enabled in this profile.\n"
"# \n"
"# \n"
"# Master Channel\n"
"# --------------\n"
"# Exactly one instance of a master channel is required for a profile.\n"
"# Fill in the module ID and instance ID (if needed) below.\n" |
)
config += "\nmaster_channel:\n\n"
# TRANSLATORS: This part of text must be formatted in a monospaced font, and all lines must not exceed the width of a 70-cell-wide terminal.
config += _(
"# Slave Channels\n"
"# --------------\n"
"# \n"
"# At least one slave channel is required for a profile.\n"
"# Fill in the module ID and instance ID (if needed) of each slave channel\n"
"# to be enabled below.\n"
)
config += "\nslave_channels: []\n\n"
# TRANSLATORS: This part of text must be formatted in a monospaced font, and all lines must not exceed the width of a 70-cell-wide terminal.
config += _(
"# Middlewares\n"
"# -----------\n"
"# Middlewares are not required to run an EFB profile. If you are not\n"
"# going to use any middleware in this profile, you can safely remove\n"
"# this section. Otherwise, please list down the module ID and instance\n"
"# ID of each middleware to be enabled below.\n"
)
config += "middlewares: []\n"
str_io = StringIO(config)
str_io.seek(0)
return str_io
def load_config(self):
coordinator.profile = self.profile
conf_path = utils.get_config_path()
if not os.path.exists(conf_path):
self.config = self.yaml.load(self.default_config())
else:
with open(conf_path) as f:
self.config = self.yaml.load(f)
self.load_modules_list()
def save_config(self):
coordinator.profile = self.profile
conf_path = utils.get_config_path()
if not conf_path.exists():
conf_path.parent.mkdir(parents=True, exist_ok=True)
with open(conf_path, 'w') as f:
self.yaml.dump(self.config, f)
def load_modules_list(self):
for i in pkg_resources.iter_entry_points("ehforwarderbot.master"):
cls = i.load()
self.modules[cls.channel_id] = Module(type="master",
id=cls.channel_id,
name=cls.channel_name,
emoji=cls.channel_emoji,
wizard=None)
for i in pkg_resources.iter_entry_points("ehforwarderbot.slave"):
cls = i.load()
self.modules[cls.channel_id] = Module(type="slave",
id=cls.channel_id,
name=cls.channel_name,
emoji=cls.channel_emoji,
wizard=None)
for i in pkg_resources.iter_entry_points("ehforwarderbot.middleware"):
cls = i.load()
self.modules[cls.middleware_id] = Module(type="middleware",
id=cls.middleware_id,
name=cls.middleware_name,
emoji=None,
wizard=None)
for i in pkg_resources.iter_entry_points("ehforwarderbot.wizard"):
if i.name in self.modules:
fn = i.load()
self.modules[i.name] = self.modules[i.name].replace(wizard=fn)
def get_master_lists(self):
names = []
ids = []
for i in self.modules.values():
if i.type == "master":
names.append(i.name)
ids.append(i.id)
return names, ids
def get_slave_lists(self):
names = []
ids = []
for i in self.modules.values():
if i.type == "slave":
names.append(i.name)
ids.append(i.id)
return names, ids
@staticmethod
def split_cid(cid):
if "#" in cid:
mid, iid = cid.split("#")
else:
mid = cid
iid = None
return mid, iid
def get_instance_display_name(self, cid):
if not cid:
return cid
mid, iid = self.split_cid(cid)
if mid not in self.modules:
if iid:
return _("Unknown/custom module (instance: {instance})").format(
iid
)
else:
return _("Unknown/custom module")
else:
if iid:
name = _("{channel} (instance: {instance})").format(
channel=self.modules[mid].name,
instance=iid
)
else:
name = self.modules[mid].name
return name
def has_wizard(self, cid):
mid, _ = self.split_cid(cid)
if mid not in self.modules:
return False
return callable(self.modules[mid].wizard)
def get_selected_slave_lists(self):
if 'slave_channels' not in self.config:
self.config['slave_channels'] = []
return [], []
i = 0
names = []
ids = []
while i < len(self.config['slave_channels']):
cid = self.config['slave_channels'][i]
mid, __ = self.split_cid(cid)
if mid not in self.modules or self.modules[mid].type != "slave":
names.append(_("Unknown/custom channel ({channel_id})").format(channel_id=cid))
ids.append(cid)
else:
name = self.get_instance_display_name(cid)
names.append(name)
ids.append(cid)
i += 1
return names, ids
def get_middleware_lists(self):
names = []
ids = []
for i in self.modules.values():
if i.type == "middleware":
names.append(i.name)
|
ESEGroup/Paraguai | repositorios_memoria/__init__.py | Python | apache-2.0 | 98 | 0 | from .usuar | io import RepositorioUsuarioEmMemoria
fro | m .recurso import RepositorioRecursoEmMemoria
|
bskari/sparkfun-avc | control/extension_waypoint_generator.py | Python | mit | 2,933 | 0 | """Implements the WaypointGenerator interface. Returns waypoints from a KML
file. All WaypointGenerator implementations should have two methods:
get_current_waypoint(self, x_m, y_m) -> (float, float)
get_raw_waypoint(self) -> (float, float)
reached(self, x_m, y_m) -> bool
next(self)
done(self) -> bool
reset(self)
This implements an extension of the goal beyond the actual waypoint to try to
reduce oscillating.
"""
import math
|
from control.telemetry import Telemetry
from control.simple_waypoint_generator import SimpleWaypointGenerator
class ExtensionWaypointGenerator(SimpleWaypointGenerator):
    """Generates waypoints extending through the actual waypoint to try to
    reduce oscillating.
    """
    # Distance in meters by which the goal is projected past the real
    # waypoint, and its square (used for cheap distance comparisons).
    BEYOND_M = 5.0
    BEYOND_M_2 = BEYOND_M ** 2

    def __init__(self, waypoints):
        super(ExtensionWaypointGenerator, self).__init__(waypoints)
        self._extension_waypoint = waypoints[0]

    def get_current_waypoint(self, x_m, y_m):
        """Returns the current waypoint as projected BEYOND_M past."""
        return self._extension_waypoint

    def next(self):
        """Goes to the next waypoint."""
        super(ExtensionWaypointGenerator, self).next()
        self._extension_waypoint = self._get_extended_waypoint()

    def reached(self, x_m, y_m):
        """Returns True if the current waypoint has been reached."""
        if super(ExtensionWaypointGenerator, self).reached(x_m, y_m):
            return True
        # Because the car is trying to go for the extension, the car might
        # pass the actual waypoint and keep on driving, so check if it's close
        # to the extension as well
        distance_m_2 = (
            (x_m - self._extension_waypoint[0]) ** 2
            + (y_m - self._extension_waypoint[1]) ** 2
        )
        if distance_m_2 < self.BEYOND_M_2:
            return True
        return False

    def _get_extended_waypoint(self):
        """Returns the current waypoint projected BEYOND_M meters past,
        along the direction from the previous waypoint to the current one.
        Falls back to the raw first/last waypoint at the route edges.
        """
        if self._current_waypoint_index == 0:
            return self._waypoints[0]
        if (
            self._current_waypoint_index >= 1
            and self._current_waypoint_index < len(self._waypoints)
        ):
            previous_waypoint_m = self._waypoints[
                self._current_waypoint_index - 1
            ]
            current_waypoint_m = self._waypoints[self._current_waypoint_index]
            degrees = Telemetry.relative_degrees(
                previous_waypoint_m[0],
                previous_waypoint_m[1],
                current_waypoint_m[0],
                current_waypoint_m[1]
            )
            offset_m = Telemetry.rotate_degrees_clockwise(
                (0.0, self.BEYOND_M),
                degrees
            )
            return (
                current_waypoint_m[0] + offset_m[0],
                current_waypoint_m[1] + offset_m[1]
            )
        return self._waypoints[-1]
|
hankcs/HanLP | hanlp/metrics/parsing/span.py | Python | apache-2.0 | 3,222 | 0.00031 | # MIT License
#
# Copyright (c) 2020 Yu Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import Counter
from hanlp.metrics.metric import Metric
class SpanMetric(Metric):
    """Constituency-span metric.

    Accumulates labeled/unlabeled true positives over ``(start, end, label)``
    span triples and exposes precision/recall/F1 plus complete-match rates.
    """

    def __init__(self, eps=1e-12):
        super().__init__()
        self.reset(eps)

    # noinspection PyAttributeOutsideInit
    def reset(self, eps=1e-12):
        """Zero all counters; ``eps`` guards divisions by zero."""
        self.n = 0.0       # sentences seen
        self.n_ucm = 0.0   # sentences with an unlabeled complete match
        self.n_lcm = 0.0   # sentences with a labeled complete match
        self.utp = 0.0     # unlabeled true positives
        self.ltp = 0.0     # labeled true positives
        self.pred = 0.0    # total predicted spans
        self.gold = 0.0    # total gold spans
        self.eps = eps

    def __call__(self, preds, golds):
        """Accumulate counts from parallel lists of span lists; returns self."""
        for pred, gold in zip(preds, golds):
            # Multiset intersection handles duplicate spans correctly.
            upred = Counter([(i, j) for i, j, label in pred])
            ugold = Counter([(i, j) for i, j, label in gold])
            utp = list((upred & ugold).elements())
            lpred = Counter(pred)
            lgold = Counter(gold)
            ltp = list((lpred & lgold).elements())
            self.n += 1
            self.n_ucm += len(utp) == len(pred) == len(gold)
            self.n_lcm += len(ltp) == len(pred) == len(gold)
            self.utp += len(utp)
            self.ltp += len(ltp)
            self.pred += len(pred)
            self.gold += len(gold)
        return self

    def __repr__(self):
        s = f"UCM: {self.ucm:.2%} LCM: {self.lcm:.2%} "
        s += f"UP: {self.up:.2%} UR: {self.ur:.2%} UF: {self.uf:.2%} "
        s += f"LP: {self.lp:.2%} LR: {self.lr:.2%} LF: {self.lf:.2%}"
        return s

    @property
    def score(self):
        # Labeled F1 is the headline score.
        return self.lf

    @property
    def ucm(self):
        return self.n_ucm / (self.n + self.eps)

    @property
    def lcm(self):
        return self.n_lcm / (self.n + self.eps)

    @property
    def up(self):
        return self.utp / (self.pred + self.eps)

    @property
    def ur(self):
        return self.utp / (self.gold + self.eps)

    @property
    def uf(self):
        return 2 * self.utp / (self.pred + self.gold + self.eps)

    @property
    def lp(self):
        return self.ltp / (self.pred + self.eps)

    @property
    def lr(self):
        return self.ltp / (self.gold + self.eps)

    @property
    def lf(self):
        return 2 * self.ltp / (self.pred + self.gold + self.eps)
|
plivo/plivo-python | tests/xml/test_sElement.py | Python | mit | 2,927 | 0.00205 | from unittest import TestCase
from plivo import plivoxml
from tests import PlivoXmlTestCase
class SElementTest(TestCase, PlivoXmlTestCase):
    def test_set_methods(self):
        """Build an SSML <s> element with every supported child and compare
        the rendered XML against the expected document."""
        expected_response = '<Response><Speak><s><break strength="strong"/>' \
                            '<emphasis level="strong">This is Test</emphasis><lang xml:lang="it">This is ' \
                            'Test</lang><phoneme alphabet="ipa" ph="t&#x259;mei&#x325;&#x27E;' \
                            'ou&#x325;">This is Test</phoneme><prosody pitch="low" rate="x-high" ' \
                            'volume="+6dB">This is Test</prosody><say-as format="" interpret-as="spell-out">' \
                            'This is Test</say-as><sub alias="World Wide Web Consortium">This is Test</sub>' \
                            '<w role="claws:VV0">This is Test</w></s></Speak></Response>'

        # NOTE(review): content_break and time_break are defined but never
        # passed to add_break (which only receives strength) — kept for now.
        content_break = 'This is Test'
        strength_break = 'strong'
        time_break = '250ms'
        content_lang = 'This is Test'
        xmllang_lang = "it"
        content_emphasis = 'This is Test'
        level_emphasis = 'strong'
        content_phoneme = 'This is Test'
        alphabet_phoneme = "ipa"
        ph_phoneme = "təmei̥ɾou̥"
        content_prosody = "This is Test"
        volume_prosody = "+6dB"
        rate_prosody = "x-high"
        pitch_prosody = "low"
        content_say_as = 'This is Test'
        interpret_as_say_as = "spell-out"
        # TODO: need to ask the value
        format_say_as = ""
        content_sub = "This is Test"
        alias_sub = "World Wide Web Consortium"
        content_w = "This is Test"
        role_w = "claws:VV0"

        element = plivoxml.ResponseElement()
        response = element.add(
            plivoxml.SpeakElement("").add(
                plivoxml.SElement().add_break(
                    strength=strength_break
                ).add_emphasis(
                    content_emphasis,
                    level=level_emphasis
                ).add_lang(
                    content_lang,
                    xmllang=xmllang_lang
                ).add_phoneme(
                    content_phoneme,
                    alphabet=alphabet_phoneme,
                    ph=ph_phoneme
                ).add_prosody(
                    content_prosody,
                    volume=volume_prosody,
                    rate=rate_prosody,
                    pitch=pitch_prosody
                ).add_say_as(
                    content_say_as,
                    interpret_as=interpret_as_say_as,
                    format=format_say_as
                ).add_sub(
                    content_sub,
                    alias=alias_sub,
                ).add_w(
                    content_w,
                    role_w
                )
            )
        ).to_string(False)

        self.assertXmlEqual(response, expected_response)
|
mitsei/dlkit | tests/resource/test_record_templates.py | Python | mit | 1,495 | 0.005351 | """Unit tests of resource records."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
@pytest.mark.usefixtures("resource_record_class_fixture", "resource_record_test_fixture")
class TestResourceRecord(object):
"""Tests for ResourceRecord"""
@pytest.mark.usefixtures("resource_query_record_class_fixture", "resource_query_record_test_fixture")
class TestResourceQueryRecord(object):
"""Tests for ResourceQueryRecord"""
@pytest.mark.usefixtures("resource_form_record_class_fixture", "resource_form_record_test_fixture")
class TestResourceFormRecord(object):
"""Tests for ResourceFormRecord"""
@pytest.mark.usefixtures("resource_search_record_class_fixture", "resource_search_record_test_fixture")
class TestResourceSearchRecord(object):
"""Tests for ResourceSearchRecord"""
@pytest.mark.usefixtures("bin_record_class_fixture", "bin_record_test_fixture")
class TestBinRecord(object):
"""Tests for BinRecord"""
@pytest.mark.usefixtures("bin_query_record_class_fixture", "bin_query_record_test_fixture")
class TestBinQueryRecord(object):
    """Tests for BinQueryRecord"""
@pytest.mark.usefixtures("bin_form_record_class_fixture", "bin_form_record_test_fixture")
class TestBinFormRecord(object):
    """Tests for BinFormRecord"""
@pytest.mark.usefixtures("bin_search_record_class_fixture", "bin_search_record_test_fixture")
class TestBinSearchRecord(object):
"""Tests for BinSearchRecord"""
|
Acehaidrey/incubator-airflow | tests/providers/amazon/aws/operators/test_s3_file_transform.py | Python | apache-2.0 | 5,277 | 0.000758 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import errno
import io
import os
import shutil
import sys
import unittest
from tempfile import mkdtemp
from unittest import mock
import boto3
import pytest
from moto import mock_s3
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.operators.s3 import S3FileTransformOperator
class TestS3FileTransformOperator(unittest.TestCase):
def setUp(self):
self.content = b"input"
self.bucket = "bucket"
self.input_key = "foo"
self.output_key = "bar"
self.bio = io.BytesIO(self.content)
self.tmp_dir = mkdtemp(prefix='test_tmpS3FileTransform_')
self.transform_script = os.path.join(self.tmp_dir, "transform.py")
os.mknod(self.transform_script)
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
# ENOENT - no such file or directory
if e.errno != errno.ENOENT:
raise e
@mock.patch('subprocess.Popen')
@mock.patch.object(S3FileTransformOperator, 'log')
@mock_s3
def test_execute_with_transform_script(self, mock_log, mock_popen):
process_output = [b"Foo", b"Bar", b"Baz"]
self.mock_process(mock_popen, process_output=process_output)
input_path, output_path = self.s3_paths()
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
replace=True,
task_id="task_id",
)
op.execute(None)
mock_log.info.assert_has_calls(
[mock.call(line.decode(sys.getdefaultencoding())) for line in process_output]
)
@mock.patch('subprocess.Popen')
@mock_s3
def test_execute_with_failing_transform_script(self, mock_popen):
self.mock_process(mock_popen, return_code=42)
input_path, output_path = self.s3_paths()
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
replace=True,
task_id="task_id",
)
with pytest.raises(AirflowException) as ctx:
op.execute(None)
assert 'Transform script failed: 42' == str(ctx.value)
@mock.patch('subprocess.Popen')
@mock_s3
def test_execute_with_transform_script_args(self, mock_popen):
self.mock_process(mock_popen, process_output=[b"Foo", b"Bar", b"Baz"])
input_path, output_path = self.s3_paths()
script_args = ['arg1', 'arg2']
op = S3FileTransformOperator(
| source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
script_args=script_args,
replace=True,
task_id="task_id",
)
op.execute(None)
assert script_args == mock_popen.call_args[0][0][3:]
@mock.patch('airflow.providers.a | mazon.aws.hooks.s3.S3Hook.select_key', return_value="input")
@mock_s3
def test_execute_with_select_expression(self, mock_select_key):
input_path, output_path = self.s3_paths()
select_expression = "SELECT * FROM s3object s"
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
select_expression=select_expression,
replace=True,
task_id="task_id",
)
op.execute(None)
mock_select_key.assert_called_once_with(key=input_path, expression=select_expression)
conn = boto3.client('s3')
result = conn.get_object(Bucket=self.bucket, Key=self.output_key)
assert self.content == result['Body'].read()
@staticmethod
def mock_process(mock_popen, return_code=0, process_output=None):
mock_proc = mock.MagicMock()
mock_proc.returncode = return_code
mock_proc.stdout.readline.side_effect = process_output or []
mock_proc.wait.return_value = None
mock_popen.return_value.__enter__.return_value = mock_proc
def s3_paths(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.bucket)
conn.upload_fileobj(Bucket=self.bucket, Key=self.input_key, Fileobj=self.bio)
s3_url = "s3://{0}/{1}"
input_path = s3_url.format(self.bucket, self.input_key)
output_path = s3_url.format(self.bucket, self.output_key)
return input_path, output_path
|
Akagi201/learning-python | flask/Flask-Script/test1/app/__init__.py | Python | mit | 80 | 0 | from flask import Flask
app = Flask(__name__)
a | pp.config.from_object('config') | |
linkedin/indextank-service | storefront/templatetags/google_analytics.py | Python | apache-2.0 | 2,712 | 0.002581 | """
Google Analytics template tags and filters.
"""
from __future__ import absolute_import
import re
from django.template import Library, Node, TemplateSyntaxError
from templatetags.utils import is_internal_ip, disable_html, get_required_setting
SCOPE_VISITOR = 1
SCOPE_SESSION = 2
SCOPE_PAGE = 3
PROPERTY_ID_RE = re.compile(r'^UA-\d+-\d+$')
SETUP_CODE = """
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', '%(property_id)s']);
_gaq.push(['_trackPageview']);
%(commands)s
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
"""
CUSTOM_VAR_CODE = "_gaq.push(['_setCustomVar', %(index)s, '%(name)s', " \
"'%(value)s', %(scope)s]);"
register = Library()
@register.tag
def google_analytics(parser, token):
"""
Google Analytics tracking template tag.
Renders Javascript code to track page visits. You must supply
your website property ID (as a string) in the
``GOOGLE_ANALYTICS_PROPERTY_ID`` setting.
"""
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return GoogleAnalyticsNode()
class GoogleAnalyticsNode(Node):
def __init__(self):
self.property_id = get_required_setting(
'GOOGLE_ANALYTICS_PROPERTY_ID', PROPERTY_ID_RE,
"must be a string looking like 'UA-XXXXXX-Y'")
def render(self, context):
commands = self._get_custom_var_commands(context)
html = SETUP_CODE % {'property_id': self.property_id,
'commands': " ".join(commands)}
if is_internal_ip(context, 'GOOGLE_ANALYTICS'):
html = disable_html(html, 'Google Analytics')
return html
def _get_custom_var_commands(self, context):
values = (context.get('google_analytics_var%s' % i)
for i in range(1, 6))
vars = [(i, v) for i, v in enumerate(values, 1) if v is not None]
commands = []
for index, var in vars:
name = var[0]
value = var[1]
try: |
scope = var[2]
except IndexError:
scope = SCOPE_PAGE
commands.append(CUST | OM_VAR_CODE % locals())
return commands
def contribute_to_analytical(add_node):
GoogleAnalyticsNode() # ensure properly configured
add_node('head_bottom', GoogleAnalyticsNode)
|
OCA/l10n-switzerland | ebill_paynet/models/ebill_payment_contract.py | Python | agpl-3.0 | 1,855 | 0.001078 | # Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class EbillPaymentContract(models.Model):
_inherit = "ebill.payment.contract"
paynet_account_number = fields.Char(string="Paynet ID", size=20)
is_paynet_contract = fields.Boolean(
compute="_compute_is_paynet_contract", store=False
)
paynet_service_id = fields.Many2one(
comodel_name="paynet.service", string="Paynet Service", ondelete="restrict"
)
payment_type = fields.Selection(
selection=[("qr", "QR"), ("isr", "ISR")],
string="Payment method",
default="qr",
help="Payment type to use for the invoices sent,"
" PDF will be generated and attached accordingly.",
)
@api.depends("transmit_method_id")
def _compute_is_paynet_contract(self):
transmit_method = self.env.ref("ebill_paynet.paynet_transmit_method")
for record in self:
record.is_paynet_contract = record.transmit_method_id == transmit_method
@api.constrains("transmit_method_id", "paynet_account_number")
def _check_paynet_account_number(self):
for contract in self:
if not contract.is_paynet_contract:
continue
if not contract.paynet_account_number:
raise ValidationError(
_("The Paynet ID is required for a Paynet contract.")
)
@api.constrains("transmit_meth | od_id", "paynet_service_id")
def _check_paynet_service_id(self):
for contract in self:
if contract.is_paynet_contract and not contract.paynet_service_id:
raise ValidationErr | or(
_("A Paynet service is required for a Paynet contract.")
)
|
Cadasta/cadasta-platform | cadasta/organization/serializers.py | Python | agpl-3.0 | 10,962 | 0 | from django.conf import settings
from django.db.models import Q
from django.core.urlresolvers import reverse
from core.util import slugify
from django.utils.translation import ugettext as _
from rest_framework import serializers
from rest_framework_gis import serializers as geo_serializers
from django_countries.serializer_fields import CountryField
from core import serializers as core_serializers
from accounts.models import User
from accounts.serializers import UserSerializer
from .models import Organization, Project, OrganizationRole, | ProjectRole
from .forms import create_update_or_delete_project_role
class OrganizationSerializer(core_serializers.SanitizeF | ieldSerializer,
core_serializers.DetailSerializer,
core_serializers.FieldSelectorSerializer,
serializers.ModelSerializer):
users = UserSerializer(many=True, read_only=True)
class Meta:
model = Organization
fields = ('id', 'slug', 'name', 'description', 'archived', 'urls',
'contacts', 'users',)
read_only_fields = ('id', 'slug',)
detail_only_fields = ('users',)
def validate_name(self, value):
invalid_names = settings.CADASTA_INVALID_ENTITY_NAMES
if slugify(value, allow_unicode=True) in invalid_names:
raise serializers.ValidationError(
_("Organization name cannot be “Add” or “New”."))
is_create = not self.instance
queryset = Organization.objects.filter(name__iexact=value)
if is_create:
not_unique = queryset.exists()
else:
not_unique = queryset.exclude(id=self.instance.id).exists()
if not_unique:
raise serializers.ValidationError(
_("Organization with this name already exists."))
return value
def create(self, *args, **kwargs):
org = super(OrganizationSerializer, self).create(*args, **kwargs)
OrganizationRole.objects.create(
organization=org,
user=self.context['request'].user,
admin=True
)
return org
def update(self, *args, **kwargs):
org = super(OrganizationSerializer, self).update(*args, **kwargs)
data = args[1]
if 'archived' in data.keys():
for project in org.projects.all():
project.archived = data['archived']
project.save()
return org
class ProjectSerializer(core_serializers.SanitizeFieldSerializer,
core_serializers.DetailSerializer,
serializers.ModelSerializer):
users = UserSerializer(many=True, read_only=True)
organization = OrganizationSerializer(hide_detail=True, read_only=True)
country = CountryField(required=False)
def validate_name(self, value):
# Check that name is not restricted
invalid_names = settings.CADASTA_INVALID_ENTITY_NAMES
if slugify(value, allow_unicode=True) in invalid_names:
raise serializers.ValidationError(
_("Project name cannot be “Add” or “New”."))
# Check that name is unique globally
# (Explicit validation: see comment in the Meta class)
is_create = not self.instance
queryset = Project.objects.filter(name__iexact=value)
if is_create:
not_unique = queryset.exists()
else:
not_unique = queryset.exclude(id=self.instance.id).exists()
if not_unique:
raise serializers.ValidationError(
_("Project with this name already exists"))
return value
class Meta:
model = Project
fields = ('id', 'organization', 'country', 'name', 'description',
'archived', 'urls', 'contacts', 'users', 'access', 'slug',
'extent',)
read_only_fields = ('id', 'country', 'slug')
detail_only_fields = ('users',)
# Suppress automatic model-derived UniqueTogetherValidator because
# organization is a read-only field in the serializer.
# Instead, perform this validation explicitly in validate_name()
validators = []
def create(self, validated_data):
organization = self.context['organization']
return Project.objects.create(
organization_id=organization.id,
**validated_data
)
class ProjectGeometrySerializer(geo_serializers.GeoFeatureModelSerializer):
org = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
class Meta:
model = Project
geo_field = 'extent'
fields = ('name', 'org', 'url')
def get_org(self, object):
return object.organization.name
def get_url(self, object):
return reverse(
'organization:project-dashboard',
kwargs={'organization': object.organization.slug,
'project': object.slug})
class EntityUserSerializer(serializers.Serializer):
username = serializers.CharField()
def to_representation(self, instance):
if isinstance(instance, User):
rep = UserSerializer(instance).data
rep[self.Meta.role_key] = self.get_role_json(instance)
return rep
def to_internal_value(self, data):
data[self.Meta.role_key] = self.set_roles(
data.get(self.Meta.role_key, None)
)
return super().to_internal_value(data)
def validate_username(self, value):
error = ""
if self.instance:
self.user = self.instance
else:
users = User.objects.filter(
Q(username=value) | Q(email=value) | Q(phone=value))
users_count = len(users)
if users_count == 0:
error = _(
"User with username or email or phone {} does not exist")
elif users_count > 1:
error = _(
"More than one user found for username or email or"
" phone {}")
else:
self.user = users[0]
if error:
raise serializers.ValidationError(error.format(value))
try:
self.get_roles_object(self.user)
raise serializers.ValidationError(
_("Not able to add member. The role already exists."))
except self.Meta.role_model.DoesNotExist:
pass
def create(self, validated_data):
obj = self.context[self.Meta.context_key]
role_value = validated_data[self.Meta.role_key]
create_kwargs = {
self.Meta.role_key: role_value,
self.Meta.context_key: obj,
'user': self.user,
}
self.role = self.Meta.role_model.objects.create(**create_kwargs)
return self.user
def update(self, instance, validated_data):
role = self.get_roles_object(instance)
role_value = validated_data[self.Meta.role_key]
if self.Meta.role_key in validated_data:
setattr(role, self.Meta.role_key, role_value)
role.save()
return instance
class OrganizationUserSerializer(EntityUserSerializer):
class Meta:
role_model = OrganizationRole
context_key = 'organization'
role_key = 'admin'
admin = serializers.BooleanField()
def validate_admin(self, role_value):
if 'request' in self.context:
if self.context['request'].user == self.instance:
if role_value != self.get_roles_object(self.instance).admin:
raise serializers.ValidationError(
_("Organization administrators cannot change their "
"own permissions within the organization"))
return role_value
def get_roles_object(self, instance):
self.role = OrganizationRole.objects.get(
user=instance,
organization=self.context['organization'])
return self.role
def get_role_json(self, instance):
role = self.get_roles_object(instance)
return role.admi |
romank0/kombu | kombu/log.py | Python | bsd-3-clause | 4,115 | 0 | from __future__ import absolute_import
import os
import logging
import sys
from logging.handlers import WatchedFileHandler
from .five import string_t
from .utils import cached_property
from .utils.encoding import safe_repr, safe_str
from .utils.functional import maybe_promise
__all__ = ['LogMixin', 'LOG_LEVELS', 'get_loglevel', 'setup_logging']
LOG_LEVELS = dict(logging._levelNames)
LOG_LEVELS['FATAL'] = logging.FATAL
LOG_LEVELS[logging.FATAL] = 'FATAL'
DISABLE_TRACEBACKS = os.environ.get('DISABLE_TRACEBACKS')
class NullHandler(logging.Handler):
def emit(self, record):
pass
def get_logger(logger):
if isinstance(logger, string_t):
logger = logging.getLogger(logger)
if not logger.handlers:
logger.addHandler(NullHandler())
return logger
def anon_logger(name):
logger = logging.getLogger(name)
logger.addHandler(NullHandler())
return logger
def get_loglevel(level):
if isinstance(level, string_t):
return LOG_LEVELS[level]
return level
def naive_format_parts(fmt):
parts = fmt.split('%')
for i, e in enumerate(parts[1:]):
yield None if not e or not parts[i - 1] else e[0]
def safeify_format(fmt, args,
filters={'s': safe_str,
'r': safe_repr}):
for index, type in enumerate(naive_format_parts(fmt)):
filt = filters.get(type)
yield filt(args[index]) if filt else args[index]
class LogMixin(object):
def debug(self, *args, **kwargs):
return self.log(logging.DEBUG, *args, **kwargs)
def info(self, *args, **kwargs):
return self.log(logging.INFO, *args, **kwargs)
def warn(self, *args, **kwargs):
return self.log(logging.WARN, *args, **kwargs)
def error(self, *args, **kwargs):
return self._error(logging.ERROR, *args, **kwargs)
def critical(self, *args, **kwargs):
return self._error(logging.CRITICAL, *args, **kwargs)
def _error(self, severity, *args, **kwargs):
kwargs.setdefault('exc_info', True)
if DISABLE_TRACEBACKS:
kwargs.pop('exc_info', None)
return self.log(severity, *args, **kwargs)
def annotate(self, text):
return '%s - %s' % (self.logger_name, text)
def log(self, severity, *args, **kwargs):
if self.logger.isEnabledFor(severity):
log = self.logger.log
if len(args) > 1 and isinstance(args[0], string_t):
expand = [maybe_promise(arg) for arg in args[1:]]
return log(severity,
self.annotate(args[0].replace('%r', '%s')),
*list(safeify_f | ormat(args[0], expand)), **kwargs)
else:
return self.logger.log(
severity, self.annotate(' '.join(map(safe_str, args))),
**kwa | rgs)
def get_logger(self):
return get_logger(self.logger_name)
def is_enabled_for(self, level):
return self.logger.isEnabledFor(self.get_loglevel(level))
def get_loglevel(self, level):
if not isinstance(level, int):
return LOG_LEVELS[level]
return level
@cached_property
def logger(self):
return self.get_logger()
@property
def logger_name(self):
return self.__class__.__name__
class Log(LogMixin):
def __init__(self, name, logger=None):
self._logger_name = name
self._logger = logger
def get_logger(self):
if self._logger:
return self._logger
return LogMixin.get_logger(self)
@property
def logger_name(self):
return self._logger_name
def setup_logging(loglevel=None, logfile=None):
logger = logging.getLogger()
loglevel = get_loglevel(loglevel or 'ERROR')
logfile = logfile if logfile else sys.__stderr__
if not logger.handlers:
if hasattr(logfile, 'write'):
handler = logging.StreamHandler(logfile)
else:
handler = WatchedFileHandler(logfile)
logger.addHandler(handler)
logger.setLevel(loglevel)
return logger
|
Laurawly/tvm-1 | python/tvm/topi/gpu/conv2d_nhwc.py | Python | apache-2.0 | 5,436 | 0.000368 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Direct conv2d in NHWC layout"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_conv2d_nhwc_direct(cfg, s, Conv):
"""schedule optimized for NHWC direct conv2d"""
pad_data, kernel = s[Conv].op.input_tensors
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if Conv.op in s.outputs:
output = Conv
OL = s.cache_write(Conv, "local")
else:
output = s.outputs[0].output(0)
s[Conv].set_scope("local")
OL = Conv
# create cache stage
AA = s.cache_read(pad_data, "shared", [OL])
WW = s.cache_read(kernel, "shared", [OL])
AL = s.cache_read(AA, "local", [OL])
WL = s.cache_read(WW, "local", [OL])
# Currently Conv2d NHWC only support dynamic shpe in batch
dynamic_batch = isinstance(s[output].op.axis[0].dom.extent, tvm.tir.expr.Var)
# Schedule for autotvm
cfg.define_knob("tile_n", [1] if dynamic_batch else [2, 4, 8])
cfg.define_knob("tile_c", [2, 4, 8])
cfg.define_knob("num_thread_n", [1] if dynamic_batch else [4, 8, 16])
cfg.define_knob("num_thread_c", [4, 8, 16])
cfg.define_knob("vthread_n", [1] if dynamic_batch else [1, 2])
cfg.define_knob("vthread_c", [1, 2])
cfg.define_knob("step", [16, 3, 32, 64])
cfg.define_knob("vectorize", [1, 2, 4, 8])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.loa | d_reference_log(
| target.kind.name, target.model, "conv2d_nhwc.gpu"
)
cfg.fallback_with_reference_log(ref_log)
tile_n = cfg["tile_n"].val
tile_c = cfg["tile_c"].val
num_thread_n = cfg["num_thread_n"].val
num_thread_c = cfg["num_thread_c"].val
vthread_n = cfg["vthread_n"].val
vthread_c = cfg["vthread_c"].val
step = cfg["step"].val
vec_factor = cfg["vectorize"].val
block_factor_c = tile_c * num_thread_c * vthread_c
offset = 8
A_align = step + offset
W_align = block_factor_c + offset
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis((0, num_thread_c), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread_n), "threadIdx.y")
thread_xz = te.thread_axis((0, vthread_c), "vthread", name="vx")
thread_yz = te.thread_axis((0, vthread_n), "vthread", name="vy")
# Schedule for output
ni, _, wi, fi = s[output].op.axis
bx = wi
fi, vec = s[output].split(fi, factor=vec_factor)
s[output].vectorize(vec)
tx, fi = s[output].split(fi, factor=tile_c)
txz, tx = s[output].split(tx, factor=num_thread_c)
bz, txz = s[output].split(txz, factor=vthread_c)
ty, ni = s[output].split(ni, factor=tile_n)
tyz, ty = s[output].split(ty, factor=num_thread_n)
by, tyz = s[output].split(tyz, factor=vthread_n)
s[output].reorder(bx, by, bz, tyz, txz, ty, tx, ni, fi, vec)
s[output].bind(bz, block_z)
s[output].bind(by, block_y)
s[output].bind(bx, block_x)
s[output].bind(tyz, thread_yz)
s[output].bind(txz, thread_xz)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
# Schedule local computation
s[OL].compute_at(s[output], tx)
ni, yi, xi, fi = s[OL].op.axis
ry, rx, rc = s[OL].op.reduce_axis
rco, rci = s[OL].split(rc, factor=step)
s[OL].vectorize(fi)
s[OL].reorder(rco, ry, rx, rci, ni, fi)
s[AA].compute_at(s[OL], rx)
s[WW].compute_at(s[OL], rx)
s[AL].compute_at(s[OL], rci)
s[WL].compute_at(s[OL], rci)
# Schedule for data's share memory
ni, yi, xi, ci = s[AA].op.axis
s[AA].reorder(yi, xi, ni, ci)
s[AA].storage_align(xi, A_align - 1, A_align)
t = s[AA].fuse(ni, ci)
ty, tx = s[AA].split(t, factor=num_thread_c)
_, ty = s[AA].split(ty, factor=num_thread_n)
s[AA].bind(tx, thread_x)
s[AA].bind(ty, thread_y)
# Schedule for kernel's share memory
_, _, ic, o = s[WW].op.axis
t = s[WW].fuse(ic, o)
s[WW].storage_align(ic, W_align - 1, W_align)
t, vec = s[WW].split(t, factor=vec_factor)
s[WW].vectorize(vec)
ty, tx = s[WW].split(t, factor=num_thread_c)
_, ty = s[WW].split(ty, factor=num_thread_n)
s[WW].bind(tx, thread_x)
s[WW].bind(ty, thread_y)
N, OH, OW, CO = get_const_tuple(output.shape)
KH, KW, CI, _ = get_const_tuple(kernel.shape)
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
|
mikeakohn/naken_asm | tests/disasm/pic32.py | Python | gpl-3.0 | 2,606 | 0.023024 | #!/usr/bin/env python3
import os, sys
reg_nums = []
instructions = []
errors = 0
alias = [ "move", "negu", "not" ]
# -------------------------- fold here -----------------------------
print("Disassembler: PIC32")
for i in range(0, 32):
reg_nums.append("$" + str(i))
fp = open("../comparison/pic32.txt", "r")
out = open("test.asm", "w")
out.write(".pic32\n")
for line in fp:
instruction = line.split("|")[0].strip()
if instruction.startswith("main:"): continue
if instruction.startswith("li"): continue
if instruction.startswith("la"): continue
ignore = False
for reg_num in reg_nums:
if reg_num in instruction: ignore = True
if ignore == True: continue
if " " in instruction: name = instruction.split()[0]
else: name = instruction
if name in alias:
#print("Skipping: " + name)
continue
out.write(instruction + "\n")
instructions.append(instruction)
fp.close()
out.close()
os.system("../../naken_asm -l test.asm > /dev/null")
fp = open("out.lst", "r")
i = 0
for line in fp:
if not line.startswith("0x"): continue
line = line[23:64].strip()
if line != instructions[i]:
a = instructions[i].split(",")
b = line.split(",")
name_a = a[0].split()[0]
name_b = b | [0].split()[0]
if a[0] in [ "di", "ei" ]: continue
a[0] = a[0].split()[1]
b[0] = b[0].split()[1]
broken = False
if len(a) != len(b) or name_a != name_b: |
print(name_a + " " + name_b)
broken = True
else:
for j in range(0, len(a)):
a[j] = a[j].strip()
b[j] = b[j].strip()
if a[j] != b[j]:
if not " " in a[j] and "(" in a[j] and a[j][0] != '(' and \
not " " in b[j] and "(" in b[j] and b[j][0] != '(':
a[j] = a[j].replace("(", " (")
b[j] = b[j].replace("(", " (")
if a[j].split()[1] == b[j].split()[1]:
value_a = int(a[j].split()[0], 0)
value_b = int(b[j].split()[0], 0)
if value_a < 0: value_a = value_a + 1 + 0xffff
if value_b < 0: value_b = value_b + 1 + 0xffff
if value_a == value_b: continue
operands = b[j].replace("(","").replace(")","").split()
if len(operands) == 2:
if str(a[j]) in operands: continue
broken = True
if broken == True:
print(str(i) + ") " + line + " " + instructions[i])
errors += 1
i += 1
fp.close()
os.unlink("test.asm")
os.unlink("out.hex")
os.unlink("out.lst")
if errors != 0:
print("Total errors: " + str(errors))
print("Failed!")
sys.exit(-1)
else:
print("Passed!")
|
goshow-jp/Kraken | Python/kraken_components/fabrice/fabrice_clavicle.py | Python | bsd-3-clause | 7,805 | 0.004356 | from kraken.core.maths import Vec3
from kraken.core.maths.xfo import Xfo
from kraken.core.objects.components.base_example_component import BaseExampleComponent
from kraken.core.objects.attributes.attribute_group import AttributeGroup
from kraken.core.objects.attributes.scalar_attribute import ScalarAttribute
from kraken.core.objects.attributes.bool_attribute import BoolAttribute
from kraken.core.objects.attributes.string_attribute import StringAttribute
from kraken.core.objects.constraints.pose_constraint import PoseConstraint
from kraken.core.objects.component_group import ComponentGroup
from kraken.core.objects.hierarchy_group import HierarchyGroup
from kraken.core.objects.locator import Locator
from kraken.core.objects.joint import Joint
from kraken.core.objects.ctrlSpace import CtrlSpace
from kraken.core.objects.control import Control
from kraken.core.objects.operators.kl_operator import KLOperator
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
class FabriceClavicle(BaseExampleComponent):
"""Clavicle Component Base"""
de | f __init__(self, name='clavicle', parent=None):
| super(FabriceClavicle, self).__init__(name, parent)
# ===========
# Declare IO
# ===========
# Declare Inputs Xfos
self.spineEndInputTgt = self.createInput('spineEnd', dataType='Xfo', parent=self.inputHrcGrp).getTarget()
# Declare Output Xfos
self.clavicleOutputTgt = self.createOutput('clavicle', dataType='Xfo', parent=self.outputHrcGrp).getTarget()
# Declare Input Attrs
self.drawDebugInputAttr = self.createInput('drawDebug', dataType='Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()
self.rigScaleInputAttr = self.createInput('rigScale', dataType='Float', value=1.0, parent=self.cmpInputAttrGrp).getTarget()
# Declare Output Attrs
class FabriceClavicleGuide(FabriceClavicle):
"""Clavicle Component Guide"""
def __init__(self, name='clavicle', parent=None):
Profiler.getInstance().push("Construct Clavicle Guide Component:" + name)
super(FabriceClavicleGuide, self).__init__(name, parent)
# =========
# Controls
# =========
# Guide Controls
guideSettingsAttrGrp = AttributeGroup("GuideSettings", parent=self)
self.clavicleCtrl = Control('clavicle', parent=self.ctrlCmpGrp, shape="cube")
self.clavicleCtrl.alignOnXAxis()
self.clavicleCtrl.scalePoints(Vec3(1.0, 0.25, 0.25))
data = {
"name": name,
"location": "L",
"clavicleXfo": Xfo(Vec3(0.1322, 15.403, -0.5723)),
'clavicleCtrlCrvData': self.clavicleCtrl.getCurveData()
}
self.loadData(data)
Profiler.getInstance().pop()
# =============
# Data Methods
# =============
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(FabriceClavicleGuide, self).saveData()
data['clavicleXfo'] = self.clavicleCtrl.xfo
data['clavicleCtrlCrvData'] = self.clavicleCtrl.getCurveData()
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(FabriceClavicleGuide, self).loadData( data )
self.clavicleCtrl.xfo = data['clavicleXfo']
self.clavicleCtrl.setCurveData(data['clavicleCtrlCrvData'])
return True
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig..
Return:
The JSON rig data object.
"""
data = super(FabriceClavicleGuide, self).getRigBuildData()
data['clavicleXfo'] = self.clavicleCtrl.xfo
data['clavicleCtrlCrvData'] = self.clavicleCtrl.getCurveData()
return data
# ==============
# Class Methods
# ==============
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
The true if this component is a guide component.
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return FabriceClavicleRig
class FabriceClavicleRig(FabriceClavicle):
"""Clavicle Component"""
def __init__(self, name='Clavicle', parent=None):
Profiler.getInstance().push("Construct Clavicle Rig Component:" + name)
super(FabriceClavicleRig, self).__init__(name, parent)
# =========
# Controls
# =========
# Clavicle
self.clavicleCtrlSpace = CtrlSpace('clavicle', parent=self.ctrlCmpGrp)
self.clavicleCtrl = Control('clavicle', parent=self.clavicleCtrlSpace, shape="cube")
self.clavicleCtrl.alignOnXAxis()
# ==========
# Deformers
# ==========
deformersLayer = self.getOrCreateLayer('deformers')
defCmpGrp = ComponentGroup(self.getName(), self, parent=deformersLayer)
self.addItem('defCmpGrp', self.defCmpGrp)
self.clavicleDef = Joint('clavicle', parent=defCmpGrp)
self.clavicleDef.setComponent(self)
# ==============
# Constrain I/O
# ==============
# Constraint inputs
clavicleInputConstraint = PoseConstraint('_'.join([self.clavicleCtrl.getName(), 'To', self.spineEndInputTgt.getName()]))
clavicleInputConstraint.setMaintainOffset(True)
clavicleInputConstraint.addConstrainer(self.spineEndInputTgt)
self.clavicleCtrlSpace.addConstraint(clavicleInputConstraint)
# Constraint outputs
clavicleConstraint = PoseConstraint('_'.join([self.clavicleOutputTgt.getName(), 'To', self.clavicleCtrl.getName()]))
clavicleConstraint.addConstrainer(self.clavicleCtrl)
self.clavicleOutputTgt.addConstraint(clavicleConstraint)
# ===============
# Add Canvas Ops
# ===============
# Add Deformer Canvas Op
self.defConstraintOp = KLOperator('defConstraint', 'PoseConstraintSolver', 'Kraken')
self.addOperator(self.defConstraintOp)
# Add Att Inputs
self.defConstraintOp.setInput('drawDebug', self.drawDebugInputAttr)
self.defConstraintOp.setInput('rigScale', self.rigScaleInputAttr)
# Add Xfo Inputs
self.defConstraintOp.setInput('constrainer', self.clavicleOutputTgt)
# Add Xfo Outputs
self.defConstraintOp.setOutput('constrainee', self.clavicleDef)
Profiler.getInstance().pop()
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(FabriceClavicleRig, self).loadData( data )
self.clavicleCtrlSpace.xfo = data['clavicleXfo']
self.clavicleCtrl.xfo = data['clavicleXfo']
self.clavicleCtrl.setCurveData(data['clavicleCtrlCrvData'])
# Set IO Xfos
self.spineEndInputTgt.xfo = data['clavicleXfo']
self.clavicleOutputTgt.xfo = data['clavicleXfo']
# Eval Operators
self.defConstraintOp.evaluate()
from kraken.core.kraken_system import KrakenSystem
ks = KrakenSystem.getInstance()
ks.registerComponent(FabriceClavicleGuide)
ks.registerComponent(FabriceClavicleRig)
|
bssrdf/SparseLSH | sparselsh/__init__.py | Python | mit | 277 | 0 | # This module is part of | lshash and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__title__ = 'sparselsh'
__author__ = 'Brandon Roberts (brandon@bxroberts.org)'
__license__ = 'MIT'
__version__ = '0.0.2'
from sparselsh.lsh import | LSH
|
jawilson/home-assistant | tests/components/denonavr/test_media_player.py | Python | apache-2.0 | 3,970 | 0.000252 | """The tests for the denonavr media player platform."""
from unittest.mock import patch
import pytest
from homeassistant.components import media_player
from homeassistant.components.denonavr.config_flow import (
CONF_MANUFACTURER,
CONF_MODEL,
CONF_SERIAL_NUMBER,
CONF_TYPE,
DOMAIN,
)
from homeassistant.components.denonavr.media_player import (
ATTR_COMMAND,
ATTR_DYNAMIC_EQ,
SERVICE_GET_COMMAND,
SERVICE_SET_DYNAMIC_EQ,
SERVICE_UPDATE_AUDYSSEY,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST
from tests.common import MockConfigEntry
TEST_HOST = "1.2.3.4"
TEST_NAME = "Test_Receiver"
TEST_MODEL = "model5"
TEST_SERIALNUMBER = "123456789"
TEST_MANUFACTURER = "Denon"
TEST_RECEIVER_TYPE = "avr-x"
TEST_ZONE = "Main"
TEST_UNIQUE_ID = f"{TEST_MODEL}-{TEST_SERIALNUMBER}"
TEST_TIMEOUT = 2
TEST_SHOW_ALL_SOURCES = False
TEST_ZONE2 = False
TEST_ZONE3 = False
ENTITY_ID = f"{media_player.DOMAIN}.{TEST_NAME}"
@pytest.fixture(name="client")
def client_fixture():
    """Yield a mocked DenonAVR client pre-populated with the test constants.

    Patches the receiver class used by the integration (autospec'd so
    attribute/method access is shape-checked) and also patches the config
    flow's ``async_discover`` so no network discovery is attempted.
    """
    with patch(
        "homeassistant.components.denonavr.receiver.DenonAVR",
        autospec=True,
    ) as mock_client_class, patch(
        "homeassistant.components.denonavr.config_flow.denonavr.async_discover"
    ):
        # Identity attributes; values mirror the module-level TEST_* constants.
        mock_client_class.return_value.name = TEST_NAME
        mock_client_class.return_value.model_name = TEST_MODEL
        mock_client_class.return_value.serial_number = TEST_SERIALNUMBER
        mock_client_class.return_value.manufacturer = TEST_MANUFACTURER
        mock_client_class.return_value.receiver_type = TEST_RECEIVER_TYPE
        mock_client_class.return_value.zone = TEST_ZONE
        # Empty capability lists keep the mocked receiver minimal.
        mock_client_class.return_value.input_func_list = []
        mock_client_class.return_value.sound_mode_list = []
        # The "Main" zone maps back onto the same mock instance.
        mock_client_class.return_value.zones = {"Main": mock_client_class.return_value}
        yield mock_client_class.return_value
async def setup_denonavr(hass):
    """Create a mocked config entry and set up the denonavr media player."""
    mock_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=TEST_UNIQUE_ID,
        data={
            CONF_HOST: TEST_HOST,
            CONF_MODEL: TEST_MODEL,
            CONF_TYPE: TEST_RECEIVER_TYPE,
            CONF_MANUFACTURER: TEST_MANUFACTURER,
            CONF_SERIAL_NUMBER: TEST_SERIALNUMBER,
        },
    )
    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()

    # Sanity-check that the entity was created and carries the expected name.
    entity_state = hass.states.get(ENTITY_ID)
    assert entity_state
    assert entity_state.name == TEST_NAME
async def test_get_comman | d(hass, client):
"""Test generic command functionality."""
await setup_denonavr(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_COMMAND: "test_command",
}
await hass.services.async_call(DOMAIN, S | ERVICE_GET_COMMAND, data)
await hass.async_block_till_done()
client.async_get_command.assert_awaited_with("test_command")
async def test_dynamic_eq(hass, client):
    """Test that dynamic eq method works."""
    await setup_denonavr(hass)

    service_data = {
        ATTR_ENTITY_ID: ENTITY_ID,
        ATTR_DYNAMIC_EQ: True,
    }

    # Call the service once with dynamic EQ enabled...
    await hass.services.async_call(DOMAIN, SERVICE_SET_DYNAMIC_EQ, service_data)
    await hass.async_block_till_done()

    # ...and once with it disabled.
    service_data[ATTR_DYNAMIC_EQ] = False
    await hass.services.async_call(DOMAIN, SERVICE_SET_DYNAMIC_EQ, service_data)
    await hass.async_block_till_done()

    # Each client method should have been invoked exactly once.
    client.async_dynamic_eq_on.assert_called_once()
    client.async_dynamic_eq_off.assert_called_once()
async def test_update_audyssey(hass, client):
    """Test that dynamic eq method works."""
    await setup_denonavr(hass)

    # Calling the service should delegate straight to the client.
    await hass.services.async_call(
        DOMAIN, SERVICE_UPDATE_AUDYSSEY, {ATTR_ENTITY_ID: ENTITY_ID}
    )
    await hass.async_block_till_done()

    client.async_update_audyssey.assert_called_once()
|
mancoast/CPythonPyc_test | cpython/221_test_descr.py | Python | gpl-3.0 | 81,788 | 0.007947 | # Test enhancements related to descriptors and new-style classes
from test_support import verify, vereq, verbose, TestFailed, TESTFN
from copy import deepcopy
def veris(a, b):
    """Fail with TestFailed unless *a* and *b* are the very same object."""
    if a is not b:
        raise TestFailed, "%r is %r" % (a, b)
def testunop(a, res, expr="len(a)", meth="__len__"):
    """Check a unary operation three ways: via eval of *expr*, via the
    unbound method looked up on the type, and via the bound method on the
    instance.  All must yield *res*.
    """
    if verbose: print "checking", expr
    dict = {'a': a}
    vereq(eval(expr, dict), res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up the base classes until we reach the class that actually
    # defines *meth*, then confirm getattr returned that same definition.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    vereq(m(a), res)
    bm = getattr(a, meth)
    vereq(bm(), res)
def testbinop(a, b, res, expr="a+b", meth="__add__"):
    """Check a binary operation three ways: via eval of *expr*, via the
    unbound method on the type, and via the bound method on *a*.  All
    must yield *res*.
    """
    if verbose: print "checking", expr
    dict = {'a': a, 'b': b}
    # XXX Hack so this passes before 2.3 when -Qnew is specified.
    if meth == "__div__" and 1/2 == 0.5:
        meth = "__truediv__"
    vereq(eval(expr, dict), res)
    t = type(a)
    m = getattr(t, meth)
    # Walk up the base classes to the class that actually defines *meth*
    # and confirm getattr returned that same definition.
    while meth not in t.__dict__:
        t = t.__bases__[0]
    vereq(m, t.__dict__[meth])
    vereq(m(a, b), res)
    bm = getattr(a, meth)
    vereq(bm(b), res)
def testternop(a, b, c, res, expr="a[b:c]", meth="__getslice__"):
if verbose: print "checking", expr
dict = {'a': a, 'b': b, 'c': c}
vereq(eval(expr, dict), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
vereq(m(a, b, c), res)
bm = getattr(a, meth)
vereq(bm(b, c), res)
def testsetop(a, b, res, stmt="a+=b", meth="__iadd__"):
if verbose: print "checking", stmt
dict = {'a': deepcopy(a), 'b': b}
exec stmt in dict
vereq(dict['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
dict['a'] = deepcopy(a)
m(dict['a'], b)
vereq(dict['a'], res)
dict['a'] = deepcopy(a)
bm = getattr(dict['a'], meth)
bm(b)
vereq(dict['a'], res)
def testset2op(a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
if verbose: print "checking", stmt
dict = {'a': deepcopy(a), 'b': b, 'c': c}
exec stmt in dict
vereq(dict['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
vereq(m, t.__dict__[meth])
dict['a'] = deepcopy(a)
m(dict['a'], b, c)
vereq(dict['a'], res)
dict['a'] = deepcopy(a)
bm = getattr(dict['a'], meth)
bm(b, c)
vereq(dict['a'], res)
def testset3op(a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"):
if verbose: print "checking", stmt
dict = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
exec stmt in dict
vereq(dict['a'], res)
t = type(a)
while meth not in t.__dict__:
t = t.__bases__[0]
m = getattr(t, meth)
vereq(m, t.__dict__[meth])
dict['a'] = deepcopy(a)
m(dict['a'], b, c, d)
vereq(dict['a'], res)
dict['a'] = deepcopy(a)
bm = getattr(dict['a'], meth)
bm(b, c, d)
vereq(dict['a'], res)
def class_docstrings():
class Classic:
"A classic docstring."
vereq(Classic.__doc__, "A classic docstring.")
vereq(Classic.__dict__['__doc__'], "A classic docstring.")
class Classic2:
pass
verify(Classic2.__doc__ is None)
class NewStatic(object):
"Another docstring."
vereq(NewStatic.__doc__, "Another docstring.")
vereq(NewStatic.__dict__['__doc__'], "Another docstring.")
class NewStatic2(object):
pass
verify(NewStatic2.__doc__ is None)
class NewDynamic(object):
"Another docstring."
vereq(NewDynamic.__doc__, "Another docstring.")
vereq(NewDynamic.__dict__['__doc__'], "Another docstring.")
class NewDynamic2(object):
pass
verify(NewDynamic2.__doc__ is None)
def lists():
if verbose: print "Testing list operations..."
testbinop([1], [2], [1,2], "a+b", "__add__")
testbinop([1,2,3], 2, 1, "b in a", "__contains__")
testbinop([1,2,3], 4, 0, "b in a", "__contains__")
testbinop([1,2,3], 1, 2, "a[b]", "__getitem__")
testternop([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__")
testsetop([1], [2], [1,2], "a+=b", "__iadd__")
testsetop([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
testunop([1,2,3], 3, "len(a)", "__len__")
testbinop([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
testbinop([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
testset2op([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
testset3op([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__")
de | f dicts():
if verbose: print "Testing dict operations..."
testbinop({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__")
testbinop({1:2,3:4}, 1, 1, "b in a", "__contains__")
testbinop({1:2,3:4}, 2, 0, "b in a", "__contains__")
testbinop({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
d = {1:2,3:4}
l1 = []
for i in d.keys(): l1.append(i)
l = []
for i in iter(d): l.append(i)
vereq(l, l1)
l = []
fo | r i in d.__iter__(): l.append(i)
vereq(l, l1)
l = []
for i in dict.__iter__(d): l.append(i)
vereq(l, l1)
d = {1:2, 3:4}
testunop(d, 2, "len(a)", "__len__")
vereq(eval(repr(d), {}), d)
vereq(eval(d.__repr__(), {}), d)
testset2op({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__")
def dict_constructor():
if verbose:
print "Testing dict constructor ..."
d = dict()
vereq(d, {})
d = dict({})
vereq(d, {})
d = dict(items={})
vereq(d, {})
d = dict({1: 2, 'a': 'b'})
vereq(d, {1: 2, 'a': 'b'})
vereq(d, dict(d.items()))
vereq(d, dict(items=d.iteritems()))
for badarg in 0, 0L, 0j, "0", [0], (0,):
try:
dict(badarg)
except TypeError:
pass
except ValueError:
if badarg == "0":
# It's a sequence, and its elements are also sequences (gotta
# love strings <wink>), but they aren't of length 2, so this
# one seemed better as a ValueError than a TypeError.
pass
else:
raise TestFailed("no TypeError from dict(%r)" % badarg)
else:
raise TestFailed("no TypeError from dict(%r)" % badarg)
try:
dict(senseless={})
except TypeError:
pass
else:
raise TestFailed("no TypeError from dict(senseless={})")
try:
dict({}, {})
except TypeError:
pass
else:
raise TestFailed("no TypeError from dict({}, {})")
class Mapping:
# Lacks a .keys() method; will be added later.
dict = {1:2, 3:4, 'a':1j}
try:
dict(Mapping())
except TypeError:
pass
else:
raise TestFailed("no TypeError from dict(incomplete mapping)")
Mapping.keys = lambda self: self.dict.keys()
Mapping.__getitem__ = lambda self, i: self.dict[i]
d = dict(items=Mapping())
vereq(d, Mapping.dict)
# Init from sequence of iterable objects, each producing a 2-sequence.
class AddressBookEntry:
def __init__(self, first, last):
self.first = first
self.last = last
def __iter__(self):
return iter([self.first, self.last])
d = dict([AddressBookEntry('Tim', 'Warsaw'),
AddressBookEntry('Barry', 'Peters'),
AddressBookEntry('Tim', 'Peters'),
AddressBookEntry('Barry', 'Warsaw')])
vereq(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
d = dict(zip(range(4), range(1, 5)))
vereq(d, dict([(i, i+1) for i in range(4)]))
# Bad sequence lengths.
for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
try:
dict(bad)
except ValueError:
pass
else:
raise TestFailed("no ValueError from dict(%r)" % bad)
def test_dir():
if verbose:
print "Testing dir() ..."
junk = 12
vereq(dir(), ['junk'])
del junk
# Just make sure these don't blow up!
for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, test_dir:
dir(arg)
# Try classic classes.
class C:
Cdata = 1
def Cmethod(self): pass
cstuff = ['Cdata', 'Cmethod', '__doc__', '__mod |
ddboline/Garmin-Forerunner-610-Extractor_fork | ant/fs/test/commandpipe_test.py | Python | mit | 1,838 | 0.005985 | # Ant-FS
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
from ant.fs.commandpipe import parse, CreateFile
def main():
| # Test create file
data = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09]
request = CreateFile(len(data), 0x80, [0x04, 0x00, 0x00], [0x00, 0xff, 0xff])
print request
print request.get()
# Test create file response
response_data = array.array('B', [2, 0, 0, 0, 4, 0, 0, 0, 128, 4, 123, 0, 103, 0, 0, 0])
response = parse(response_data)
assert response.get_request_id() == 0x04
assert response.get_response() == 0x00
assert resp | onse.get_data_type() == 0x80 #FIT
assert response.get_identifier() == array.array('B', [4, 123, 0])
assert response.get_index() == 103
|
woolfson-group/isambard | isambard/external_programs/reduce.py | Python | mit | 7,041 | 0.000142 | """This module provides an interface to the program Reduce.
Requires the reduce executable and reduce_wwPDB_het_dict.txt located
in a directory specified in global_settings. These can be downloaded
from: http://kinemage.biochem.duke.edu/software/reduce.php
For more information on Reduce, see [1].
References
----------
.. [1] Word, et al.(1999) Asparagine and glutamine: using hydrogen atom
contacts in the choice of sidechain amide orientation" J. Mol. Biol.
285, 1735-1747.
"""
import subprocess
import tempfile
from pathlib import Path
from settings import global_settings
def run_reduce(input_file, path=True):
""" Runs reduce on a pdb or mmol file at the specified path.
Notes
-----
Runs Reduce programme to add missing protons to a PDB file.
Parameters
----------
input_file : str
Path to file to add protons to or structure in mmol/pdb format.
path : bool, optional
True if input_file is a path.
Returns
-------
reduce_mmol : str
Structure file with protons added.
reduce_message : str
Messages generated while running Reduce.
Raises
------
FileNotFoundError
Raised if the executable cannot be found.
"""
if path:
input_path = Path(input_file)
if not input_path.exists():
print('No file found at', path)
return None, None
else:
pathf = tempfile.NamedTemporaryFile()
encoded_input = input_file.encode()
pathf.write(encoded_input)
pathf.seek(0)
file_path = pathf.name
input_path = Path(file_path)
reduce_folder = Path(global_settings['reduce']['folder'])
reduce_exe = reduce_folder / global_settings['reduce']['path']
reduce_dict = reduce_folder / 'reduce_wwPDB_het_dict.txt'
try:
reduce_output = subprocess.run(
[str(reduce_exe), '-build', '-DB',
str(reduce_dict), str(input_path)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except FileNotFoundError as e:
raise FileNotFoundError(
'The Reduce executable cannot be found. Ensure the '
'location and filename are specified in settings.')
try:
reduced_mmol = reduce_output.stdout.decode()
except UnicodeDecodeError:
print("Reduce could not detect any missing protons in the protein. "
" | Using the original structure.")
if path:
reduced_mmol = input_path.read_text()
else:
reduced_mmol = input_file
reduce_message = reduce_output.stderr.decode()
if 'could not open' in reduce_message:
print('Caution: the Reduce connectivity dictionary could not be '
'found. Some protons may be missing. See notes.')
return reduced_mmol, reduce_message
|
def reduce_output_path(path=None, pdb_name=None):
    """Define the location of Reduce output files relative to input files.

    Parameters
    ----------
    path : str or pathlib.Path, optional
        Path to the input structure file.  If omitted, *pdb_name* must be
        given and the output location is derived from the structural
        database path in ``global_settings``.
    pdb_name : str, optional
        PDB ID of the protein.  Required when *path* is not supplied.

    Returns
    -------
    output_path : pathlib.Path
        Location where the Reduce output for this input should be stored.

    Raises
    ------
    NameError
        If neither *path* nor *pdb_name* is provided.
    """
    if not path:
        if not pdb_name:
            # Bug fix: the two string fragments previously concatenated to
            # "PDBcode" — a separating space was missing.
            raise NameError(
                "Cannot save an output for a temporary file without a PDB "
                "code specified")
        pdb_name = pdb_name.lower()
        output_path = Path(global_settings['structural_database']['path'],
                           pdb_name[1:3].lower(), pdb_name[:4].lower(),
                           'reduce', pdb_name + '_reduced.mmol')
    else:
        input_path = Path(path)
        if len(input_path.parents) > 1:
            # Mirror the input's layout: outputs live in a sibling 'reduce'
            # directory one level above the input file.
            output_path = input_path.parents[1] / 'reduce' / \
                (input_path.stem + '_reduced' + input_path.suffix)
        else:
            # Input sits at the top level; place the output next to it.
            output_path = input_path.parent / \
                (input_path.stem + '_reduced' + input_path.suffix)
    return output_path
def output_reduce(input_file, path=True, pdb_name=None, force=False):
    """Run Reduce on a pdb or mmol file and write the output to a new file.

    Parameters
    ----------
    input_file : str or pathlib.Path
        Path to the file to run Reduce on (or file contents as a string).
    path : bool
        True if *input_file* is a path.
    pdb_name : str
        PDB ID of the protein.  Required when passing a string, not a path.
    force : bool
        True if an existing Reduce output should be overwritten.

    Returns
    -------
    output_path : pathlib.Path
        Location of the output file, or None if Reduce produced nothing.
    """
    output_path = (reduce_output_path(path=input_file) if path
                   else reduce_output_path(pdb_name=pdb_name))
    # Reuse a previous run unless explicitly forced to regenerate.
    if output_path.exists() and not force:
        return output_path
    reduced_structure, _message = run_reduce(input_file, path=path)
    if not reduced_structure:
        return None
    output_path.parent.mkdir(exist_ok=True)
    output_path.write_text(reduced_structure)
    return output_path
def output_reduce_list(path_list, force=False):
    """Generate protonated structure files for every path in *path_list*,
    returning the output paths of the runs that produced a file."""
    candidates = (output_reduce(entry, force=force) for entry in path_list)
    return [result for result in candidates if result]
def assembly_plus_protons(input_file, path=True, pdb_name=None,
                          save_output=False, force_save=False):
    """Returns an Assembly with protons added by Reduce.

    Notes
    -----
    Looks for a pre-existing Reduce output in the standard location before
    running Reduce. If the protein contains oligosaccharides or glycans,
    use reduce_correct_carbohydrates.

    Parameters
    ----------
    input_file : str or pathlib.Path
        Location of file to be converted to Assembly or PDB file as string.
    path : bool
        Whether we are looking at a file or a pdb string. Defaults to file.
    pdb_name : str
        PDB ID of protein. Required if providing string not path.
    save_output : bool
        If True will save the generated assembly.
    force_save : bool
        If True will overwrite existing reduced assembly.

    Returns
    -------
    reduced_assembly : AMPAL Assembly
        Assembly of protein with protons added by Reduce, or None if
        Reduce produced no output.
    """
    # Imported locally — presumably to avoid a circular import; verify.
    from ampal.pdb_parser import convert_pdb_to_ampal
    if path:
        input_path = Path(input_file)
        if not pdb_name:
            # Derive the PDB ID from the first four characters of the stem.
            pdb_name = input_path.stem[:4]
        reduced_path = reduce_output_path(path=input_path)
        # Fast path: reuse a cached Reduce output when not asked to save.
        if reduced_path.exists() and not save_output and not force_save:
            reduced_assembly = convert_pdb_to_ampal(
                str(reduced_path), pdb_id=pdb_name)
            return reduced_assembly
    if save_output:
        reduced_path = output_reduce(
            input_file, path=path, pdb_name=pdb_name, force=force_save)
        # NOTE(review): pdb_id is not passed here, unlike the cached-path
        # branch above — confirm this is intentional.
        reduced_assembly = convert_pdb_to_ampal(str(reduced_path), path=True)
    else:
        reduce_mmol, reduce_message = run_reduce(input_file, path=path)
        if not reduce_mmol:
            return None
        reduced_assembly = convert_pdb_to_ampal(
            reduce_mmol, path=False, pdb_id=pdb_name)
    return reduced_assembly
__author__ = 'Kieran L. Hudson, Gail J. Bartlett'
|
sdiazpier/nest-simulator | testsuite/pytests/test_facetshw_stdp.py | Python | gpl-2.0 | 5,894 | 0 | # -*- coding: utf-8 -*-
#
# test_facetshw_stdp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy as np
import unittest
class FacetsTestCase(unittest.TestCase):
"""
This script is testing the accumulation of spike pairs and
the weight update mechanism as implemented in the FACETS hardware.
Author: Thomas Pfeil
Date of first version: 21.01.2013
"""
def test_facetshw_stdp(self):
nest.ResetKernel()
modelName = 'stdp_facetshw_synapse_hom'
# homogeneous parameters for all synapses
Wmax = 100.0
# see *.cpp file of synapse model and Pfeil et al. 2012 for LUT
# configuration
lut_0 = [2, 3, 4, | 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 14, 15]
lut_1 = [0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13]
lut | _2 = range(16) # identity
config_0 = [0, 0, 1, 0]
config_1 = [0, 1, 0, 0]
reset_pattern = 6 * [1] # reset all
# individual parameters for each synapse
# reached every 36 runs (e^(-10/20) = 21.83510375)
lut_th_causal = 21.835
lut_th_acausal = lut_th_causal
# other parameters
startWeight = 0 # as digital value [0, 1, ..., 15]
tau = 20.0
timeBetweenPairs = 100.0
# frequency_of_pairs = 10Hz => delta_t(+) = 10ms, delta_t(-) = 90ms
delay = 5.0
spikesIn = np.arange(10.0, 60000.0, timeBetweenPairs)
synapseDict = {'tau_plus': tau,
'tau_minus_stdp': tau,
'Wmax': Wmax,
'synapses_per_driver': 50,
'driver_readout_time': 15.0,
'lookuptable_0': lut_0,
'lookuptable_1': lut_1,
'lookuptable_2': lut_2,
'configbit_0': config_0,
'configbit_1': config_1,
'reset_pattern': reset_pattern,
'a_thresh_th': lut_th_causal,
'a_thresh_tl': lut_th_acausal}
# build network
stim = nest.Create('spike_generator')
neuronA = nest.Create('parrot_neuron')
neuronB = nest.Create('parrot_neuron')
nest.SetStatus(stim, [{'spike_times': spikesIn}])
nest.SetDefaults(modelName, synapseDict)
# check if GetDefaults returns same values as have been set
synapseDictGet = nest.GetDefaults(modelName)
for key in synapseDict.keys():
self.assertTrue(
all(np.atleast_1d(synapseDictGet[key] == synapseDict[key])))
nest.Connect(stim, neuronA)
nest.Connect(neuronA, neuronB, syn_spec={
'weight': float(startWeight) / 15.0 * Wmax,
'delay': delay, 'synapse_model': modelName})
nest.Simulate(50.0)
weightTrace = []
for run in range(len(spikesIn)):
nest.Simulate(timeBetweenPairs)
connections = nest.GetConnections(neuronA)
if (connections.get('synapse_model') == modelName):
weightTrace.append(
[run, connections.get('weight'),
connections.get('a_causal'),
connections.get('a_acausal')])
# analysis
weightTrace = np.array(weightTrace)
# just before theoretical updates
weightTraceMod36pre = weightTrace[35::36]
# just after theoretical updates
weightTraceMod36 = weightTrace[::36]
weightIndex = int(startWeight)
for i in range(len(weightTraceMod36pre)):
# check weight value before update
# (after spike pair with index 35, 71, ...)
self.assertTrue(np.allclose(weightTraceMod36pre[i][1],
1.0 / 15.0 * weightIndex * Wmax,
atol=1e-6))
weightIndex = lut_0[weightIndex]
weightIndex = int(startWeight)
for i in range(len(weightTraceMod36)):
# check weight value after update
# (after spike pair with index 0, 36, 72, ...)
self.assertTrue(np.allclose(weightTraceMod36[i][1],
1.0 / 15.0 * weightIndex * Wmax,
atol=1e-6))
# check charge on causal capacitor
self.assertTrue(np.allclose(weightTraceMod36[i][2],
np.ones_like(weightTraceMod36[i][2]) *
np.exp(-2 * delay / tau), atol=1e-6))
weightIndex = lut_0[weightIndex]
# check charge on anti-causal capacitor after each pair
for i in range(len(weightTrace) - 1):
# TODO: global params
self.assertTrue(np.allclose(weightTrace[i, 3], ((i % 36) + 1) *
np.exp(-(timeBetweenPairs -
2 * delay) / tau),
atol=1e-6))
def suite():
    """Build the test suite for FacetsTestCase."""
    return unittest.makeSuite(FacetsTestCase, 'test')
def run():
    """Run the suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
run()
|
TomiBelan/fetchrev | syncgit.py | Python | apache-2.0 | 5,005 | 0.001399 | #!/usr/bin/env python2
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
try:
import cPickle as pickle
except:
import pickle
import subprocess as SP
from collections import deque
import fetchrev
log = fetchrev.log
def list_files(base):
    """Yield *base* followed by every path beneath it, depth-first.

    Symlinked directories are yielded but not descended into.
    """
    yield base
    descend = os.path.isdir(base) and not os.path.islink(base)
    if not descend:
        return
    for child in os.listdir(base):
        child_path = base + '/' + child
        for descendant in list_files(child_path):
            yield descendant
def discover_repos():
    """Return every directory under the cwd whose name ends in '.git'."""
    repos = []
    for candidate in list_files('.'):
        if candidate.endswith('.git') and os.path.isdir(candidate):
            repos.append(candidate)
    return repos
def list_reachable_revs():
    """Return the set of 40-hex commit/object hashes referenced by the git
    dir the process is currently inside (refs/, logs/, packed-refs and the
    loose *_HEAD files), excluding the all-zero null hash.
    """
    result = set()
    # Full 40-character hex object name.
    hash_re = re.compile(r'^[a-fA-F0-9]{40}$')
    def read_file(filename):
        with open(filename) as f:
            return f.read()
    def process(content, hash_columns=1):
        # Scan the first *hash_columns* space-separated words of each line
        # (reflog lines carry old and new hash in the first two columns).
        for line in content.split('\n'):
            for word in line.split(' ')[0:hash_columns]:
                if hash_re.match(word):
                    result.add(word.lower())
                if word[0:1] == '^' and hash_re.match(word[1:]):
                    # packed-refs peeled tag
                    result.add(word[1:].lower())
    # process refs/
    for entry in list_files('refs'):
        if os.path.isfile(entry):
            process(read_file(entry))
    # process logs/
    for entry in list_files('logs'):
        if os.path.isfile(entry):
            process(read_file(entry), hash_columns=2)
    # process packed-refs and all refs directly under git dir (*_HEAD etc.)
    for entry in os.listdir('.'):
        if os.path.isfile(entry):
            process(read_file(entry))
    # other special-purpose state, such as in-progress rebase or am, isn't
    # processed -- it'd be a mess to do correctly and it's not really needed.
    return result - set(['0'*40])
def filter_existing_revs(revs):
batch_checker = SP.Popen(['git', 'cat-file', '--batch-check'],
stdin=SP.PIPE, stdout=SP.PIPE)
existing_revs = []
| for hash in revs:
batch_checker.std | in.write(hash + '^{}\n')
result = batch_checker.stdout.readline()
if not result.endswith('missing\n'):
existing_revs.append(hash)
batch_checker.stdin.close()
batch_checker.wait()
return existing_revs
def local(input, output, args):
    """Local side of the sync protocol.

    Sends the remote root to the peer, intersects the repository lists,
    then for each shared repo runs a sender pass (local->remote) followed
    by a receiver pass (remote->local) over the pickle/byte stream.
    """
    local_root, remote_root = args
    # Tell the remote side which directory to operate in.
    pickle.dump(remote_root, output)
    os.chdir(local_root)
    local_root = os.getcwd()
    local_repos = set(discover_repos())
    remote_repos = set(pickle.load(input))
    for item in (local_repos - remote_repos):
        sys.stderr.write('WARNING: {} is only on local side\n'.format(item))
    for item in (remote_repos - local_repos):
        sys.stderr.write('WARNING: {} is only on remote side\n'.format(item))
    for repo in (local_repos & remote_repos):
        sys.stderr.write('------- local->remote {} --------\n'.format(repo))
        pickle.dump(repo, output)
        os.chdir(repo)
        revs = filter_existing_revs(list_reachable_revs())
        fetchrev.sender(input, output, revs, is_local=True)
        # Consume the one-byte 'F' flow marker written by remote() between
        # its receiver and sender phases.
        input.read(1)
        sys.stderr.write('------- remote->local {} --------\n'.format(repo))
        fetchrev.receiver(input, output)
        os.chdir(local_root)
    # A falsy repo value signals end-of-session to the remote loop.
    pickle.dump(None, output)
def remote(input=None, output=None):
    """Remote side of the sync protocol; counterpart of local().

    Defaults to unbuffered stdin/stdout so pickled messages and raw bytes
    interleave correctly over the ssh channel.
    """
    if not input: input = os.fdopen(0, 'r', 0)
    if not output: output = os.fdopen(1, 'w', 0)
    remote_root = pickle.load(input)
    os.chdir(remote_root)
    remote_root = os.getcwd()
    pickle.dump(discover_repos(), output)
    while True:
        repo = pickle.load(input)
        # local() sends None when all shared repos have been processed.
        if not repo:
            break
        os.chdir(remote_root)
        os.chdir(repo)
        revs = filter_existing_revs(list_reachable_revs())
        fetchrev.receiver(input, output)
        # Flow marker consumed by local() before its receiver phase.
        output.write('F')
        fetchrev.sender(input, output, revs, is_local=False)
def connect(ssh_cmd, args):
    """Launch syncgit.remote on the far end via ssh and drive local().

    Parameters
    ----------
    ssh_cmd : list
        The ssh command (and its arguments) used to reach the remote host.
    args : list
        (local_root, remote_root) pair forwarded to local().
    """
    # py-remoteexec is vendored next to this script.
    sys.path.insert(1, sys.path[0]+'/py-remoteexec')
    from remoteexec import remote_exec
    # Both modules are shipped to the remote side so syncgit.remote can run.
    modules = [sys.path[0]+'/fetchrev.py', sys.path[0]+'/syncgit.py']
    p, s = remote_exec(ssh_cmd=ssh_cmd, module_filenames=modules,
                       main_func='syncgit.remote')
    # Unbuffered file objects over the socket, matching remote()'s streams.
    local(s.makefile('r', 0), s.makefile('w', 0), args)
    p.wait()
def main():
    """Split argv at '--' into the ssh command and the program arguments."""
    argv = sys.argv[1:]
    split_at = argv.index('--')
    connect(argv[:split_at], argv[split_at + 1:])
if __name__ == '__main__':
main()
|
tbielawa/sphinxcontrib-showterm | docsite/source/conf.py | Python | apache-2.0 | 8,592 | 0.005703 | # -*- coding: utf-8 -*-
#
# Juicer documentation build configuration file, created by
# sphinx-quickstart on Thu May 21 00:27:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Read the docs theme
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# This first `insert` adds the plugin source directory to the load
# path. From here we can load the showterm plugin
sys.path.insert(0, os.path.abspath('../../sphinxcontrib/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'showterm',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'showterm'
copyright = u'2015, Tim Bielawa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :f | unc: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by d | efault.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d - %H:%M:%S %Z'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'showtermdocs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'showterm.tex', u'showterm documentation',
u'Tim Bielawa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'showterm', u'showterm documentation',
[u'Tim Bielawa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'showterm', u'showterm documentation',
u'Tim Bielawa', 'showterm', |
ganwell/dht3k | setup.py | Python | mit | 1,441 | 0 | #!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://dht3k.rtfd.org."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='dht3k',
version='0.1.0',
description=readme + '\n\n' + doclink + '\n\n' + history,
author='Jean-Louis Fuchs',
author_email='ganwell@fangorn.ch',
url='https://github.com/ganwell/dht3k',
packages=[
'dht3k',
'lazymq',
],
package_dir={
'dht3k': 'dht3k',
'lazymq': 'lazymq',
},
include_package_data=True,
install_requires=[
"msgpack-python",
"mock",
"pytest",
],
license='MIT',
zip_safe=False,
keywords='d | ht3k',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
| 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
|
scrollback/kuma | kuma/users/providers/github/views.py | Python | mpl-2.0 | 1,715 | 0 | import requests
from allauth.account.utils import get_next_redirect_url
from allauth.socialaccount.providers.oauth2.views import (OAuth2LoginView,
OAuth2CallbackView)
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
from kuma.core.urlresolvers import reverse
class KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter):
"""
A custom GitHub OAuth adapter to be used for fetching the list
of private email addresses stored for the given user at GitHub.
We store those email addresses in the extra data of each account.
"""
email_url = 'https://api.github.com/user/emails'
def complete_login(self, request, app, token, **kwargs):
params = {'access_token': token.token}
profile_data = requests.get(self.profile_url, params=params)
extra_data = profile_data.json()
email_data = requests.get(self.email_url, params=params)
extra_data['email_ad | dresses'] = email_data.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
class KumaOAuth2LoginView(OAuth2LoginView):
def dispatch(self, request):
next_url = (get_next_redirect_url(request) or
reverse('users.my_profile_edit',
locale=request.locale))
request.sessi | on['sociallogin_next_url'] = next_url
request.session.modified = True
return super(KumaOAuth2LoginView, self).dispatch(request)
oauth2_login = KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter)
|
rutsky/pytest-pylint | setup.py | Python | mit | 906 | 0 | # -*- coding: utf-8 -*-
"""
pytest-pylint
=============
Plugin for py.test for doing pylint tests
"""
from setuptools import setup
setup(
name='pytest-pylint',
des | cription='pytest plugin to check source code with pylint',
long_description=open("README.rst").read(),
license="MIT",
version='0.3.0',
author='Carson Gee',
author_email='x@carsongee.com',
url='https://github.com/carsongee/pytest-pylint',
py_modules=['pytest_pylint'],
e | ntry_points={'pytest11': ['pylint = pytest_pylint']},
install_requires=['pytest>=2.4', 'pylint', 'six'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
mef51/plawt | examples/subplotstest.py | Python | mit | 1,910 | 0.014136 | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
import plawt
import matplotlib as mpl
x = np.linspace(0, 2 * np.pi, 400)
y = np.sin(x ** 2)
##### Vanilla Matplotlib #####
# Three subplots sharing both x/y axes
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
f.suptitle('Sharing both axes')
# hack to have commone x and y labels
# https://stackoverflow.com/questions/6963035/pyplot-axes-labels-for-subplots
f.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
plt.xlabel('Velocity')
plt.ylabel('Amplitude')
ax1.plot(x, y)
# ax1.set_title('Sharing both axes')
ax2.scatter(x, y)
ax3.sca | tter(x, 2 * y ** 2 - 1, color='r')
ax1.set_title('panel a', fontsize=12)
ax2.set_title('panel b', fontsize=12)
ax3.set_title('pan | el c', fontsize=12)
ax1.minorticks_on()
# Fine-tune figure; make subplots close to each other and hide x ticks for
# all but bottom plot.
f.subplots_adjust(hspace=0.3, wspace=0)
plt.savefig('subplotcompare.png')
plt.close()
##### Same plot but with plawt #####
subtitledict = {'verticalalignment': 'center'}
plawt.plot({
0: {'x': x, 'y': y},
'title': 'Sharing both axes',
'subtitle': 'panel a',
'subtitledict': {'verticalalignment': 'center'},
'fontsize': 12,
'subloc': 'left',
'minorticks': True,
'xlabel': 'Velocity', 'ylabel': 'Amplitude',
'sharex': True, 'sharey': True,
'hspace': 0.3,
# 'aspect': 16/9,
'filename': 'subplottest.png'
}, {
0: {'x': x, 'y': y, 'line': 'bo'},
'subtitle': 'panel b',
'subtitledict': {'verticalalignment': 'center'},
'fontsize': 12,
'subloc': 'left',
}, {
0: {'x': x, 'y': 2*y**2-1, 'line': 'ro', 'label': 'panel c'},
'subtitle': 'panel c',
'subtitledict': {'verticalalignment': 'center'},
'fontsize': 12,
'subloc': 'left',
'legend': {'loc': 2, 'fontsize': 10},
})
|
GrandComicsDatabase/django-templatesadmin | templatesadmin/edithooks/gitcommit.py | Python | bsd-3-clause | 2,280 | 0.005263 | from django import forms
from django.utils.translation import ugettext_lazy as _
from templatesadmin import TemplatesAdminException
from templatesadmin.edithooks import TemplatesAdminHook
import subprocess
import os, sys
class GitCommitHook(TemplatesAdminHook):
'''
Commit to git after saving
'''
@classmethod
def post_save(cls, request, form, template_path):
dir, file = os.path.dirname(template_path) + "/", os.path.basename(template_path)
if request.user.first_name and request.user.last_name:
author = "%s %s" % (request.user.first_name, request.user.last_name)
else:
author = request.user.username
message = form.cleaned_data['commitmessage'] or '--'
enc = 'utf-8'
command = (
'git commit -F - '
'--author "%(author)s <%(email)s>" '
'-- %(file)s '
) % {
'file': template_path,
'author': author,
'email': request.user.email,
}
# Stolen from gitpython's git/cmd.py
proc = subprocess.Popen(
args=command.encode(enc),
shell=True,
cwd=dir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
proc.stdin.write(message.encode(enc))
proc.stdin.close()
stderr_value = proc.stderr.read()
stdout_value = proc.stdout.read()
status = proc.wait()
| finally:
proc.stderr.close()
msg = stderr_value.decode(enc).rstrip()
if status ! | = 0:
if status == 1 and msg == '':
if 'nothing to commit' in stdout_value:
msg = 'nothing to commit'
raise TemplatesAdminException(_("Error while executing %(command)s: %(msg)s") % dict(
command=command,
msg=msg))
return msg
@classmethod
def contribute_to_form(cls, template_path):
return dict(commitmessage=forms.CharField(
widget=forms.Textarea(attrs={'rows':'5', 'cols': '40'}),
label = _('Change message:'),
required = True,
))
|
birkelbach/python-canfix | canfix/messages/__init__.py | Python | gpl-2.0 | 1,516 | 0 | #!/usr/bin/env python
# CAN-FIX Protocol Module - An Open Source Module that abstracts communication
# with the CAN-FIX Aviation Protocol
# Copyright (c) 2012 Phil Birkelbach
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along w | ith this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from .nodealarm import NodeAlarm
from .parameter import Parameter
from .twoway import TwoWayMsg
from .nodespecific import *
from .nodeidentification import NodeIdentification
from .bitrateset import BitRateSet
from .nodeidset import NodeIDSet
from .edparameter impor | t DisableParameter, EnableParameter
from .nodereport import NodeReport
from .nodestatus import NodeStatus
from .updatefirmware import UpdateFirmware
from .twowayconnection import TwoWayConnection
from .nodeconfiguration import NodeConfigurationSet, NodeConfigurationQuery
from .nodedescription import NodeDescription
from .parameterset import ParameterSet
|
aleaf/pest_tools | res.py | Python | mit | 16,441 | 0.010644 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
class Res:
def __init__(self, res_file):
''' Res Class
Parameters
----------
res_file : str
Path to .res or .rei file from PEST
Attributes
----------
df : Pandas Data Frame
groups : array
Array of observation groups
'''
check = open(res_file, 'r')
line_num = 0
while True:
current_line = check.readline()
if "Name" in current_line and "Residual" in current_line:
break
else:
line_num += 1
self.df = pd.read_csv(res_file, sep = '\s*', index_col = 0, header = line_num)
# Apply weighted residual
self.df['Weighted Residual'] = self.df['Residual'] * self.df['Weight']
self.df['Absolute Residual'] = abs(self.df['Residual'])
self.df['Weighted Absolute Residual'] = self.df['Absolute Residual'] * self.df['Weight']
self.groups = self.df.groupby('Group').groups.keys()
def group(self, group):
''' Get pandas DataFrame for a single group
Parameters
----------
group : str
Observation group to get
Returns
--------
pandas DataFrame
DataFrame of residuals for group
'''
return self.df.ix[self.df['Group'] == group]
def stats(self, group):
''' Return stats for single group
Parameters
----------
group: str
Observation group to get stats for
Returns
--------
pandas DataFrame
DataFrame of statistics
'''
group_df = self.df.ix[self.df['Group'] == group.lower()]
# Basic info
count = group_df.count()['Group']
min_measured = group_df.describe()['Measured'].loc['min']
max_measured = group_df.describe()['Measured'].loc['max']
range_measured = max_measured - min_measured
min_model = group_df.describe()['Modelled'].loc['min']
max_model = group_df.describe()['Modelled'].loc['max']
range_model = max_model - min_model
# Residual Stats
mean_res = group_df['Residual'].values.mean()
min_res = group_df['Residual'].values.min()
max_res = group_df['Residual'].values.max()
std_res = group_df['Residual'].values.std()
range_res = max_res - min_res
# Weighted Residual Stats
mean_w_res = group_df['Weighted Residual'].values.mean()
min_w_res = group_df['Weighted Residual'].values.min()
max_w_res = group_df['Weighted Residual'].values.max()
std_w_res = group_df['Weighted Residual'].values.std()
range_w_res = max_w_res - min_w_res
# Absolute Residual Stats
mean_abs_res = group_df['Absolute Residual'].values.mean()
min_abs_res = group_df['Absolute Residual'].values.min()
max_abs_res = group_df['Absolute Residual'].values.max()
std_abs_res = group_df['Absolute Residual'].values.std()
range_abs_res = max_abs_res - min_abs_res
# Root Mean Square Error
rmse = math.sqrt(((group_df['Residual'].values)**2).mean())
# RMSE/measured range
rmse_over_range = rmse/float(range_measured)
print '-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*'
print 'Observation Group: %s' % (group)
print '-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*'
print 'Number of observations in group: %d' % (count)
print '-------Measured Stats------------------'
print 'Minimum: %10.4e Maximum: %10.4e' % (min_measured, max_measured)
print 'Range: %10.4e' % (range_measured)
print '-------Residual Stats------------------'
print 'Mean: %10.4e Std Dev: %10.4e' % (mean_res, std_res)
print 'Minimum: %10.4e Maximum: %10.4e' % (min_res, max_res)
print 'RMSE: %10.4e RMSE/Range: %10.4e' % (rmse, rmse_over_range)
print 'Range: %10.4e' % (range_res)
print '-------Absolute Residual Stats---------'
print 'Mean: %10.4e Std Dev: %10.4e' % (mean_abs_res, std_abs_res)
print 'Minimum: %10.4e Maximum: %10.4e' % (min_abs_res, max_abs_res)
print 'Range: %10.4e' % (range_abs_res)
print '-------Weighted Residual Stats---------'
print 'Mean: %10.4e Std Dev: %10.4e' % (mean_w_res, std_w_res)
print 'Minimum: %10.4e Maximum: %10.4e' % (min_w_res, max_w_res)
print 'Range: %10.4e' % (range_w_res)
print ' '
def stats_all(self):
''' Return stats for each observation group
Returns
--------
Stats for each group printed to screen
'''
grouped = self.df.groupby('Group')
group_keys = grouped.groups.keys()
for key in group_keys:
group_df = self.df.ix[self.df['Group'] == key]
# Basic info
count = group_df.count()['Group']
min_measured = group_df.describe()['Measured'].loc['min']
max_measured = group_df.describe()['Measured'].loc['max']
range_measured = max_measured - min_measured
min_model = group_df.describe()['Modelled'].loc['min']
max_model = group_df.describe()['Modelled'].loc['max']
range_model = max_model - min_model
# Residual Stats
mean_res = group_df['Residual'].values.mean()
min_res = group_df['Residual'].values.min()
max_res = group_df['Residual'].values.max()
std_res = group_df['Residual'].values.std()
range_res = max_res - min_res
# Weighted Residual Stats
mean_w_res = group_df['Weighted Residual'].values.mean()
min_w_res = group_df['Weighted Residual'].values.min()
max_w_res = group_df['We | ighted Residual'].values.max()
std_w_res = group_df['Weighted Residual'].values.std()
range_w_res = max_w_res - min_w_res
# Absolute Residual Stats
mean_abs_res = group_df['Absolute Residual'].values.mean()
min_abs_res = group_df['Absolute Residual'].values.min()
max_abs_res = group_df['Absolute Residual'].values.max()
std_abs_res = group | _df['Absolute Residual'].values.std()
range_abs_res = max_abs_res - min_abs_res
# Root Mean Square Error
rmse = math.sqrt(((group_df['Residual'].values)**2).mean())
# RMSE/measured range
if range_measured > 0.0:
rmse_over_range = rmse/float(range_measured)
else:
rmse_over_range = np.nan
print '-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*'
print 'Observation Group: %s' % (key)
print '-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*'
print 'Number of observations in group: %d' % (count)
print '-------Measured Stats------------------'
print 'Minimum: %10.4e Maximum: %10.4e' % (min_measured, max_measured)
print 'Range: %10.4e' % (range_measured)
print '-------Residual Stats------------------'
print 'Mean: %10.4e Std Dev: %10.4e' % (mean_res, std_res)
print 'Minimum: %10.4e Maximum: %10.4e' % (min_res, max_res)
print 'RMSE: %10.4e RMSE/Range: %10.4e' % (rmse, rmse_over_range)
print 'Range: %10.4e' % (range_res)
print '-------Absolute Residual Stats---------'
print 'Mean: %10.4e Std Dev: %10.4e' % (mean_abs_res, std_abs_res)
print 'Minimum: %10.4e Maximum: %10.4e' % (min_abs_res, max_abs_res)
print 'Range: %1 |
jaumemarti/l10n-spain-txerpa | l10n_es_aeat_mod347/__openerp__.py | Python | agpl-3.0 | 5,103 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C)
# 2004-2011: Pexego Sistemas Informáticos. (http://pexego.es)
# 2012: NaN·Tic (http://www.nan-tic.com)
# 2013: Acysos (http://www.acysos.com)
# Joaquín Pedrosa Gutierrez (http://gutierrezweb.es)
# 2014: Serv. Tecnol. Avanzados - Pedro M. Baeza
# (http://www.serviciosbaeza.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################### | #######################################
{
'name': "AEAT Model 347",
'version': "1.1",
'author': "Spanish Localization Team",
'website': "https://launchpad.net/openerp-spain",
'contributors': [
'Pexego (http://www.pexego.es)',
'ASR-OSS (http://www.asr-oss.com)',
'NaN·tic (http://www.nan-tic.com)',
'Acysos (http://www.acysos.com)',
'Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>',
| 'Joaquín Gutierrez (http://gutierrezweb.es)',
],
'category': "Localisation/Accounting",
'description': """
Presentación del Modelo AEAT 347
================================
(Declaración Anual de Operaciones con Terceros)
Basado en la Orden EHA/3012/2008, de 20 de Octubre, por el que se aprueban los
diseños físicos y lógicos del 347.
De acuerdo con la normativa de la Hacienda Española, están obligados a
presentar el modelo 347:
-------------------------------------------------------------------------------
* Todas aquellas personas físicas o jurídicas que no esten acogidas al regimen
de módulos en el IRPF, de naturaleza pública o privada que desarrollen
actividades empresariales o profesionales, siempre y cuando hayan realizado
operaciones que, en su conjunto, respecto de otra persona o Entidad,
cualquiera que sea su naturaleza o carácter, hayan superado la cifra de
3.005,06 € durante el año natural al que se refiere la declaración. Para el
cálculo de la cifra de 3.005,06 € se computan de forma separada las entregas
de biene y servicios y las adquisiciones de los mismos.
* En el caso de Sociedades Irregulares, Sociedades Civiles y Comunidad de
Bienes no acogidas el regimen de módulos en el IRPF, deben incluir las
facturas sin incluir la cuantía del IRPF.
* En el caso de facturas de proveedor con IRPF, no deben ser presentadas en
este modelo. Se presentan en el modelo 190. Desactivar en la ficha del
proveedor la opción de "Incluir en el informe 347".
De acuerdo con la normativa no están obligados a presentar el modelo 347:
-------------------------------------------------------------------------
* Quienes realicen en España actividades empresariales o profesionales sin
tener en territorio español la sede de su actividad, un establecimiento
permanente o su domicilio fiscal.
* Las personas físicas y entidades en régimen de atribución de rentas en
el IRPF, por las actividades que tributen en dicho impuesto por el
régimen de estimación objetiva y, simultáneamente, en el IVA por los
régimenes especiales simplificados o de la agricultura, ganadería
y pesca o recargo de equivalencia, salvo las operaciones que estén
excluidas de la aplicación de los expresados regímenes.
* Los obligados tributarios que no hayan realizado operaciones que en su
conjunto superen la cifra de 3.005,06 €.
* Los obligados tributarios que hayan realizado exclusivamente operaciones
no declarables.
* Los obligados tributarios que deban informar sobre las operaciones
incluidas en los libros registro de IVA (modelo 340) salvo que realicen
operaciones que expresamente deban incluirse en el modelo 347.
(http://www.boe.es/boe/dias/2008/10/23/pdfs/A42154-42190.pdf)
**AVISO:** Este módulo requiere el módulo *account_invoice_currency*,
disponible en:
https://github.com/OCA/account-financial-tools
""",
'license': "AGPL-3",
'depends': [
"base_vat",
"l10n_es_aeat",
"account_invoice_currency",
],
'data': [
"account_period_view.xml",
"res_partner_view.xml",
"wizard/export_mod347_to_boe.xml",
"report/mod347_report.xml",
"security/ir.model.access.csv",
"security/mod_347_security.xml",
"mod347_view.xml",
],
'installable': True,
'active': False,
'images': [
'images/l10n_es_aeat_mod347.png',
],
}
|
tswast/google-cloud-python | texttospeech/google/cloud/texttospeech_v1beta1/__init__.py | Python | apache-2.0 | 1,400 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | ERROR: type should be string, got " https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing per" | missions and
# limitations under the License.
from __future__ import absolute_import
import sys
import warnings
from google.cloud.texttospeech_v1beta1 import types
from google.cloud.texttospeech_v1beta1.gapic import enums
from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client
if sys.version_info[:2] == (2, 7):
message = (
"A future version of this library will drop support for Python 2.7."
"More details about Python 2 support for Google Cloud Client Libraries"
"can be found at https://cloud.google.com/python/docs/python2-sunset/"
)
warnings.warn(message, DeprecationWarning)
class TextToSpeechClient(text_to_speech_client.TextToSpeechClient):
__doc__ = text_to_speech_client.TextToSpeechClient.__doc__
enums = enums
__all__ = ("enums", "types", "TextToSpeechClient")
|
mickael-grima/optimizedGPS | optimizedGPS/problems/utils/utils.py | Python | apache-2.0 | 895 | 0 | # -*- coding: utf-8 -*-
# !/bin/env python
import os
class SafeOpen(object):
"""
A safe opening: if the direc | tory doesn't exist, create it, and then open
"""
def __init__(self, filename, mode):
self.filename = filename
self.mode = mode
def makeDir(self, directory):
""" Check if the file directory exists, otherwise create the directory
"""
if not os.path.exi | sts(directory):
os.makedirs(directory)
def __enter__(self):
self.makeDir('/'.join(self.filename.split('/')[:-1]))
self.file = open(self.filename, self.mode)
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.close()
def around(number):
"""
Truncate a float to the third decimal
"""
if number is not None:
return int(number * 1000) / 1000.
else:
return None
|
Impactstory/depsy | scripts/github_users.py | Python | mit | 2,346 | 0.00341 | __author__ = 'jay'
from pathlib import Path
import os
import json
import requests
data_dir = Path(__file__, "../../data").resolve()
usernames_path = Path(data_dir, "github_usernames.json")
users_path = Path(data_dir, | "github_users.json")
users_url_template = "https://api.github.com/users/%s"
class RateLimitException(Exception):
pass
def get_github_creds():
creds_str = os.environ["GITHUB_TOKENS"]
cred_pairs =[]
for pair_string in creds_str.split(","):
cred_pairs.append(pair_string.split(":"))
return cred_pairs
def get_profile_data(username, user, password):
user | s_url = users_url_template % username
r = requests.get(users_url, auth=(user, password))
print "got {status_code} response from {url}. X-RateLimit-Remaining: {rate_limit}".format(
status_code=r.status_code,
url=users_url,
rate_limit=r.headers["X-RateLimit-Remaining"]
)
if r.status_code == 200:
return r.json()
elif r.status_code == 404:
return {"login": username, "404": True}
elif r.status_code == 403:
raise RateLimitException
def fetch_main():
"""
Get the data for each GitHub user and save in a json file
Handles rate-limiting by simply dying when the limit is reached,
so you have to restart it every hour.
"""
creds = get_github_creds()[0] # just use one person's creds for now
with open(str(users_path), "r") as f:
users = json.load(f)
for username, user_data in users.iteritems():
if user_data is None:
try:
users[username] = get_profile_data(username, creds[0], creds[1])
except RateLimitException:
break
print "saving user data..."
with open(str(users_path), "w") as f:
json.dump(users, f, indent=3, sort_keys=True)
def save_users_file():
"""
we've got a list of usernames, make a dict of username=>None and save.
"""
users_dict = {}
with open(str(usernames_path), "r") as f:
usernames = json.load(f)
for username in usernames:
users_dict[username] = None
with open(str(users_path), "w") as f:
json.dump(users_dict, f, indent=3, sort_keys=True)
if __name__ == '__main__':
# just run this once to make the correct file
#save_users_file()
fetch_main()
|
gotlium/django-feedme | feedme/urls.py | Python | mit | 686 | 0 | from django.conf.urls import patterns, url
from .views import FeedList, ImportView, AddView
from .ajax import mark_as_read
urlpatterns = patterns(
'',
url(r'^$', FeedList.as_view(), name="feedme-feed-list"),
url(r'^by_category/(?P<cat | egory>[-\w]+)/$', FeedList.as_view(),
name='feedme-feed-list-by-category'),
url(r'^by_feed/(?P<feed_id>[-\w]+)/$', FeedList.as_view(),
name='feedme-feed-list-by-feed'),
url(r'^import/$', | ImportView.as_view(),
name='feedme-import-google-takeout'),
url(r'^ajax/mark_as_read/$', mark_as_read,
name='feedme-mark-as-read-ajax'),
url(r'^ajax/add/$', AddView.as_view(), name='feedme-add-ajax'),
)
|
gion86/awlsim | awlsim/core/instructions/insn_assert_lt.py | Python | gpl-2.0 | 1,478 | 0.012855 | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, un | icode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_ASSERT_LT(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn | .__init__(self, cpu, AwlInsn.TYPE_ASSERT_LT, rawInsn)
self.assertOpCount(2)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
val0 = self.cpu.fetch(self.ops[0])
val1 = self.cpu.fetch(self.ops[1])
if not (val0 < val1):
raise AwlSimError("Assertion failed")
s.NER = 0
|
jryu/recurringtasks | checklist/urls.py | Python | gpl-2.0 | 1,246 | 0 | from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from django.views.i18n import javascript_catalog
from checklist import views
urlpatterns = [
url(r'^$', views.Main.as_view(), name='main'),
url(r'^check/$', views.CheckCreate.as_view(), name='check'),
url(r'^uncheck/$', views.CheckDelete.as_view(), name='uncheck'),
url(r'^tasks/$', views.TaskList.as_view(), name='task_list'),
url(r'^tasks/create/$', views.TaskCreate.as_view(), name='task_create'),
url(r'^tasks/update/(?P<pk>\d+)$',
views.TaskUpdate | .as_view(), name='task_update'),
url(r'^tasks/delete/(?P<pk>\d+)$',
views.TaskDelete.as_view(), name= | 'task_delete'),
url(r'^archives/(?P<year>[0-9]{4})-(?P<month>[0-9]+)-(?P<day>[0-9]+)/$',
views.Archives.as_view(month_format='%m'), name='archives'),
url(r'^trends/$', login_required(
TemplateView.as_view(template_name="checklist/trends.html")),
name='trends'),
url(r'^trends/ajax/$', views.TrendsAjax.as_view(), name='trends_ajax'),
url(r'^csv/$', views.DownloadCsv.as_view(), name='csv'),
url(r'^jsi18n/$', javascript_catalog, {'packages': ('checklist')}),
]
|
ioam/lancet | doc/conf.py | Python | bsd-3-clause | 2,993 | 0.003675 | # -*- coding: utf-8 -*-
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from builder.shared_conf import * # pyflakes:ignore (API import)
paths = ['../param/', '.', '..']
add_paths(paths)
# General information about the project.
project = u'Lancet'
copyright = u'2014, IOAM'
ioam_project = 'param'
from lancet import __version__
# The version info for the project being documented, defining |version|
# and |release| and used in various other places throughout the built
# documents. Assumes __version__ is a param.version.Version object.
#
# The short X.Y.Z version.
version = __version__.abbrev()
# The full version, including alpha/beta/rc/dev tags.
release = __version__.abbrev(dev_suffix="-dev")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patter | ns = ['_build', 'test_data', 'reference_data', 'nbpublisher',
'builder']
# Add any paths that contain custom static files (such as style sheets) here | ,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', 'builder/_shared_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'Lancetdoc'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/mini_logo.png'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Lancet.tex', u'Lancet Documentation',
u'IOAM', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'lancet', u'Lancet Documentation',
[u'IOAM'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Lancet', u'Lancet Documentation',
u'IOAM', 'Lancet', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None,
'http://ioam.github.io/param/': None,
'http://ioam.github.io/holoviews/': None,
'http://ipython.org/ipython-doc/2/' : None}
from builder.paramdoc import param_formatter
def setup(app):
app.connect('autodoc-process-docstring', param_formatter)
|
psb-seclab/malware_classifier | ostsa/classification/Classifier.py | Python | mit | 16,877 | 0.006577 | from ..storage import create_dirs
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss
from sklearn.model_selection import cross_val_score, train_test_split
from sys import version_info
import matplotlib.pyplot as plot
import numpy as np
import pickle
###############################################################################
def load_classifier(filename, classifier_type=None):
"""Load in the classifier from the specified file.
classifier_type should be a class inheriting from Classifier. If specified,
the classifier will be loaded as that type instead of the type specified
in the classifier's file.
"""
# Load data from the file.
with open(filename, 'rb') as file:
# NOTE: There are compatibility issues with Python 2 and 3 whe | n it
# comes to numpy arrays. If the classifier was saved in Python 2
# and Python 3 is used | to load it, an encoding will need to be
# specified in order to prevent issues.
data = pickle.load(file, encoding='latin1') if version_info >= (3, 0) \
else pickle.load(file)
# If the file was correctly formatted, the data should contain the model,
# the training data, and the testing data.
cls = data[0]
model = data[1]
# Recreate the classifier with the items read in from the file.
classifier = Classifier(model)
# Modify the type of the classifier if necessary.
if (classifier_type):
classifier.__class__ = classifier_type
else:
classifier.__class__ = cls
# Load the training and testing data if it is present.
if (len(data) > 2):
classifier._training_features = data[2]
classifier._training_labels = data[3]
classifier._testing_features = data[4]
classifier._testing_labels = data[5]
return classifier
###############################################################################
class Classifier(object):
"""Classifier is the base class for all types of classifiers. It defines
the base logic that all classes should take advantage of. In particular,
the base constructor can be used to handle verifying and splitting a passed
in data set.
A Classifier takes in a set of labeled data and then allows classifying
unlabeled data based on what it learned from the passed in labeled data.
All concrete subclasses should implement the model property to return the
model that should be used for the classification. The Classifier class
can handle the rest of the logic.
"""
file_extension = '.clsf'
def __init__(self, model, features=None, labels=None, training_ratio=0.7):
"""Initialize a new Model that fits to the specified data.
features is a feature matrix which should be represented by a 2D-array
or 2D-array-like object. Each row in the feature matrix represents a
sample.
model is the machine learning model that should be used by the
classifier. If data is specified, the model will be trained with the
data. An already trained model can be passed in by specifying no data.
labels is the list of labels that correspond to each sample in the
feature matrix. Each element in the list corresponds to a row in the
feature matrix. That means that len(features) must equal len(labels).
training_ratio is a float between 0 and 1 that specifies what portion
of the data will be used for training. The remaining portion of the
data will then be used for testing. The default values is 0.7.
"""
# Set the model.
self._model = model
# Check if any data was passed in.
if (features is None):
return
# Verify that the number of labels and features is consistent.
if (len(features) != len(labels)):
raise ValueError('The numbers of labels and samples are not ' + \
'consistent')
# Split the dataset into training and testing data.
if (training_ratio == 1.0):
self._training_features = features
self._training_labels = labels
self._testing_features = []
self._testing_labels = []
else:
self._training_features, self._testing_features, \
self._training_labels, self._testing_labels = \
train_test_split(features, labels, train_size=training_ratio)
# Train the model with the training set.
self.model.fit(self.training_features, self.training_labels)
@property
def model(self):
"""Get the trained model used by the classifier."""
return self._model
@property
def testing_features(self):
"""Get the feature matrix used for testing the model. This will be a
2D-array with the same length as the testing_labels.
"""
return self._testing_features
@property
def testing_labels(self):
"""Get the list of labels used for testing the model. This will be a
list with the same length as the training_features.
"""
return self._testing_labels
@property
def training_features(self):
"""Get the feature matrix used for training the model. This will be a
a 2D-array with the same length as the training_labels.
"""
return self._training_features
@property
def training_labels(self):
"""Get the list of labels used for training the model. This will be a
list with the same length as the training_features.
"""
return self._training_labels
def accuracy(self, testing_features=None, testing_labels=None):
"""Calculate the accuracy of the classifier by validating against a
set of labeled test data.
The labeled test data can be passed in. If no testing data is
specified, the portion of the original data set that was reserved for
testing will be used instead.
testing_features is the feature matrix for the specified test data.
This should be 2D-array.
testing_labels is the list of labels for the specified feature matrix.
This should be a list with the same length as testing_features.
"""
# Ensure either both or neither testing parameters were specified.
if ((testing_features is None) != (testing_labels is None)):
raise ValueError("Must specify both testing features and labels")
# Use default testing data if necessary.
testing_features = testing_features or self.testing_features
testing_labels = testing_labels or self.testing_labels
# Determine the accuracy of the model.
predicted = self.model.predict(testing_features)
return accuracy_score(testing_labels, predicted)
def classify(self, *samples):
"""Classifies the specified samples and returns a predicted label for
each.
Each sample should be a list of features. The number of features must
be equal to the number of features each sample passed into the
classifier used.
A list of classification results for each sample will be returned. If
only one sample is passed in, the return value will just be the
classification result for the sample.
"""
# Classify each sample.
results = self.model.predict(np.array(samples))
# If there is only one sample, just return the result. If there were
# multiple samples, return the full list.
return results if len(results) != 1 else results[0]
def confusion_matrix(self, testing_features=None, testing_labels=None):
"""Calculate the confusion matrix for the classifier based on the
specified set of labelled test data.
|
peragro/peragro-at | src/damn_at/analyzers/video/__init__.py | Python | bsd-3-clause | 21 | 0 | """Vide | o A | nalyzer"""
|
greglinch/sourcelist | django-magic-link/django_magic_login/settings.py | Python | mit | 3,560 | 0.001685 | """
Django settings for django_magic_login project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8ow#z)z$lyvv@hlitmcyhfr&cclv1(@$!b2bk6ep0&$3whhfzq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
| 'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.Authentica | tionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_magic_login.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_magic_login.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Magic Login
LOGIN_URL = '/customers/login/'
MIDDLEWARE += ['sesame.middleware.AuthenticationMiddleware']
AUTHENTICATION_BACKENDS = ['sesame.backends.ModelBackend']
SESAME_TOKEN_NAME = "url_auth_token"
SESAME_MAX_AGE = 6 * 60 * 60 # 6 hour
EMAIL_HOST = ""
EMAIL_PORT = 2587
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "Admin<user@domain.com>" |
Lunatixz/script.skin.helper.service | resources/lib/ListItemMonitor.py | Python | gpl-2.0 | 70,988 | 0.012707 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import threading, thread
import requests, re
import random
import xml.etree.ElementTree as etree
from Utils import *
import ArtworkUtils as artutils
import SkinShortcutsIntegration as skinshortcuts
class ListItemMonitor(threading.Thread):
event = None
exit = False
delayedTaskInterval = 1795
lastWeatherNotificationCheck = None
lastNextAiredNotificationCheck = None
widgetContainerPrefix = ""
liPath = ""
liFile = ""
liLabel = ""
liTitle = ""
liDbId = ""
liImdb = ""
unwatched = 1
contentType = ""
allStudioLogos = {}
allStudioLogosColor = {}
LastCustomStudioImagesPath = ""
widgetTaskInterval = 590
moviesetCache = {}
extraFanartCache = {}
streamdetailsCache = {}
pvrArtCache = {}
tmdbinfocache = {}
omdbinfocache = {}
imdb_top250 = {}
cachePath = os.path.join(ADDON_DATA_PATH,"librarycache.json")
ActorImagesCachePath = os.path.join(ADDON_DATA_PATH,"actorimages.json")
def __init__(self, *args):
logMsg("ListItemMonitor - started")
self.event = threading.Event()
self.monitor = xbmc.Monitor()
threading.Thread.__init__(self, *args)
def stop(self):
logMsg("ListItemMonitor - stop called",0)
self.saveCacheToFile()
self.exit = True
self.event.set()
def run(self):
setAddonsettings()
self.getCacheFromFile()
playerTitle = ""
playerFile = ""
lastPlayerItem = ""
playerItem = ""
liPathLast = ""
curFolder = ""
curFolderLast = ""
lastListItem = ""
nextairedActive = False
screenSaverSetting = None
while (self.exit != True):
if xbmc.getCondVisibility("Player.HasAudio"):
#set window props for music player
try:
playerTitle = xbmc.getInfoLabel("Player.Title").decode('utf-8')
playerFile = xbmc.getInfoLabel("Player.Filenameandpath").decode('utf-8')
playerItem = playerTitle + playerFile
#only perform actions when the listitem has actually changed
if playerItem and playerItem != lastPlayerItem:
#clear all window props first
self.resetPlayerWindowProps()
self.setMusicPlayerDetails()
lastPlayerItem = playerItem
except Exception as e:
logMsg("ERROR in setMusicPlayerDetails ! --> " + str(e), 0)
elif lastPlayerItem:
#cleanup remaining window props
self.resetPlayerWindowProps()
playerItem = ""
lastPlayerItem = ""
if xbmc.getCondVisibility("Window.IsActive(visualisation) + Skin.HasSetting(SkinHelper.DisableScreenSaverOnFullScreenMusic)"):
#disable the screensaver if fullscreen music playback
if not screenSaverSetting:
screenSaverSetting = getJSON('Settings.GetSettingValue', '{"setting":"screensaver.mode"}')
if screenSaverSetting: setJSON('Settings.SetSettingValue', '{"setting":"screensaver.mode", "value": ""}')
elif screenSaverSetting:
setJSON('Settings.SetSettingValue', '{"setting":"screensaver.mode", "value": "%s"}' %screenSaverSetting)
screenSaverSetting = None
#auto close OSD after X seconds of inactivity
if xbmc.getCondVisibility("Window.IsActive(videoosd) | Window.IsActive(musicosd)"):
if xbmc.getCondVisibility("Window.IsActive(videoosd)"):
secondsToDisplay = xbmc.getInfoLabel("Skin.String(SkinHelper.AutoCloseVideoOSD)")
window = "videoosd"
elif xbmc.getCondVisibility("Window.IsActive(musicosd)"):
secondsToDisplay = xbmc.getInfoLabel("Skin.String(SkinHelper.AutoCloseMusicOSD)")
window = "musicosd"
else:
secondsToDisplay = ""
if secondsToDisplay and secondsToDisplay != "0":
while xbmc.getCondVisibility("Window.IsActive(%s)"%window):
if xbmc.getCondVisibility("System.IdleTime(%s)" %secondsToDisplay):
if xbmc.getCondVisibility("Window.IsActive(%s)"%window):
xbmc.executebuiltin("Dialog.Close(%s)" %window)
else:
xbmc.sleep(500)
#do some background stuff every 30 minutes
if self.delayedTaskInterval >= 1800 and not self.exit:
thread.start_new_thread(self.doBackgroundWork, ())
self.delayedTaskInterval = 0
#reload some widgets every 10 minutes
if self.widgetTaskInterval >= 600 and not self.exit:
self.resetGlobalWidgetWindowProps()
self.widgetTaskInterval = 0
#flush cache if videolibrary has changed
if WINDOW.getProperty("resetVideoDbCache") == "reset":
self.extraFanartCache = {}
self.streamdetailsCache = {}
WINDOW.clearProperty("resetVideoDbCache")
#flush cache if pvr settings have changed
if WINDOW.getProperty("resetPvrArtCache") == "reset":
self.pvrArtCache = {}
WINDOW.clearProperty("SkinHelper.PVR.ArtWork")
WINDOW.clearProperty("resetPvrArtCache")
if xbmc.getCondVisibility("[Window.IsMedia | !IsEmpty(Window(Home).Property(SkinHelper.WidgetContainer))]") and not self.exit:
try:
widgetContainer = WINDOW.getProperty("SkinHelper.WidgetContainer").decode('utf-8')
if widgetContainer:
self.widgetContainerPrefix = "Container(%s)."%widgetContainer
curFolder = xbmc.getInfoLabel("widget-%s-$INFO[Container(%s).NumItems]" %(widgetContainer,widgetContainer)).decode('utf-8')
else:
self.widgetContainerPrefix = ""
curFolder = xbmc.getInfoLabel("$INFO[Container.FolderPath]$INFO[Container.NumItems]").decode('utf-8')
| self.liTitle = xbmc.getInfoLabel("%sListItem.Title" %self.widgetContainerPrefix).decode('utf-8')
self.liLabel = xbmc.getInfoLabel("%sListItem.Label" %self.widgetContainerPrefix).decode('utf-8')
except Exception as e:
| logMsg(str(e),0)
curFolder = ""
self.liLabel = ""
self.liTitle = ""
#perform actions if the container path has changed
if (curFolder != curFolderLast):
self.resetWindowProps()
self.contentType = ""
curFolderLast = curFolder
if curFolder and self.liLabel:
#always wait for the contentType because plugins can be slow
for i in range(20):
self.contentType = getCurrentContentType(self.widgetContainerPrefix)
if self.contentType: break
else: xbmc.sleep(250)
if not self.widgetContainerPrefix and self.contentType:
self.setForcedView()
self.setContentHeader()
curListItem = curFolder + self.liLabel + self.liTitle
WINDOW.setProperty("curListItem",curListItem)
#only perform actions when the listitem has actually changed
if curListItem and curListItem != lastListItem and self.contentType:
#clear all window props first
self.resetWindowProps()
|
redhat-openstack/trove | trove/common/base_wsgi.py | Python | apache-2.0 | 27,852 | 0.000754 | # Copyright 2014 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import eventlet
eventlet.patcher.monkey_patch(all=False, socket=True)
import datetime
import errno
import socket
import sys
import time
import eventlet.wsgi
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import loggers
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_service import sslutils
import routes
import routes.middleware
import webob.dec
import webob.exc
from xml.dom import minidom
from xml.parsers import expat
from trove.common import base_exception
from trove.common.i18n import _
from trove.common import xmlutils
socket_opts = [
cfg.IntOpt('backlog',
default=4096,
help="Number of backlog requests to configure the socket with"),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
]
CONF = cfg.CONF
CONF.register_opts(socket_opts)
LOG = logging.getLogger(__name__)
def run_server(application, port, **kwargs):
"""Run a WSGI server with the given application."""
sock = eventlet.listen(('0.0.0.0', port))
eventlet.wsgi.server(sock, application, **kwargs)
class Service(service.Service):
"""
Provides a Service API for wsgi servers.
This gives us the ability to launch wsgi servers with the
Launcher classes in oslo_service.service.py.
"""
def __init__(self, application, port,
host='0.0.0.0', backlog=4096, threads=1000):
self.application = application
self._port = port
self._host = host
self._backlog = backlog if backlog else CONF.backlog
self._socket = self._get_socket(host, port, self._backlog)
super(Service, self).__init__(threads)
def _get_socket(self, host, port, backlog):
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
info = socket.getaddrinfo(host,
port,
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
sock = None
retry_until = time.time() + 30
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=backlog,
family=family)
if sslutils.is_enabled(CONF):
sock = sslutils.wrap(CONF, sock)
except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
| eventlet.sleep(0.1)
if not sock:
| raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
"after trying for 30 seconds") %
{'host': host, 'port': port})
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
return sock
def start(self):
"""Start serving this service using the provided server instance.
:returns: None
"""
super(Service, self).start()
self.tg.add_thread(self._run, self.application, self._socket)
@property
def backlog(self):
return self._backlog
@property
def host(self):
return self._socket.getsockname()[0] if self._socket else self._host
@property
def port(self):
return self._socket.getsockname()[1] if self._socket else self._port
def stop(self):
"""Stop serving this API.
:returns: None
"""
super(Service, self).stop()
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
logger = logging.getLogger('eventlet.wsgi')
eventlet.wsgi.server(socket,
application,
custom_pool=self.tg.pool,
log=loggers.WritableLogger(logger))
class Middleware(object):
"""
Base WSGI middleware wrapper. These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
def __init__(self, application):
self.application = application
def process_request(self, req):
"""
Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
class Debug(Middleware):
"""
Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print()
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in resp.headers.iteritems():
print(key, "=", value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""
Iterator that prints the contents of a wrapper string iterator
when iterated.
"""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
class Router(object):
"""
WSGI middleware that maps incoming requests to WSGI apps.
"""
def __init__(self, mapper):
"""
Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("server", "servers", controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
|
jasonz93/python-tordatahub | tordatahub/errors/__init__.py | Python | apache-2.0 | 3,583 | 0.00614 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class DatahubException(Exception):
"""
There was an base exception class that occurred while handling your request to tordatahub server.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(DatahubException, self).__init__(error_msg)
self.status_code = status_code
self.request_id = request_id
self.error_code = error_code
self.error_msg = error_msg
def __str__(self):
return "status_code:%d, request_id:%s, error_code:%s, error_msg:%s"\
%(self.status_code, self.request_id, self.error_code, self.error_msg)
# A long list of server defined exceptions
class ObjectAlreadyExistException(DatahubException):
"""
The exception is raised while Datahub Object that you are creating is alreay exist.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(ObjectAlreadyExistException, self).__init__(status_code, request_id, error_code, error_msg)
class NoSuchObjectException(DatahubException):
"""
The exception is raised while Datahub Object that you are handling is not exist.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(NoSuchObjectException, self).__init__(status_code, request_id, error_code, error_msg)
class InvalidParameterException(DatahubException):
"""
The exception is raised while that your handling request parameter is invalid.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(InvalidParameterException, self).__init__(status_code, request_id, error_code, error_msg)
class InvalidShardOperationException(DatahubException):
"""
The opertaion of shard is not support yet.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(InvalidShardOperationException, self).__init__(status_code, request_id, error_code, error_msg)
class MalformedRecordException(DatahubException):
"""
The record is malformed.
"""
def __init__(self, status_code, request_id, error_code, error_msg):
super(MalformedRecordException, self).__init__(status_code, request_id, error_code, error_msg)
class LimitExceededException(DatahubException):
"""
Too many request.
"""
def __in | it__(self, status_code, request_id, error_code, error_msg):
super(LimitExceededException, self).__init__(status_code, request_id, error_code, error_msg)
class ServerInternalError(DatahubException):
"""
The Datahub server occured error.
"""
de | f __init__(self, status_code, request_id, error_code, error_msg):
super(ServerInternalError, self).__init__(status_code, request_id, error_code, error_msg)
|
rtucker-mozilla/inventory | mcsv/tests/importer.py | Python | bsd-3-clause | 18,133 | 0.000662 | from django.core.exceptions import ValidationError
from django.test import TestCase, Client
from mcsv.importer import csv_import
from systems.models import (
OperatingSystem, System, SystemRack, Allocation, Location, SystemStatus,
SystemType
)
from systems.tests.utils import create_fake_host
import datetime
class CSVTests(TestCase):
def setUp(self):
OperatingSystem.objects.create(name='foo', version='1.1')
OperatingSystem.objects.create(name='foo', version='2.1')
OperatingSystem.objects.create(name='bar', version='2.1')
SystemStatus.objects.create(
status='production', color='burgandy', color_code='wtf?'
)
Allocation.objects.create(name='something')
SystemType.objects.create(type_name='foobar')
self.client = Client()
def client_tst(self, test_csv, save=True, primary_attr='hostname'):
resp = self.client.post('/en-US/csv/ajax_csv_importer/', {
'csv-data': test_csv, 'save': save, 'primary-attr': primary_attr
})
if resp.status_code != 200:
# The exception thrown by csv_import is more useful than a status
# code so we are running the function knowing it will fail. TODO,
# figure out a better way for tests to know what went wrong.
csv_import(test_csv, save=save)
def test_get_related(self):
test_csv = """
hostname,operating_system%name,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com,foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system%name,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com,foo%foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com,foo%foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system%version,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com,foo%foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system%name%version,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
foobob.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
"""
ret = csv_import(test_csv)
self.assertEqual(1, len(ret))
self.assertTrue(ret[0]['system'])
def test_get_related_spaces(self):
test_csv = """
hostname, operating_system %name,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com,foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname, operating_system % name,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com,foo%foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system,serial,system_type%type_name,warranty_start,warranty_end baz.mozilla.com, foo % foo,asdf,foobar,2012-01-01,2013-01-01
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system%version,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
baz.mozilla.com, foo %foo,asdf,foobar,2012-01-01,2013-01-01,something
"""
self.assertRaises(Exception, self.client_tst, test_csv)
test_csv = """
hostname,operating_system%name%version,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
foobob.mozilla.com,foo% 1.1,asdf,foobar,2012-01-01,2013-01-01,something
"""
ret = csv_import(test_csv)
self.assertEqual(1, len(ret))
self.assertTrue(ret[0]['system'])
def test_multiple(self):
test_csv = """
hostname,operating_system%name%version,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
foobob.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
1fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
2fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
3fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
4fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
5fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
6fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
7fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
8fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
"""
before = System.objects.all().count()
ret = csv_import(test_csv)
after = System.objects.all().count()
self.assertEqual(9, len(ret))
self.assertEqual(before, after - 9)
    def test_multiple_no_save(self):
        """With save=False csv_import must parse all nine rows but leave the
        System table untouched."""
        test_csv = """
hostname,operating_system%name%version,serial,system_type%type_name,warranty_start,warranty_end,allocation%name
foobob.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
1fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
2fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
3fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
4fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
5fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
6fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
7fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
8fooboz.mozilla.com,foo%1.1,asdf,foobar,2012-01-01,2013-01-01,something
"""
        before = System.objects.all().count()
        ret = csv_import(test_csv, save=False)
        after = System.objects.all().count()
        # Parsed results are returned, but the row count must not change.
        self.assertEqual(9, len(ret))
        self.assertEqual(before, after)
    def test_keyvalue(self):
        """A dotted header (nic.0.mac_address.0) must be imported as a
        key/value pair and reported under the result's 'kvs' key."""
        test_csv = """
hostname,nic.0.mac_address.0,serial,warranty_start,warranty_end,system_type%type_name,allocation%name
foobob.mozilla.com,keyvalue,asdf,2012-01-01,2013-01-01,foobar,something
"""
        ret = csv_import(test_csv, save=False)
        self.assertTrue(ret[0]['kvs'])
    def test_warranty_start_end(self):
        """warranty_start/warranty_end CSV columns must populate the saved
        System's corresponding fields."""
        test_csv = """
hostname,warranty_start,warranty_end,serial,system_type%type_name,allocation%name
foobob.mozilla.com,2011-03-01,2012-03-12,asdf,foobar,something
"""
        self.client_tst(test_csv, save=True)
        s = System.objects.get(hostname='foobob.mozilla.com')
        self.assertTrue(s.warranty_start)
        self.assertTrue(s.warranty_end)
    def test_invalid_field(self):
        """An out-of-range date value (year 20192) must make csv_import raise
        ValueError instead of saving a bogus warranty date."""
        test_csv = """
hostname,warranty_start,warranty_end,serial,system_type%type_name,allocation%name
foobob.mozilla.com,2011-03-01,20192-03-12,asdf,foobar,something
"""
        self.assertRaises(ValueError, csv_import, test_csv, {'save': True})
        #s = System.objects.get(hostname='foobob.mozilla.com')
        #self.assertTrue(s.warranty_start)
        #self.assertTrue(s.warranty_end)
def test_override(self):
test_csv = """
hostname,warranty_start,warranty_end,serial,system_type%type_name,allocation%name
foobob.mozilla.com,2011-03-01,2012-03-12,asdf,foobar,something
"""
s = create_fake_host(hostname='foobob.mozilla.com', serial='1234')
self.client |
gpotter2/scapy | scapy/layers/tls/crypto/prf.py | Python | gpl-2.0 | 12,924 | 0 | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS Pseudorandom Function.
"""
from __future__ import absolute_import
from scapy.error import warning
from scapy.utils import strxor
from scapy.layers.tls.crypto.hash import _tls_hash_algs
from scapy.layers.tls.crypto.h_mac import _tls_hmac_algs
from scapy.modules.six.moves import range
from scapy.compat import bytes_encode
# Data expansion functions
def _tls_P_hash(secret, seed, req_len, hm):
    """
    P_hash data-expansion function from section 5 of RFC 4346/5246.

    Iterates the HMAC construction over the A(i) chain until at least
    req_len bytes are produced, then truncates:

        A(1) = HMAC(secret, seed), A(i+1) = HMAC(secret, A(i))
        P_hash = HMAC(secret, A(1)+seed) || HMAC(secret, A(2)+seed) || ...

    - secret : HMAC key (python hmac normalises any key length).
    - seed   : expansion seed.
    - req_len: number of keystream bytes to generate in one call.
    - hm     : HMAC class to iterate (Hmac_MD5/Hmac_SHA1 for TLS <= 1.1,
               Hmac_SHA256/Hmac_SHA384 for TLS 1.2).
    """
    block_len = hm.hash_alg.hash_len
    rounds = (req_len + block_len - 1) // block_len
    seed = bytes_encode(seed)
    a = hm(secret).digest(seed)  # A(1)
    chunks = []
    for _ in range(rounds):
        chunks.append(hm(secret).digest(a + seed))
        a = hm(secret).digest(a)  # advance to A(i+1)
    return b"".join(chunks)[:req_len]
def _tls_P_MD5(secret, seed, req_len):
    # P_MD5 expansion (one half of the TLS <= 1.1 PRF).
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-MD5"])
def _tls_P_SHA1(secret, seed, req_len):
    # P_SHA-1 expansion (other half of the TLS <= 1.1 PRF).
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA"])
def _tls_P_SHA256(secret, seed, req_len):
    # P_SHA256 expansion (default TLS 1.2 PRF).
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA256"])
def _tls_P_SHA384(secret, seed, req_len):
    # P_SHA384 expansion (TLS 1.2 suites whose PRF hash is SHA-384).
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA384"])
def _tls_P_SHA512(secret, seed, req_len):
    # P_SHA512 expansion.
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA512"])
# PRF functions, according to the protocol version
def _sslv2_PRF(secret, seed, req_len):
    """
    SSLv2 key-material generator: a single MD5(secret + seed) when one
    block suffices, otherwise MD5(secret + str(i) + seed) for each block
    index i starting at 0; output truncated to req_len bytes.
    """
    md5 = _tls_hash_algs["MD5"]()
    n_blocks = (req_len + md5.hash_len - 1) // md5.hash_len
    if n_blocks == 1:
        out = md5.digest(secret + seed)
    else:
        out = b"".join(
            md5.digest(secret + str(i).encode("utf8") + seed)
            for i in range(n_blocks)
        )
    return out[:req_len]
def _ssl_PRF(secret, seed, req_len):
    """
    Provides the implementation of SSLv3 PRF function:

     SSLv3-PRF(secret, seed) =
        MD5(secret || SHA-1("A" || secret || seed)) ||
        MD5(secret || SHA-1("BB" || secret || seed)) ||
        MD5(secret || SHA-1("CCC" || secret || seed)) || ...

    req_len should not be more than 26 x 16 = 416 bytes: one label letter
    per MD5-sized output block, and there are only 26 letters.
    """
    if req_len > 416:
        warning("_ssl_PRF() is not expected to provide more than 416 bytes")
        # BUG FIX: return bytes (b""), not str (""), so the failure path has
        # the same type as every other return value of this function.
        return b""
    d = [b"A", b"B", b"C", b"D", b"E", b"F", b"G", b"H", b"I", b"J", b"K", b"L",  # noqa: E501
         b"M", b"N", b"O", b"P", b"Q", b"R", b"S", b"T", b"U", b"V", b"W", b"X",  # noqa: E501
         b"Y", b"Z"]
    res = b""
    hash_sha1 = _tls_hash_algs["SHA"]()
    hash_md5 = _tls_hash_algs["MD5"]()
    rounds = (req_len + hash_md5.hash_len - 1) // hash_md5.hash_len
    for i in range(rounds):
        label = d[i] * (i + 1)  # "A", "BB", "CCC", ...
        tmp = hash_sha1.digest(label + secret + seed)
        res += hash_md5.digest(secret + tmp)
    return res[:req_len]
def _tls_PRF(secret, label, seed, req_len):
    """
    TLS 1.0/1.1 PRF from section 5 of RFC 4346:

        PRF(secret, label, seed) = P_MD5(S1, label + seed) XOR
                                   P_SHA-1(S2, label + seed)

    where S1 and S2 are the (possibly overlapping, when len(secret) is
    odd) first and last halves of the secret. req_len bytes of keystream
    are produced by each expansion and the two streams are XORed.
    """
    half = (len(secret) + 1) // 2
    first_half = secret[:half]
    second_half = secret[-half:]
    data = label + seed
    md5_stream = _tls_P_MD5(first_half, data, req_len)
    sha1_stream = _tls_P_SHA1(second_half, data, req_len)
    return strxor(md5_stream, sha1_stream)
def _tls12_SHA256PRF(secret, label, seed, req_len):
    """
    Provides the implementation of TLS 1.2 PRF function as
    defined in section 5 of RFC 5246:

    PRF(secret, label, seed) = P_SHA256(secret, label + seed)

    Parameters are:
    - secret: the secret used by the HMAC in the expansion function
              (unlike TLS <= 1.1, the secret is not split in halves).
    - label: specific label as defined in various sections of the RFC
             depending on the use of the generated PRF keystream
    - seed: the seed used by the expansion function.
    - req_len: amount of keystream to be generated
    """
    return _tls_P_SHA256(secret, label + seed, req_len)
def _tls12_SHA384PRF(secret, label, seed, req_len):
    # Same as _tls12_SHA256PRF but with P_SHA384 (cipher suites whose
    # negotiated PRF hash is SHA-384).
    return _tls_P_SHA384(secret, label + seed, req_len)
def _tls12_SHA512PRF(secret, label, seed, req_len):
    # Same as _tls12_SHA256PRF but with P_SHA512.
    return _tls_P_SHA512(secret, label + seed, req_len)
class PRF(object):
"""
The PRF used by SSL/TLS varies based on the version of the protocol and
(for TLS 1.2) possibly the Hash algorithm of the negotiated cipher suite.
The various uses of the PRF (key derivation, computation of verify_data,
computation of pre_master_secret values) for the different versions of the
protocol also changes. In order to abstract those elements, the common
_tls_PRF() object is provided. It is expected to be initialised in the
context of the connection state using the tls_version and the cipher suite.
"""
def __init__(self, hash_name="SHA256", tls_version=0x0303):
self.tls_version = tls_version
self.hash_name = hash_name
if tls_version < 0x0300: # SSLv2
self.prf = _sslv2_PRF
elif tls_version == 0x0300: # SSLv3
self.prf = _ssl_PRF
elif (tls_version == 0x0301 or # TLS 1.0
tls_version == 0x0302): # TLS 1.1
self.prf = _tls_PRF
elif tls_version == 0x0303: # TLS 1.2
if hash_name == "SHA384":
self.prf = _tls12_SHA384PRF
elif hash_name == "SHA512":
self.prf = _tls12_SHA512PRF
else:
if hash_name in ["MD5", "SHA"]:
self.hash_name = "SHA256"
self.prf = _tls12_SHA256PRF
else:
warning("Unknown TLS version")
def compute_master_secret(self, pre_master_secret, client_random,
server_random, extms=False, handshake_hash=None):
"""
Return the 48-byte master_secret, computed from pre_master_secret,
client_random and server_random. See RFC 5246, section 6.3.
Supports Extended Master Secret Derivation, see RFC 7627
"""
seed = client_random + server_random
label = b'master secret'
if extms is True and handshake_hash is not None:
seed = handshake_hash
label = b'extended master secret'
if self.tls_version < 0x0300:
return None
elif self.tls_version == 0x0300:
return self.prf(pre_master_secret, seed, 48)
else:
return self.prf(pre_master_secret, label, seed, 48)
def derive_key_block(self, master_secret, server_random,
client_random, req_len):
"""
Perform the derivation of master_secret into a key_block of req_len
requested length. See RFC 5246, section 6.3.
"""
seed = server_random + client_random
if self.tls_version <= 0x0300:
return self.prf(master_se |
saurabhbajaj207/CarpeDiem | venv/Lib/site-packages/pyasn1/codec/der/decoder.py | Python | mit | 2,169 | 0.002766 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ
from pyasn1.codec.cer import decoder
__all__ = ['decode']
class BitStringDecoder(decoder.BitStringDecoder):
    # DER forbids the constructed (segmented) encoding of BIT STRING
    # that BER/CER allow, so the inherited support is switched off.
    supportConstructedForm = False
class OctetStringDecoder(decoder.OctetStringDecoder):
    # DER likewise mandates the primitive encoding of OCTET STRING.
    supportConstructedForm = False
# TODO: prohibit non-canonical encoding
RealDecoder = decoder.RealDecoder
# Start from the CER maps and override the types whose DER rules differ.
tagMap = decoder.tagMap.copy()
tagMap.update(
    {univ.BitString.tagSet: BitStringDecoder(),
     univ.OctetString.tagSet: OctetStringDecoder(),
     univ.Real.tagSet: RealDecoder()}
)
typeMap = decoder.typeMap.copy()
# Put in non-ambiguous types for faster codec lookup
for typeDecoder in tagMap.values():
    if typeDecoder.protoComponent is not None:
        typeId = typeDecoder.protoComponent.__class__.typeId
        if typeId is not None and typeId not in typeMap:
            typeMap[typeId] = typeDecoder
class Decoder(decoder.Decoder):
    # DER permits definite-length encoding only.
    supportIndefLength = False
#: Turns DER octet stream into an ASN.1 object.
#:
#: Takes DER octetstream and decode it into an ASN.1 object
#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
#: may be a scalar or an arbitrary nested structure.
#:
#: Parameters
#: ----------
#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#:     DER octetstream
#:
#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#:     A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
#:     being decoded, *asn1Spec* may or may not be required. Most common reason for
#:     it to require is that ASN.1 structure is encoded in *IMPLICIT* tagging mode.
#:
#: Returns
#: -------
#: : :py:class:`tuple`
#:     A tuple of pyasn1 object recovered from DER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#:     and the unprocessed trailing portion of the *substrate* (may be empty)
#:
#: Raises
#: ------
#: : :py:class:`pyasn1.error.PyAsn1Error`
#:     On decoding errors
decode = Decoder(tagMap, typeMap)
|
zegra1989/pytree | rtree.py | Python | mit | 14,094 | 0.000676 | # -*- coding:utf-8 -*-
# 使用 UTF-8
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import sys
from heap import Heap
class Rectangle(object):
    """An n-dimensional axis-aligned minimal bounding rectangle (MBR).

    Bounds are kept as per-dimension min/max lists; both start out as
    None until initialised from an entry or resized, so the area helpers
    must tolerate a partially initialised box (they return -1 then).
    (Python 2 code: uses xrange.)
    """
    def __init__(self, dimension, entry=None):
        super(Rectangle, self).__init__()
        self.dimension = dimension
        # min_dim/max_dim hold the lower/upper bound per dimension.
        self.min_dim = [None for _ in xrange(dimension)]
        self.max_dim = [None for _ in xrange(dimension)]
        if entry is not None:
            # Degenerate (point) rectangle covering just this entry.
            for ipos in xrange(self.dimension):
                self.min_dim[ipos] = entry[ipos]
                self.max_dim[ipos] = entry[ipos]
    def resize(self, rects):
        """
        Recompute this Rectangle's MBR (Minimal Boundary Rect) from the
        given list of child Rectangles.
        """
        for ipos in xrange(self.dimension):
            self.min_dim[ipos] = min(map(lambda x: x.min_dim[ipos], rects))
            self.max_dim[ipos] = max(map(lambda x: x.max_dim[ipos], rects))
    def resize2(self, entry):
        """
        Grow this Rectangle's MBR so it also covers the given entry.
        entry holds all dimension values of one data record.
        """
        for ipos in xrange(self.dimension):
            if entry[ipos] < self.min_dim[ipos]:
                self.min_dim[ipos] = entry[ipos]
            elif entry[ipos] > self.max_dim[ipos]:
                self.max_dim[ipos] = entry[ipos]
    def expand_area(self, entry):
        # Area increase needed to cover entry; -1 if the box is not yet
        # fully initialised (some bound is still None).
        new_area = 1.0
        curr_area = 1.0
        for ipos in xrange(self.dimension):
            max_value = self.max_dim[ipos]
            min_value = self.min_dim[ipos]
            try:
                curr_area *= (max_value - min_value)
            except TypeError as e:
                # Rectangle not fully initialised yet (bounds still None)
                return -1
            if entry[ipos] > self.max_dim[ipos]:
                max_value = entry[ipos]
            elif entry[ipos] < self.min_dim[ipos]:
                min_value = entry[ipos]
            try:
                new_area *= (max_value - min_value)
            except TypeError as e:
                # Rectangle not fully initialised yet (bounds still None)
                return -1
        return new_area - curr_area
    def overlap_area(self, rect):
        # Hyper-volume of the intersection with rect; 0.0 if disjoint,
        # -1 if either box has uninitialised bounds.
        area = 1.0
        for ipos in xrange(self.dimension):
            try:
                if self.max_dim[ipos] < rect.max_dim[ipos]:
                    factor = self.max_dim[ipos] - rect.min_dim[ipos]
                else:
                    factor = rect.max_dim[ipos] - self.min_dim[ipos]
            except TypeError as e:
                # Rectangle not fully initialised yet (bounds still None)
                return -1
            if factor < 0:
                return 0.0
            area *= factor
        return area
    def __contains__(self, rect):
        # NOTE(review): despite the `in` syntax, this tests *intersection*
        # (no dimension is disjoint), not strict containment.
        for ipos in xrange(self.dimension):
            if self.max_dim[ipos] < rect.min_dim[ipos]:
                return False
            if self.min_dim[ipos] > rect.max_dim[ipos]:
                return False
        return True
    def __str__(self):
        return "Min:{0}, Max:{1}".format(
            self.min_dim, self.max_dim)
class RNode(object):
def __init__(self, degree, dimension):
super(RNode, self).__init__()
self.num = 0
self.isleaf = True
self.degree = degree
self.dimension = dimension
if dimension < 2:
raise Exception("请使用 B/B+树 代替")
if dimension > 6:
print "WARNING:R树推荐维度为 [2,6]"
self.mbr = Rectangle(self.dimension)
self.threshold = degree*2
self.rects = [None for _ in xrange(self.threshold)]
self.pnodes = [None for _ in xrange(self.threshold)]
def adjust(self):
self.mbr = Rectangle(self.dimension)
self.mbr.resize(self.rects[:self.num])
def involve(self, entry):
self.mbr.resize2(entry)
def pointer(self):
return self
def most_overlap_pos(self, ipos):
"""
从 self.pnodes 中找到与 self.pnodes[ipos] 重合度最大的点的位置
"""
child = self.pnodes[ipos]
ichild_pos, max_overlap, max_overlap_pos = 0, -1, 0
while ichild_pos < self.num:
if ipos == ichild_pos:
continue
overlap = child.overlap_area(self.pnodes[ichild_pos].mbr)
if max_overlap < overlap:
max_overlap = overlap
max_overlap_pos = ichild_pos
ichild_pos += 1
return max_overlap_pos
class DataNode(object):
    """Leaf bucket holding up to max_length data entries plus their MBR.

    min_length is the minimum fill factor, ceil(max_length / 2).
    """
    def __init__(self, max_length=10, dimension=2):
        super(DataNode, self).__init__()
        self.num = 0        # number of entries currently stored
        self.data = None
        self.max_length = max_length
        # min_length = ceil(max_length / 2)
        base, mode = divmod(self.max_length, 2)
        if mode > 0:
            base += 1
        self.min_length = base
        # BUG FIX: the original read self.dimension without ever assigning
        # it, so constructing a DataNode always raised AttributeError.
        # `dimension` is now a (backward-compatible) parameter.
        self.dimension = dimension
        self.mbr = Rectangle(self.dimension)
class RTree(object):
"""docstring for RTree"""
def __init__(self, degree, dimension):
super(RTree, self).__init__()
self.degree = degree
self.dimension = dimension
self.threshold = degree*2
self.root = self.allocate_namenode()
def allocate_namenode(self):
raise NotImplementedError()
def deallocate_namenode(self, node):
raise NotImplementedError()
def allocate_datanode(self):
raise NotImplementedError()
def deallocate_datanode(self, node):
raise NotImplementedError()
def save_docs(self, metanode):
raise NotImplementedError()
def load_docs(self, metanode, ipos):
raise NotImplementedError()
def search(self, rect, node=None):
if node is None:
node = self.root
indexes = []
ipos = node.num-1
while ipos >= 0:
if rect in node.rects[ipos]:
indexes.append(ipos)
ipos -= 1
if len(indexes) == 0:
return []
if node.isleaf is True:
return map(lambda x: self.load_docs(node.pnodes[x]), indexes)
results = []
for ipos in indexes:
results.extend(self.search(rect, node.pnodes[ipos]))
return results
def split(self, parent, ipos, node):
"""
由于 R树 中节点内部是无序的,为了减少移动数据的开销
分裂后的两个节点一个放在分裂前节点的位置,一个放在末尾
目前分裂的简单算法:
直接选取第一个点当作旧节点的核心rect
计算旧核心rect与其他rect的重合度
选取重合度最低的一个rect作为新节点的核心rect
计算新核心rect与其他rect的重合度
对比每个非核心rect与两个核心的重合度
选出与新核心重合度更高的 degree-1 个节点组成新节点
"""
if parent.isleaf is False:
new_node = self.allocate_namenode()
new_node.isleaf = node.isleaf
ancor = node.rects[0]
heap = Heap(node.pnodes, reverse=True,
key=lambda x: ancor.overlap_area(x.mbr))
ipos = 0
while ipos < node.degree:
new_node.pnodes[ipos] = heap.pop()
new_node.rects[ipos] = new_node.pnodes[ipos].mbr
ipos += 1
new_node.num = node.degree
new_node.adjust()
ipos = 0
length = len(heap)
while ipos < length:
node.pnodes[ipos] = heap.heap[ipos]
node.pnodes[ipos].adjust()
node.rects[ipos] = heap.heap[ipos].mbr
ipos += 1
node.num = length
node.adjust()
parent.pnodes[parent.num-1] = new_node.pointer()
parent.rects[parent.num-1] = new_node.mbr
parent.num += 1
return None
new_node = node.split()
parent.pnodes[parent.num-1] = new_node.pointer()
parent.rects[parent.num-1] = new_node.mbr
parent.num += 1
return None
def insert(self, entry, doc):
"""
entry 是长度为 self.dimension 的数组
entry 中每一个维度都需要是数值型
"""
if self.root.num != self.threshold:
return self.insert_nonfull(self.root, entry, doc)
old_root = self.root
new_root = self.allocate_namenode()
new_root.isleaf = False
new_root.pnodes[0] = old_root.pointer()
new_root.rects[0] = old_root.mbr
new_root.num += 1
sel |
sobomax/virtualbox_64bit_edd | src/VBox/ValidationKit/testmanager/core/testset.py | Python | gpl-2.0 | 28,203 | 0.014608 | # -*- coding: utf-8 -*-
# $Id: testset.py $
"""
Test Manager - TestSet.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard python imports.
import os;
import zipfile;
import unittest;
# Validation Kit imports.
from common import utils;
from testmanager import config;
from testmanager.core.base import ModelDataBase, ModelDataBaseTestCase, ModelLogicBase, TMExceptionBase;
from testmanager.core.testbox import TestBoxData;
from testmanager.core.testresults import TestResultFileDataEx;
class TestSetData(ModelDataBase):
"""
TestSet Data.
"""
## @name TestStatus_T
# @{
ksTestStatus_Running = 'running';
ksTestStatus_Success = 'success';
ksTestStatus_Skipped = 'skipped';
ksTestStatus_BadTestBox = 'bad-testbox';
ksTestStatus_Aborted = 'aborted';
ksTestStatus_Failure = 'failure';
ksTestStatus_TimedOut = 'timed-out';
ksTestStatus_Rebooted = 'rebooted';
## @}
## List of relatively harmless (to testgroup/case) statuses.
kasHarmlessTestStatuses = [ ksTestStatus_Skipped, ksTestStatus_BadTestBox, ksTestStatus_Aborted, ];
## List of bad statuses.
kasBadTestStatuses = [ ksTestStatus_Failure, ksTestStatus_TimedOut, ksTestStatus_Rebooted, ];
ksIdAttr = 'idTestSet';
ksParam_idTestSet = 'TestSet_idTestSet';
ksParam_tsConfig = 'TestSet_tsConfig';
ksParam_tsCreated = 'TestSet_tsCreated';
ksParam_tsDone = 'TestSet_tsDone';
ksParam_enmStatus = 'TestSet_enmStatus';
ksParam_idBuild = 'TestSet_idBuild';
ksParam_idBuildCategory = 'TestSet_idBuildCategory';
ksParam_idBuildTestSuite = 'TestSet_idBuildTestSuite';
ksParam_idGenTestBox = 'TestSet_idGenTestBox';
ksParam_idTestBox = 'TestSet_idTestBox';
ksParam_idTestGroup = 'TestSet_idTestGroup';
ksParam_idGenTestCase = 'TestSet_idGenTestCase';
ksParam_idTestCase = 'TestSet_idTestCase';
ksParam_idGenTestCaseArgs = 'TestSet_idGenTestCaseArgs';
ksParam_idTestCaseArgs = 'TestSet_idTestCaseArgs';
ksParam_idTestResult = 'TestSet_idTestResult';
ksParam_sBaseFilename = 'TestSet_sBaseFilename';
ksParam_iGangMemberNo = 'TestSet_iGangMemberNo';
ksParam_idTestSetGangLeader = 'TestSet_idTestSetGangLeader';
kasAllowNullAttributes = ['tsDone', 'idBuildTestSuite', 'idTestSetGangLeader' ];
kasValidValues_enmStatus = [
ksTestStatus_Running,
ksTestStatus_Success,
ksTestStatus_Skipped,
ksTestStatus_BadTestBox,
ksTestStatus_Aborted,
ksTestStatus_Failure,
ksTestStatus_TimedOut,
ksTestStatus_Rebooted,
];
kiMin_iGangMemberNo = 0;
kiMax_iGangMemberNo = 1023;
    def __init__(self):
        """Construct a TestSetData with default values (mostly None; a new
        set starts in the 'running' state)."""
        ModelDataBase.__init__(self);
        #
        # Initialize with defaults.
        # See the database for explanations of each of these fields.
        #
        self.idTestSet = None;
        self.tsConfig = None;
        self.tsCreated = None;
        self.tsDone = None;
        self.enmStatus = 'running';
        self.idBuild = None;
        self.idBuildCategory = None;
        self.idBuildTestSuite = None;
        self.idGenTestBox = None;
        self.idTestBox = None;
        self.idTestGroup = None;
        self.idGenTestCase = None;
        self.idTestCase = None;
        self.idGenTestCaseArgs = None;
        self.idTestCaseArgs = None;
        self.idTestResult = None;
        self.sBaseFilename = None;
        self.iGangMemberNo = 0;
        self.idTestSetGangLeader = None;
    def initFromDbRow(self, aoRow):
        """
        Internal worker for initFromDbWithId and initFromDbWithGenId as well as
        TestBoxSetLogic.

        Copies one TestSets row into the data members and returns self.
        Raises TMExceptionBase when aoRow is None (no match found).
        """
        if aoRow is None:
            raise TMExceptionBase('TestSet not found.');
        # Index order must match the SELECT * column order of TestSets.
        self.idTestSet = aoRow[0];
        self.tsConfig = aoRow[1];
        self.tsCreated = aoRow[2];
        self.tsDone = aoRow[3];
        self.enmStatus = aoRow[4];
        self.idBuild = aoRow[5];
        self.idBuildCategory = aoRow[6];
        self.idBuildTestSuite = aoRow[7];
        self.idGenTestBox = aoRow[8];
        self.idTestBox = aoRow[9];
        self.idTestGroup = aoRow[10];
        self.idGenTestCase = aoRow[11];
        self.idTestCase = aoRow[12];
        self.idGenTestCaseArgs = aoRow[13];
        self.idTestCaseArgs = aoRow[14];
        self.idTestResult = aoRow[15];
        self.sBaseFilename = aoRow[16];
        self.iGangMemberNo = aoRow[17];
        self.idTestSetGangLeader = aoRow[18];
        return self;
    def initFromDbWithId(self, oDb, idTestSet):
        """
        Initialize the object from the database.

        Looks up the TestSets row with the given primary key and fills the
        data members; raises TMExceptionBase if no such row exists.
        """
        oDb.execute('SELECT *\n'
                    'FROM TestSets\n'
                    'WHERE idTestSet = %s\n'
                    , (idTestSet, ) );
        aoRow = oDb.fetchOne()
        if aoRow is None:
            raise TMExceptionBase('idTestSet=%s not found' % (idTestSet,));
        return self.initFromDbRow(aoRow);
def openFile(self, sFilename, sMode = 'rb'):
"""
Opens a file.
Returns (oFile, cbFile, fIsStream) on success.
Returns (None, sErrorMsg, None) on failure.
| Will not raise exceptions, unless the class instance is invalid.
"""
assert sMode in [ 'rb', 'r', 'rU' ];
# Try raw file first.
sFile1 = os.path.join(config.g_ksFileAreaRootDir, self.sBaseFilename + '-' + sFilename);
try:
oFile = open(sFile1, sMode);
return (oFile, os.fstat(oFile.fileno()).st_size, False);
except Exception as oXcpt1:
# Try the zip arch | ive next.
sFile2 = os.path.join(config.g_ksZipFileAreaRootDir, self.sBaseFilename + '.zip');
try:
oZipFile = zipfile.ZipFile(sFile2, 'r');
oFile = oZipFile.open(sFilename, sMode if sMode != 'rb' else 'r');
cbFile = oZipFile.getinfo(sFilename).file_size;
return (oFile, cbFile, True);
except Exception as oXcpt2:
# Construct a meaningful error message.
try:
if os.path.exists(sFile1):
return (None, 'Error opening "%s": %s' % (sFile1, oXcpt1), None);
if not os.path.exists(sFile2):
return (None, 'File "%s" not found. [%s, %s]' % (sFilename, sFile1, sFile2,), None);
return (None, 'Error opening "%s" inside "%s": %s' % (sFilename, sFile2, oXcpt2), None);
except Exception as oXcpt3:
return (None, 'Aa! Megami-sama! %s; |
cjng96/devCmdTool | mainRegList.py | Python | apache-2.0 | 10,374 | 0.031687 |
import os
import urwid
import subprocess
from multiprocessing import Pool
import urwidHelper as ur
from tool import git, system, systemSafe, systemRet, programPath
import tool
import myutil
from globalBase import *
def _repoGetStatus(item):
    """Return the git status of a registered folder item as a dict:
    {'M': 1 if the working tree has changes else 0,
     'E': error string if `git status` failed, else None}.
    Non-repo items get the clean default without running git.
    Assumes the caller already chdir'ed into the item's folder.
    """
    result = {"M": 0, "E": None}
    if item["repo"]:
        try:
            out = system("git status -s")
            if out != "":
                result["M"] = 1
        except subprocess.CalledProcessError as exc:
            result["E"] = str(exc)
    return result
# NOTE: handed to pool.map, so it must be a module-level (picklable)
# function, not a local one.
def _genRepoItem(item):
    """Fill `item` with its repo status ('repoStatus') and display string
    ('title'), then return it. Chdir's into item["path"]; the caller is
    responsible for restoring the working directory afterwards."""
    pp = item["path"]
    try:
        os.chdir(pp)
        item["repoStatus"] = _repoGetStatus(item)
    except FileNotFoundError:
        # Folder vanished: record the error; getTitle() handles the E case.
        item["repoStatus"] = dict(E="Not found")
    item["title"] = getTitle(item)
    return item
def getTitle(item):
    """Build the one-line list label for a registered folder item, e.g.
    "dir(name1, name2) ==> [M+1] branch -> upstream", and cache whether the
    repo is fully in sync in item["repoStatus"]["same"].
    Assumes the cwd is the item's folder (git commands run in-place)."""
    ss = os.path.basename(item["path"])
    ss += "("
    # NOTE(review): assumes item["names"] is non-empty; with zero names the
    # [:-2] below would also strip the "(" and the basename's last char.
    for n in item["names"]:
        ss += n + ", "
    ss = ss[:-2]   # drop the trailing ", "
    ss += ")"
    if item["repo"]:
        ss += " ==> ["
        branch = ""
        upstream = ""
        repoStatus = item["repoStatus"]
        isSame = True
        if repoStatus is None:
            ss += "Not found"
        else:
            if repoStatus["E"] is not None:
                ss += "err: " + str(repoStatus["E"])
            else:
                if repoStatus["M"] != 0:
                    ss += "M"   # working tree modified
                    isSame = False
                try:
                    out = tool.git.getBranchStatus()
                    if out is None:
                        ss += "no branch"
                    else:
                        branch, rev, upstream, remoteRev, ahead, behind = out
                        #print(branch, rev, upstream, ahead, behind)
                        if ahead:
                            ss += "+%d" % ahead
                            isSame = False
                        if behind:
                            ss += "-%d" % behind
                            isSame = False
                except subprocess.CalledProcessError as e:
                    ss += "Err - %s" % e
            ss += "]"
            ss += " %s -> %s" % (branch, upstream)
            # Remember for the list coloring (gray = in sync).
            repoStatus["same"] = isSame
    return ss
# 두칸씩 작은 오버레이로 띄우자
class DlgRegFolderSetting(ur.cDialog):
    """Modal overlay dialog showing one registered folder's settings.

    Lets the user toggle repo tracking (R) and add/remove names and
    groups (Insert/Delete); every change is persisted immediately via
    g.configSave().
    """
    def __init__(self, onExit, item):
        super().__init__()
        self.onExit = onExit
        self.item = item            # the registry entry being edited
        self.header = ">> dc V%s - folder setting" % g.version
        self.headerText = urwid.Text(self.header)
        self.lbPath = urwid.Text("Path: %s" % item["path"])
        self.lbRepo = urwid.Text("Repo: ..")
        self.lbNames = urwid.Text("Names -----------")
        self.lbGroups = urwid.Text("Groups -----------")
        self.widgetListName = ur.mListBox(urwid.SimpleFocusListWalker(ur.btnListMakeTerminal([], None)))
        self.widgetListGroup = ur.mListBox(urwid.SimpleFocusListWalker(ur.btnListMakeTerminal(["< No group >"], None)))
        #urwid.SimpleFocusListWalker(ur.makeBtnListTerminal([], None)))
        self.lbHelp = urwid.Text("Insert: new name/group, Delete: remove name/group, R: toggle repo status")
        self.widgetFrame = urwid.LineBox(urwid.Pile(
            [("pack", self.headerText),
             ("pack", self.lbPath),
             ("pack", self.lbRepo),
             ("pack", self.lbNames), (8, self.widgetListName),
             ('pack', urwid.Divider('-')),
             ("pack", self.lbGroups), (8, self.widgetListGroup),
             ("pack", self.lbHelp)]))
        # Centered overlay on top of the current top-level widget.
        self.mainWidget = urwid.Overlay(urwid.Filler(self.widgetFrame), g.loop.widget, 'center', 80, 'middle', 30)
    def init(self):
        # Called once before the dialog is displayed.
        self.showInfo()
        return True
    def showInfo(self):
        """Refresh the repo label and both list boxes from self.item."""
        self.lbRepo.set_text("Repo: %s" % ("O" if self.item["repo"] else "X"))
        names = self.item["names"]
        del self.widgetListName.body[:]
        self.widgetListName.body += ur.btnListMakeTerminal(names, None)
        groups = self.item["groups"]
        if len(groups) > 0:
            # Only replace the "< No group >" placeholder when there is data.
            del self.widgetListGroup.body[:]
            self.widgetListGroup.body += ur.btnListMakeTerminal(groups, None)
        #self.widgetFrame.set_focus(self.widgetContent)
    def unhandled(self, key):
        """Key handler: q/esc/F4 close, R toggles repo tracking,
        Insert/Delete edit whichever list currently has focus."""
        if key == 'f4' or key == "q" or key == "esc":
            self.close()
        elif key == "r" or key == "R":
            # Toggle and persist the repo flag on the shared registry entry.
            self.item["repo"] = not self.item["repo"]
            ii = g.regFindByPath(self.item["path"])
            ii["repo"] = self.item["repo"]
            g.configSave()
            self.showInfo()
        elif key == "insert":
            focusWidget = self.widgetFrame.original_widget.get_focus()
            if focusWidget == self.widgetListName:
                def onOk(ss):
                    self.item["names"].append(ss)
                    g.configSave()
                    self.showInfo()
                ur.popupInput("Input new name", "", onOk, width=60)
            elif focusWidget == self.widgetListGroup:
                def onOk(ss):
                    self.item["groups"].append(ss)
                    g.configSave()
                    self.showInfo()
                ur.popupInput("Input new group", "", onOk, width=60)
        elif key == "delete":
            focusWidget = self.widgetFrame.original_widget.get_focus()
            if focusWidget == self.widgetListName:
                ss = self.widgetListName.focus.original_widget.get_label()
                def onOk():
                    self.item["names"].remove(ss)
                    g.configSave()
                    self.showInfo()
                ur.popupAsk("Remove Name", "[%s] will be deleted. Are you sure?" % ss, onOk)
            elif focusWidget == self.widgetListGroup:
                ss = self.widgetListGroup.focus.original_widget.get_label()
                def onOk():
                    self.item["groups"].remove(ss)
                    g.configSave()
                    self.showInfo()
                ur.popupAsk("Remove Group", "[%s] will be deleted. Are you sure?" % ss, onOk)
class DlgRegList(ur.cDialog):
    def __init__(self, onExit):
        """Build the repo-list screen: a filterable list of registered
        repos with a command/filter edit line as the footer."""
        super().__init__()
        self.onExit = onExit
        self.widgetFileList = ur.mListBox(urwid.SimpleFocusListWalker(ur.btnListMakeTerminal([], None)))
        #self.widgetFileList.setFocusCb(lambda newFocus: self.onFileFocusChanged(newFocus))
        self.widgetContent = ur.mListBox(urwid.SimpleListWalker(ur.textListMakeTerminal(["< Nothing to display >"])))
        #self.widgetContent.isViewContent = True
        self.header = ">> dc repo list - J/K(move) E(modify) P(pull all) del Q/esc(quit)"
        self.headerText = urwid.Text(self.header)
        #self.widgetFrame = urwid.Pile(
        #	[(15, urwid.AttrMap(self.widgetFileList, 'std')), ('pack', urwid.Divider('-')), self.widgetContent])
        self.widgetFrame = urwid.AttrMap(self.widgetFileList, 'std')
        # Footer edit box doubles as incremental filter and command input.
        self.edInput = ur.editGen("$ ", "", lambda edit, text: self.onInputChanged(edit, text))
        self.mainWidget = urwid.Frame(self.widgetFrame, header=self.headerText, footer=self.edInput)
        self.itemList = None    # (palette, title, item) tuples after refreshFile()
        #self.cbFileSelect = lambda btn: self.onFileSelected(btn)
        self.mainWidget.set_focus("footer")
        #self.content = ""
        #self.selectFileName = ""
    def init(self):
        # Initial scan/population of the repo list; always succeeds.
        self.refreshFile()
        return True
    def onInputChanged(self, edit, text):
        """Footer edit callback: uppercase letters act as commands and are
        stripped back out of the edit box; everything else filters the list."""
        last = ""
        if len(text) > 0:
            last = text[-1]
        if last in ["E", 'J', 'K', "H", 'D', 'Q', "P"]:
            # Remove the command character on the next loop tick -- the
            # widget can't be mutated from inside its own change callback.
            def _cb(self, data):
                data["dlg"].edInput.set_edit_text(data["text"][:-1])
            g.loop.set_alarm_in(0.00001, _cb, dict(dlg=self, text=text))
            self.unhandled(last)
            #traceback.print_stack()
            return #text
        self.refreshList(text)
    def onFileSelected(self, btn):
        """Chdir into the selected repo's folder and close the dialog."""
        widget = btn
        pp = widget.attr["path"]
        os.chdir(pp)
        self.close()
    def refreshFile(self):
        """Scan all registered repo folders in parallel, rebuild
        self.itemList as (palette, title, item) tuples, then redraw."""
        oldPath = os.getcwd()
        # title, item
        # itemList = []
        # for x in g.regList:
        #     # todo: multi thread
        #     itemList.append(genRepoItem(x))
        # Worker processes chdir into each repo; only repo entries are scanned.
        pool = Pool(10)
        lst = filter(lambda x: x["repo"], g.regList)
        self.itemList = pool.map(_genRepoItem, lst)
        #itemList = [ (item["title"], item) for item in itemList]
        #itemList = [ (getTitle(x), x) for x in g.regList ]
        os.chdir(oldPath)
        # mstd, title, item
        def _gen(item):
            # Palette: gray = clean & synced, green = dirty or ahead/behind.
            mstd = "std"
            if "repo" in item and item["repo"]:
                if item["repoStatus"]["same"]:
                    mstd = "grayfg"
                else:
                    mstd = "greenfg"
            return mstd, item["title"], item
        # status
        self.itemList = list(map(_gen, self.itemList))
        self.refreshList("")
def refreshList(self, filterStr):
# TODO: names?
def _filterList(item):
if filterStr == "": return True
for name in item[2]["names"]:
if filterStr.lower() in name.lower():
return True
itemList = list(filter(_filterList, self.itemList))
#self.headerText.set_text("%s - %s%s - %d" % (self.title, pp, status, len(itemList)))
idx = 0
if self.widgetFileList.body.focus is not None:
idx = self.widgetFileList.body.focus
myutil.refreshBtnListMarkupTuple(itemList, self.widgetFileList, lambda btn: self.onFileSelected(btn))
if idx >= len(self.widgetFileList.body):
idx = len(self. |
sdpython/ensae_teaching_cs | _doc/examples/automation/jenkins_setup.py | Python | mit | 1,450 | 0.002073 | # -*- coding: utf-8 -*-
"""
Set up or update Jenkins Jobs
=============================
Update Jenkins jobs for GitHub repositories.
"""
#########################################
# import
import sys
import os
from pyquickhelper.loghelper import get_keyword
#########################################
# logging
from pyquickhelper.loghelper import fLOG # publish_lectures
fLOG(OutputPrint=True)
#########################################
# import des fonctions dont on a besoin
from pyquickhelper.jenkinshelper import JenkinsExt
from ensae_teaching_cs.automation.jenkins_helper import setup_jenkins_server, engines_default
#########################################
# récupération des identifiants Jenkins
user = get_password("jenkins", "_automation,user")
pwd = get_password("jenkins", "_automation,pwd")
#########################################
# instantiation d'une classe faisant l'interface avec le service
platform = sys.platform
if platform.startswith("win"):
location = "d:\\jenkins\\pymy"
else:
location = "/var/lib/jenkins/workspace"
js = JenkinsExt('http://localhost:8080/', user, pwd, platform=platform,
fLOG=fLOG, engines=engines_default(platform=platform))
#########################################
# mise à jour des jobs
setup_jenkins_server(js, overwrite=True,
| delete_first=False,
| location="d:\\jenkins\\pymy",
disable_schedule=False)
|
raman-sharma/pyAudioAnalysis | analyzeMovieSound.py | Python | apache-2.0 | 6,014 | 0.045727 | import os, sys, shutil, glob, numpy, csv, cPickle
import scipy.io.wavfile as wavfile
import audioBasicIO
import audioTrainTest as aT
import audioSegmentation as aS
import matplotlib.pyplot as plt
import scipy.spatial.distance
minDuration = 7;
def classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
if modelType=='svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType=='knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
PsAll = numpy.zeros((len(classNames), ))
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList)==0:
print "No WAV files found!"
return
Results = []
for wavFile in wavFilesList:
[Fs, x] = audioBasicIO.readAudioFile(wavFile)
signalLength = x.shape[0] / float(Fs)
[Result, P, classNames] = aT.fileClassification(wavFile, modelName, modelType)
PsAll += (numpy.array(P) * signalLength)
Result = int(Result)
Results.append(Result)
if outputMode:
print "{0:s}\t{1:s}".format(wavFile,classNames[Result])
Results = numpy.array(Results)
# print distribution of classes:
[Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames)+1))
if outputMode:
for i,h in enumerate(Histogram):
print "{0:20s}\t\t{1:d}".format(classNames[i], h)
PsAll = PsAll / numpy.sum(PsAll)
if outputMode:
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title("Classes percentage " + inputFolder.replace('Segments',''))
ax.axis((0, len(classNames)+1, 0, 1))
ax.set_xticks(numpy.array(range(len(classNames)+1)))
ax.set_xticklabels([" "] + classNames)
ax.bar(numpy.array(range(len(classNames)))+0.5, PsAll)
plt.show()
return classNames, PsAll
def getMusicSegmentsFromFile(inputFile):
modelType = "svm"
modelName = | "data/svmMovies8classes"
dirOutput = inputFile[0:-4] + "_musicSegments"
if os.path.exists(dirOutput) and dirOutput!=".":
shutil.rmtree(dirOutput)
os.makedirs(dirOutput)
[Fs, x] = audioBasicIO.readAudioFile(inputFile)
if modelType=='svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, s | tStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType=='knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
flagsInd, classNames, acc = aS.mtFileClassification(inputFile, modelName, modelType, plotResults = False, gtFile = "")
segs, classes = aS.flags2segs(flagsInd, mtStep)
for i, s in enumerate(segs):
if (classNames[int(classes[i])] == "Music") and (s[1] - s[0] >= minDuration):
strOut = "{0:s}{1:.3f}-{2:.3f}.wav".format(dirOutput+os.sep, s[0], s[1])
wavfile.write( strOut, Fs, x[int(Fs*s[0]):int(Fs*s[1])])
def analyzeDir(dirPath):
for i,f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file
getMusicSegmentsFromFile(f)
[c, P]= classifyFolderWrapper(f[0:-4] + "_musicSegments", "svm", "data/svmMusicGenre8", False)
if i==0:
print "".ljust(100)+"\t",
for C in c:
print C.ljust(12)+"\t",
print
print f.ljust(100)+"\t",
for p in P:
print "{0:.2f}".format(p).ljust(12)+"\t",
print
def main(argv):
if argv[1]=="--file":
getMusicSegmentsFromFile(argv[2])
classifyFolderWrapper(argv[2][0:-4] + "_musicSegments", "svm", "data/svmMusicGenre8", True)
elif argv[1]=="--dir":
analyzeDir(argv[2])
elif argv[1]=="--sim":
csvFile = argv[2]
f = []
fileNames = []
with open(csvFile, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for j,row in enumerate(spamreader):
if j>0:
ftemp = []
for i in range(1,9):
ftemp.append(float(row[i]))
f.append(ftemp)
R = row[0]
II = R.find(".wav");
fileNames.append(row[0][0:II])
f = numpy.array(f)
Sim = numpy.zeros((f.shape[0], f.shape[0]))
for i in range(f.shape[0]):
for j in range(f.shape[0]):
Sim[i,j] = scipy.spatial.distance.cdist(numpy.reshape(f[i,:], (f.shape[1],1)).T, numpy.reshape(f[j,:], (f.shape[1],1)).T, 'cosine')
Sim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))
plt.hist(Sim1)
plt.show()
fo = open(csvFile + "_simMatrix", "wb")
cPickle.dump(fileNames, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(f, fo, protocol = cPickle.HIGHEST_PROTOCOL)
cPickle.dump(Sim, fo, protocol = cPickle.HIGHEST_PROTOCOL)
fo.close()
elif argv[1]=="--loadsim":
try:
fo = open(argv[2], "rb")
except IOError:
print "didn't find file"
return
try:
fileNames = cPickle.load(fo)
f = cPickle.load(fo)
Sim = cPickle.load(fo)
except:
fo.close()
fo.close()
print fileNames
Sim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))
plt.hist(Sim1)
plt.show()
elif argv[1]=="--audio-event-dir":
files = "*.wav"
inputFolder = argv[2]
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
for i,w in enumerate(wavFilesList):
[flagsInd, classesAll, acc] = aS.mtFileClassification(w, "data/svmMovies8classes", "svm", False, '')
histTemp = numpy.zeros( (len(classesAll), ) )
for f in flagsInd:
histTemp[int(f)] += 1.0
histTemp /= histTemp.sum()
if i==0:
print "".ljust(100)+"\t",
for C in classesAll:
print C.ljust(12)+"\t",
print
print w.ljust(100)+"\t",
for h in histTemp:
print "{0:.2f}".format(h).ljust(12)+"\t",
print
return 0
if __name__ == '__main__':
main(sys.argv)
|
maaaaz/nmaptocsv | nmaptocsv.py | Python | lgpl-3.0 | 24,214 | 0.010325 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of nmaptocsv.
#
# Copyright (C) 2012, 2019 Thomas Debize <tdebize at mail.com>
# All rights reserved.
#
# nmaptocsv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nma | ptocsv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with nmaptocsv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import p | rint_function
# Global imports
import sys
import re
import csv
import struct
import socket
import itertools
import argparse
import xml.etree.cElementTree as ET
# Python 2 and 3 compatibility
if (sys.version_info < (3, 0)):
izip = itertools.izip
fd_read_options = 'rb'
fd_write_options = 'wb'
else:
izip = zip
fd_read_options = 'r'
fd_write_options = 'w'
# Script version
VERSION = '1.6'
# Options definition
parser = argparse.ArgumentParser()
# Options definition
mandatory_grp = parser.add_argument_group('Mandatory parameters')
mandatory_grp.add_argument('-i', '--input', help = 'Nmap scan output file in normal (-oN) or Grepable (-oG) format (stdin if not specified)')
mandatory_grp.add_argument('-x', '--xml-input', help = 'Nmap scan output file in XML (-oX) format')
output_grp = parser.add_argument_group('Output parameters')
output_grp.add_argument('-o', '--output', help = 'CSV output filename (stdout if not specified)')
output_grp.add_argument('-f', '--format', help = 'CSV column format { fqdn, rdns, hop_number, ip, mac_address, mac_vendor, port, protocol, os, script, service, version } (default: ip-fqdn-port-protocol-service-version)', default = 'ip-fqdn-port-protocol-service-version')
output_grp.add_argument('-S', '--script', help = 'Adds the script column in output, alias for -f "ip-fqdn-port-protocol-service-version-script"', action = 'store_const', const = 'ip-fqdn-port-protocol-service-version-script')
output_grp.add_argument('-d', '--delimiter', help = 'CSV output delimiter (default ";"). Ex: -d ","', default = ';')
output_grp.add_argument('-n', '--no-newline', help = 'Do not insert a newline between each host. By default, a newline is added for better readability', action = 'store_true', default = False)
output_grp.add_argument('-s', '--skip-header', help = 'Do not print the CSV header', action = 'store_true', default = False)
# Handful patterns
#-- IP regex
p_ip_elementary = r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})'
p_mac_elementary = r'[0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]'
# Nmap Normal Output patterns
#-- Target
p_ip_nmap5 = r'Interesting.*on\s(?:(?P<fqdn_nmap5>.*) (?=\((?P<ip_nmap5>%s)\)))|Interesting.*on\s(?P<ip_only_nmap5>.*)\:' % p_ip_elementary
p_ip_nmap6 = r'Nmap.*for\s(?:(?P<fqdn_nmap6>.*) (?=\((?P<ip_nmap6>%s)\)))|Nmap.*for\s(?P<ip_only_nmap6>%s)$' % (p_ip_elementary, p_ip_elementary)
p_ip = re.compile('%s|%s' % (p_ip_nmap5, p_ip_nmap6))
#-- rDNS
p_rdns = re.compile(r'rDNS record for (?P<ip>%s):\s(?P<rdns>.*)$' % p_ip_elementary)
#-- Port header
p_port_header = re.compile(r'^(?P<port>PORT)\s+(?P<state>STATE)\s+(?P<service>SERVICE)\s+(?P<reason>REASON\s*)?(?P<version>VERSION$)?')
#-- Port finding
p_port_without_reason = re.compile(r'^(?P<number>[\d]+)\/(?P<protocol>tcp|udp)\s+(?:open|open\|filtered)\s+(?P<service>[\w\S]*)(?:\s*(?P<version>.*))?$')
p_port_with_reason = re.compile(r'^(?P<number>[\d]+)\/(?P<protocol>tcp|udp)\s+(?:open|open\|filtered)\s+(?P<service>[\w\S]*)\s+(?P<reason>.* ttl [\d]+)\s*(?:\s*(?P<version>.*))$')
#-- Script output finding
p_script = re.compile(r'^\|[\s|\_](?P<script>.*)$')
#-- MAC address
p_mac = re.compile(r'MAC Address:\s(?P<mac_addr>(%s))\s\((?P<mac_vendor>.*)\)' % p_mac_elementary)
#-- OS detection (pattern order is important, the latter position the more precise and reliable the information is)
p_os = re.compile(r'(?:^Service Info: OS|^OS CPE|\s+OS|^OS details|smb-os-discovery|\|):\s(?P<os>[^;]+)')
#-- Network distance
p_network_dist = re.compile(r'Network Distance:\s(?P<hop_number>\d+)\shops?')
# Nmap Grepable output
#-- Target, Ports
p_grepable = re.compile(r'(?P<whole_line>^Host:\s.*)')
# Handful functions
def dottedquad_to_num(ip):
"""
Convert decimal dotted quad string IP to long integer
"""
return struct.unpack('!L',socket.inet_aton(ip))[0]
def num_to_dottedquad(n):
"""
Convert long int IP to dotted quad string
"""
return socket.inet_ntoa(struct.pack('!L',n))
def unique_match_from_list(list):
"""
Check the list for a potential pattern match
@param list : a list of potential matching groups
@rtype : return the string representation of the unique value that matched, or nothing if nothing matched
"""
result = ''
for item in list:
if item != None:
result = str(item)
return result
def extract_matching_pattern(regex, group_name, unfiltered_list):
"""
Return the desired group_name from a list of matching patterns
@param regex : a regular expression with named groups
@param group_name : the desired matching group name value
@param unfiltered_list : a list of matches
@rtype : the string value
"""
result = ''
filtered_list = list(filter(regex.search, unfiltered_list))
if len(filtered_list) == 1:
filtered_string = ''.join(filtered_list)
result = regex.search(filtered_string).group(group_name)
return result
class Host:
def __init__(self, ip, fqdn=''):
self.ip_dottedquad = ip
self.ip_num = dottedquad_to_num(ip)
self.fqdn = fqdn
self.rdns = ''
self.ports = []
self.os = ''
self.mac_address = ''
self.mac_address_vendor = ''
self.network_distance = ''
def add_port(self, port):
self.ports.append(port)
# Getters
def get_ip_num_format(self):
return str(self.ip_num)
def get_ip_dotted_format(self):
return str(self.ip_dottedquad)
def get_fqdn(self):
return str(self.fqdn)
def get_rdns_record(self):
return str(self.rdns)
def get_port_list(self):
return self.ports
def get_port_number_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_number())
return result
def get_port_protocol_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_protocol())
return result
def get_port_service_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_service())
return result
def get_port_version_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_version())
return result
def get_port_script_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_script())
return result
def get_os(self):
return str(self.os)
def get_mac_address(self):
return str(self.mac_address)
def get_mac_address_vendor(self):
return str(self.mac_addr |
iEngage/python-sdk | iengage_client/models/nlc.py | Python | apache-2.0 | 5,546 | 0.000541 | # coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class NLC(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, nlc_id=None, nlc_classifier_name=None, created_date=None, modified_date=None, classification=None):
"""
NLC - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'nlc_id': 'int',
'nlc_classifier_name': 'str',
'created_date': 'datetime',
'modified_date': 'datetime',
'classification': 'list[Bucket]'
}
self.attribute_map = {
'nlc_id': 'nlcId',
'nlc_classifier_name': 'nlcClassifierName',
'created_date': 'createdDate',
'modified_date': 'modifiedDate',
'classification': 'classification'
}
self._nlc_id = nlc_id
self._nlc_classifier_name = nlc_classifier_name
self._created_date = created_date
self._modified_date = modified_date
self._classification = classification
@property
def nlc_id(self):
"""
Gets the nlc_id of this NLC.
:return: The nlc_id of this NLC.
:rtype: int
"""
return self._nlc_id
@nlc_id.setter
def nlc_id(self, nlc_id):
"""
Sets the nlc_id of this NLC.
:param nlc_id: The nlc_id of this NLC.
:type: int
"""
self._nlc_id = nlc_id
@property
def nlc_classifier_name(self):
"""
Gets the nlc_classifier_name of this NLC.
:return: The nlc_classifier_name of this NLC.
:rtype: str
"""
return self._nlc_classifier_name
@nlc_classifier_name.setter
def nlc_classifier_name(self, nlc_classifier_name):
"""
Sets the nlc_classifier_name of this NLC.
:param nlc_classifier_name: The nlc_classifier_name of this NLC.
:type: str
"""
self._nlc_classifier_name = nlc_classifier_name
@property
def created_date(self):
"""
Gets the created_date of this NLC.
:return: The created_date of this NLC.
:rtype: datetime
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""
Sets the created_date of this NLC.
:param created_date: The created_date of this NLC.
:type: datetime
"""
self._created_date = created_date
@property
def modified_date(self):
"""
Gets the modified_date of this NLC.
:return: The modified_date of this NLC.
:rtype: datetime
"""
return se | lf._modified_date
@modified_date.setter
def modified_date(self, modified_date):
"""
Sets the modified_date of this NLC.
:param modified_date: The modified_date of this NLC.
:type: datetime
"""
self._modified_date = modified_date
@property
def classification(self):
"""
Gets the classification of this NLC.
:return: The classification of this NLC.
:rtype: list[Bucket]
"""
return s | elf._classification
@classification.setter
def classification(self, classification):
"""
Sets the classification of this NLC.
:param classification: The classification of this NLC.
:type: list[Bucket]
"""
self._classification = classification
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
CodethinkLabs/python-consonant | consonant/register/__init__.py | Python | gpl-2.0 | 6,463 | 0 | # Copyright (C) 2013 Codethink Limited.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Classes to access the Consonant register."""
import os
import yaml
from consonant.util import expressions
from consonant.util.phase import Phase
class RegisterFormatError(Exception):
"""Exception | for when a register file has an invalid format."""
def __init__(self, filename, msg):
self.filename = filename
self.msg = msg
def __str__(self):
return 'File "%s": %s' % (self.filename, self.msg)
class UnknownSchemaError(Exception):
"""Exception for when an unknown schema is looked up via the register."""
def __init__(self, schema):
| self.schema = schema
def __str__(self):
return '%s' % self.schema
class UnknownServiceError(Exception):
"""Exception for when an unknown service is looked up via the register."""
def __init__(self, service):
self.service = service
def __str__(self):
return '%s' % self.service
class Register(yaml.YAMLObject):
"""Class to access the system and user register."""
yaml_tag = u'!Register'
def __init__(self):
self.schemas = {}
self.services = {}
self._load_register_files()
def _load_register_files(self):
config_dirs = self._collect_config_dirs()
filenames = [os.path.join(x, 'consonant', 'register.yaml')
for x in config_dirs]
# first phase: load YAML data from register files
data = []
with Phase() as phase:
for filename in filenames:
if os.path.exists(filename):
try:
with open(filename) as f:
data.append(
(filename, yaml.load(f, Loader=yaml.CLoader)))
except Exception, e:
phase.error(e)
# second phase: validate the data
with Phase() as phase:
for filename, data in data:
schemas = data.get('schemas', {})
if not isinstance(schemas, dict):
phase.error(RegisterFormatError(
filename,
'Schemas are not specified as a dictionary'))
for key, val in schemas.iteritems():
if not isinstance(key, basestring):
phase.error(RegisterFormatError(
filename,
'Schema name "%s" is not a string' % key))
if not expressions.schema_name.match(key):
phase.error(RegisterFormatError(
filename, 'Schema name "%s" is invalid' % key))
if not isinstance(val, basestring):
phase.error(RegisterFormatError(
filename,
'Schema name "%s" is mapped to "%s", '
'which is not a string' % (key, val)))
self.schemas.update(schemas)
services = data.get('services', {})
if not isinstance(services, dict):
phase.error(RegisterFormatError(
filename,
'Services are not specified as a dictionary'))
for key, val in services.iteritems():
if not isinstance(key, basestring):
phase.error(RegisterFormatError(
filename,
'Service name "%s" is not a string' % key))
if not expressions.service_name.match(key):
phase.error(RegisterFormatError(
filename,
'Service name "%s" is invalid' % key))
if not isinstance(val, basestring):
phase.error(RegisterFormatError(
filename,
'Service name "%s" is mapped to "%s", '
'which is not a string' % (key, val)))
self.services.update(services)
def _collect_config_dirs(self):
config_dirs = []
if os.environ.get('XDG_CONFIG_HOME'):
config_dirs.append(os.environ.get('XDG_CONFIG_HOME'))
else:
if os.environ.get('HOME'):
config_dirs.append(
os.path.join(os.environ.get('HOME'), '.config'))
else:
config_dirs.append('~/')
if os.environ.get('XDG_CONFIG_DIRS'):
config_dirs.extend(os.environ.get('XDG_CONFIG_DIRS').split(':'))
else:
config_dirs.append(os.path.join('/etc', 'xdg'))
return reversed(config_dirs)
def schema_url(self, name):
"""Look up the schema URL for a schema name and return it.
Raises an UnknownSchemaError if no schema with this name is registered.
"""
if name in self.schemas:
return self.schemas[name]
else:
raise UnknownSchemaError(name)
def service_url(self, name):
"""Look up the service URL for a service name and return it.
Raises an UnknownServiceError if no service with this name is
registered.
"""
if name in self.services:
return self.services[name]
else:
raise UnknownServiceError(name)
@classmethod
def to_yaml(cls, dumper, register): # pragma: no cover
"""Return a YAML representation of a Register."""
return dumper.represent_mapping(
u'tag:yaml.org,2002:map', {
'schemas': register.schemas,
'services': register.services,
})
|
lunixbochs/glshim | test/util/run.py | Python | mit | 8,092 | 0.001977 | import argparse
import jinja2
import os
import signal
import subprocess
import sys
import traceback
from blessings import Terminal
from contextlib import contextmanager
signals = dict((k, v) for v, k in signal.__dict__.iteritems() if v.startswith('SIG'))
term = Terminal()
TEST_ROOT = os.getcwd()
env = jinja2.Environment(
trim_blocks=True,
lstrip_blocks=True,
loader=jinja2.FileSystemLoader(os.path.join(TEST_ROOT, 'util', 'template')),
)
@contextmanager
def chdir(d):
old = os.getcwd()
os.chdir(d)
yield
os.chdir(old)
def shell(*args, **kwargs):
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.communicate(kwargs.get('input', ''))
out = '\n'.join(((output[0] or '').strip(), (output[1] or '').strip()))
if p.returncode < 0:
sig = signals.get(-p.returncode)
if sig is not None:
out += '\n' + sig
return out.strip(), p.returncode
def walk(base):
for root, _, files in os.walk(base):
for name in files:
yield os.path.join(root, name)
class Test:
cmake_template = 'CMakeLists.j2'
build_dir = 'build/mock'
def __init__(self, path, tail):
self.path = path
self.name = os.path.splitext(tail)[0].strip('/')
self.exe = os.path.basename(self.name)
self.dir = self.name.rsplit(self.exe, 1)[0].strip('/')
self.ran = False
self.success = None
self.build_failed = False
self.output = ''
@classmethod
def find(cls, base, filt):
tests = []
for path in walk(base):
tail = path.replace(base, '', 1)
test = None
if path.endswith('.c'):
if os.path.basename(path).startswith('_'):
test = PureCTest(path, tail)
else:
test = Test(path, tail)
elif path.endswith('.py'):
test = PythonTest(path, tail)
else:
continue
if test and filt:
for f in filt:
if test.name.startswith(f):
break
else:
continue
tests.append(test)
return tests
@property
def status(self):
if self.output and ('ERROR' in self.output or 'Assertion failed' in self.output):
self.success = False
if self.build_failed:
return 'build failed'
elif not self.ran:
return 'skipped'
return 'pass' if self.success else 'fail'
@property
def status_color(self):
if self.build_failed:
return term.red
elif not self.ran:
return term.yellow
return term.green if self.success else term.red
def build(self, project):
junk_dir = os.path.join(TEST_ROOT, self.build_dir)
bin_dir = os.path.join(TEST_ROOT, 'bin', self.dir)
if not os.path.exists(junk_dir):
os.makedirs(junk_dir)
cmakelists = os.path.join(junk_dir, 'CMakeLists.txt')
t = env.get_template(self.cmake_template)
txt = t.render(
project=args.project,
exe=self.exe,
sources=self.path,
bin_dir=bin_dir,
util=os.path.join(TEST_ROOT, 'util'),
)
with open(cmakelists, 'w') as f:
f.write(txt)
out, status = shell('cmake', cmakelists)
if status | :
self.output = out
self.build_failed = True
return False
with chdir(junk_dir):
out, status = shell('make', '-j2')
if status:
self.output = out
self.build_failed = True
return False
tmp = os.path.join(bin_dir, 'tm | p')
out = os.path.join(bin_dir, self.exe)
if os.path.exists(out):
os.unlink(out)
os.rename(tmp, out)
return True
def run(self):
bin_dir = os.path.join(TEST_ROOT, 'bin', self.dir)
with chdir(bin_dir):
self.output, status = shell('./' + self.exe)
self.ran = True
self.success = (status == 0)
return self.success
def __repr__(self):
if self.ran:
return '<Test: {} ({})>'.format(self.name, self.status)
else:
return '<Test: {}>'.format(self.name)
class PureCTest(Test):
build_dir = 'build/pure'
cmake_template = 'CMakeLists_pure.j2'
class PythonTest(Test):
def build(self, project):
return True
def run(self):
with chdir(TEST_ROOT):
self.output, status = shell('python', self.path)
self.ran = True
self.success = (status == 0)
return False
def run(args):
tests = Test.find(args.base, args.tests)
tests.sort(key=lambda t: (t.__class__, t.name))
if not tests:
print 'No tests!'
return
step_fmt = lambda step: term.bold('[' + step + ']')
status_fmt = lambda test: term.bold('[' + test.status_color(test.status) + ']')
back = lambda mult: '\b' * mult
out = lambda *a: (sys.stdout.write(' '.join(str(s) for s in a)), sys.stdout.flush())
duplicate_errors = set()
for i, test in enumerate(tests):
headline = '[{}/{}] {} ['.format(i + 1, len(tests), test.name)
print term.bold(headline.ljust(79, '-')),
out(back(8) + term.bold('-') + step_fmt('build'))
try:
build = test.build(args.project)
except Exception:
test.build_failed = True
print
traceback.print_exc()
out(back(7) + step_fmt(' run '))
if not test.build_failed:
try:
success = test.run()
except Exception:
test.ran = True
success = test.success = False
print
traceback.print_exc()
out(back(max(7, len(test.status) + 3)) + term.bold('--') + status_fmt(test))
print
if test.output:
if test.build_failed:
if test.output in duplicate_errors:
continue
else:
duplicate_errors.add(test.output)
for line in test.output.split('\n'):
ASSERT = term.bold(term.red('Assertion failed'))
ERROR = term.bold(term.red('ERROR:'))
WARNING = term.bold(term.yellow('WARNING:'))
line = line.decode('utf8', 'replace')
line = line.replace('Assertion failed', ASSERT)
if line.startswith('ERROR'):
line = line.replace('ERROR:', ERROR, 1)
elif line.startswith('WARNING'):
line = line.replace('WARNING:', WARNING, 1)
if test.build_failed:
line = line.replace('error:', ERROR)
line = line.replace('warning:', WARNING)
print '> {}'.format(line.encode('utf8', 'replace'))
passed = sum(t.success for t in tests if t.ran)
total = sum(t.ran for t in tests)
results = '{} / {} passed, {} skipped '.format(passed, total, len(tests) - total)
if total > 0:
pc = passed / float(total) * 100
percent = '{:.2f}%'.format(pc)
if passed == total:
percent = term.green('100%')
elif pc < 75:
percent = term.red(percent)
else:
percent = term.yellow(percent)
print term.bold((results + '[{}]').format(percent).rjust(80 + len(term.green(''))))
print
if passed < len(tests):
return sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build and run tests.')
parser.add_argument('--project', help='project directory', default='.')
parser.add_argument('--base', help='test directories to search', required=True)
parser.add_argument('tests', help='test names to run (all by default)', nargs='*')
args = parser.parse_args()
run(args)
|
cbclab/MDT | mdt/gui/maps_visualizer/design/ui_TabGeneral.py | Python | lgpl-3.0 | 33,361 | 0.001828 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'TabGeneral.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TabGeneral(object):
def setupUi(self, TabGeneral):
TabGeneral.setObjectName("TabGeneral")
TabGeneral.resize(963, 704)
self.gridLayout = QtWidgets.QGridLayout(TabGeneral)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.scrollArea = QtWidgets.QScrollArea(TabGeneral)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, -289, 947, 1075))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_6 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_6.setContentsMargins(6, 6, 6, 6)
self.gridLayout_6.setHorizontalSpacing(0)
self.gridLayout_6.setVerticalSpacing(10)
self.gridLayout_6.setObjectName("gridLayout_6")
self.frame = CollapsablePanel(self.scrollAreaWidgetContents)
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame.setObjectName("frame")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_13 = CollapsablePanelHeader(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.verticalLayout_4.addWidget(self.label_13)
self.line_2 = QtWidgets.QFrame(self.frame)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_4.addWidget(self.line_2)
self.frame_3 = CollapsablePanelContent(self.frame)
self.frame_3.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_3.setObjectName("frame_3")
self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_3)
self.gridLayout_2.setContentsMargins(6, 6, 0, 0)
self.gridLayout_2.setHorizontalSpacing(6)
self.gridLayout_2.setVerticalSpacing(3)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(3, -1, -1, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_10 = QtWidgets.QLabel(self.frame_3)
self.label_10.setObjectName("label_10")
self.horizontalLayout_7.addWidget(self.label_10)
self.maximumDimension = QtWidgets.QLabel(self.frame_3)
self.maximumDimension.setObjectName("maximumDimension")
self.horizontalLayout_7.addWidget(self.maximumDimension)
self.gridLayout_2.addLayout(self.horizontalLayout_7, 0, 2, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(3, -1, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_9 = QtWidgets.QLabel(self.frame_3)
self.label_9.setObjectName("label_9")
self.horizontalLayout_6.addWidget(self.label_9)
self.maximumIndex = QtWidgets.QLabel(self.frame_3)
self.maximumIndex.setObjectName("maximumIndex")
self.horizontalLayout_6.addWidget(self.maximumIndex)
self.gridLayout_2.addLayout(self.horizontalLayout_6, 1, 2, 1, 1)
self.label_14 = QtWidgets.QLabel(self.frame_3)
self.label_14.setObjectName("label_14")
self.gridLayout_2.addWidget(self.label_14, 0, 0, 1, 1)
self.general_dimension = QtWidgets.QSpinBox(self.frame_3)
self.general_dimension.setObjectName("general_dimension")
self.gridLayout_2.addWidget(self.general_dimension, 0, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.frame_3)
self.label_16.setObjectName("label_16")
self.gridLayout_2.addWidget(self.label_16, 1, 0, 1, 1)
self.general_slice_index = QtWidgets.QSpinBox(self.frame_3)
self.general_slice_index.setObjectName("general_slice_index")
self.gridLayout_2.addWidget(self.general_slice_index, 1, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.frame_3)
self.label_15.setObjectName("label_15")
self.gridLayout_2.addWidget(self.label_15, 2, 0, 1, 1)
self.general_volume_index = QtWidgets.QSpinB | ox(self.frame_3)
self.general_volume_index.setObjectName("general_volume_index")
self.gridLayout_2.addWidget(self.general_volume_index, 2, 1, 1, 1)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setContentsMargins(3, -1, -1, -1)
| self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.label_25 = QtWidgets.QLabel(self.frame_3)
self.label_25.setObjectName("label_25")
self.horizontalLayout_10.addWidget(self.label_25)
self.maximumVolume = QtWidgets.QLabel(self.frame_3)
self.maximumVolume.setObjectName("maximumVolume")
self.horizontalLayout_10.addWidget(self.maximumVolume)
self.gridLayout_2.addLayout(self.horizontalLayout_10, 2, 2, 1, 1)
self.gridLayout_2.setColumnStretch(1, 1)
self.verticalLayout_4.addWidget(self.frame_3)
self.gridLayout_6.addWidget(self.frame, 0, 0, 1, 1)
self.general_Miscellaneous = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_Miscellaneous.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_Miscellaneous.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_Miscellaneous.setObjectName("general_Miscellaneous")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.general_Miscellaneous)
self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_8.setSpacing(0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.label_23 = CollapsablePanelHeader(self.general_Miscellaneous)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_23.setFont(font)
self.label_23.setObjectName("label_23")
self.verticalLayout_8.addWidget(self.label_23)
self.line_6 = QtWidgets.QFrame(self.general_Miscellaneous)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.verticalLayout_8.addWidget(self.line_6)
self.frame_10 = CollapsablePanelContent(self.general_Miscellaneous)
self.frame_10.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_10.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_10.setObjectName("frame_10")
self.formLayout = QtWidgets.QFormLayout(self.frame_10)
self.formLayout.setContentsMargins(6, 6, 0, 0)
self.formLayout.setHorizontalSpacing(6)
self.formLayout.setVerticalSpacing(3)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.frame_10)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.general_colormap = QtWidgets.QComboBox(self.frame_10)
self.general_colormap.setObjectName("general_colormap")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.general_colormap)
self.label_2 = QtWidgets.QLabel(self.frame_10)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidg |
shankari/e-mission-server | bin/debug/fix_usercache_processing.py | Python | bsd-3-clause | 1,917 | 0.010955 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
from future import standard_library
standard_library.install_aliases()
from builtins import *
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_han | dler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
copy_to_usercache()
move_to_long_term()
def copy_to_usercache():
# Step 1: Copy data back to user cache
error_it = edb.get_timeseries_error_db().find()
uc = edb.get_usercache_db()
te = edb.get_timeseries_error_db()
logging.info("Found %d errors in this round" % edb.get_timeseries_error_db.estimate_document_count())
for error in error_it:
| logging.debug("Copying entry %s" % error["metadata"])
save_result = uc.save(error)
remove_result = te.remove(error["_id"])
logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
logging.info("step copy_to_usercache DONE")
def move_to_long_term():
cache_uuid_list = enua.UserCache.get_uuid_list()
logging.info("cache UUID list = %s" % cache_uuid_list)
for uuid in cache_uuid_list:
logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
uh.moveToLongTerm()
if __name__ == '__main__':
fix_usercache_errors()
|
timm-tem/RPi_mediaserver | piface/output7off.py | Python | gpl-3.0 | 966 | 0.005176 | # THIS IS THE PYTHON CODE FOR PiFACE OUTPUT OFF
#
# Copyright (C) 2014 Tim Massey
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Yo | u should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Also add information on how to contact you by electronic and paper mail.
#!/usr/bin/python
im | port pifacedigitalio
pifacedigital = pifacedigitalio.PiFaceDigital()
pifacedigital.output_pins[7].turn_off()
|
kampanita/pelisalacarta | python/main-classic/servers/flashx.py | Python | gpl-3.0 | 2,876 | 0.003133 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para flashx
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import logger
from core import jsunpack
from core import scrapertools
headers = [['User-Agent', 'Mozilla/5.0']]
def test_video_exists(page_url):
logger.info("pelisalacarta.servers.flashx test_video_exists(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url, headers=headers)
if 'FILE NOT FOUND' in data:
return False, "[FlashX] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("pelisalacarta.servers.flashx url=" + page_url)
# Lo pide una vez
data = scrapertools.cache_page(page_url, headers=headers)
# Si salta aviso, se carga la pagina de comprobacion y luego la inicial
if "You try to access this video with Kodi" in data:
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
data = scrapertools.cache_page(url_reload, headers=headers)
data = scrapertools.cache_page(page_url, headers=headers)
match = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")
if match.startswith("eval"):
match = jsunpack.unpac | k(match)
# Extrae la URL
# {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
video_urls = []
media_urls = scrapertools.find_multiple_matches(match, '\{file\:"([^"]+)"')
for media_u | rl in media_urls:
if not media_url.endswith("png"):
video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url])
for video_url in video_urls:
logger.info("pelisalacarta.servers.flashx %s - %s" % (video_url[0], video_url[1]))
return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
# Añade manualmente algunos erróneos para evitarlos
encontrados = set()
devuelve = []
# http://flashx.tv/z3nnqbspjyne
# http://www.flashx.tv/embed-li5ydvxhg514.html
patronvideos = 'flashx.(?:tv|pw)/(?:embed-|)([a-z0-9A-Z]+)'
logger.info("pelisalacarta.servers.flashx find_videos #" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[flashx]"
url = "http://www.flashx.tv/playvid-%s.html" % match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'flashx'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
|
spulec/PyQS | example/api/example.py | Python | mit | 1,239 | 0 | import logging
import json
from flask import Blueprint, current_app, request, jsonify
from api.helpers import construct_response
from api import tasks
blueprint = Blueprint('api', __name__)
@blueprint.route('/example', methods=['POST'])
def example():
queues = current_app.config.get('QUEUES')
queue_name = queues['example']['name']
payload = {}
data = request.get_data()
try:
message_data = json.loads(data)
except (TypeError, AttributeError, ValueError) as e:
status = 500
return construct_response(
'Your payload does not appear to be valid json!', data, status)
try:
tasks.process.delay(message=message_data)
response_message = (
f'Successfully submitted message to queue {queue_name}'
)
| status = 200
except Exception as e:
response_message = (
f'Something went wrong submitting message '
'to queue {queue_name}! {e}'
)
status = 500
return construct_response(response_mes | sage, message_data, status)
@blueprint.route('/health', methods=['GET'])
def health():
return jsonify('OK'), 200
@blueprint.route('/', methods=['GET'])
def hello():
return 'Hello!'
|
pozytywnie/django-facebook-auth | facebook_auth/utils.py | Python | mit | 2,967 | 0.000674 | import json
try:
from urllib.parse import urljoin
from urllib.parse import urlencode
except ImportError:
from urlparse import urljoin
from urllib import urlencode
import facepy
from django.conf import settings
from django.utils import ti | mezone
from django.conf import settings
from django.core import signing
from django.core.urlresolvers import reverse
from django.utils import encoding
from . facepy_wrapper import utils
GRAPH_MAX_TRIES = 3
FACEBOOK_TIMEOUT = getattr(settings, 'FACEBOOK_AUTH_B | ACKEND_FACEBOOK_TIMEOUT',
timezone.timedelta(seconds=20).total_seconds())
FACEBOOK_API_VERSION = getattr(settings, 'FACEBOOK_API_VERSION', '2.1')
class InvalidNextUrl(Exception):
pass
class Next(object):
salt = 'facebook_auth.urls.Next'
def encode(self, data):
data = self.dumps(data)
return urlencode({'next': data})
def decode(self, data):
try:
return self.loads(data)
except signing.BadSignature:
raise InvalidNextUrl()
def dumps(self, obj):
data = json.dumps(
obj, separators=(',', ':'), sort_keys=True).encode('utf-8')
base64d = signing.b64_encode(data)
return signing.Signer(salt=self.salt).sign(base64d)
def loads(self, s):
base64d = encoding.force_bytes(
signing.Signer(salt=self.salt).unsign(s))
data = signing.b64_decode(base64d)
return json.loads(data.decode('utf-8'))
def redirect_uri(next, close):
return urljoin(
settings.FACEBOOK_CANVAS_URL,
reverse('facebook-auth-handler') + "?" +
Next().encode({'next': next, 'close': close})
)
def get_from_graph_api(graphAPI, query):
for i in range(GRAPH_MAX_TRIES):
try:
return graphAPI.get(query)
except facepy.FacepyError as e:
if i == GRAPH_MAX_TRIES - 1 or getattr(e, 'code', None) != 1:
raise
def get_application_graph(version=None):
version = version or FACEBOOK_API_VERSION
token = (facepy.utils
.get_application_access_token(settings.FACEBOOK_APP_ID,
settings.FACEBOOK_APP_SECRET,
api_version=version))
return get_graph(token)
def get_graph(*args, **kwargs):
version = FACEBOOK_API_VERSION
return utils.get_graph(*args, version=version, timeout=FACEBOOK_TIMEOUT, **kwargs)
def get_long_lived_access_token(access_token):
return utils.get_long_lived_access_token(
access_token=access_token,
client_id=settings.FACEBOOK_APP_ID,
client_secret=settings.FACEBOOK_APP_SECRET,
)
def get_access_token(code=None, redirect_uri=None):
return utils.get_access_token(
code=code,
redirect_uri=redirect_uri,
client_id=settings.FACEBOOK_APP_ID,
client_secret=settings.FACEBOOK_APP_SECRET,
timeout=FACEBOOK_TIMEOUT,
)
|
ghajba/CodingContestConfig | python/ContestMain.py | Python | mit | 3,187 | 0.001883 | import os
from glob import glob
from filecmp import cmp
from ContestCode import dummy_message
from ContestCode import do_level_1
from ContestCode import do_level_2
from ContestCode import do_level_3
from ContestCode import do_level_4
PATH = "."
FOLDER_PREFIX = "level"
DEFAULT_FOLDER = PATH + '/' + FOLDER_PREFIX
def write_result_to_location(some_result_value, output_location, output_file_name):
""" Writes a result value to the given file at the given location!"""
write_result(some_result_value, output_location + '/' + output_file_name)
def write_result(result_value, output_file_name):
with open(output_file_name, 'w') as content_file:
content_file.write(result_value)
def read_input_file_from_location(input_location, input_file_name):
""" Reads an input file and returns it contents as a String"""
return read_input_file(input_location + '/' + input_file_name)
def read_input_file(input_file_with_path):
with open(input_file_with_path, 'r') as content_file:
return content_file.read()
def create_folder(folder_to_create):
""" Creates a folder if it does not exist """
if not os.path.exists(folder_to_create):
os.makedirs(folder_to_create)
return folder_to_create
def create_default_folder(folder_postfix):
return create_folder(DEFAULT_FOLDER + folder_postfix)
def compare_file_contents(file_with_path_1, file_with_path2):
""" compares two files and return true if they have the same content, false if not """
return cmp(file_with_path_1, file_with_path2)
def compare_file_with_content(file_with_path, expected_content):
""" looks if a file contains the expectedContent """
return expected_content == read_input_file(file_with_path)
def do_setup_testing():
""" tests the setup of the notebook """
folder_name = create_default_folder('0')
write_result_to_location("foo", folder_name, 'testInput.txt')
input_content = read_input_file_from_location(folder_name, 'testInput.txt')
write_result_to_location(dummy_message(input_content), folder_name, 'testOutput.txt')
print compare_file_contents(folder_name + '/testInput.txt', folder_name + '/testOutput.txt')
print compare_file_with_content(folder_name + '/testOutput.txt', dummy_message(input_content))
do_it_nasty(folder_name, False, dummy_message)
do_it_nasty(folder_name, True, dummy_message)
def do_it_nasty(folder_name, level_completed, level_function):
create_folder(folder_name)
for content_file in glob(folder_name + '/' + "*.in"):
input_content = read_input_file(content_file)
result = level_function(input_content)
out_file_name = content_file.replace(".in", ".out")
if not level_completed:
write_result(result, out_file_name)
else:
expected = read_input_file(out_file_name)
assert e | xpected == result, "Actual result [%s] is not as expected [%s]!" % (result, expec | ted)
if __name__ == '__main__':
do_setup_testing()
do_it_nasty("level1", False, do_level_1)
do_it_nasty("level2", False, do_level_2)
do_it_nasty("level3", False, do_level_3)
do_it_nasty("level4", False, do_level_4)
|
martjanz/shub2sqlite | import.py | Python | mit | 2,628 | 0.036149 | import sys, getopt
import os.path
import sqlite3, json
import requests
import config # config file
from scrapinghub import Connection
""" Scrapinghub2Sqlite
1. Create sqlite database (if not exists yet, obviously)
a. Create tables
2. get all projects from scrapinghub
3. check for new projects (not stored yet in local db)
4. for each project
a. insert (only new) project data in shub_projects
a. get new jobs (not stored yet in local db)
i. insert job info in shub_jobs
ii. for each job
I. insert items in shub_items
"""
def getJobItems(job_id):
url = 'https://storage.scrapinghub.com/items/' + job_id \
+ '?format=json&meta=_key&meta=_ts&apikey=' + config.apikey
print url
res = requests.get(url)
return res.json()
def getJobs(db, jobs):
for job in jobs:
cursor = db.cursor()
cursor.execute('''SELECT *
FROM shub_jobs
WHERE shub_id = ?''',\
(job.id,))
# insert jobs if not exists
if len(cursor.fetchall()) == 0:
print "Guardando job " + job.id + "..."
jsonString = json.dumps(job.info)
db.execute('''INSERT INTO shub_jobs (shub_id, raw_json)
VALUES (?, ?)''',\
[job.id, jsonString])
for item in getJobItems(job.id):
jsonString = json.dumps(item)
# insert items
db.execute('''INSERT INTO shub_items (shub_job_id, raw_json)
VALUES (?, ?)''', [job.id, jsonString])
cursor.close()
db.commit()
def main(argv):
getDeleted = False
opts, args = getopt.getopt(argv, "d", ["deleted"])
for opt, arg in opts:
if opt in ("-d", "--deleted"):
getDeleted = True
# create schema if db not exists
if not os.path.isfile(config.database):
createSchema = True
else:
createSchema = False
connection = Connection(config.apikey)
# connect to database
db = sqlite3.connect(config.database)
cursor = db.cursor()
if createSchema:
# open schema definition file
file = open("schema.sql", 'r')
for line in file.readlines():
cursor.execute(line)
db.commit()
# get projects
project_ids = connection.project_ids()
for project_ | id in project_ids:
project = connection[project_id]
cursor.execute('''SELECT *
FROM shub_projects
WHERE shub_id = ?''', (project.id,))
# insert if no | t exist
if len(cursor.fetchall()) == 0:
print "Guardando proyecto " + str(project.id) + "..."
db.execute('''INSERT INTO shub_projects (shub_id)
VALUES (?)''',\
[project.id])
# get finished jobs
getJobs(db, project.jobs(state='finished'))
# get deleted jobs
if getDeleted:
getJobs(db, project.jobs(state='deleted'))
db.close()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) |
mindbody/API-Examples | SDKs/Python/swagger_client/models/get_waitlist_entries_request.py | Python | bsd-2-clause | 10,823 | 0.000092 | # coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetWaitlistEntriesRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'class_ids': 'list[int]',
'class_schedule_ids': 'list[int]',
'client_ids': 'list[str]',
'hide_past_entries': 'bool',
'waitlist_entry_ids': 'list[int]',
'limit': 'int',
'offset': 'int'
}
attribute_map = {
'class_ids': 'ClassIds',
'class_schedule_ids': 'ClassScheduleIds',
'client_ids': 'ClientIds',
'hide_past_entries': 'HidePastEntries',
'waitlist_entry_ids': 'WaitlistEntryIds',
'limit': 'Limit',
'offset': 'Offset'
}
def __init__(self, class_ids=None, class_schedule_ids=None, client_ids=None, hide_past_entries=None, waitlist_entry_ids=None, limit=None, offset=None): # noqa: E501
"""GetWaitlistEntriesRequest - a model defined in Swagger""" # noqa: E501
self._class_ids = None
self._class_schedule_ids = None
self._client_ids = None
self._hide_past_entries = None
self._waitlist_entry_ids = None
self._limit = None
self._offset = None
self.discriminator = None
if class_ids is not None:
self.class_ids = class_ids
if class_schedule_ids is not None:
self.class_schedule_ids = class_schedule_ids
if client_ids is not None:
self.client_ids = client_ids
if hide_past_entries is not None:
self.hide_past_entries = hide_past_entries
if waitlist_entry_ids is not None:
self.waitlist_entry_ids = waitlist_entry_ids
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
@property
def class_ids(self):
"""Gets the class_ids of this GetWaitlistEntriesRequest. # noqa: E501
The requested class IDs. If a class ID is present, the request automatically disregards any class schedule IDs in the request. <br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all ClassIds** # noqa: E501
:return: The class_ids of this GetWaitlistEntriesRequest. # noqa: E501
:rtype: list[int]
"""
return self._class_ids
@class_ids.setter
def class_ids(self, class_ids):
"""Sets the class_ids of this GetWaitlistEntriesRequest.
The requested class IDs. If a class ID is present, the request automatically disregards any class schedule IDs in the request. <br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all ClassIds** # noqa: E501
:param class_ids: The class_ids of this GetWaitlistEntriesRequest. # noqa: E501
:type: list[int]
"""
self._class_ids = class_ids
@property
def class_sch | edule_ids(self):
"""Gets the class_schedule_ids of this GetWaitlistEntriesRequest. # noqa: E501
The requested class schedule IDs. If a class ID is present, the request automatically disregards any class schedule IDs in the request.<br /> Either `ClassSche | duleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all ClassScheduleIds** # noqa: E501
:return: The class_schedule_ids of this GetWaitlistEntriesRequest. # noqa: E501
:rtype: list[int]
"""
return self._class_schedule_ids
@class_schedule_ids.setter
def class_schedule_ids(self, class_schedule_ids):
"""Sets the class_schedule_ids of this GetWaitlistEntriesRequest.
The requested class schedule IDs. If a class ID is present, the request automatically disregards any class schedule IDs in the request.<br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all ClassScheduleIds** # noqa: E501
:param class_schedule_ids: The class_schedule_ids of this GetWaitlistEntriesRequest. # noqa: E501
:type: list[int]
"""
self._class_schedule_ids = class_schedule_ids
@property
def client_ids(self):
"""Gets the client_ids of this GetWaitlistEntriesRequest. # noqa: E501
The requested client IDs.<br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all ClientIds** # noqa: E501
:return: The client_ids of this GetWaitlistEntriesRequest. # noqa: E501
:rtype: list[str]
"""
return self._client_ids
@client_ids.setter
def client_ids(self, client_ids):
"""Sets the client_ids of this GetWaitlistEntriesRequest.
The requested client IDs.<br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all ClientIds** # noqa: E501
:param client_ids: The client_ids of this GetWaitlistEntriesRequest. # noqa: E501
:type: list[str]
"""
self._client_ids = client_ids
@property
def hide_past_entries(self):
"""Gets the hide_past_entries of this GetWaitlistEntriesRequest. # noqa: E501
When `true`, indicates that past waiting list entries are hidden from clients.<br /> When `false`, indicates that past entries are not hidden from clients.<br /> Default: **false** # noqa: E501
:return: The hide_past_entries of this GetWaitlistEntriesRequest. # noqa: E501
:rtype: bool
"""
return self._hide_past_entries
@hide_past_entries.setter
def hide_past_entries(self, hide_past_entries):
"""Sets the hide_past_entries of this GetWaitlistEntriesRequest.
When `true`, indicates that past waiting list entries are hidden from clients.<br /> When `false`, indicates that past entries are not hidden from clients.<br /> Default: **false** # noqa: E501
:param hide_past_entries: The hide_past_entries of this GetWaitlistEntriesRequest. # noqa: E501
:type: bool
"""
self._hide_past_entries = hide_past_entries
@property
def waitlist_entry_ids(self):
"""Gets the waitlist_entry_ids of this GetWaitlistEntriesRequest. # noqa: E501
The requested waiting list entry IDs.<br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all WaitlistEntryIds** # noqa: E501
:return: The waitlist_entry_ids of this GetWaitlistEntriesRequest. # noqa: E501
:rtype: list[int]
"""
return self._waitlist_entry_ids
@waitlist_entry_ids.setter
def waitlist_entry_ids(self, waitlist_entry_ids):
"""Sets the waitlist_entry_ids of this GetWaitlistEntriesRequest.
The requested waiting list entry IDs.<br /> Either `ClassScheduleIds`, `ClientIds`, `WaitlistEntryIds`, or `ClassIds` is required; the others become optional.<br /> Default: **all WaitlistEntryIds** # noqa: E501
:param waitlist_entry_ids: The waitlist_entry_ids of this GetWaitlistEntriesRequest. # noqa: E501
:type: list[int]
"""
self._waitlist_entry_ids = waitlist_entry_ids
@property
def limit(self):
"""Gets the limit of this GetWaitlistEntriesRequest. # noqa: E501
Numbe |
jdemel/gr-misc | examples/uhd_fft_qt.py | Python | gpl-3.0 | 16,000 | 0.006938 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: UHD FFT Qt
# Author: Johannes Demel
# Generated: Wed Jan 29 13:51:16 2014
##################################################
from PyQt4 import Qt
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import PyQt4.Qwt5 as Qwt
import sip
import sys
import threading
import time
class uhd_fft_qt(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "UHD FFT Qt")
Qt.QWidget.__init__(self)
self.setWindowTitle("UHD FFT Qt")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "uhd_fft_qt")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.z_info = z_info = {"mboard_id":"id","mboard_serial":"serial","rx_serial":"rx","rx_subdev_name":"subname", "rx_subdev_spec":"spec","rx_antenna":"antenna"}
self.usrp_serial = usrp_serial = z_info["mboard_serial"]
self.usrp_id = usrp_id = z_info["mboard_id"]
self.db_spec = db_spec = z_info["rx_subdev_spec"]
self.db_serial = db_serial = z_info["rx_serial"]
self.db_name = db_name = z_info["rx_subdev_name"]
self.db_antenna = db_antenna = z_info["rx_antenna"]
self.catch_result = catch_result = uhd.tune_result()
self.usrp_type = usrp_type = "usrp2"
self.usrp_text = usrp_text = usrp_id + " (" + usrp_serial + ")"
self.master_clock_rate = master_clock_rate = 40e6
self.db_text = db_text = db_name + " (" + db_serial + " ," + db_spec + " ," + db_antenna + ")"
self.actual_rf = actual_rf = catch_result.actual_rf_freq
self.actual_dsp = actual_dsp = catch_result.actual_dsp_freq
self.uhd_version = uhd_version = uhd.get_version_string()
self.samp_rate = samp_rate = 10e6
self.rf_label = rf_label = actual_rf
self.myzero = myzero = 0
self.gain = gain = 50
self.dsp_label = dsp_label = actual_dsp
self.dev_args = dev_args = "type=" + usrp_type + ",master_clock_rate=" + str(master_clock_rate) + ", recv_buff_size=32768e6"
self.center_freq = center_freq = 900e6
self.a_usrp = a_usrp = usrp_text
self.a_db_label = a_db_label = db_text
##################################################
# Blocks
##################################################
self._samp_rate_tool_bar = Qt.QToolBar(self)
self._samp_rate_tool_bar.addWidget(Qt.QLabel("Sample Rate"+": "))
self._samp_rate_line_edit = Qt.QLineEdit(str(self.samp_rate))
self._samp_rate_tool_bar.addWidget(self._samp_rate_line_edit)
self._samp_rate_line_edit.returnPressed.connect(
lambda: self.set_samp_rate(eng_notation.str_to_num(self._samp_rate_line_edit.text().toAscii())))
self.top_grid_layout.addWidget(self._samp_rate_tool_bar, 4, 0, 1, 3)
self._gain_layout = Qt.QVBoxLayout()
self._gain_tool_bar = Qt.QToolBar(self)
self._gain_layout.addWidget(self._gain_tool_bar)
self._gain_tool_bar.addWidget(Qt.QLabel("Gain"+": "))
self._gain_counter = Qwt.QwtCounter()
self._gain_counter.setRange(0, 100, 1)
self._gain_counter.setNumButtons(2)
self._gain_counter.setValue(self.gain)
self._gain_tool_bar.addWidget(self._gain_counter)
self._gain_counter.valueChanged.connect(self.set_gain)
self._gain_slider = Qwt.QwtSlider(None, Qt.Qt.Horizontal, Qwt.QwtSlider.BottomScale, Qwt.QwtSlider.BgSlot)
self._gain_slider.setRange(0, 100, 1)
self._gain_slider.setValue(self.gain)
self._gain_slider.setMinimumWidth(200)
self._gain_slider.valueChanged.connect(self.set_gain)
self._gain_layout.addWidget(self._gain_slider)
self.top_grid_layout.addLayout(self._gain_layout, 5, 0, 1, 5)
self._center_freq_tool_bar = Qt.QToolBar(self)
self._center_freq_tool_bar.addWidget(Qt.QLabel("Center Frequency"+": "))
self._center_freq_line_edit = Qt.QLineEdit(str(self.center_freq))
self._center_freq_tool_bar.addWidget(self._center_freq_line_edit)
self._center_freq_line_edit.returnPressed.connect(
lambda: self.set_center_freq(eng_notation.str_to_num(self._center_freq_line_edit.text().toAscii())))
self.top_grid_layout.addWidget(self._center_freq_tool_bar, 4, 3, 1, 2)
self.usrp_dev = uhd.usrp_source(
device_addr=dev_args,
stream_args=uhd.stream_args(
cpu_format="fc32",
args="calibration-file=/home/johannes/tests/calibration-rx_B210_150N15_FE-RX2_integrated_TX-RX_1387571801.csv",
channels=range(1),
),
)
self.usrp_dev.set_samp_rate(samp_rate)
self.usrp_dev.set_center_freq(center_freq, 0)
self.usrp_dev.set_gain(gain, 0)
self.usrp_dev.set_antenna("RX2", 0)
self.z_info = val = self.usrp_dev.get_usrp_info(0)
def _z_info_probe():
notset = True
while notset:
try:
self.set_z_info(self.z_info)
notset = False
except:
notset = True
time.sleep(1.0/10.0)
self._z_info_thread = threading.Thread(target=_z_info_probe)
self._z_info_thread.daemon = True
self._z_info_thread.start()
self._uhd_version_tool_bar = Qt.QToolBar(self)
self._uhd_version_tool_bar.addWidget(Qt.QLabel("UHD"+": "))
self._uhd_version_label = Qt.QLabel(str(self.uhd_version))
self._uhd_version_tool_bar.addWidget(self._uhd_version_label)
self.top_grid_layout.addWidget(self._uhd_version_tool_bar, 3, 0, 1, 1)
self._rf_label_tool_bar = Qt.QToolBar(self)
self._rf_label_tool_bar.addWidget(Qt.QLabel("RF Freq"+": "))
self._rf_label_label = Qt.QLabel(str(self.rf_label))
self._rf_label_tool_bar.addWidget(self._rf_label_label)
self.top_grid_layout.addWidget(self._rf_label_tool_bar, 3, 3, 1, 1)
self.qtgui_sink_x_0 = qtgui.sink_c(
1024, #fftsize
firdes.WIN_BLACKMAN_hARRIS, #wintype
center_freq, #fc
samp_rate, #bw
"QT GUI Plot", #name
True, #plotfreq
True, #plotwaterfall
True, #plottime
True, #plotconst
)
self.qtgui_sink_x_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_sink_x_0_win, 0, 0, 3, 5)
|
self._dsp_label_ | tool_bar = Qt.QToolBar(self)
self._dsp_label_tool_bar.addWidget(Qt.QLabel("DSP Freq"+": "))
self._dsp_label_label = Qt.QLabel(str(self.dsp_label))
self._dsp_label_tool_bar.addWidget(self._dsp_label_label)
self.top_grid_layout.addWidget(self._dsp_label_tool_bar, 3, 4, 1, 1)
self.catch_result = val = self.usrp_dev.set_center_freq(center_freq, myzero)
def _catch_result_probe():
notset = True
while notset:
try:
self.set_catch_result(self.catch_result)
notset = False
|
annapowellsmith/openpresc | openprescribing/frontend/management/commands/send_monthly_alerts.py | Python | mit | 7,849 | 0.000382 | # -*- coding: utf-8 -*-
import logging
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.db.models import Q
from frontend.models import EmailMessage
from frontend.models import ImportLog
from frontend.models import OrgBookmark
from frontend.models import Profile
from frontend.models import SearchBookmark
from frontend.models import User
from common.alert_utils import EmailErrorDeferrer
from frontend.views import bookmark_utils
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Send the monthly bookmark alert emails.

    With no arguments, emails every active user once per approved bookmark
    for the latest prescribing month.  With ``--recipient-email`` plus a
    CCG/practice (org alerts) or ``--url``/``--search-name`` (search
    alerts), sends a single test email instead of a real batch.
    """
    args = ''
    help = ''' Send monthly emails based on bookmarks. With no arguments, sends
    an email to every user for each of their bookmarks, for the
    current month. With arguments, sends a test email to the specified
    user for the specified organisation.'''
    def add_arguments(self, parser):
        """Register CLI options for batch filtering and test sends."""
        parser.add_argument(
            '--recipient-email',
            help=('A single alert recipient to which the batch should be sent')
        )
        parser.add_argument(
            '--recipient-email-file',
            help=('The subset of alert recipients to which the batch should '
                  'be sent. One email per line.'))
        parser.add_argument(
            '--skip-email-file',
            help=('The subset of alert recipients to which the batch should '
                  'NOT be sent. One email per line.'))
        parser.add_argument(
            '--ccg',
            help=('If specified, a CCG code for which a test alert should be '
                  'sent to `recipient-email`')
        )
        parser.add_argument(
            '--practice',
            help=('If specified, a Practice code for which a test alert '
                  'should be sent to `recipient-email`'))
        parser.add_argument(
            '--search-name',
            help=('If specified, a name (could be anything) for a test search '
                  'alert about `url` which should be sent to '
                  '`recipient-email`'))
        parser.add_argument(
            '--url',
            help=('If specified, a URL for a test search '
                  'alert with name `search-name` which should be sent to '
                  '`recipient-email`'))
        parser.add_argument(
            '--max_errors',
            help='Max number of permitted errors before aborting the batch',
            default=3)
    def get_org_bookmarks(self, now_month, **options):
        """Get approved OrgBookmarks for active users who have not been sent a
        message tagged with `now_month`.

        Three modes, in priority order: a single in-memory test bookmark
        (``--recipient-email`` with ``--ccg``/``--practice``); the real
        bookmarks restricted to an explicit recipient list; or the full
        batch, optionally minus the addresses in ``--skip-email-file``.
        """
        # Exclude anyone already sent a 'measures' email for this month.
        query = (
            Q(approved=True, user__is_active=True) &
            ~Q(user__emailmessage__tags__contains=['measures', now_month]))
        if options['recipient_email'] and (
                options['ccg'] or options['practice']):
            # Test mode: build an unsaved bookmark for a throwaway user.
            dummy_user = User(email=options['recipient_email'], id='dummyid')
            dummy_user.profile = Profile(key='dummykey')
            bookmarks = [OrgBookmark(
                user=dummy_user,
                pct_id=options['ccg'],
                practice_id=options['practice']
            )]
            logger.info("Created a single test org bookmark")
        elif options['recipient_email'] or options['recipient_email_file']:
            recipients = []
            if options['recipient_email_file']:
                with open(options['recipient_email_file'], 'r') as f:
                    recipients = [x.strip() for x in f]
            else:
                recipients = [options['recipient_email']]
            query = query & Q(user__email__in=recipients)
            bookmarks = OrgBookmark.objects.filter(query)
            logger.info("Found %s matching org bookmarks" % bookmarks.count())
        else:
            bookmarks = OrgBookmark.objects.filter(query)
            if options['skip_email_file']:
                with open(options['skip_email_file'], 'r') as f:
                    skip = [x.strip() for x in f]
                bookmarks = bookmarks.exclude(user__email__in=skip)
            logger.info("Found %s matching org bookmarks" % bookmarks.count())
        return bookmarks
    def get_search_bookmarks(self, now_month, **options):
        """Like get_org_bookmarks, but for saved-search ('analyse') alerts.

        Test mode is triggered by ``--recipient-email`` plus ``--url``.
        """
        query = (
            Q(approved=True, user__is_active=True) &
            ~Q(user__emailmessage__tags__contains=['analyse', now_month]))
        if options['recipient_email'] and options['url']:
            # Test mode: unsaved bookmark for a throwaway user.
            dummy_user = User(email=options['recipient_email'], id='dummyid')
            dummy_user.profile = Profile(key='dummykey')
            bookmarks = [SearchBookmark(
                user=dummy_user,
                url=options['url'],
                name=options['search_name']
            )]
            logger.info("Created a single test search bookmark")
        elif not options['recipient_email']:
            bookmarks = SearchBookmark.objects.filter(query)
            logger.info(
                "Found %s matching search bookmarks" % bookmarks.count())
        else:
            query = query & Q(user__email=options['recipient_email'])
            bookmarks = SearchBookmark.objects.filter(query)
            logger.info(
                "Found %s matching search bookmarks" % bookmarks.count())
        return bookmarks
    def validate_options(self, **options):
        """Reject inconsistent test-mode option combinations early."""
        if ((options['url'] or options['ccg'] or options['practice']) and
                not options['recipient_email']):
            raise CommandError(
                "You must specify a test recipient email if you want to "
                "specify a test CCG, practice, or URL")
        if options['url'] and (options['practice'] or options['ccg']):
            raise CommandError(
                "You must specify either a URL, or one of a ccg or a practice"
            )
    def send_org_bookmark_email(self, org_bookmark, now_month, options):
        """Render and send one org alert; image-rendering failures are
        logged rather than aborting the batch."""
        # For test bookmarks the org comes from CLI options, not the DB.
        stats = bookmark_utils.InterestingMeasureFinder(
            practice=org_bookmark.practice or options['practice'],
            pct=org_bookmark.pct or options['ccg']
        ).context_for_org_email()
        try:
            msg = bookmark_utils.make_org_email(org_bookmark, stats, tag=now_month)
            # Persisting via EmailMessage records the send (and its tags).
            msg = EmailMessage.objects.create_from_message(msg)
            msg.send()
            logger.info(
                "Sent org bookmark alert to %s about %s" % (
                    msg.to, org_bookmark.id))
        except bookmark_utils.BadAlertImageError as e:
            logger.exception(e)
    def send_search_bookmark_email(self, search_bookmark, now_month):
        """Render and send one saved-search alert; image failures are
        logged rather than aborting the batch."""
        try:
            recipient_id = search_bookmark.user.id
            msg = bookmark_utils.make_search_email(search_bookmark, tag=now_month)
            msg = EmailMessage.objects.create_from_message(msg)
            msg.send()
            logger.info(
                "Sent search bookmark alert to %s about %s" % (
                    recipient_id, search_bookmark.id))
        except bookmark_utils.BadAlertImageError as e:
            logger.exception(e)
    def handle(self, *args, **options):
        """Entry point: send all due org and search alerts, tolerating up
        to ``--max_errors`` failures via EmailErrorDeferrer."""
        self.validate_options(**options)
        # Tag emails with the latest prescribing data month (YYYY-MM-DD).
        now_month = ImportLog.objects.latest_in_category(
            'prescribing').current_at.strftime('%Y-%m-%d').lower()
        with EmailErrorDeferrer(options['max_errors']) as error_deferrer:
            for org_bookmark in self.get_org_bookmarks(now_month, **options):
                error_deferrer.try_email(
                    self.send_org_bookmark_email,
                    org_bookmark,
                    now_month,
                    options
                )
            for search_bookmark in self.get_search_bookmarks(now_month, **options):
                error_deferrer.try_email(
                    self.send_search_bookmark_email,
                    search_bookmark,
                    now_month
                )
|
KMSkelton/cgm_flask | schema.py | Python | mit | 920 | 0.003261 | from marshmallow import post_load
from marshmallow_sqlalchemy import field_for
from m | odels import ma, User, Device, Measurement
class UserSchema(ma.Schema):
    """Marshmallow schema for User models."""
    # DB-generated primary key: serialised on dump, ignored on load.
    id = field_for(User, 'id', dump_only=True)
    class Meta:
        # Fields to expose
        fields = ('id', 'name', 'username')
        model = User
    @post_load
    def make_user(self, data):
        # Turn the validated dict back into a User model instance.
        return User(**data)
class DeviceSchema(ma.Schema):
    """Marshmallow schema for Device models."""
    # DB-generated primary key: serialised on dump, ignored on load.
    id = field_for(Device, 'id', dump_only=True)
    class Meta:
        # Fields to expose
        fields = ('id', 'model', 'manufacturerID')
        model = Device
    @post_load
    def make_device(self, data):
        # Turn the validated dict back into a Device model instance.
        return Device(**data)
class MeasurementSchema(ma.Schema):
    """Marshmallow schema for Measurement models."""
    # DB-generated primary key: serialised on dump, ignored on load.
    id = field_for(Measurement, 'id', dump_only=True)
    class Meta:
        # Fields to expose
        # NOTE(review): unlike the other schemas, no `fields` tuple is
        # declared here - confirm the intended field exposure.
        model = Measurement
    @post_load
    def make_measurement(self, data):
        # Turn the validated dict back into a Measurement model instance.
        return Measurement(**data)
|
taedori81/shoop | shoop/admin/modules/products/views/edit_cross_sell.py | Python | agpl-3.0 | 3,483 | 0.000287 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib import messages
from django.forms.formsets import DEFAULT_MAX_NUM, DEFAULT_MIN_NUM
from django.forms.models import BaseModelFormSet, ModelForm
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
f | rom django.views.generic import UpdateView
from shoop.admin.base import MenuEntry
from shoop.admin.forms.widgets import ProductChoiceWidget
from shoop.admin.toolbar import PostActionButton, Toolbar
from shoop.admin.utils.urls import get_model_url
from shoop.core.models import (
Product, ProductCrossSell, ProductCrossSellTy | pe, ProductMedia
)
class ProductCrossSellForm(ModelForm):
    """Form for one cross-sell link; ``product1`` is pinned to the product
    being edited, so only the target product, weight and type are shown."""
    class Meta:
        model = ProductCrossSell
        fields = (
            "product2",
            "weight",
            "type",
        )
    def __init__(self, **kwargs):
        # The product being edited is injected by the enclosing formset.
        self.product = kwargs.pop("product")
        super(ProductCrossSellForm, self).__init__(**kwargs)
        # Use the admin product-picker widget instead of a plain select.
        self.fields["product2"].widget = ProductChoiceWidget()
        self.fields["product2"].label = _("Product")
    def save(self, commit=True):
        # Always anchor the new/edited cross-sell to the edited product.
        self.instance.product1 = self.product
        return super(ProductCrossSellForm, self).save(commit=commit)
class ProductCrossSellFormSet(BaseModelFormSet):
    """Formset of ProductCrossSellForms for a single product, with five
    extra blank rows and row deletion enabled."""
    validate_min = False
    min_num = DEFAULT_MIN_NUM
    validate_max = False
    max_num = DEFAULT_MAX_NUM
    absolute_max = DEFAULT_MAX_NUM
    # NOTE(review): `model` is ProductMedia while the form and queryset
    # target ProductCrossSell - looks like a copy-paste slip; verify.
    model = ProductMedia
    can_delete = True
    can_order = False
    extra = 5
    def __init__(self, *args, **kwargs):
        # Product under edit, forwarded into every child form.
        self.product = kwargs.pop("product")
        super(ProductCrossSellFormSet, self).__init__(*args, **kwargs)
    def form(self, **kwargs):
        # Formset machinery calls this as the form factory.
        kwargs.setdefault("product", self.product)
        return ProductCrossSellForm(**kwargs)
class ProductCrossSellEditView(UpdateView):
    """Admin view for editing a product's manual cross-sell links.

    Uses a formset as the "form" class; computed cross-sells are excluded
    from editing.
    """
    model = Product
    template_name = "shoop/admin/products/edit_cross_sell.jinja"
    context_object_name = "product"
    form_class = ProductCrossSellFormSet
    def get_breadcrumb_parents(self):
        # Link back to the product's own admin page.
        return [
            MenuEntry(
                text="%s" % self.object,
                url=get_model_url(self.object)
            )
        ]
    def get_context_data(self, **kwargs):
        """Add page title and a save-button toolbar to the context."""
        context = super(ProductCrossSellEditView, self).get_context_data(**kwargs)
        context["title"] = _("Edit Cross-Sell: %s") % self.object
        context["toolbar"] = Toolbar([
            PostActionButton(
                icon="fa fa-save",
                form_id="xsell_form",
                text=_("Save"),
                extra_css_class="btn-success",
            ),
        ])
        return context
    def get_form_kwargs(self):
        """Swap UpdateView's `instance` kwarg for the formset's expected
        `queryset` + `product` kwargs."""
        kwargs = super(ProductCrossSellEditView, self).get_form_kwargs()
        instance = kwargs.pop("instance", None)
        # Only manually-created links are editable; computed ones are derived.
        kwargs["queryset"] = ProductCrossSell.objects. \
            filter(product1=instance). \
            exclude(type=ProductCrossSellType.COMPUTED). \
            order_by("weight")
        kwargs["product"] = instance
        return kwargs
    def form_valid(self, form):
        # `form` is the formset; redirect back to the same page after save.
        form.save()
        messages.success(self.request, _("Changes saved."))
        return HttpResponseRedirect(self.request.path)
|
irmen/Pyro4 | examples/stockquotes-old/phase3/aggregator.py | Python | mit | 1,432 | 0.001397 | from __future__ import print_function
import Pyro4
@Pyro4.expose
class Aggregator(object):
    """Collects quotes from stock markets and fans them out to viewers."""

    def __init__(self):
        # viewer proxy -> symbols that viewer subscribed to
        self.viewers = {}
        # every symbol known across all joined markets
        self.symbols = []

    def add_symbols(self, symbols):
        """Register additional symbols offered by a market."""
        self.symbols.extend(symbols)

    def available_symbols(self):
        """Return the list of all known symbols."""
        return self.symbols

    def view(self, viewer, symbols):
        """Subscribe *viewer* to quote updates for the given symbols."""
        print("aggregator gets a new viewer, for symbols:", symbols)
        self.viewers[viewer] = symbols

    def quotes(self, market, stockquotes):
        """Forward each quoted symbol to every viewer subscribed to it."""
        for symbol, value in stockquotes.items():
            for viewer, subscribed in self.viewers.items():
                if symbol not in subscribed:
                    continue
                viewer.quote(market, symbol, value)
def main():
    """Register the aggregator with the Pyro name server, subscribe it to
    every running stock market, then serve requests forever."""
    aggregator = Aggregator()
    daemon = Pyro4.Daemon()
    agg_uri = daemon.register(aggregator)
    ns = Pyro4.locateNS()
    ns.register("example.stockquote-old.aggregator", agg_uri)
    # Join every market registered under the old-example prefix.
    for market, market_uri in ns.list(prefix="example.stockmarket-old.").items():
        print("joining market", market)
        stockmarket = Pyro4.Proxy(market_uri)
        # The market will call back into aggregator.quotes(...).
        stockmarket.listener(aggregator)
        aggregator.add_symbols(stockmarket.symbols())
    if not aggregator.available_symbols():
        raise ValueError("no symbols found! (have you started the stock market first?)")
    print("Aggregator running. Symbols:", aggregator.available_symbols())
    # Blocks forever handling incoming Pyro calls.
    daemon.requestLoop()
if __name__ == "__main__":
    main()
|
albermax/xcsvm | xcsvm/xcsvm/tests/solvers/llwmr.py | Python | mit | 7,407 | 0 | from ...utils.tests import base as base
from ...utils.tests import mpi as mpit
# key order: dataset; epsilon; C
# Each leaf stores the expected result as 1 - accuracy (i.e. an error
# rate), which the test harness compares solver output against.
__REFERENCE_RESULTS__ = {
    "dryrun": {
        0.1: {
            0.1: {
                "accuracy": 1-0.9300,
            },
            1: {
                "accuracy": 1-0.9300,
            },
            10: {
                "accuracy": 1-0.9300,
            },
        },
    },
    "glass": {
        0.001: {
            0.1: {
                "accuracy": 1-0.6667,
            },
            1: {
                "accuracy": 1-0.6190,
            },
            10: {
                "accuracy": 1-0.3333,
            },
        },
    },
    "iris": {
        0.001: {
            0.1: {
                "accuracy": 1-0.1333,
            },
            1: {
                "accuracy": 1-0.2667,
            },
            10: {
                "accuracy": 1-0.2667,
            },
        },
    },
    "news20": {
        0.001: {
            0.1: {
                "accuracy": 1-0.2923,
            },
            1: {
                "accuracy": 1-0.2297,
            },
            10: {
                "accuracy": 1-0.1615,
            },
        },
    },
}
def do_llwmr_tsd(options_update=None, mpi_comm=None,
                 datasets=None, reference=True):
    """Run the small-dataset test harness for the llw_mr_sparse solver.

    Args:
        options_update: optional dict of solver options merged over the
            defaults below (was a mutable default ``{}``; now None-safe).
        mpi_comm: optional MPI communicator forwarded to the harness.
        datasets: optional subset of datasets to run on (None = all).
        reference: when False, skip comparison against
            __REFERENCE_RESULTS__.
    """
    solver_id = "llw_mr_sparse"
    # default options
    options = {
        "epsilon": 0.001,
        "C": 10**-1,
        "mpi_comm": mpi_comm,
    }
    options.update(options_update or {})
    # Only an explicit reference=False disables the reference comparison.
    reference_results = __REFERENCE_RESULTS__ if reference is not False else None
    base._do_test_small_datasets(solver_id, options,
                                 datasets=datasets,
                                 reference_results=reference_results,
                                 mpi_comm=mpi_comm)
def do_llwmr_tld(options_update=None, mpi_comm=None,
                 datasets=None, reference=True):
    """Run the large-dataset test harness for the llw_mr_sparse solver.

    Same parameters as do_llwmr_tsd (mutable default ``{}`` replaced with
    None); results are compared with a tolerance since large runs are
    less deterministic.
    """
    solver_id = "llw_mr_sparse"
    # default options
    options = {
        "epsilon": 0.1,
        "C": 10**-1,
        "mpi_comm": mpi_comm,
    }
    options.update(options_update or {})
    tolerance = 0.01
    # Only an explicit reference=False disables the reference comparison.
    reference_results = __REFERENCE_RESULTS__ if reference is not False else None
    base._do_test_large_datasets(solver_id, options,
                                 datasets=datasets,
                                 reference_results=reference_results,
                                 mpi_comm=mpi_comm,
                                 tolerance=tolerance)
###############################################################################
# .............................................................................
# BASE SOLVER TESTS
# .............................................................................
###############################################################################
# Naming: *_sd runs the small-dataset harness (do_llwmr_tsd), *_ld the
# large-dataset harness (do_llwmr_tld).  @base.testattr("slow") marks
# tests that are skipped in quick runs.
###############################################################################
# Running default config
def test_default_sd():
    do_llwmr_tsd()
@base.testattr("slow")
def test_default_ld():
    do_llwmr_tld()
###############################################################################
# Parameter C and max_iter
@base.testattr("slow")
def test_C_1_sd():
    do_llwmr_tsd({"C": 10**0})
@base.testattr("slow")
def test_C_1_ld():
    do_llwmr_tld({"C": 10**0})
@base.testattr("slow")
def test_C_10_sd():
    do_llwmr_tsd({"C": 10**1, "max_iter": 10000})
@base.testattr("slow")
def test_C_10_ld():
    do_llwmr_tld({"C": 10**1, "max_iter": 10000})
###############################################################################
# Parameter epsilon
# (tighter/looser stopping criteria; no reference comparison since the
# reference results were produced at the default epsilon)
def test_small_epsilon_sd():
    do_llwmr_tsd({"epsilon": 0.0001}, reference=False)
@base.testattr("slow")
def test_small_epsilon_ld():
    do_llwmr_tld({"epsilon": 0.01}, reference=False)
###############################################################################
# Parameter shuffle
def test_no_shuffle_sd():
    do_llwmr_tsd({"shuffle": False})
@base.testattr("slow")
def test_no_shuffle_ld():
    do_llwmr_tld({"shuffle": False})
###############################################################################
# Parameter seed
def test_seed_12345_sd():
    do_llwmr_tsd({"seed": 12345})
@base.testattr("slow")
def test_seed_12345_ld():
    do_llwmr_tld({"seed": 12345})
###############################################################################
# Parameter dtype
@base.testattr("slow")
def test_dtype_float32_sd():
    do_llwmr_tsd({"dtype": "float32"})
@base.testattr("slow")
def test_dtype_float32_ld():
    do_llwmr_tld({"dtype": "float32"})
@base.testattr("slow")
def test_dtype_float64_sd():
    do_llwmr_tsd({"dtype": "float64"})
@base.testattr("slow")
def test_dtype_float64_ld():
    do_llwmr_tld({"dtype": "float64"})
###############################################################################
# Parameter idtype
@base.testattr("slow")
def test_idtype_uint32_sd():
    do_llwmr_tsd({"idtype": "uint32"})
@base.testattr("slow")
def test_idtype_uint32_ld():
    do_llwmr_tld({"idtype": "uint32"})
@base.testattr("slow")
def test_idtype_uint64_sd():
    do_llwmr_tsd({"idtype": "uint64"})
@base.testattr("slow")
def test_idtype_uint64_ld():
    do_llwmr_tld({"idtype": "uint64"})
###############################################################################
# Parameter nr_threads
def test_nr_threads_2_sd():
    do_llwmr_tsd({"nr_threads": 2})
@base.testattr("slow")
def test_nr_threads_2_ld():
    do_llwmr_tld({"nr_threads": 2})
def test_nr_threads_5_sd():
    do_llwmr_tsd({"nr_threads": 5})
@base.testattr("slow")
def test_nr_threads_5_ld():
    do_llwmr_tld({"nr_threads": 5})
###############################################################################
# .............................................................................
# LLW SOLVER TESTS
# .............................................................................
###############################################################################
###############################################################################
# Parameter folds
def test_folds_2_sd():
    do_llwmr_tsd({"folds": 2})
@base.testattr("slow")
def test_folds_2_ld():
    do_llwmr_tld({"folds": 2})
def test_folds_5_sd():
    do_llwmr_tsd({"folds": 5})
@base.testattr("slow")
def test_folds_5_ld():
    do_llwmr_tld({"folds": 5})
###############################################################################
# Parameter variant
def test_variant_1_sd():
    do_llwmr_tsd({"variant": 1})
@base.testattr("slow")
def test_variant_1_ld():
    do_llwmr_tld({"variant": 1})
###############################################################################
# Parameter shrinking
def test_shrinking_1_sd():
    do_llwmr_tsd({"shrinking": 1})
@base.testattr("slow")
def test_shrinking_1_ld():
    do_llwmr_tld({"shrinking": 1})
###############################################################################
# Spreading computation with openmpi
# (mpit.wrap(n) runs the test under an MPI communicator of size n)
@mpit.wrap(2)
def test_nr_proc_2_sd(comm):
    do_llwmr_tsd({}, comm)
@base.testattr("slow")
@mpit.wrap(2)
def test_nr_proc_2_ld(comm):
    do_llwmr_tld({}, comm)
@mpit.wrap(3)
def test_nr_proc_3_sd(comm):
    do_llwmr_tsd({}, comm)
@base.testattr("slow")
@mpit.wrap(3)
def test_nr_proc_3_ld(comm):
    do_llwmr_tld({}, comm)
|
t0mk/python-stdnum | stdnum/de/vat.py | Python | lgpl-2.1 | 2,168 | 0 | # vat.py - functions for handling German VAT numbers
#
# Copyright (C) 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, o | r (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# | License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Ust ID Nr. (Umsatzsteur Identifikationnummer, German VAT number).
The number is 9 digits long and uses the ISO 7064 Mod 11, 10 check digit
algorithm.
>>> compact('DE 136,695 976')
'136695976'
>>> validate('DE136695976')
'136695976'
>>> validate('136695978')
Traceback (most recent call last):
...
InvalidChecksum: ...
"""
from stdnum.exceptions import *
from stdnum.iso7064 import mod_11_10
from stdnum.util import clean
def compact(number):
    """Convert the number to the minimal representation.

    Strips valid separators and surrounding whitespace, upper-cases,
    and drops a leading 'DE' country prefix when present.
    """
    cleaned = clean(number, ' -./,').upper().strip()
    return cleaned[2:] if cleaned.startswith('DE') else cleaned
def validate(number):
    """Validate a German VAT number and return its compact form.

    Raises InvalidFormat, InvalidLength, or a checksum error from the
    ISO 7064 Mod 11, 10 routine when the number is not valid.
    """
    number = compact(number)
    # Must be all digits and must not start with a zero.
    if not (number.isdigit() and number[0] != '0'):
        raise InvalidFormat()
    if len(number) != 9:
        raise InvalidLength()
    mod_11_10.validate(number)
    return number
def is_valid(number):
    """Return True when the number is a valid German VAT number.

    Unlike validate(), never raises: any validation failure yields False.
    """
    try:
        validate(number)
    except ValidationError:
        return False
    return True
|
SpringboardEdu/drip-py | drip/drip.py | Python | mit | 4,987 | 0.001404 | import json
import requests
import logging
from mixins import DripQueryPathMixin
logger = logging.getLogger(__name__)
class DripPy(DripQueryPathMixin):
    """
    The main class that interacts with Drip
    https://www.getdrip.com/docs/rest-api#subscribers

    URL construction is delegated to DripQueryPathMixin; this class only
    builds payloads and dispatches HTTP requests.
    """
    def __init__(self, token, account_id, endpoint='https://api.getdrip.com/v2/'):
        """
        Args:
            token: Drip generated token
            account_id: Drip generated account id
            endpoint: Optional, already set to 'https://api.getdrip.com/v2/'
        """
        super(DripPy, self).__init__(token, account_id, endpoint)
    def fetch_subscriber(self, subscriber_id):
        """
        Fetches a subscriber from Drip
        GET /:account_id/subscribers/:subscriber_id
        Args:
            subscriber_id (int): The subscriber ID
        Returns:
            json: {
                "links": { ... },
                "subscribers": [{ ... }]
            }
        """
        url = self.get_fetch_subscriber_query_path(subscriber_id)
        return self.send_request(url, method="GET")
    def unsubscribe_email(self, email):
        """
        Unsubscribe a lead from all campaigns
        Args:
            email (str): Email of the lead
        Returns:
            None: the API response is discarded.
        """
        url = self.get_unsubscribe_email_query_path(email)
        self.send_request(url)
    def add_subscriber_tag(self, email, tag):
        """
        Uses post update API to add a given tag for an email, creates a new
        subscriber if it doesn't exist in our list already
        Args:
            email (str): Email of the lead
            tag (str): Tag to be added
        Returns:
            None: the API response is discarded.
        """
        url = self.get_update_subscriber_query_path()
        self.send_request(url, {"subscribers": [{'email': email, 'tags': [tag]}]})
    def remove_subscriber_tag(self, email, tag):
        """
        Uses post update API to remove a given tag for an email, creates a new
        subscriber if it doesn't exist in our list already.
        Args:
            email (str): Email of the lead
            tag (str): Tag to be removed
        Returns:
            None: the API response is discarded.
        """
        url = self.get_update_subscriber_query_path()
        self.send_request(url, {"subscribers": [{'email': email, 'remove_tags': [tag]}]})
    def update_subscriber_tag_with_new_batch(self, list_of_subscribers):
        """
        Uses the batch update API to apply tag changes to many subscribers,
        creating any that do not exist yet.  Requests are sent in chunks of
        1000 subscribers.
        Args:
            list_of_subscribers (list): 3-tuples of
                (email, tag_to_add or None, tag_to_remove or None)
        Returns:
            json: {}
        """
        url = self.get_update_subscriber_query_path_batches()
        # Local import - presumably avoids a circular import; confirm.
        from .helpers import partition
        partitions = partition(list_of_subscribers, 1000)
        for partition_list_of_subscriber in partitions:
            payload = list()
            for subscriber in partition_list_of_subscriber:
                customer = dict()
                customer["email"] = subscriber[0]
                if subscriber[1] is not None:
                    customer["tags"] = [subscriber[1]]
                if subscriber[2] is not None:
                    customer["remove_tags"] = [subscriber[2]]
                payload.append(customer)
            self.send_request(url, {"batches": [{"subscribers": payload}]})
        return {}
    def send_request(self, request_url, payload=None, method="POST"):
        """
        Dispatches the request and returns a response
        Args:
            request_url (str): The URL to request from
            payload (dict): Optional
            method (str): Defaults to POST, other option is GET
        Returns:
            json: decoded body on HTTP 200; {} on 202 or any error.
        """
        if not payload:
            payload = {}
        headers = {
            'content-type': 'application/json',
            'Accept': 'application/json'
        }
        # HTTP basic auth: API token as username, blank password.
        if method == "POST":
            r = requests.post(request_url, auth=(self.token, ''), headers=headers, data=json.dumps(payload))
        else:
            r = requests.get(request_url, auth=(self.token, ''), params=payload)
        if r.status_code == 200:
            try:
                return r.json()
            except Exception as e:
                logger.error("Error while retrieving response. Error: {}".format(str(e)))
                # NOTE(review): returns the raw Response object here, unlike
                # the dict returned on every other path.
                return r
        elif r.status_code == 202:
            # 202 Accepted (e.g. batch endpoint) carries no usable body.
            return {}
        else:
            logger.error("Error while retrieving response. Status code: {}. Text: {}".format(r.status_code, r.text))
            return {}
|
oczkers/gdown | gdown/modules/crocko.py | Python | gpl-3.0 | 1,644 | 0.00365 | # -*- coding: utf-8 | -*-
"""
gdown.modules.crocko
~~~~~~~~~~~~~~~~~~~
This module contains handlers for crocko.
"""
import re
from datetime import datetime
from ..module import browser, acc_info_template
def getApikey(username, passwd):
    """Log in to crocko and return the API key, or False on bad credentials."""
    session = browser()
    response = session.post('http://api.crocko.com/apikeys',
                            {'login': username, 'password': passwd})
    content = re.search('<content type="text">(.+)</content>', response.text).group(1)
    if content == 'Invalid login or password':
        return False
    return content
def accInfo(username, passwd, proxy=False):
    """Returns account info.

    Logs in, fetches the account XML and fills an acc_info dict with the
    account status ('deleted', 'free' or 'premium') and, for premium
    accounts, the expiry date.
    """
    # get apikey
    acc_info = acc_info_template()
    apikey = getApikey(username, passwd)
    if not apikey:
        acc_info['status'] = 'deleted'
        return acc_info  # invalid username or password (?)
    r = browser(proxy)
    content = r.get('http://api.crocko.com/account', headers={'Authorization': apikey}).text
    premium_end = re.search('<ed:premium_end>(.*?)</ed:premium_end>', content).group(1)  # TODO: detect free acc (blind guess now)
    if not premium_end:
        acc_info['status'] = 'free'
    else:
        acc_info['status'] = 'premium'
        # Bug fix: the regex captures a string, but fromtimestamp() needs
        # a number - convert before building the expiry datetime.
        acc_info['expire_date'] = datetime.fromtimestamp(float(premium_end))  # premium
    return acc_info
def upload(username, passwd, filename):
    """Returns uploaded file url."""
    # get apikey
    apikey = getApikey(username, passwd)
    r = browser()
    # Bug fix: close the file handle after the upload instead of leaking it.
    with open(filename, 'rb') as fh:
        content = r.post('http://api.crocko.com/files',
                         headers={'Authorization': apikey},
                         files={'file': fh}).text  # upload
    return re.search('<link title="download_link" href="(.+)"', content).group(1)
|
mementum/backtrader | tests/test_ind_priceosc.py | Python | gpl-3.0 | 1,613 | 0 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This | program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This | program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import testcommon
import backtrader as bt
import backtrader.indicators as btind
# Test fixture parameters consumed by testcommon.runtest below.
chkdatas = 1
# Expected indicator values (as strings) at the checkpoints.
chkvals = [
    ['25.821368', '23.202675', '-9.927422']
]
# Minimum period before the indicator produces values.
chkmin = 26
# Indicator class under test.
chkind = btind.PriceOsc
def test_run(main=False):
    """Run the PriceOsc indicator check via the shared test harness;
    with main=True also plot the result interactively."""
    feeds = [testcommon.getdata(i) for i in range(chkdatas)]
    testcommon.runtest(
        feeds,
        testcommon.TestStrategy,
        main=main,
        plot=main,
        chkind=chkind,
        chkmin=chkmin,
        chkvals=chkvals,
    )


if __name__ == '__main__':
    test_run(main=True)
|
drepetto/chiplotle | chiplotle/hpgl/abstract/hpglescape.py | Python | gpl-3.0 | 331 | 0.024169 | from chiplotle.hpgl.abstract | .hpgl import _HPGL
class _HPGLEscape(_HPGL):
    """Base class for HPGL escape-sequence instructions (``ESC . <name>``)."""
    # ASCII ESC (0x1b) prefixing every escape instruction.
    _escape = chr(27)
    ## PUBLIC PROPERTIES ##
    @property
    def format(self):
        # `_name` is expected from _HPGL / subclasses - not defined here.
        return '%s.%s' % (self._escape, self._name)
    ## OVERRIDES ##
    def __repr__(self):
        return 'Escape(%s, %s)' % (repr(self._escape), self._name)
|
lanius/flask-mitten | example/app.py | Python | bsd-3-clause | 1,645 | 0.000608 | # -*- coding: utf-8 -*-
from flask import (Flask, request, session, render_template, url_for, redirect,
jsonify)
app = Flask(__name__)
app.debug = True  # example app only: never enable debug in production
app.secret_key = 'dummy secret key'  # example only: use a real random secret
# Mitten must be applied after the app is configured.
from flaskext.mitten import Mitten
mitten = Mitten(app)  # apply Mitten
@app.route('/ | ')
def index():
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('index.html')
@app.route('/home/')
def home():
if not session.get('logged_in'):
return redirect(url_for('index'))
retur | n render_template('home.html')
# A POST request is protected from csrf automatically
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
session.regenerate() # avoid session fixation
session['username'] = username
session['logged_in'] = True
return redirect(url_for('home'))
@app.route('/logout/')
def logout():
session.destroy()
return redirect(url_for('home'))
@mitten.csrf_exempt # excluded from csrf protection
@app.route('/public_api/', methods=['POST'])
def public_api():
return "POST was received successfully.", 200
@mitten.json
@app.route('/json_api/')
def json_api():
return jsonify(result='success')
@app.errorhandler(500)
def exception_handler(error):
return render_template('error.html')
if __name__ == '__main__':
app.run(host='localhost', port=8080)
|
mbauskar/frappe | frappe/desk/page/setup_wizard/setup_wizard.py | Python | mit | 8,424 | 0.025878 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
from __future__ import unicode_literals
import frappe, json, os
from frappe.utils import strip, cint
from frappe.translate import (set_default_language, get_dict, send_translations)
from frappe.geo.country_info import get_country_info
from frappe.utils.file_manager import save_file
from frappe.utils.password import update_password
from werkzeug.useragents import UserAgent
from . import install_fixtures
from six import string_types
@frappe.whitelist()
def setup_complete(args):
	"""Calls hooks for `setup_wizard_complete`, sets home page as `desktop`
	and clears cache. If wizard breaks, calls `setup_wizard_exception` hook"""
	# Idempotent: a repeat call after a successful setup is a silent no-op.
	if cint(frappe.db.get_single_value('System Settings', 'setup_complete')):
		# do not throw an exception if setup is already complete
		# let the user continue to desk
		return
		#frappe.throw(_('Setup already complete'))
	args = process_args(args)
	try:
		if args.language and args.language != "english":
			# NOTE(review): condition tests `args.language` but the value
			# passed is `args.lang` - confirm process_args() sets both.
			set_default_language(get_language_code(args.lang))
			frappe.clear_cache()
		# update system settings
		update_system_settings(args)
		update_user_name(args)
		for method in frappe.get_hooks("setup_wizard_complete"):
			frappe.get_attr(method)(args)
		disable_future_access()
		frappe.db.commit()
		frappe.clear_cache()
	except:
		# Roll everything back, let installed apps record the failure via
		# their `setup_wizard_exception` hooks, then re-raise.
		frappe.db.rollback()
		if args:
			traceback = frappe.get_traceback()
			for hook in frappe.get_hooks("setup_wizard_exception"):
				frappe.get_attr(hook)(traceback, args)
		raise
	else:
		# Success path: notify hooks, then install the standard fixtures.
		for hook in frappe.get_hooks("setup_wizard_success"):
			frappe.get_attr(hook)(args)
		install_fixtures.install()
def update_system_settings(args):
    """Persist country, language, timezone and number-format defaults
    chosen in the setup wizard onto the System Settings single doc."""
    country = args.get("country")
    number_format = get_country_info(country).get("number_format", "#,###.##")

    # these are 0-precision currency formats, not float formats;
    # map them to the equivalent float formats instead
    float_formats = {"#.###": "#.###,##", "#,###": "#,###.##"}
    number_format = float_formats.get(number_format, number_format)

    system_settings = frappe.get_doc("System Settings", "System Settings")
    system_settings.update({
        "country": country,
        "language": get_language_code(args.get("language")),
        "time_zone": args.get("timezone"),
        "float_precision": 3,
        "date_format": frappe.db.get_value("Country", country, "date_format"),
        "number_format": number_format,
        # never run the scheduler inside the test suite
        "enable_scheduler": 0 if frappe.flags.in_test else 1,
        "backup_limit": 3,  # default count of downloadable backups
    })
    system_settings.save()
def update_user_name(args):
    """Create or rename the first user from the setup wizard values.

    If an email is given, a new User is created (silently skipped when it
    already exists) with welcome mail muted and the supplied password set.
    Otherwise the current session user is renamed with the given full name.
    An optional `attach_user` image is saved as the user's profile picture,
    and all roles are granted to the resulting user.
    """
    # split "First Last" into first/last name parts
    first_name, last_name = args.get('full_name', ''), ''
    if ' ' in first_name:
        first_name, last_name = first_name.split(' ', 1)

    if args.get("email"):
        if frappe.db.exists('User', args.get('email')):
            # running again
            return
        args['name'] = args.get("email")

        # mute outgoing mail while inserting so no welcome email is sent;
        # restore the previous flag value afterwards
        _mute_emails, frappe.flags.mute_emails = frappe.flags.mute_emails, True
        doc = frappe.get_doc({
            "doctype":"User",
            "email": args.get("email"),
            "first_name": first_name,
            "last_name": last_name
        })
        doc.flags.no_welcome_mail = True
        doc.insert()
        frappe.flags.mute_emails = _mute_emails
        update_password(args.get("email"), args.get("password"))

    elif first_name:
        # no email supplied: update the name of the current session user
        args.update({
            "name": frappe.session.user,
            "first_name": first_name,
            "last_name": last_name
        })

        frappe.db.sql("""update `tabUser` SET first_name=%(first_name)s,
            last_name=%(last_name)s WHERE name=%(name)s""", args)

    if args.get("attach_user"):
        # attach_user is "filename,filetype,base64content"
        attach_user = args.get("attach_user").split(",")
        if len(attach_user)==3:
            filename, filetype, content = attach_user
            fileurl = save_file(filename, content, "User", args.get("name"), decode=True).file_url
            frappe.db.set_value("User", args.get("name"), "user_image", fileurl)

    if args.get('name'):
        add_all_roles_to(args.get("name"))
def process_args(args):
    """Normalize wizard arguments.

    Falls back to the current request's form dict when empty, decodes a
    JSON string payload, wraps the result in `frappe._dict` and strips
    surrounding whitespace from every string value.
    """
    args = args or frappe.local.form_dict
    if isinstance(args, string_types):
        args = json.loads(args)

    args = frappe._dict(args)

    # trim surrounding whitespace on all string values
    for key in list(args):
        value = args[key]
        if isinstance(value, string_types):
            args[key] = strip(value)

    return args
def add_all_roles_to(name):
    """Grant every Role to the given user, except base and restricted
    roles that must never be assigned wholesale."""
    skip = ("Administrator", "Guest", "All", "Customer", "Supplier", "Partner", "Employee")
    user = frappe.get_doc("User", name)
    for (role_name,) in frappe.db.sql("""select name from tabRole"""):
        if role_name in skip:
            continue
        entry = user.append("roles")
        entry.role = role_name
    user.save()
def disable_future_access():
    """Mark setup as complete and lock the setup wizard page.

    Sets the default desk home page, flags `setup_complete` and
    `is_first_startup` in System Settings and, outside of tests,
    restricts the 'setup-wizard' Page to the Administrator role only.
    """
    frappe.db.set_default('desktop:home_page', 'desktop')
    frappe.db.set_value('System Settings', 'System Settings', 'setup_complete', 1)
    frappe.db.set_value('System Settings', 'System Settings', 'is_first_startup', 1)

    if not frappe.flags.in_test:
        # remove all roles and add 'Administrator' to prevent future access
        page = frappe.get_doc('Page', 'setup-wizard')
        page.roles = []
        page.append('roles', {'role': 'Administrator'})
        # skip exporting the page JSON and permission checks for this
        # system-level change
        page.flags.do_not_update_json = True
        page.flags.ignore_permissions = True
        page.save()
@frappe.whitelist()
def load_messages(language):
    """Load translation messages for given language from all `setup_wizard_requires`
    javascript files, switch the default language and send the combined
    translation dict to the client. Returns the active language code."""
    frappe.clear_cache()
    set_default_language(get_language_code(language))
    m = get_dict("page", "setup-wizard")

    for path in frappe.get_hooks("setup_wizard_requires"):
        # common folder `assets` served from `sites/`
        js_file_path = os.path.abspath(frappe.get_site_path("..", *path.strip("/").split("/")))
        m.update(get_dict("jsfile", js_file_path))

    # include boot-time translations as well
    m.update(get_dict("boot"))
    send_translations(m)
    return frappe.local.lang
@frappe.whitelist()
def load_languages():
    """Return the default language, the sorted list of installed language
    names and a language-code -> language-name mapping."""
    rows = frappe.db.sql('select language_code, language_name from tabLanguage order by name', as_dict=True)
    codes_to_names = {row.language_code: row.language_name for row in rows}

    default_language = (frappe.db.get_value('Language', frappe.local.lang, 'language_name')
        or frappe.local.lang)

    return {
        "default_language": default_language,
        "languages": sorted(frappe.db.sql_list('select language_name from tabLanguage order by name')),
        "codes_to_names": codes_to_names,
    }
@frappe.whitelist()
def load_country():
    """Guess the country from the requester's IP address, or return None
    when no request IP is known."""
    from frappe.sessions import get_geo_ip_country

    request_ip = frappe.local.request_ip
    if not request_ip:
        return None
    return get_geo_ip_country(request_ip)
@frappe.whitelist()
def load_user_details():
    """Return the full name and email captured at signup (from the cache)."""
    cache = frappe.cache()
    return {
        "full_name": cache.hget("full_name", "signup"),
        "email": cache.hget("email", "signup"),
    }
@frappe.whitelist()
def reset_is_first_startup():
    """Clear the `is_first_startup` flag in System Settings (it is set to 1
    by `disable_future_access` when the wizard completes)."""
    frappe.db.set_value('System Settings', 'System Settings', 'is_first_startup', 0)
def prettify_args(args):
    """Return sorted "key = value" lines for *args*, replacing any inline
    image data-URI values with a short placeholder (filename and
    approximate decoded size). Mutates *args* in place."""
    # collapse attachments so the output stays readable
    for key, val in args.items():
        if not (isinstance(val, string_types) and "data:image" in val):
            continue
        filename = val.split("data:image", 1)[0].strip(", ")
        # base64 inflates data by ~4/3; undo that to estimate raw size in MB
        size = round((len(val) * 3 / 4) / 1048576.0, 2)
        args[key] = "Image Attached: '{0}' of size {1} MB".format(filename, size)

    return ["{} = {}".format(key, args[key]) for key in sorted(args)]
def email_setup_wizard_exception(traceback, args):
    """Email the setup-wizard failure details (traceback, wizard args and
    request context) to the address configured as
    `setup_wizard_exception_email` in site config. No-op when that key is
    not set."""
    if not frappe.local.conf.setup_wizard_exception_email:
        return

    pretty_args = prettify_args(args)

    # BUGFIX: the template below used to dereference
    # frappe.local.request.headers / .accept_languages unconditionally,
    # which raised AttributeError exactly in the no-request case the
    # else-branch was meant to handle. Gather all request-derived values
    # under one guard instead.
    request = frappe.local.request
    if request:
        user_agent = UserAgent(request.headers.get('User-Agent', ''))
        headers = request.headers
        accept_languages = ", ".join(request.accept_languages.values())
    else:
        user_agent = frappe._dict()
        headers = ""
        accept_languages = ""

    message = """
#### Basic Information
- **Site:** {site}
- **User:** {user}
- **Browser:** {user_agent.platform} {user_agent.browser} version: {user_agent.version} language: {user_agent.language}
- **Browser Languages**: `{accept_languages}`
---
#### Traceback
<pre>{traceback}</pre>
---
#### Setup Wizard Arguments
<pre>{args}</pre>
---
#### Request Headers
<pre>{headers}</pre>""".format(
        site=frappe.local.site,
        traceback=traceback,
        args="\n".join(pretty_args),
        user=frappe.session.user,
        user_agent=user_agent,
        headers=headers,
        accept_languages=accept_languages)

    frappe.sendmail(recipients=frappe.local.conf.setup_wizard_exception_email,
        sender=frappe.session.user,
        subject="Exception in Setup Wizard - {}".format(frappe.local.site),
        message=message,
        delayed=False)
def get_langua |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.