| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
joehakimrahme/thawra
|
thawra/hero.py
|
1
|
3453
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from thawra import action
class InvalidHero(Exception):
def __init__(self, msg, value):
self.value = value
self.msg = msg
super(InvalidHero, self).__init__()
def __str__(self):
return self.msg
class Hero(object):
def __repr__(self):
return self.name
def __init__(self, name, skillmap, attributes, element, macros=None):
self.name = name
self.element = element
if len(attributes) != 3 or \
not all(map(lambda x: isinstance(x, int), attributes)):
raise InvalidHero(
"Expected array of 3 integers for attributes, got: %s" %
attributes, attributes)
self.attributes = dict(zip(('str', 'int', 'agi'), attributes))
# TODO(rahmu): validate skillmap input
self.skillmap = skillmap
# TODO(rahmu): validate macros input
self.macros = macros
self.status = None
self.stats = {
'ATK': self.strength * 10,
'DEF': self.strength * 2,
'MAG': self.intelligence * 7,
'MDE': self.intelligence * 2,
'SPD': self.agility * 30
}
self.maxHP = self.strength * 100
self.maxMP = self.intelligence * 100
self._hp = self.maxHP
self._mp = self.maxMP
# TODO(rahmu): fill the rest of the dict with the skills
self.actions = {
'ATK': lambda target: action.Action(self, 'ATK', target, 0),
'MAG': lambda target: action.Action(self, 'MAG', target,
self.maxMP / 15)
}
@property
def level(self):
return self._get_level()
@property
def strength(self):
return self.attributes['str']
@property
def intelligence(self):
return self.attributes['int']
@property
def agility(self):
return self.attributes['agi']
@property
def hp(self):
return self._hp
@hp.setter
def hp(self, value):
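# Clamp to [0, maxHP]: later truthy keys overwrite the True entry, so an
# out-of-range value resolves to the nearest bound.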
switch = {
True: value,
value > self.maxHP: self.maxHP,
value < 0: 0}
self._hp = switch[True]
@property
def mp(self):
return self._mp
@mp.setter
def mp(self, value):
switch = {
True: value,
value > self.maxMP: self.maxMP,
value < 0: 0}
self._mp = switch[True]
def _get_level(self):
# TODO(rahmu): it should be a max between this and the highest skill
# TODO(rahmu): it should raise an InvalidHero exception in case of a
# problem
return int(sum(self.attributes.values()) / 10)
def choice(self, allies, enemies):
if self.macros:
return self.macros(allies, enemies)
def randattack(allies, enemies):
return 'ATK', [random.choice([h for h in enemies if h.hp > 0])]
|
apache-2.0
| 5,366,765,848,036,491,000
| 26.404762
| 76
| 0.5766
| false
| 3.888514
| false
| false
| false
|
raonyguimaraes/mendelmd
|
analyses/migrations/0001_initial.py
|
1
|
2097
|
# Generated by Django 2.1.4 on 2018-12-27 08:50
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('tasks', '__first__'),
('projects', '0001_initial'),
('files', '0001_initial'),
('mapps', '__first__'),
('samples', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Analysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('params', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('name', models.CharField(max_length=30)),
('status', models.TextField(blank=True, null=True)),
('apps', models.ManyToManyField(to='mapps.App')),
('files', models.ManyToManyField(to='files.File')),
('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('samples', models.ManyToManyField(to='samples.Sample')),
('tasks', models.ManyToManyField(to='tasks.Task')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'analyses',
},
),
migrations.CreateModel(
name='AnalysisType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('repository', models.CharField(blank=True, max_length=600, null=True)),
],
options={
'verbose_name_plural': 'analysis_types',
},
),
]
|
bsd-3-clause
| -6,945,387,423,287,230,000
| 39.326923
| 138
| 0.567477
| false
| 4.305955
| false
| false
| false
|
rsteed11/GAT
|
gat/core/sna/resilience.py
|
1
|
1298
|
import networkx as nx
import random
import scipy as sp
from gat.core.sna import ergm
def resilience(cliques_found, ergm_iters=3000):
scaledResilience = {}
scaledBaseline = {}
toScale = []
baselinesToScale = []
traces = []
formatted_traces = {}
cliques, selected = cliques_found
# Find resilience of subgraphs
for clique in cliques:
initShortestPath = nx.average_shortest_path_length(clique)
baselinesToScale.append(initShortestPath)
# creating perturbation by removing random 10% of nodes and averaging result of x iterations
G = clique.copy() # percent of nodes removed can be changed here
rSample = random.sample(G.nodes(), int(G.number_of_nodes() * 0.1))
G.remove_nodes_from(rSample)
coefs, new_trace = ergm.resilience(G, ergm_iters, mu=initShortestPath*.2)
toScale.append(coefs["aspl"])
traces.append(new_trace["aspl"].tolist())
# scale resilience measures on a normal scale
for i in range(len(cliques)):
scaledResilience[selected[i]] = toScale[i]
scaledBaseline[selected[i]] = sp.stats.percentileofscore(baselinesToScale, baselinesToScale[i])
formatted_traces[selected[i]] = traces[i]
return scaledBaseline, scaledResilience, formatted_traces
|
mit
| 5,061,723,243,135,137,000
| 37.176471
| 103
| 0.68567
| false
| 3.452128
| false
| false
| false
|
benreynwar/pyvivado
|
pyvivado/test_utils.py
|
1
|
9478
|
import os
import unittest
import logging
import shutil
import time
import testfixtures
from pyvivado import filetestbench_project, fpga_project, axi
from pyvivado.synopsys import synopsys_project
from pyvivado import vivado_project, test_info
from pyvivado import config
from pyvivado import base_test_utils
logger = logging.getLogger(__name__)
# Imported to make it available in the register
from pyvivado.hdl.wrapper import file_testbench
default_clock_period = 10
default_extra_clock_periods = 20
def compare_p(a, b, pause):
if (a != b) and pause:
import pdb
pdb.set_trace()
else:
testfixtures.compare(a, b)
def assert_p(a, pause):
if (not a) and pause:
import pdb
pdb.set_trace()
else:
assert(a)
class TestCase(unittest.TestCase):
def simulate(self, *args, **kwargs):
return simulate(*args, **kwargs)
def check_output(self, *args, **kwargs):
return base_test_utils.check_output(*args, **kwargs)
def simulate(directory, data, sim_type,
test_name='test',
interface=None,
params=None,
board=config.default_board,
clock_period=default_clock_period,
extra_clock_periods=default_extra_clock_periods,
force_refresh=False,
overwrite_ok=False,
project_class=filetestbench_project.FileTestBenchProject,
):
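# sim_type selects the backend: names starting with 'vivado_' run a Vivado
# simulation, names starting with 'vcs_' run a Synopsys VCS simulation.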
if interface is None:
if params is None:
raise ValueError('No params passed.')
else:
logger.warning('Deprecated: Pass parameters rather than interface')
if params:
raise ValueError('Do not pass interface as well as params. Just pass params.')
params = interface.parameters
params['module_name'] = interface.module_name
if force_refresh and os.path.exists(directory):
shutil.rmtree(directory)
# Make the project.
logger.debug('Making a FileTestBench Project')
p = project_class(
params=params, directory=directory,
overwrite_ok=overwrite_ok,
)
logger.debug('Updating input data')
p.update_input_data(input_data=data, test_name=test_name)
if sim_type.startswith('vivado'):
vivado_sim_type = sim_type[len('vivado_'):]
logger.debug('Making a Vivado Project')
v = vivado_project.VivadoProject(
p, overwrite_ok=overwrite_ok, wait_for_creation=True)
# Run the simulation.
runtime = '{} ns'.format((len(data) + extra_clock_periods) *
clock_period)
errors, output_data = v.run_simulation(
test_name=test_name, runtime=runtime, sim_type=vivado_sim_type)
for error in errors:
logger.error(error)
assert(len(errors) == 0)
elif sim_type.startswith('vcs'):
vcs_sim_type = sim_type[len('vcs_'):]
logger.debug('create vcs project')
v = synopsys_project.SynopsysProject(p)
logger.debug('run simulation')
errors, output_data = v.run_simulation(
test_name=test_name, sim_type=vcs_sim_type)
logger.debug('finished run simulation')
for error in errors:
logger.error(error)
assert(len(errors) == 0)
else:
raise ValueError('Unknown sim_type: {}'.format(sim_type))
return output_data[1:]
def deploy(directory, params,
board=config.default_board,
part=None,
force_refresh=False,
overwrite_ok=False,
):
if force_refresh and os.path.exists(directory):
shutil.rmtree(directory)
# Make the project.
p = fpga_project.FPGAProject(
parameters=params,
directory=directory,
board=board,
overwrite_ok=overwrite_ok,
)
v = vivado_project.VivadoProject(
project=p, board=board, wait_for_creation=True, overwrite_ok=overwrite_ok)
t_implement = v.implement()
t_implement.wait()
t_monitor, conn = v.send_to_fpga_and_monitor()
return conn
def run_test(test_class, test_name='default_test',
logging_level=logging.DEBUG):
suite = unittest.TestSuite()
suite.addTest(test_class(test_name))
runner = unittest.TextTestRunner()
runner.run(suite)
def deploy_and_test(
params, directory, tests, board=config.default_board,
part=None, force_refresh=False, overwrite_ok=False):
'''
Deploy design to an FPGA and run tests on it there.
The DUT must have an AXI4-LITE interface.
'''
# Make sure this directory is not already deployed.
v_dir = os.path.join(directory, 'vivado')
# Import connection down here so that if it's not available
# we can use other test_utils.
from pyvivado import connection
hwcode = connection.get_projdir_hwcode(v_dir)
assert(hwcode is None)
conn = deploy(
directory=directory, params=params,
board=board,
part=part,
force_refresh=force_refresh,
overwrite_ok=overwrite_ok,
)
handler = axi.ConnCommandHandler(conn)
for test in tests:
test.set_handler(handler)
test.prepare()
test.check()
# Sleep for 10 seconds so that we can kill monitor
time.sleep(10)
# Destroy monitoring process
connection.kill_free_monitors(v_dir)
def simulate_and_test(
directory, reset_input, tests,
test_name='test',
interface=None,
params=None,
wait_lines=20,
board=config.default_board,
sim_type=test_info.default_sim_type,
clock_period=default_clock_period,
extra_clock_periods=default_extra_clock_periods,
split_tag=base_test_utils.DEFAULT_SPLIT_TAG,
pause=False,
force_refresh=False,
overwrite_ok=False,
project_class=filetestbench_project.FileTestBenchProject,
):
'''
Run a single vivado simulation which contains many independent tests
that are run one after another in a single simulation.
'''
logger.debug('starting simulate and test')
if interface is None:
if params is None:
raise ValueError('No params passed.')
else:
logger.warning('Deprecated: Pass parameters rather than interface')
if params:
raise ValueError('Do not pass interface as well as params. Just pass params.')
params = interface.parameters
params['module_name'] = interface.module_name
logger.debug('Making input data')
input_data = base_test_utils.tests_to_input_data(
reset_input=reset_input, wait_lines=wait_lines, tests=tests)
logger.debug('Start simulate: simtype is {}'.format(sim_type))
output_data = simulate(
interface=None,
params=params,
directory=directory,
data=input_data,
sim_type=sim_type,
test_name=test_name,
overwrite_ok=overwrite_ok,
project_class=project_class,
)
logger.debug('finish simulate')
base_test_utils.validate_output_data_with_tests(
input_data=input_data,
output_data=output_data,
wait_lines=wait_lines,
pause=pause,
tests=tests,
)
class AxiTest():
def __init__(self):
self.handler = None
def set_handler(self, handler):
assert(self.handler is None)
self.handler = handler
def get_handler(self):
if self.handler is None:
raise Exception('Handler on AxiTest not set')
return self.handler
def prepare(self):
raise Exception('Unimplemented')
def check(self, pause=False):
raise Exception('Unimplemented')
def make_input_data(self):
handler = self.get_handler()
self.prepare()
input_data = [
{'reset': 0,
'i': d,
} for d in handler.make_command_dicts()]
assert(len(input_data) > 0)
return input_data
def check_output_data(self, input_data, output_data, pause=False):
handler = self.get_handler()
response_dicts = [d['o'] for d in output_data]
handler.consume_response_dicts(response_dicts)
self.check(pause=pause)
def axi_run_and_test(
directory,
tests,
test_name='test',
params=None,
wait_lines=20,
board=config.default_board,
sim_type=test_info.default_sim_type,
clock_period=default_clock_period,
extra_clock_periods=default_extra_clock_periods,
pause=False,
force_refresh=False,
overwrite_ok=False,
):
if sim_type == 'fpga':
deploy_and_test(
params=params,
directory=directory,
tests=tests,
board=board,
force_refresh=force_refresh,
overwrite_ok=overwrite_ok,
)
else:
handler = axi.DictCommandHandler()
for test in tests:
logger.debug('setting handler to {}'.format(handler))
test.set_handler(handler)
simulate_and_test(
directory=directory,
reset_input={'reset': 1, 'd': axi.make_empty_axi4lite_m2s_dict()},
tests=tests,
params=params,
wait_lines=wait_lines,
sim_type=sim_type,
clock_period=clock_period,
extra_clock_periods=extra_clock_periods,
pause=pause,
force_refresh=force_refresh,
overwrite_ok=overwrite_ok,
)
|
mit
| 2,229,433,497,618,155,000
| 29.973856
| 90
| 0.614265
| false
| 3.873314
| true
| false
| false
|
tuulos/ringo
|
ringogw/py/ringodisco.py
|
1
|
3199
|
import ringogw
def ringo_reader(fd, sze, fname):
import struct, zlib
MAGIC_HEAD = (0x47da66b5,)
MAGIC_TAIL = (0xacc50f5d,)
def read_really(s):
t = 0
buf = ""
while t < s:
r = fd.read(s - t)
if not r:
return buf
t += len(r)
buf += r
return buf
def check_body(head_body):
time, entryid, flags, keycrc, keylen, valcrc, vallen =\
struct.unpack("<IIIIIII", head_body)
tot = keylen + vallen + 4
body = read_really(tot)
if len(body) < tot:
return False, head_body + body
key = body[:keylen]
val = body[keylen:-4]
if zlib.crc32(key) != keycrc or zlib.crc32(val) != valcrc or\
struct.unpack("<I", body[-4:]) != MAGIC_TAIL:
return False, head_body + body
else:
return True, (entryid, flags, key, val)
def read_entry():
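# Scan forward through the stream, resynchronising byte by byte until a
# record with a valid magic header and matching CRCs is found.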
head = read_really(8)
while len(head) >= 8:
if struct.unpack("<I", head[:4]) == MAGIC_HEAD:
if len(head) < 36:
head += read_really(36 - len(head))
if len(head) < 36:
return None
head_crc = struct.unpack("<I", head[4:8])[0]
head_body = head[8:36]
if zlib.crc32(head_body) == head_crc:
ok, cont = check_body(head_body)
if ok:
return cont
head = cont
head = head[1:]
if len(head) < 8:
head += fd.read(1)
else:
return None
prev_id = None
while True:
entry = read_entry()
if not entry:
break
entryid, flags, key, val = entry
if flags & 1 or flags & 2:
continue
if entryid == prev_id:
continue
prev_id = entryid
yield key, val
def input_domain(ringo_host, name):
ringo = ringogw.Ringo(ringo_host)
code, res = ringo.request("/mon/domains/domain?name=" + name)
if code != 200:
return []
urls = []
for domainid, name, nodeid, chunk, owner, nrepl in res:
nodename, node = nodeid.split('@')
urls.append("disco://%s/_ringo/%s/rdomain-%s/data"\
% (node, nodename[6:], domainid))
return urls
if __name__ == "__main__":
import sys
print "\n".join(input_domain(sys.argv[1], sys.argv[2]))
|
bsd-3-clause
| 8,622,937,710,840,089,000
| 36.635294
| 77
| 0.371366
| false
| 4.649709
| false
| false
| false
|
ColumbiaDVMM/ColumbiaImageSearch
|
cufacesearch/cufacesearch/ingester/deprecated/kafka_image_processor.py
|
1
|
7700
|
# DEPRECATED
# import json
# import time
# import multiprocessing
# from .generic_kafka_processor import GenericKafkaProcessor
# from ..imgio.imgio import buffer_to_B64
#
# default_prefix = "KIP_"
# default_prefix_frompkl = "KIPFP_"
#
# # TODO: This class should be rewritten to actually extract features from images...
# # TODO: Work on getting a pycaffe sentibank featurizer. Check we get same feature values than command line in 'sentibank_cmdline'
# # at 'https://github.com/ColumbiaDVMM/ColumbiaImageSearch/blob/master/cu_image_search/feature_extractor/sentibank_cmdline.py'
# # Should we have a generic extractor to inherit from, with just a different process_one_core() method?...
#
# class KafkaImageProcessor(GenericKafkaProcessor):
#
# def __init__(self, global_conf_filename, prefix=default_prefix, pid=None):
# when running as daemon
# self.pid = pid
# # call GenericKafkaProcessor init (and others potentially)
# super(KafkaImageProcessor, self).__init__(global_conf_filename, prefix)
# # any additional initialization needed, like producer specific output logic
# self.cdr_out_topic = self.get_required_param('producer_cdr_out_topic')
# self.images_out_topic = self.get_required_param('producer_images_out_topic')
# # TODO: get s3 url prefix from actual location
# # for now "object_stored_prefix" in "_meta" of domain CDR
# # but just get from conf
# self.url_prefix = self.get_required_param('obj_stored_prefix')
# self.process_count = 0
# self.process_failed = 0
# self.process_time = 0
# self.set_pp()
#
# def set_pp(self):
# self.pp = "KafkaImageProcessor"
# if self.pid:
# self.pp += ":"+str(self.pid)
#
#
#
# def process_one(self, msg):
# from ..imgio.imgio import get_SHA1_img_info_from_buffer, get_buffer_from_URL
#
# self.print_stats(msg)
#
# msg_value = json.loads(msg.value)
#
# # From msg value get list_urls for image objects only
# list_urls = self.get_images_urls(msg_value)
#
# # Get images data and infos
# dict_imgs = dict()
# for url, obj_pos in list_urls:
# start_process = time.time()
# if self.verbose > 2:
# print_msg = "[{}.process_one: info] Downloading image from: {}"
# print print_msg.format(self.pp, url)
# try:
# img_buffer = get_buffer_from_URL(url)
# if img_buffer:
# sha1, img_type, width, height = get_SHA1_img_info_from_buffer(img_buffer)
# dict_imgs[url] = {'obj_pos': obj_pos, 'img_buffer': img_buffer, 'sha1': sha1, 'img_info': {'format': img_type, 'width': width, 'height': height}}
# self.toc_process_ok(start_process)
# else:
# self.toc_process_failed(start_process)
# if self.verbose > 1:
# print_msg = "[{}.process_one: info] Could not download image from: {}"
# print print_msg.format(self.pp, url)
# except Exception as inst:
# self.toc_process_failed(start_process)
# if self.verbose > 0:
# print_msg = "[{}.process_one: error] Could not download image from: {} ({})"
# print print_msg.format(self.pp, url, inst)
#
# # Push to cdr_out_topic
# self.producer.send(self.cdr_out_topic, self.build_cdr_msg(msg_value, dict_imgs))
#
# # TODO: we could have all extraction registered here, and not pushing an image if it has been processed by all extractions. But that violates the consumer design of Kafka...
# # Push to images_out_topic
# for img_out_msg in self.build_image_msg(dict_imgs):
# self.producer.send(self.images_out_topic, img_out_msg)
#
#
# class KafkaImageProcessorFromPkl(GenericKafkaProcessor):
# # To push list of images to be processed from a pickle file containing a dictionary
# # {'update_ids': update['update_ids'], 'update_images': out_update_images}
# # with 'out_update_images' being a list of tuples (sha1, url)
#
# def __init__(self, global_conf_filename, prefix=default_prefix_frompkl):
# # call GenericKafkaProcessor init (and others potentially)
# super(KafkaImageProcessorFromPkl, self).__init__(global_conf_filename, prefix)
# # any additional initialization needed, like producer specific output logic
# self.images_out_topic = self.get_required_param('images_out_topic')
# self.pkl_path = self.get_required_param('pkl_path')
# self.process_count = 0
# self.process_failed = 0
# self.process_time = 0
# self.display_count = 100
# self.set_pp()
#
# def set_pp(self):
# self.pp = "KafkaImageProcessorFromPkl"
#
# def get_next_img(self):
# import pickle
# update = pickle.load(open(self.pkl_path,'rb'))
# for sha1, url in update['update_images']:
# yield sha1, url
#
# def build_image_msg(self, dict_imgs):
# Build dict output for each image with fields 's3_url', 'sha1', 'img_info' and 'img_buffer'
# img_out_msgs = []
# for url in dict_imgs:
# tmp_dict_out = dict()
# tmp_dict_out['s3_url'] = url
# tmp_dict_out['sha1'] = dict_imgs[url]['sha1']
# tmp_dict_out['img_info'] = dict_imgs[url]['img_info']
# # encode buffer in B64?
# tmp_dict_out['img_buffer'] = buffer_to_B64(dict_imgs[url]['img_buffer'])
# img_out_msgs.append(json.dumps(tmp_dict_out).encode('utf-8'))
# return img_out_msgs
#
# def process(self):
# from ..imgio.imgio import get_SHA1_img_info_from_buffer, get_buffer_from_URL
#
# # Get images data and infos
# for sha1, url in self.get_next_img():
#
# if (self.process_count + self.process_failed) % self.display_count == 0:
# avg_process_time = self.process_time / max(1, self.process_count + self.process_failed)
# print_msg = "[%s] dl count: %d, failed: %d, time: %f"
# print print_msg % (self.pp, self.process_count, self.process_failed, avg_process_time)
#
# dict_imgs = dict()
# start_process = time.time()
# if self.verbose > 2:
# print_msg = "[{}.process_one: info] Downloading image from: {}"
# print print_msg.format(self.pp, url)
# try:
# img_buffer = get_buffer_from_URL(url)
# if img_buffer:
# sha1, img_type, width, height = get_SHA1_img_info_from_buffer(img_buffer)
# dict_imgs[url] = {'img_buffer': img_buffer, 'sha1': sha1,
# 'img_info': {'format': img_type, 'width': width, 'height': height}}
# self.toc_process_ok(start_process)
# else:
# self.toc_process_failed(start_process)
# if self.verbose > 1:
# print_msg = "[{}.process_one: info] Could not download image from: {}"
# print print_msg.format(self.pp, url)
# except Exception as inst:
# self.toc_process_failed(start_process)
# if self.verbose > 0:
# print_msg = "[{}.process_one: error] Could not download image from: {} ({})"
# print print_msg.format(self.pp, url, inst)
#
# # Push to images_out_topic
# for img_out_msg in self.build_image_msg(dict_imgs):
# self.producer.send(self.images_out_topic, img_out_msg)
#
# class DaemonKafkaImageProcessor(multiprocessing.Process):
#
# daemon = True
#
# def __init__(self, conf, prefix=default_prefix):
# super(DaemonKafkaImageProcessor, self).__init__()
# self.conf = conf
# self.prefix = prefix
#
# def run(self):
# try:
# print "Starting worker KafkaImageProcessor.{}".format(self.pid)
# kp = KafkaImageProcessor(self.conf, prefix=self.prefix, pid=self.pid)
# for msg in kp.consumer:
# kp.process_one(msg)
# except Exception as inst:
# print "KafkaImageProcessor.{} died ()".format(self.pid, inst)
|
apache-2.0
| -2,658,118,316,542,654,000
| 42.264045
| 179
| 0.633636
| false
| 3.062848
| false
| false
| false
|
breuderink/psychic
|
psychic/tests/testedf.py
|
1
|
2121
|
# -*- coding: utf-8 -*-
import unittest, os
from ..edfreader import *
class TestEDFBaseReader(unittest.TestCase):
def test_synthetic_content(self):
'''
Test the EDF reader using an artificial EDF dataset. Note that this is not an
EDF+ dataset and as such does not contain annotations. Annotation decoding
is tested separately, *but not from a real file*!
'''
reader = BaseEDFReader(
open(os.path.join('data', 'sine3Hz_block0.2Hz.edf'), 'rb'))
reader.read_header()
h = reader.header
# check important header fields
self.assertEqual(h['label'], ['3Hz +5/-5 V', '0.2Hz Blk 1/0uV'])
self.assertEqual(h['units'], ['V', 'uV'])
self.assertEqual(h['contiguous'], True)
fs = np.asarray(h['n_samples_per_record']) / h['record_length']
# get records
recs = list(reader.records())
time = zip(*recs)[0]
signals = zip(*recs)[1]
annotations = list(zip(*recs)[2])
# check EDF+ fields that are *not present in this file*
np.testing.assert_equal(time, np.zeros(11) * np.nan)
self.assertEqual(annotations, [[]] * 11)
# check 3 Hz sine wave
sine, block = [np.hstack(s) for s in zip(*signals)]
target = 5 * np.sin(3 * np.pi * 2 * np.arange(0, sine.size) / fs[0])
assert np.max((sine - target) ** 2) < 1e-4
# check .2 Hz block wave
target = np.sin(.2 * np.pi * 2 * np.arange(1, block.size + 1) / fs[1]) >= 0
assert np.max((block - target) ** 2) < 1e-4
def test_tal(self):
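# TAL test strings: onset and duration are separated by \x15, annotation
# texts are terminated by \x14, and the whole TAL ends with \x00.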
mult_annotations = '+180\x14Lights off\x14Close door\x14\x00'
with_duration = '+1800.2\x1525.5\x14Apnea\x14\x00'
test_unicode = '+180\x14€\x14\x00\x00'
# test annotation with duration
self.assertEqual(tal(with_duration), [(1800.2, 25.5, [u'Apnea'])])
# test multiple annotations
self.assertEqual(tal('\x00' * 4 + with_duration * 3),
[(1800.2, 25.5, [u'Apnea'])] * 3)
# test multiple annotations for one time point
self.assertEqual(tal(mult_annotations),
[(180., 0., [u'Lights off', u'Close door'])])
# test unicode support
self.assertEqual(tal(test_unicode), [(180., 0., [u'€'])])
|
bsd-3-clause
| 4,709,166,478,044,871,000
| 34.283333
| 79
| 0.619745
| false
| 3.028612
| true
| false
| false
|
quarkslab/irma
|
probe/modules/antivirus/comodo/cavl.py
|
1
|
2379
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from datetime import datetime
from pathlib import Path
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class ComodoCAVL(AntivirusUnix):
name = "Comodo Antivirus (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# call super class constructor
super().__init__(*args, **kwargs)
# Comodo does not use return value as infection indicator. Distinction
# between INFECTED and CLEAN will be done in the 'false positive
# handler' of Antivirus.scan()
self._scan_retcodes[self.ScanResult.INFECTED] = lambda x: x in [0]
# scan tool variables
self.scan_args = (
"-v", # verbose mode, display more detailed output
"-s", # scan a file or directory
)
self.scan_patterns = [
re.compile('(?P<file>.*) ---> Found .*,' +
' Malware Name is (?P<name>.*)', re.IGNORECASE),
]
self.scan_path = Path("/opt/COMODO/cmdscan")
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
return Path('/opt/COMODO/cavver.dat').read_text()
def get_database(self):
"""return list of files in the database"""
search_paths = [Path('/opt/COMODO/scanners/'), ]
return self.locate('*.cav', search_paths, syspath=False)
def get_virus_database_version(self):
"""Return the Virus Database version"""
d = Path("/opt/COMODO/scanners/bases.cav").stat().st_mtime
return datetime.fromtimestamp(d).strftime('%Y-%m-%d')
|
apache-2.0
| 6,662,217,824,837,010,000
| 34.507463
| 78
| 0.592686
| false
| 3.9
| false
| false
| false
|
marksamman/pylinkshortener
|
app/models.py
|
1
|
2721
|
# Copyright (c) 2014 Mark Samman <https://github.com/marksamman/pylinkshortener>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import config, math, random, time
from datetime import datetime
from sqlalchemy import create_engine, Column, DateTime, ForeignKey, Integer, String, VARCHAR
from sqlalchemy.dialects.postgresql import BIGINT, CIDR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, sessionmaker
from app.constants import url_safe
engine = create_engine(config.SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine)
Base = declarative_base()
class Link(Base):
__tablename__ = 'links'
id = Column(Integer, primary_key=True)
url = Column(VARCHAR)
creator_ip = Column(CIDR)
created = Column(BIGINT)
random = Column(String(2))
def __init__(self, url, creator_ip):
self.url = url
self.created = math.floor(time.time())
self.creator_ip = creator_ip
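# Random suffix whose length is read from the 'random' column definition
# (String(2)), using characters from the URL-safe alphabet.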
self.random = ''.join(random.choice(url_safe) for _ in range(Link.random.property.columns[0].type.length))
def __repr__(self):
return '<Link %r>' % self.url
class Click(Base):
__tablename__ = 'clicks'
id = Column(Integer, primary_key=True)
inserted = Column(BIGINT)
ip = Column(CIDR)
user_agent = Column(VARCHAR)
link_id = Column(Integer, ForeignKey('links.id'))
link = relationship('Link', backref=backref('clicks', order_by=inserted.desc()))
def __init__(self, ip, user_agent, inserted, link_id):
self.inserted = inserted
self.ip = ip
self.user_agent = user_agent
self.link_id = link_id
def __repr__(self):
return '<Click %r>' % self.id
|
mit
| -7,008,481,928,122,294,000
| 38.434783
| 114
| 0.715913
| false
| 3.937771
| false
| false
| false
|
corpnewt/CorpBot.py
|
Cogs/Xp.py
|
1
|
41529
|
import asyncio
import discord
import datetime
import time
import random
from discord.ext import commands
from Cogs import Settings, DisplayName, Nullify, CheckRoles, UserTime, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Xp(bot, settings))
# This is the xp module. It's likely to be retarded.
class Xp(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.is_current = False # Used for stopping loops
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def _can_xp(self, user, server, requiredXP = None, promoArray = None):
# Checks whether or not said user has access to the xp system
if requiredXP == None:
requiredXP = self.settings.getServerStat(server, "RequiredXPRole", None)
if promoArray == None:
promoArray = self.settings.getServerStat(server, "PromotionArray", [])
if not requiredXP:
return True
for checkRole in user.roles:
if str(checkRole.id) == str(requiredXP):
return True
# Still check if we have enough xp
userXP = self.settings.getUserStat(user, server, "XP")
for role in promoArray:
if str(role["ID"]) == str(requiredXP):
if userXP >= role["XP"]:
return True
break
return False
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = False
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = True
self.bot.loop.create_task(self.addXP())
async def addXP(self):
print("Starting XP loop: {}".format(datetime.datetime.now().time().isoformat()))
await self.bot.wait_until_ready()
while not self.bot.is_closed():
try:
await asyncio.sleep(600) # runs only every 10 minutes (600 seconds)
if not self.is_current:
# Bail if we're not the current instance
return
updates = await self.bot.loop.run_in_executor(None, self.update_xp)
t = time.time()
for update in updates:
await CheckRoles.checkroles(update["user"], update["chan"], self.settings, self.bot, **update["kwargs"])
# Sleep after for testing
except Exception as e:
print(str(e))
def update_xp(self):
responses = []
t = time.time()
print("Adding XP: {}".format(datetime.datetime.now().time().isoformat()))
# Get some values that don't require immediate query
server_dict = {}
for x in self.bot.get_all_members():
memlist = server_dict.get(str(x.guild.id), [])
memlist.append(x)
server_dict[str(x.guild.id)] = memlist
for server_id in server_dict:
server = self.bot.get_guild(int(server_id))
if not server:
continue
# Iterate through the servers and add them
xpAmount = int(self.settings.getServerStat(server, "HourlyXP"))
xpAmount = float(xpAmount/6)
xpRAmount = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpRAmount = float(xpRAmount/6)
xpLimit = self.settings.getServerStat(server, "XPLimit")
xprLimit = self.settings.getServerStat(server, "XPReserveLimit")
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
promoArray = self.settings.getServerStat(server, "PromotionArray")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
targetChanID = self.settings.getServerStat(server, "DefaultChannel")
kwargs = {
"xp_promote":self.settings.getServerStat(server,"XPPromote"),
"xp_demote":self.settings.getServerStat(server,"XPDemote"),
"suppress_promotions":self.settings.getServerStat(server,"SuppressPromotions"),
"suppress_demotions":self.settings.getServerStat(server,"SuppressDemotions"),
"only_one_role":self.settings.getServerStat(server,"OnlyOneRole")
}
for user in server_dict[server_id]:
# First see if we're current - we want to bail quickly
if not self.is_current:
print("XP Interrupted, no longer current - took {} seconds.".format(time.time() - t))
return responses
if not self._can_xp(user, server, requiredXP, promoArray):
continue
bumpXP = False
if onlyOnline == False:
bumpXP = True
else:
if user.status == discord.Status.online:
bumpXP = True
# Check if we're blocked
if user.id in xpblock:
# No xp for you
continue
for role in user.roles:
if role.id in xpblock:
bumpXP = False
break
if bumpXP:
if xpAmount > 0:
# User is online add hourly xp reserve
# First we check if we'll hit our limit
skip = False
if not xprLimit == None:
# Get the current values
newxp = self.settings.getUserStat(user, server, "XPReserve")
# Make sure it's this xpr boost that's pushing us over
# This would only push us up to the max, but not remove
# any we've already gotten
if newxp + xpAmount > xprLimit:
skip = True
if newxp < xprLimit:
self.settings.setUserStat(user, server, "XPReserve", xprLimit)
if not skip:
xpLeftover = self.settings.getUserStat(user, server, "XPLeftover")
if xpLeftover == None:
xpLeftover = 0
else:
xpLeftover = float(xpLeftover)
gainedXp = xpLeftover+xpAmount
gainedXpInt = int(gainedXp) # Strips the decimal point off
xpLeftover = float(gainedXp-gainedXpInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPLeftover", xpLeftover)
self.settings.incrementStat(user, server, "XPReserve", gainedXpInt)
if xpRAmount > 0:
# User is online add hourly xp
# First we check if we'll hit our limit
skip = False
if not xpLimit == None:
# Get the current values
newxp = self.settings.getUserStat(user, server, "XP")
# Make sure it's this xpr boost that's pushing us over
# This would only push us up to the max, but not remove
# any we've already gotten
if newxp + xpRAmount > xpLimit:
skip = True
if newxp < xpLimit:
self.settings.setUserStat(user, server, "XP", xpLimit)
if not skip:
xpRLeftover = self.settings.getUserStat(user, server, "XPRealLeftover")
if xpRLeftover == None:
xpRLeftover = 0
else:
xpRLeftover = float(xpRLeftover)
gainedXpR = xpRLeftover+xpRAmount
gainedXpRInt = int(gainedXpR) # Strips the decimal point off
xpRLeftover = float(gainedXpR-gainedXpRInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPRealLeftover", xpRLeftover)
self.settings.incrementStat(user, server, "XP", gainedXpRInt)
# Check our default channels
targetChan = None
if len(str(targetChanID)):
# We *should* have a channel
tChan = self.bot.get_channel(int(targetChanID))
if tChan:
# We *do* have one
targetChan = tChan
responses.append({"user":user, "chan":targetChan if targetChan else self.bot.get_guild(int(server_id)), "kwargs":kwargs})
print("XP Done - took {} seconds.".format(time.time() - t))
return responses
@commands.command(pass_context=True)
async def xp(self, ctx, *, member = None, xpAmount : int = None):
"""Gift xp to other members."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
usage = 'Usage: `{}xp [role/member] [amount]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.message.channel.send(usage)
return
# Check for formatting issues
if xpAmount == None:
# Either xp wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if not roleCheck:
# Returned nothing - means there isn't even an int
msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
await ctx.message.channel.send(msg)
return
if roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
xpAmount = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.message.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
await ctx.message.channel.send(msg)
return
member = nameCheck["Member"]
xpAmount = nameCheck["Int"]
if xpAmount == None:
# Still no xp - let's run stats instead
if isRole:
await ctx.message.channel.send(usage)
else:
await ctx.invoke(self.stats, member=member)
return
if not type(xpAmount) is int:
await ctx.message.channel.send(usage)
return
# Get our user/server stats
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
approve = True
decrement = True
admin_override = False
# RequiredXPRole
if not self._can_xp(author, server):
approve = False
msg = 'You don\'t have the permissions to give xp.'
if xpAmount > int(reserveXP):
approve = False
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
if author == member:
approve = False
msg = 'You can\'t give yourself xp! *Nice try...*'
if xpAmount < 0:
msg = 'Only admins can take away xp!'
approve = False
# Avoid admins gaining xp
decrement = False
if xpAmount == 0:
msg = 'Wow, very generous of you...'
approve = False
# Check bot admin
if isBotAdmin and botAdminAsAdmin:
# Approve as admin
approve = True
admin_override = True
if adminUnlim:
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
admin_override = True
if adminUnlim:
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check author and target for blocks
# overrides admin because admins set this.
if type(member) is discord.Role:
if member.id in xpblock:
msg = "That role cannot receive xp!"
approve = False
else:
# User
if member.id in xpblock:
msg = "That member cannot receive xp!"
approve = False
else:
for role in member.roles:
if role.id in xpblock:
msg = "That member's role cannot receive xp!"
approve = False
if ctx.author.id in xpblock:
msg = "You can't give xp!"
approve = False
else:
for role in ctx.author.roles:
if role.id in xpblock:
msg = "Your role cannot give xp!"
approve = False
if approve:
self.bot.dispatch("xp", member, ctx.author, xpAmount)
if isRole:
# XP was approved - let's iterate through the users of that role,
# starting with the lowest xp
#
# Work through our members
memberList = []
sMemberList = self.settings.getServerStat(server, "Members")
for amem in server.members:
if amem == author:
continue
if amem.id in xpblock:
# Blocked - only if not admin sending it
continue
roles = amem.roles
if member in roles:
# This member has our role
# Add to our list
for smem in sMemberList:
# Find our server entry
if str(smem) == str(amem.id):
# Add it.
sMemberList[smem]["ID"] = smem
memberList.append(sMemberList[smem])
memSorted = sorted(memberList, key=lambda x:int(x['XP']))
if len(memSorted):
# There actually ARE members in said role
totalXP = xpAmount
# Gather presets
xp_p = self.settings.getServerStat(server,"XPPromote")
xp_d = self.settings.getServerStat(server,"XPDemote")
xp_sp = self.settings.getServerStat(server,"SuppressPromotions")
xp_sd = self.settings.getServerStat(server,"SuppressDemotions")
xp_oo = self.settings.getServerStat(server,"OnlyOneRole")
if xpAmount > len(memSorted):
# More xp than members
leftover = xpAmount % len(memSorted)
eachXP = (xpAmount-leftover)/len(memSorted)
for i in range(0, len(memSorted)):
# Make sure we have anything to give
if leftover <= 0 and eachXP <= 0:
break
# Carry on with our xp distribution
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
if leftover>0:
self.settings.incrementStat(cMember, server, "XP", eachXP+1)
leftover -= 1
else:
self.settings.incrementStat(cMember, server, "XP", eachXP)
await CheckRoles.checkroles(
cMember,
channel,
self.settings,
self.bot,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo)
else:
for i in range(0, xpAmount):
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
self.settings.incrementStat(cMember, server, "XP", 1)
await CheckRoles.checkroles(
cMember,
channel,
self.settings,
self.bot,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo)
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
msg = '*{:,} collective xp* was given to *{}!*'.format(totalXP, Nullify.escape_all(member.name))
await channel.send(msg)
else:
msg = 'There are no eligible members in *{}!*'.format(Nullify.escape_all(member.name))
await channel.send(msg)
else:
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
# XP was approved! Let's say it - and check decrement from gifter's xp reserve
msg = '*{}* was given *{:,} xp!*'.format(DisplayName.name(member), xpAmount)
await channel.send(msg)
self.settings.incrementStat(member, server, "XP", xpAmount)
# Now we check for promotions
await CheckRoles.checkroles(member, channel, self.settings, self.bot)
else:
await channel.send(msg)
'''@xp.error
async def xp_error(self, ctx, error):
msg = 'xp Error: {}'.format(error)
await ctx.channel.send(msg)'''
@commands.command(pass_context=True)
async def defaultrole(self, ctx):
"""Lists the default role that new users are assigned."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
role = self.settings.getServerStat(ctx.message.guild, "DefaultRole")
if role == None or role == "":
msg = 'New users are not assigned a role on joining this server.'
await ctx.channel.send(msg)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
msg = 'New users will be assigned to **{}**.'.format(Nullify.escape_all(arole.name))
if not found:
msg = 'There is no role that matches id: `{}` - consider updating this setting.'.format(role)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def gamble(self, ctx, bet : int = None):
"""Gamble your xp reserves for a chance at winning xp!"""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# bet must be a multiple of 10, member must have enough xpreserve to bet
msg = 'Usage: `{}gamble [xp reserve bet] (must be multiple of 10)`'.format(ctx.prefix)
if not (bet or type(bet) == int):
await channel.send(msg)
return
if not type(bet) == int:
await channel.send(msg)
return
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
minRole = self.settings.getServerStat(server, "MinimumXPRole")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
approve = True
decrement = True
# Check Bet
if not bet % 10 == 0:
approve = False
msg = 'Bets must be in multiples of *10!*'
if bet > int(reserveXP):
approve = False
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
if bet < 0:
msg = 'You can\'t bet negative amounts!'
approve = False
if bet == 0:
msg = 'You can\'t bet *nothing!*'
approve = False
# RequiredXPRole
if not self._can_xp(author, server):
approve = False
msg = 'You don\'t have the permissions to gamble.'
# Check bot admin
if isBotAdmin and botAdminAsAdmin:
# Approve as admin
approve = True
if adminUnlim:
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
if adminUnlim:
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check if we're blocked
if ctx.author.id in xpblock:
msg = "You can't gamble for xp!"
approve = False
else:
for role in ctx.author.roles:
if role.id in xpblock:
msg = "Your role cannot gamble for xp!"
approve = False
if approve:
# Bet was approved - let's take the XPReserve right away
if decrement:
takeReserve = -1*bet
self.settings.incrementStat(author, server, "XPReserve", takeReserve)
# Bet more, less chance of winning, but more winnings!
if bet < 100:
betChance = 5
payout = int(bet/10)
elif bet < 500:
betChance = 15
payout = int(bet/4)
else:
betChance = 25
payout = int(bet/2)
# 1 in betChance odds that the user will win - payout was set above based on the bet size
randnum = random.randint(1, betChance)
# print('{} : {}'.format(randnum, betChance))
if randnum == 1:
# YOU WON!!
self.settings.incrementStat(author, server, "XP", int(payout))
msg = '*{}* bet *{:,}* and ***WON*** *{:,} xp!*'.format(DisplayName.name(author), bet, int(payout))
# Now we check for promotions
await CheckRoles.checkroles(author, channel, self.settings, self.bot)
else:
msg = '*{}* bet *{:,}* and.... *didn\'t* win. Better luck next time!'.format(DisplayName.name(author), bet)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def recheckroles(self, ctx):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
# Gather presets
xp_p = self.settings.getServerStat(server,"XPPromote")
xp_d = self.settings.getServerStat(server,"XPDemote")
xp_sp = self.settings.getServerStat(server,"SuppressPromotions")
xp_sd = self.settings.getServerStat(server,"SuppressDemotions")
xp_oo = self.settings.getServerStat(server,"OnlyOneRole")
message = await ctx.channel.send('Checking roles...')
changeCount = 0
for member in server.members:
# Now we check for promotions
if await CheckRoles.checkroles(
member,
channel,
self.settings,
self.bot,
True,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo):
changeCount += 1
if changeCount == 1:
await message.edit(content='Done checking roles.\n\n*1 user* updated.')
#await channel.send('Done checking roles.\n\n*1 user* updated.')
else:
await message.edit(content='Done checking roles.\n\n*{:,} users* updated.'.format(changeCount))
#await channel.send('Done checking roles.\n\n*{} users* updated.'.format(changeCount))
@commands.command(pass_context=True)
async def recheckrole(self, ctx, *, user : discord.Member = None):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
if not user:
user = author
# Now we check for promotions
if await CheckRoles.checkroles(user, channel, self.settings, self.bot):
await channel.send('Done checking roles.\n\n*{}* was updated.'.format(DisplayName.name(user)))
else:
await channel.send('Done checking roles.\n\n*{}* was not updated.'.format(DisplayName.name(user)))
@commands.command(pass_context=True)
async def listxproles(self, ctx):
"""Lists all roles, id's, and xp requirements for the xp promotion/demotion system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
# Get the array
promoArray = self.settings.getServerStat(server, "PromotionArray")
# Sort by XP first, then by name
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
if not len(promoSorted):
roleText = "There are no roles in the xp role list. You can add some with the `{}addxprole [role] [xpamount]` command!\n".format(ctx.prefix)
else:
roleText = "**__Current Roles:__**\n\n"
for arole in promoSorted:
# Get current role name based on id
foundRole = False
for role in server.roles:
if str(role.id) == str(arole['ID']):
# We found it
foundRole = True
roleText = '{}**{}** : *{:,} XP*\n'.format(roleText, Nullify.escape_all(role.name), arole['XP'])
if not foundRole:
roleText = '{}**{}** : *{:,} XP* (removed from server)\n'.format(roleText, Nullify.escape_all(arole['Name']), arole['XP'])
# Get the required role for using the xp system
role = self.settings.getServerStat(ctx.message.guild, "RequiredXPRole")
if role == None or role == "":
roleText = '{}\n**Everyone** can give xp, gamble, and feed the bot.'.format(roleText)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
roleText = '{}\nYou need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, Nullify.escape_all(arole.name))
else:
roleText = '{}\nYou need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, Nullify.escape_all(arole.name))
# roleText = '{}\nYou need to be a/an **{}** to give xp, gamble, or feed the bot.'.format(roleText, arole.name)
if not found:
roleText = '{}\nThere is no role that matches id: `{}` for using the xp system - consider updating that setting.'.format(roleText, role)
await channel.send(roleText)
@commands.command(pass_context=True)
async def rank(self, ctx, *, member = None):
"""Say the highest rank of a listed member."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
await ctx.message.channel.send(msg)
return
# Create blank embed
stat_embed = discord.Embed(color=member.color)
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
memName = member.name
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
# Add to embed
stat_embed.set_author(name='{}, who currently goes by {}'.format(member.name, member.nick), icon_url=avURL)
else:
# Add to embed
stat_embed.set_author(name='{}'.format(member.name), icon_url=avURL)
highestRole = ""
for role in promoSorted:
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if highestRole == "":
msg = '*{}* has not acquired a rank yet.'.format(DisplayName.name(member))
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
else:
msg = '*{}* is a **{}**!'.format(DisplayName.name(member), highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
# await ctx.message.channel.send(msg)
await ctx.message.channel.send(embed=stat_embed)
@rank.error
async def rank_error(self, ctx, error):
msg = 'rank Error: {}'.format(error)
await ctx.channel.send(msg)
async def _show_xp(self, ctx, reverse=False):
# Helper to list xp
message = await Message.EmbedText(title="Counting Xp...",color=ctx.author).send(ctx)
sorted_array = sorted([(int(await self.bot.loop.run_in_executor(None, self.settings.getUserStat,x,ctx.guild,"XP",0)),x) for x in ctx.guild.members],key=lambda x:(x[0],x[1].id),reverse=reverse)
# Update the array with the user's place in the list
xp_array = [{
"name":"{}. {} ({}#{} {})".format(i,x[1].display_name,x[1].name,x[1].discriminator,x[1].id),
"value":"{:,} XP".format(x[0])
} for i,x in enumerate(sorted_array,start=1)]
return await PickList.PagePicker(
title="{} Xp-Holders in {} ({:,} total)".format("Top" if reverse else "Bottom",ctx.guild.name,len(xp_array)),
list=xp_array,
color=ctx.author,
ctx=ctx,
message=message
).pick()
# List the top 10 xp-holders
@commands.command(pass_context=True)
async def leaderboard(self, ctx):
"""List the top xp-holders."""
return await self._show_xp(ctx,reverse=True)
# List the top 10 xp-holders
@commands.command(pass_context=True)
async def bottomxp(self, ctx):
"""List the bottom xp-holders."""
return await self._show_xp(ctx,reverse=False)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def stats(self, ctx, *, member= None):
"""List the xp and xp reserve of a listed member."""
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
await ctx.message.channel.send(msg)
return
url = member.avatar_url
if not len(url):
url = member.default_avatar_url
# Create blank embed
stat_embed = Message.Embed(color=member.color,thumbnail=url,pm_after=20)
# Get user's xp
newStat = int(self.settings.getUserStat(member, ctx.message.guild, "XP"))
newState = int(self.settings.getUserStat(member, ctx.message.guild, "XPReserve"))
# Add XP and XP Reserve
stat_embed.add_field(name="XP", value="{:,}".format(newStat), inline=True)
stat_embed.add_field(name="XP Reserve", value="{:,}".format(newState), inline=True)
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(member.name, member.nick)
# Add to embed
stat_embed.author = '{}, who currently goes by {}'.format(member.name, member.nick)
else:
msg = "__***{}:***__\n\n".format(member.name)
# Add to embed
stat_embed.author = '{}'.format(member.name)
# Get localized user time
if member.joined_at != None:
local_time = UserTime.getUserTime(ctx.author, self.settings, member.joined_at)
j_time_str = "{} {}".format(local_time['time'], local_time['zone'])
# Add Joined
stat_embed.add_field(name="Joined", value=j_time_str, inline=True)
else:
stat_embed.add_field(name="Joined", value="Unknown", inline=True)
# Get user's current role
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
highestRole = None
if len(promoSorted):
nextRole = promoSorted[0]
else:
nextRole = None
for role in promoSorted:
if int(nextRole['XP']) < newStat:
nextRole = role
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if len(promoSorted) > (promoSorted.index(role)+1):
# There's more roles above this
nRoleIndex = promoSorted.index(role)+1
nextRole = promoSorted[nRoleIndex]
if highestRole:
msg = '{}**Current Rank:** *{}*\n'.format(msg, highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
else:
if len(promoSorted):
# Need to have ranks to acquire one
msg = '{}They have not acquired a rank yet.\n'.format(msg)
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
if nextRole and (newStat < int(nextRole['XP'])):
# Get role
next_role = DisplayName.roleForID(int(nextRole["ID"]), ctx.guild)
if not next_role:
next_role_text = "Role ID: {} (Removed from server)".format(nextRole["ID"])
else:
next_role_text = next_role.name
msg = '{}\n*{:,}* more *xp* required to advance to **{}**'.format(msg, int(nextRole['XP']) - newStat, next_role_text)
# Add Next Rank
stat_embed.add_field(name="Next Rank", value='{} ({:,} more xp required)'.format(next_role_text, int(nextRole['XP'])-newStat), inline=True)
# Add status
status_text = ":green_heart:"
if member.status == discord.Status.offline:
status_text = ":black_heart:"
elif member.status == discord.Status.dnd:
status_text = ":heart:"
elif member.status == discord.Status.idle:
status_text = ":yellow_heart:"
stat_embed.add_field(name="Status", value=status_text, inline=True)
stat_embed.add_field(name="ID", value=str(member.id), inline=True)
stat_embed.add_field(name="User Name", value="{}#{}".format(member.name, member.discriminator), inline=True)
if member.premium_since:
local_time = UserTime.getUserTime(ctx.author, self.settings, member.premium_since, clock=True)
c_time_str = "{} {}".format(local_time['time'], local_time['zone'])
stat_embed.add_field(name="Boosting Since",value=c_time_str)
if member.activity and member.activity.name:
# Playing a game!
play_list = [ "Playing", "Streaming", "Listening to", "Watching" ]
try:
play_string = play_list[member.activity.type]
except:
play_string = "Playing"
stat_embed.add_field(name=play_string, value=str(member.activity.name), inline=True)
if member.activity.type == 1:
# Add the URL too
stat_embed.add_field(name="Stream URL", value="[Watch Now]({})".format(member.activity.url), inline=True)
# Add joinpos
joinedList = sorted([{"ID":mem.id,"Joined":mem.joined_at} for mem in ctx.guild.members], key=lambda x:x["Joined"].timestamp() if x["Joined"] != None else -1)
if member.joined_at != None:
try:
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
stat_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
except:
stat_embed.add_field(name="Join Position", value="Unknown", inline=True)
else:
stat_embed.add_field(name="Join Position", value="Unknown", inline=True)
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member.created_at, clock=False)
c_time_str = "{} {}".format(local_time['time'], local_time['zone'])
# add created_at footer
created = "Created at " + c_time_str
stat_embed.footer = created
await stat_embed.send(ctx)
@stats.error
async def stats_error(self, ctx, error):
msg = 'stats Error: {}'.format(error)
await ctx.channel.send(msg)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def xpinfo(self, ctx):
"""Gives a quick rundown of the xp system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
serverName = Nullify.escape_all(server.name)
hourlyXP = int(self.settings.getServerStat(server, "HourlyXP"))
hourlyXPReal = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpPerMessage = int(self.settings.getServerStat(server, "XPPerMessage"))
xpRPerMessage = int(self.settings.getServerStat(server, "XPRPerMessage"))
if not xpPerMessage:
xpPerMessage = 0
if not xpRPerMessage:
xpRPerMessage = 0
if not hourlyXPReal:
hourlyXPReal = 0
if not hourlyXP:
hourlyXP = 0
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
xpProm = self.settings.getServerStat(server, "XPPromote")
xpDem = self.settings.getServerStat(server, "XPDemote")
xpStr = None
if xpProm and xpDem:
# Bot promote and demote
xpStr = "This is what I check to handle promotions and demotions.\n"
else:
if xpProm:
xpStr = "This is what I check to handle promotions.\n"
elif xpDem:
xpStr = "This is what I check to handle demotions.\n"
msg = "__***{}'s*** **XP System**__\n\n__What's What:__\n\n".format(serverName)
msg = "{}**XP:** This is the xp you have *earned.*\nIt comes from other users gifting you xp, or if you're lucky enough to `{}gamble` and win.\n".format(msg, ctx.prefix)
if xpStr:
msg = "{}{}".format(msg, xpStr)
hourStr = None
if hourlyXPReal > 0:
hourStr = "Currently, you receive *{} xp* each hour".format(hourlyXPReal)
if onlyOnline:
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpPerMessage > 0:
msg = "{}Currently, you receive *{} xp* per message.\n".format(msg, xpPerMessage)
msg = "{}This can only be taken away by an *admin*.\n\n".format(msg)
msg = "{}**XP Reserve:** This is the xp you can *gift*, *gamble*, or use to *feed* me.\n".format(msg)
hourStr = None
if hourlyXP > 0:
hourStr = "Currently, you receive *{} xp reserve* each hour".format(hourlyXP)
if onlyOnline:
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpRPerMessage > 0:
msg = "{}Currently, you receive *{} xp reserve* per message.\n".format(msg, xpRPerMessage)
msg = "{}\n__How Do I Use It?:__\n\nYou can gift other users xp by using the `{}xp [user] [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve*, and adds to their *xp*.\n".format(msg)
msg = "{}It does not change the *xp* you have *earned*.\n\n".format(msg)
msg = "{}You can gamble your *xp reserve* to have a chance to win a percentage back as *xp* for yourself.\n".format(msg)
msg = "{}You do so by using the `{}gamble [amount in multiple of 10]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and if you win, adds to your *xp*.\n\n".format(msg)
msg = "{}You can also *feed* me.\n".format(msg)
msg = "{}This is done with the `{}feed [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and doesn't affect your *xp*.\n\n".format(msg)
msg = "{}You can check your *xp*, *xp reserve*, current role, and next role using the `{}stats` command.\n".format(msg, ctx.prefix)
msg = "{}You can check another user's stats with the `{}stats [user]` command.\n\n".format(msg, ctx.prefix)
# Get the required role for using the xp system
role = self.settings.getServerStat(server, "RequiredXPRole")
if role == None or role == "":
msg = '{}Currently, **Everyone** can *give xp*, *gamble*, and *feed* the bot.\n\n'.format(msg)
else:
# Role is set - let's get its name
found = False
for arole in server.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
msg = '{}Currently, you need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, Nullify.escape_all(arole.name))
else:
msg = '{}Currently, you need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, Nullify.escape_all(arole.name))
if not found:
msg = '{}There is no role that matches id: `{}` for using the xp system - consider updating that setting.\n\n'.format(msg, role)
msg = "{}Hopefully that clears things up!".format(msg)
await ctx.message.channel.send(msg)
|
mit
| -1,479,371,113,077,877,200
| 34.143603
| 194
| 0.642081
| false
| 3.189386
| false
| false
| false
|
5t111111/markdown-preview.vim
|
markdownpreview_lib/markdown_preview/markdown_preview.py
|
1
|
2621
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import webbrowser
from bs4 import BeautifulSoup
import chardet
import markdown
class MarkdownPreview(object):
_source = None
_template_path = None
_html_path = None
_css_path = None
def __init__(self, source=None, template=None, html=None, css=None):
self._source = source
self._template_path = template
self._css_path = css
self._html_path = html
def _read_css(self):
with open(self._css_path, 'r') as f:
css = ''
uniconv = lambda x: x.decode(chardet.detect(x)['encoding'])
line = f.readline()
while line:
line = uniconv(line)
css = ''.join([css, line])
line = f.readline()
return css.encode('utf_8', errors='replace')
def create_html(self):
src = []
for line in self._source.split('\n'):
line = line.rstrip()
# Do not ignore continuous newlines...
if(line == ''):
src.append(' ')
else:
src.append(line)
src.append('\n')
content = ''.join(src)
uniconv = lambda x: x.decode(chardet.detect(x)['encoding'])
content = uniconv(content)
content = markdown.markdown(content, extensions=['extra', 'codehilite', 'nl2br'])
with open(self._template_path, 'r') as f:
html = f.read()
html = html.replace('{{ CSS }}', self._read_css())
html = html.replace('{{ CONTENT }}', content)
dirty_html = html
try:
soup = BeautifulSoup(dirty_html)
html = soup.prettify()
except:
# Failed to prettify a dirty HTML...
html = dirty_html
with open(self._html_path, 'w') as f:
f.write(html.encode('utf_8', errors='replace'))
if sys.platform[:3] == "win":
webbrowser.open(self._html_path)
else:
webbrowser.open('file://' + self._html_path)
def main():
argvs = sys.argv
src_file = argvs[1]
with open(src_file) as f:
src = f.read()
path_to_this = os.path.dirname(os.path.abspath(__file__))
css = os.path.join(path_to_this, 'preview', 'css', 'markdown.css')
template = os.path.join(path_to_this, 'preview', 'view', 'index.html')
html = os.path.join(path_to_this, 'preview', 'index.html')
mdp = MarkdownPreview(source=src, template=template, html=html, css=css)
mdp.create_html()
if __name__ == '__main__':
main()
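# Illustrative invocation (the filename is an assumption): python markdown_preview.py README.md
# The rendered HTML is written under preview/ next to this script and opened in the default browser.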
|
lgpl-2.1
| 157,799,395,344,033,540
| 26.882979
| 89
| 0.533003
| false
| 3.696756
| false
| false
| false
|
Djiit/err-jenkins
|
test_jenkinsBot.py
|
1
|
4684
|
# coding: utf-8
from errbot.backends.test import testbot
import jenkinsBot
class TestJenkinsBot(object):
extra_plugin_dir = '.'
def test_jenkins_build_no_args(self, testbot):
testbot.push_message('!jenkins build')
assert ('What job would you like to build?'
in testbot.pop_message())
def test_jenkins_build_shortcut_no_args(self, testbot):
testbot.push_message('!build')
assert ('What job would you like to build?'
in testbot.pop_message())
def test_jenkins_param_no_args(self, testbot):
testbot.push_message('!jenkins param')
assert ('What Job would you like the parameters for?'
in testbot.pop_message())
def test_jenkins_createjob_no_args(self, testbot):
testbot.push_message('!jenkins createjob')
assert ('Oops, I need a type and a name for your new job.'
in testbot.pop_message())
def test_jenkins_deletejob_no_args(self, testbot):
testbot.push_message('!jenkins deletejob')
assert ('Oops, I need the name of the job you want me to delete.'
in testbot.pop_message())
def test_jenkins_enablejob_no_args(self, testbot):
testbot.push_message('!jenkins enablejob')
assert ('Oops, I need the name of the job you want me to enable.'
in testbot.pop_message())
def test_jenkins_disablejob_no_args(self, testbot):
testbot.push_message('!jenkins disablejob')
assert ('Oops, I need the name of the job you want me to disable.'
in testbot.pop_message())
def test_jenkins_createnode_no_args(self, testbot):
testbot.push_message('!jenkins createnode')
assert ('Oops, I need a name and a working dir for your new node.'
in testbot.pop_message())
def test_jenkins_deletenode_no_args(self, testbot):
testbot.push_message('!jenkins deletenode')
assert ('Oops, I need the name of the node you want me to delete.'
in testbot.pop_message())
def test_jenkins_enablenode_no_args(self, testbot):
testbot.push_message('!jenkins enablenode')
assert ('Oops, I need the name of the node you want me to enable.'
in testbot.pop_message())
def test_jenkins_disablenode_no_args(self, testbot):
testbot.push_message('!jenkins disablenode')
assert ('Oops, I need the name of the node you want me to disable.'
in testbot.pop_message())
class TestJenkinsBotStaticMethods(object):
def test_format_jobs_helper(self):
jobs = [{'name': 'foo',
'fullname': 'foo bar',
'url': 'http://jenkins.example.com/job/foo/'}]
result = jenkinsBot.JenkinsBot.format_jobs(jobs)
assert result == 'foo bar (http://jenkins.example.com/job/foo/)'
def test_format_jobs_helper_no_params(self):
jobs = []
result = jenkinsBot.JenkinsBot.format_jobs(jobs)
assert result == 'No jobs found.'
def test_format_params_helper(self):
params = [{
'defaultParameterValue': {'value': 'bar'},
'description': 'foo bar baz',
'name': 'FOO',
'type': 'StringParameterDefinition'
}]
result = jenkinsBot.JenkinsBot.format_params(params)
assert result == """Type: StringParameterDefinition
Description: foo bar baz
Default Value: bar
Parameter Name: FOO
"""
def test_build_parameters_helper(self):
params = ['FOO:bar', 'BAR:baz']
result = jenkinsBot.JenkinsBot.build_parameters(params)
assert result == {'FOO': 'bar', 'BAR': 'baz'}
def test_build_parameters_helper_no_params(self):
params = []
result = jenkinsBot.JenkinsBot.build_parameters(params)
assert result == {'': ''}
def test_format_notification(self):
body = {
"name": "dummy",
"url": "job/dummy/",
"build": {
"full_url": "http://jenkins.example.com/job/dummy/1/",
"number": 1,
"phase": "COMPLETED",
"status": "SUCCESS",
"url": "job/asgard/1/",
"scm": {
"url": "https://github.com/Djiit/err-jenkins.git",
"branch": "origin/master",
"commit": "0e51ed"
},
}
}
result = jenkinsBot.JenkinsBot.format_notification(body)
assert result == """Build #1 SUCCESS for Job dummy \
(http://jenkins.example.com/job/dummy/1/)
Based on https://github.com/Djiit/err-jenkins.git/commit/0e51ed \
(origin/master)"""
|
mit
| 1,269,524,832,738,409,500
| 36.472
| 75
| 0.591161
| false
| 3.871074
| true
| false
| false
|
jpurma/Kataja
|
kataja/SemanticsItem.py
|
1
|
5838
|
import math
from PyQt5 import QtCore, QtWidgets, QtGui
from kataja.globals import SMALL_FEATURE
from kataja.singletons import ctrl, qt_prefs
FREE = 0
SENTENCE = 1
NOUN_PHRASE = 2
class SemanticsItem(QtWidgets.QGraphicsSimpleTextItem):
def __init__(self, sm, label, array_id, color_key, x=0, y=0):
QtWidgets.QGraphicsSimpleTextItem.__init__(self, label)
self.label = label
self.setFont(qt_prefs.get_font(SMALL_FEATURE))
self.array_id = array_id
self.color_key = color_key
self.color_key_tr = color_key if color_key.endswith('tr') else color_key + 'tr'
self.members = []
self.setZValue(2)
self.setPos(x, y)
if not sm.visible:
self.hide()
def add_member(self, node):
if node not in self.members:
self.members.append(node)
def update_text(self):
words = [self.label]
for node in self.members:
if node.syntactic_object:
checked_features = getattr(node.syntactic_object, 'checked_features', [])
if checked_features and isinstance(checked_features, tuple):
checked_feat, valuing_feat = checked_features
feat_node = ctrl.forest.get_node(checked_feat)
parents = feat_node.get_parents()
words.append('(' + ' '.join([x.label for x in parents]) + ')')
feat_node = ctrl.forest.get_node(valuing_feat)
parents = feat_node.get_parents()
words.append(' '.join([x.label for x in parents]))
self.setText(' '.join(words))
def boundingRect(self):
base = self.label_rect()
if not self.members:
return base.adjusted(-2, -2, 2, 2)
scene_pos = self.pos()
x = scene_pos.x()
y = scene_pos.y()
left = x + base.left()
up = y + base.top()
right = x + base.right()
down = y + base.bottom()
for member in self.members:
p = member.scenePos()
px = p.x()
py = p.y()
if px < left:
left = px
elif px > right:
right = px
if py < up:
up = py
elif py > down:
down = py
return QtCore.QRectF(left - x, up - y, right - left + 2, down - up + 2)
def label_rect(self):
min_w = 40
if not self.members:
return QtCore.QRectF(-2, -1, min_w, 4)
r = QtWidgets.QGraphicsSimpleTextItem.boundingRect(self).adjusted(-2, -1, 2, 1)
if r.width() < min_w:
r.setWidth(min_w)
return r
def paint(self, painter, *args, **kwargs):
painter.setPen(QtCore.Qt.NoPen)
label_rect = self.label_rect()
if self.members:
painter.setBrush(ctrl.cm.get(self.color_key))
painter.drawRoundedRect(label_rect, 4, 4)
p = QtGui.QPen(ctrl.cm.get(self.color_key_tr), 3)
painter.setPen(p)
scene_pos = self.pos()
x = scene_pos.x()
y = scene_pos.y()
mid_height = label_rect.height() / 2
painter.setBrush(QtCore.Qt.NoBrush)
for member in self.members:
if member.cached_sorted_feature_edges:
max_i = len(member.cached_sorted_feature_edges)
i_shift = math.ceil((max_i - 1) / 2) * -3
else:
i_shift = 0
pos = member.scenePos()
px = pos.x()
py = pos.y()
px += i_shift
if True:
painter.setPen(QtCore.Qt.NoPen)
grad = QtGui.QLinearGradient(0, 0, px - x, 0)
grad.setColorAt(0, ctrl.cm.get(self.color_key))
grad.setColorAt(0.1, ctrl.cm.get(self.color_key_tr))
grad.setColorAt(0.6, ctrl.cm.get(self.color_key_tr))
grad.setColorAt(1, ctrl.cm.get(self.color_key))
painter.setBrush(grad)
# painter.setBrush(ctrl.cm.get(self.color_key_tr))
# p.lineTo(px - x, py - y)
if py < y:
p = QtGui.QPainterPath(QtCore.QPointF(0, mid_height + 2))
p.lineTo((px - x) / 2, mid_height + 2)
p.quadTo(((px - x) / 4) * 3 - 2, mid_height + 2, px - x - 0.5, py - y - 1)
p.lineTo(px - x + 3, py - y - 5)
p.quadTo(((px - x) / 4) * 3 + 2, mid_height - 2, (px - x) / 2, mid_height
- 2)
p.lineTo(0, mid_height - 2)
else:
p = QtGui.QPainterPath(QtCore.QPointF(0, mid_height - 2))
p.lineTo((px - x) / 2, mid_height - 2)
p.quadTo(((px - x) / 4) * 3 - 2, mid_height - 2, px - x - 0.5, py - y - 1)
p.lineTo(px - x + 3, py - y - 5)
p.quadTo(((px - x) / 4) * 3 + 2, mid_height + 2, (px - x) / 2, mid_height
+ 2)
p.lineTo(0, mid_height + 2)
painter.drawPath(p)
# else:
# p = QtGui.QPainterPath(QtCore.QPointF(0, mid_height))
# p.lineTo((px - x) / 2, mid_height)
# p.quadTo(((px - x) / 4) * 3, mid_height, px - x, py - y)
# painter.drawPath(p)
self.setBrush(ctrl.cm.paper())
QtWidgets.QGraphicsSimpleTextItem.paint(self, painter, *args, **kwargs)
else:
painter.setBrush(ctrl.cm.get(self.color_key_tr))
painter.drawRoundedRect(label_rect, 4, 4)
|
gpl-3.0
| 6,065,196,672,987,887,000
| 39.825175
| 98
| 0.477561
| false
| 3.577206
| false
| false
| false
|
MjnMixael/knossos
|
releng/macos/dmgbuild_cfg.py
|
1
|
4501
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import biplist
import os.path
import subprocess
# .. Useful stuff ..............................................................
application = 'dist/Knossos.app'
appname = os.path.basename(application)
def icon_from_app(app_path):
plist_path = os.path.join(app_path, 'Contents', 'Info.plist')
plist = biplist.readPlist(plist_path)
icon_name = plist['CFBundleIconFile']
icon_root,icon_ext = os.path.splitext(icon_name)
if not icon_ext:
icon_ext = '.icns'
icon_name = icon_root + icon_ext
return os.path.join(app_path, 'Contents', 'Resources', icon_name)
# .. Basics ....................................................................
# Uncomment to override the output filename
#filename = 'dist/Knossos.dmg'
# Uncomment to override the output volume name
volume_name = 'Knossos'
# Volume format (see hdiutil create -help)
format = 'UDBZ'
# Volume size (must be large enough for your files)
kn_size = subprocess.check_output(['du', '-sm', 'dist/Knossos.app'])
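# Note: `defines` below is not defined in this file; it is assumed to be injected by dmgbuild
# when it executes this settings file, so the image size can be overridden via -D size=...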
size = defines.get('size', '%dM' % (int(kn_size.split()[0]) + 2))
# Files to include
files = [ application ]
# Symlinks to create
symlinks = { 'Applications': '/Applications' }
# Volume icon
#
# You can either define icon, in which case that icon file will be copied to the
# image, *or* you can define badge_icon, in which case the icon file you specify
# will be used to badge the system's Removable Disk icon
#
badge_icon = icon_from_app(application)
# Where to put the icons
icon_locations = {
appname: (140, 120),
'Applications': (500, 120)
}
# .. Window configuration ......................................................
# Background
#
# This is a STRING containing any of the following:
#
# #3344ff - web-style RGB color
# #34f - web-style RGB color, short form (#34f == #3344ff)
# rgb(1,0,0) - RGB color, each value is between 0 and 1
# hsl(120,1,.5) - HSL (hue saturation lightness) color
# hwb(300,0,0) - HWB (hue whiteness blackness) color
# cmyk(0,1,0,0) - CMYK color
# goldenrod - X11/SVG named color
# builtin-arrow - A simple built-in background with a blue arrow
# /foo/bar/baz.png - The path to an image file
#
# The hue component in hsl() and hwb() may include a unit; it defaults to
# degrees ('deg'), but also supports radians ('rad') and gradians ('grad'
# or 'gon').
#
# Other color components may be expressed either in the range 0 to 1, or
# as percentages (e.g. 60% is equivalent to 0.6).
background = 'builtin-arrow'
show_status_bar = False
show_tab_view = False
show_toolbar = False
show_pathbar = False
show_sidebar = False
sidebar_width = 180
# Window position in ((x, y), (w, h)) format
window_rect = ((100, 100), (640, 280))
# Select the default view; must be one of
#
# 'icon-view'
# 'list-view'
# 'column-view'
# 'coverflow'
#
default_view = 'icon-view'
# General view configuration
show_icon_preview = False
# Set these to True to force inclusion of icon/list view settings (otherwise
# we only include settings for the default view)
include_icon_view_settings = 'auto'
include_list_view_settings = 'auto'
# .. Icon view configuration ...................................................
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 120
scroll_position = (0, 0)
label_pos = 'bottom' # or 'right'
text_size = 16
icon_size = 128
# .. List view configuration ...................................................
# Column names are as follows:
#
# name
# date-modified
# date-created
# date-added
# date-last-opened
# size
# kind
# label
# version
# comments
#
list_icon_size = 16
list_text_size = 12
list_scroll_position = (0, 0)
list_sort_by = 'name'
list_use_relative_dates = True
list_calculate_all_sizes = False
list_columns = ('name', 'date-modified', 'size', 'kind', 'date-added')
list_column_widths = {
'name': 300,
'date-modified': 181,
'date-created': 181,
'date-added': 181,
'date-last-opened': 181,
'size': 97,
'kind': 115,
'label': 100,
'version': 75,
'comments': 300,
}
list_column_sort_directions = {
'name': 'ascending',
'date-modified': 'descending',
'date-created': 'descending',
'date-added': 'descending',
'date-last-opened': 'descending',
'size': 'descending',
'kind': 'ascending',
'label': 'ascending',
'version': 'ascending',
'comments': 'ascending',
}
|
apache-2.0
| -4,118,771,942,043,023,000
| 26.284848
| 80
| 0.612531
| false
| 3.192199
| false
| false
| false
|
evildmp/django-curated-resources
|
curated_resources/admin.py
|
1
|
4333
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from mptt.forms import TreeNodeMultipleChoiceField
from treeadmin.admin import TreeAdmin
from widgetry.tabs.admin import ModelAdminWithTabs
from widgetry import fk_lookup
# from widgetry.views import search
from arkestra_utilities.admin_mixins import AutocompleteMixin, InputURLMixin
from links import schema
from curated_resources.models import Resource, ResourceType, Audience, Topic, Domain
class ResourceAdminForm(InputURLMixin):
# disabled: https://github.com/django-mptt/django-mptt/issues/255
# domains = TreeNodeMultipleChoiceField(
# queryset=Domain.objects.all(),
# level_indicator=unichr(0x00A0) * 2,
# widget=FilteredSelectMultiple(
# "Domains",
# is_stacked=False,
# )
# )
def __init__(self, *args, **kwargs):
super(ResourceAdminForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None and self.instance.destination_content_type:
destination_content_type = self.instance.destination_content_type.model_class()
else:
destination_content_type = None
# look up the correct widget from the content type
widget = fk_lookup.GenericFkLookup(
'id_%s-destination_content_type' % self.prefix,
destination_content_type,
)
self.fields['destination_object_id'].widget = widget
self.fields['destination_content_type'].widget.choices = schema.content_type_choices()
from django.contrib.admin import SimpleListFilter
class ResourceAdmin(ModelAdminWithTabs, AutocompleteMixin):
form = ResourceAdminForm
related_search_fields = ['destination_content_type']
filter_horizontal = (
'related_to',
'suitable_for',
'topics',
'domains',
'curators'
)
list_filter = ('resource_type', 'published')
list_display = ('title', 'published')
prepopulated_fields = {"slug": ("title",)}
tabs = [
('Description', {'fieldsets': [
[None, {'fields': [('title', 'short_title'), ('resource_type', 'published'), 'description',]}],
["Link to the resource",{'fields': [('destination_content_type', 'destination_object_id',)]}],
["Duration and cost",{'fields': [('duration', 'cost',)]}]
]}),
('Audience', {'fieldsets': [[None,{'fields': ['suitable_for',]}]]}),
('Domains', {'fieldsets': [[None,{'fields': ['domains',]}]]}),
('Topics', {'fieldsets': [[None,{'fields': ['topics',]}]]}),
('Related items', {'fieldsets': [[None,{'fields': ['related_to',]}]]}),
('Curators', {'fieldsets': [[None,{'fields': ['curators',]}]]}),
('Advanced options', {'fieldsets': [[None,{'fields': ['slug',]}]]}),
]
class TreeRoots(SimpleListFilter):
title = _('domain scheme')
parameter_name = 'tree'
def lookups(self, request, model_admin):
roots = Domain.objects.filter(parent=None)
t = [(root.tree_id, root.name) for root in roots]
return t
def queryset(self, request, queryset):
if self.value():
return queryset.filter(tree_id = self.value())
class DomainAdmin(TreeAdmin):
enable_object_permissions = False
jquery_use_google_cdn = True
search_fields = ('name',)
list_display = ('name', 'id_code', 'number_of_resources', 'number_of_children')
list_filter = (TreeRoots,)
filter_horizontal = ('curators',)
class TopicAdmin(admin.ModelAdmin):
search_fields = ('name',)
list_display = ('name', 'number_of_resources')
filter_horizontal = ('curators',)
class ResourceTypeAdmin(admin.ModelAdmin):
search_fields = ('resource_type',)
list_display = ('resource_type', 'number_of_resources')
class AudienceAdmin(admin.ModelAdmin):
search_fields = ('name',)
list_display = ('name', 'number_of_resources')
filter_horizontal = ('curators',)
admin.site.register(Resource, ResourceAdmin)
admin.site.register(ResourceType, ResourceTypeAdmin)
admin.site.register(Audience, AudienceAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Domain, DomainAdmin)
|
bsd-2-clause
| 5,507,491,974,509,886,000
| 36.353448
| 107
| 0.643203
| false
| 3.896583
| false
| false
| false
|
palankai/baluster
|
src/baluster/utils.py
|
1
|
1844
|
from asyncio import iscoroutinefunction, coroutine
from contextlib import contextmanager
from functools import partial
import re
from .exceptions import MultipleExceptions
class Undefined:
pass
def make_if_none(obj, default):
if obj is not None:
return obj
return default
def dict_partial_copy(source, patterns):
keys = _find_matches(patterns, source.keys())
return dict(filter(lambda i: i[0] in keys, source.items()))
@contextmanager
def capture_exceptions():
exceptions = []
@contextmanager
def capture():
try:
yield
except Exception as ex:
exceptions.append(ex)
try:
yield capture
finally:
if exceptions:
if len(exceptions) == 1:
raise exceptions[0]
raise MultipleExceptions(exceptions)
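# Minimal usage sketch (illustrative; `tasks` is a hypothetical iterable of callables):
#
#     with capture_exceptions() as capture:
#         for task in tasks:
#             with capture():
#                 task()
#
# Each failing task is recorded; a single failure is re-raised as-is, several raise MultipleExceptions.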
async def as_async(func, *args, **kwargs):
if iscoroutinefunction(func):
return await func(*args, **kwargs)
return func(*args, **kwargs)
def async_partial(*args, **kwargs):
return coroutine(partial(*args, **kwargs))
def make_caller(what_to_call):
return lambda *a, **k: what_to_call()
def merge_dicts(dicts):
return {k: v for d in dicts for k, v in d.items()}
def get_member_name(own_name, name):
if own_name is None:
return name
return _join_names(own_name, name)
def find_instance(tree, name):
instance = tree
for part in name.split('.')[:-1]:
instance = getattr(instance, part)
return instance
def _join_names(*names):
return '.'.join(names)
def _find_matches(patterns, candidates):
pts = list(map(_compile_regex, patterns))
return list(filter(
lambda c: any(map(lambda p: p.match(c), pts)),
candidates
))
def _compile_regex(name):
return re.compile('^{}(\..*)?$'.format(name))
|
mit
| 1,445,387,088,957,653,500
| 19.954545
| 63
| 0.632863
| false
| 3.817805
| false
| false
| false
|
aylward/ITKTubeTK
|
setup.py
|
1
|
1986
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
import numpy as np
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-tubetk',
version='0.9.0',
author='Stephen R. Aylward',
author_email='stephen.aylward@kitware.com',
include_dirs=[np.get_include()],
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK',
description=r'An open-source toolkit, led by Kitware, Inc., for the segmentation, registration, and analysis of tubes and surfaces in images.',
long_description='TubeTK is an open-source toolkit for the segmentation, registration, and analysis of tubes and surfaces in images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit',
url=r'https://itk.org/',
install_requires=[
r'itk>=5.2.0.post2',
r'itk-minimalpathextraction>=1.2.0'
]
)
|
apache-2.0
| 1,078,648,965,849,893,400
| 36.471698
| 147
| 0.639476
| false
| 3.841393
| false
| false
| false
|
angst7/far
|
models/models1.py
|
1
|
3286
|
# SQLAlchemy, SQLElixir
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relation, backref
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class AttackType:
UNDEFINED = 0
# Melee attacks
HIT = 1
CRUSH = 2
SLASH = 3
PIERCE = 4
CLEAVE = 5
CLAW = 6
KICK = 7
# Elemental attacks
AIR = 10
FIRE = 11
WATER = 12
EARTH = 13
# Magical attacks
GOOD_MAGIC = 20
EVIL_MAGIC = 21
NEUTRAL_MAGIC = 22
# Other attacks
DISEASE = 31
POISON = 32
class MobFlags:
UNDEFINED = 0
AGGRESSIVE = 1
SENTINAL = 2
ISNPC = 4
WIMPY = 8
class AffectFlags:
UNDEFINED = 0
SENSE_LIFE = 1
SENSE_HIDDEN = 2
SEE_INVISIBLE = 4
NIGHT_VISION = 8
FLYING = 16
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String)
password = Column(String)
email = Column(String, nullable=False)
firstname = Column(String)
lastname = Column(String)
def __init__(self, username, password, firstname, lastname):
self.username = username
self.password = password
self.firstname = firstname
self.lastname = lastname
def __repr__(self):
return "<User('%s', '%s', '%s', '%s')>" % (self.username, self.password, self.firstname, self.lastname)
class Attribute(Base):
__tablename__ = 'attributes'
id = Column(Integer, primary_key=True)
name = Column(String)
class AttributeEffects(Base):
__tablename__ = 'attributeeffects'
id = Column(Integer, primary_key=True)
name = Column(String)
attribute = relation(Attribute, backref=backref('attributeeffects', order_by=id))
modifier = Column(Integer)
class Attack(Base):
__tablename__ = 'attack'
id = Column(Integer, primary_key=True)
name = Column(String)
attacktype = Column(Integer) # this will be one of AttackType class attacks
dice = Column(Integer) # d&d style number of dice
sides = Column(Integer) # d&d style die sides
bonus = Column(Integer) # attack bonus over dice roll
use = Column(Integer) # Percent chance to use this attack 0-100
class Skill(Base):
__tablename__ = 'skills'
id = Column(Integer, primary_key=True)
name = Column(String)
attributeeffects = relation(AttributeEffects, backref=backref('skills', order_by=id))
class ToonClass(Base):
__tablename__ = 'classes'
id = Column(Integer, primary_key=True)
name = Column(String)
attributeeffects = relation(AttributeEffects, backref=backref('classes', order_by=id))
class ToonLevel(Base):
__tablename__ = 'levels'
id = Column(Integer, primary_key=True)
toonclass = relation(ToonClass, backref=backref('levels', order_by=id))
level = Column(Integer)
class Toon(Base):
__tablename__ = 'toons'
id = Column(Integer, primary_key=True)
name = Column(String)
levels = relation(ToonLevel, backref=backref('toons', order_by=id))
affectflags = Column(Integer)
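# Illustrative setup, not part of the original module ('far.db' is an assumed filename):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#     engine = create_engine('sqlite:///far.db')
#     Base.metadata.create_all(engine)   # create all tables declared above
#     Session = sessionmaker(bind=engine)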
|
mit
| -6,676,001,317,496,061,000
| 23.340741
| 111
| 0.590992
| false
| 3.591257
| false
| false
| false
|
farert/farert
|
db/scripts/jr_db_reg_old_1.py
|
1
|
2553
|
#!python3.0.1
# -*- coding: utf-8 -*-
"""
f[^x[Xo^
kC¹ JRkC¹ Ùü Ùw ͱ¾Ä 0 0
"""
import os
import sqlite3
import sys
import re
from collections import defaultdict
if 1 < len(sys.argv):
fn = sys.argv[1]
else:
fn = 'jr.txt'
dbname = 'jr.db'
if os.access(dbname, os.F_OK):
os.unlink(dbname)
con = sqlite3.connect(dbname, isolation_level=None)
###########################################
sql = """
create table t_company (
name char(11) not null primary key
);
"""
con.execute(sql)
###########################################
sql = """
create table t_prefect(
name char(12) not null primary key
);
"""
con.execute(sql)
###########################################
sql = """
create table t_line(
name text not null primary key
);
"""
con.execute(sql)
###########################################
sql = """
create table t_station(
name text not null,
kana text not null,
company_id integer not null references t_company(rowid),
prefect_id integer not null references t_prefect(rowid),
jctflg integer not null,
sameflg integer not null default(0),
cityflg integer not null,
primary key(name, prefect_id)
);
"""
con.execute(sql)
###########################################
sql = """
create table t_lines (
line_id integer not null references t_line(rowid),
station_id integer not null references t_station(rowid),
sales_km integer not null,
calc_km integer not null,
spe_route integer not null default(0),
primary key (line_id, station_id)
);
"""
con.execute(sql)
###########################################
sql = """
create table t_jct (
line_id integer not null references t_line(rowid),
station_id integer not null references t_station(rowid),
primary key (line_id, station_id)
);
"""
con.execute(sql)
###########################################
items = [[], [], []]
h_items = [defaultdict(int), defaultdict(int), defaultdict(int)]
n_lin = 0
for lin in open(fn, 'r'):
n_lin += 1
if n_lin == 1:
continue
linitems = lin.split('\t')
for i in [0, 1, 2]:
key = linitems[i].strip()
h_items[i][key] += 1
if 1 == h_items[i][key]:
items[i].append([key])
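# Columns 0-2 of each tab-separated input line are collected (deduplicated) as prefecture,
# company and line names, then bulk-inserted into their respective tables below.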
con.executemany('insert into t_prefect values(?)', items[0])
print("registerd t_prefect.")
con.executemany('insert into t_company values(?)', items[1])
print("registerd t_company.")
con.executemany('insert into t_line values(?)', items[2])
print("registerd t_line.")
print("complete success.")
|
gpl-3.0
| 5,349,268,513,671,626,000
| 19.275
| 64
| 0.550333
| false
| 2.852514
| false
| false
| false
|
aziele/alfpy
|
alfpy/utils/seqrecords.py
|
1
|
3791
|
from . import fasta
class SeqRecords:
"""Object representing an ordered collection of sequence records.
Attributes:
id_list (list) : List of sequence record identifiers
seq_list (list) : List of sequence strings
count (int) : Number of sequence records
"""
def __init__(self, id_list=None, seq_list=None):
"""Create a collection (may be empty) of sequence records.
Example:
>>> ids = ['seq1', 'seq2']
>>> seqs = ['ATGCTG', 'TGCTGATAGTA']
>>> seq_records = SeqRecords(id_list=ids, seq_list=seqs)
>>> print seq_records
SeqRecords (noseqs: 2)
"""
self.count = 0 if not id_list else len(seq_list)
self.id_list = id_list if id_list else []
# Make all sequences uppercased.
self.seq_list = [s.upper() for s in seq_list] if seq_list else []
def add(self, seqid, seq):
"""Add a sequence record to the existing collection.
Args:
id (str) : sequence identifier
seq (str) : sequence string
Example:
>>> seq_record.add("seq3", "TGCTGA")
"""
self.id_list.append(seqid)
self.seq_list.append(seq.upper())
self.count += 1
def fasta(self, wrap=70):
"""Return sequence records as a mutli-FASTA string.
Example:
>>> ids = ['seq1', 'seq2']
>>> seqs = ['ATGCTG', 'TGCTGATAGTA']
>>> seq_records = SeqRecords(id_list=ids, seq_list=seqs)
>>> print seq_records.fasta()
>seq1
ATGCTG
>seq2
TGCTGATAGTA
"""
l = []
for seqid, seq in self:
seq_record = fasta.FastaRecord(seq=seq, seqid=seqid)
l.append(seq_record.format(wrap=wrap))
return "\n".join(l)
@property
def length_list(self):
"""Return a list of the sequences' length_list"""
return [len(seq) for seq in self.seq_list]
def __iter__(self):
"""
Iterate over sequence records in the collection.
Example:
>>> for seqid, seq in seq_records:
...     print(seqid)
...     print(seq)
seq1
ATGCTG
seq2
TGCTGATAGTA
"""
for i in range(self.count):
seqid = self.id_list[i]
seq = self.seq_list[i]
yield seqid, seq
def __len__(self):
"""
Return the number of sequence records in the collection.
Example:
>>> len(seq_records)
3
"""
return len(self.seq_list)
def __repr__(self):
return "{0} (noseqs: {1})".format(self.__class__.__name__,
self.count)
def read_fasta(handle):
"""Create a SeqRecords object from Fasta file.
Args:
file handle : a file containing Fasta sequences.
"""
id_list = []
seq_list = []
for seq_record in fasta.parse(handle):
id_list.append(seq_record.id)
seq_list.append(seq_record.seq)
return SeqRecords(id_list=id_list, seq_list=seq_list)
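# Minimal usage sketch (illustrative; 'example.fasta' is an assumed path):
#
#     with open('example.fasta') as fh:
#         records = read_fasta(fh)
#     print(len(records), records.length_list)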
def main():
seq_records = SeqRecords()
seq_records.add(
'seq1', 'AACGTACCATTGAACGTACCATTGAACGTACCATTGATGCATGGTAGAT')
seq_records.add('seq2', 'CTAGGGGACTTATCTAGGGGACTTATCTAGGGGACTTAT')
seq_records.add('seq3', 'CTAGGGAAAATTCTAGGGAAAATTCTAGGGAAAATT')
import uuid
import os
outfilename = uuid.uuid4().hex
oh = open(outfilename, 'w')
oh.write(seq_records.fasta())
oh.close()
fh = open(outfilename)
seq_records = read_fasta(fh)
fh.close()
os.remove(outfilename)
return seq_records
if __name__ == '__main__':
seq_records = main()
print(seq_records.fasta())
|
mit
| -537,299,720,962,794,500
| 26.273381
| 73
| 0.543128
| false
| 3.713026
| false
| false
| false
|
blukat29/notifyhere
|
notifyhere/dash/api/gmail.py
|
1
|
2935
|
from httplib import HTTPSConnection
import json
import imaplib
import re
import base
import tools
import secrets
class GmailApi(base.ApiBase):
list_re = re.compile(r'\((.+)\) "(.+)" "(.+)"')
def __init__(self):
base.ApiBase.__init__(self, "gmail")
self.token = ""
def icon_url(self):
return "https://mail.google.com/favicon.ico"
def oauth_link(self):
url = "https://accounts.google.com/o/oauth2/auth"
args = {
"response_type":"code",
"client_id":secrets.GMAIL_CLIENT_ID,
"redirect_uri":secrets.BASE_REDIRECT_URL + "gmail",
"scope":"https://mail.google.com/ https://www.googleapis.com/auth/userinfo.email",
"approval_prompt":"force",
}
return url + "?" + tools.encode_params(args)
def oauth_callback(self, params):
if 'code' not in params:
return None
conn = HTTPSConnection("accounts.google.com")
body = tools.encode_params({
"grant_type":"authorization_code",
"code":params['code'],
"client_id":secrets.GMAIL_CLIENT_ID,
"client_secret":secrets.GMAIL_CLIENT_SECRET,
"redirect_uri":secrets.BASE_REDIRECT_URL + "gmail",
})
headers = {
"Content-Type":"application/x-www-form-urlencoded",
}
conn.request("POST", "/o/oauth2/token", body, headers)
resp = conn.getresponse()
try:
self.token = json.loads(resp.read())['access_token']
self.is_auth = True
except (KeyError, ValueError):
return None
conn.close()
conn = HTTPSConnection("www.googleapis.com")
conn.request("GET","/oauth2/v1/tokeninfo?alt=json&access_token="+self.token,"",{})
resp = conn.getresponse()
self.username = json.loads(resp.read())['email']
def update(self):
auth = "user=%s\1auth=Bearer %s\1\1" % (self.username, self.token)
m = imaplib.IMAP4_SSL("imap.gmail.com")
m.authenticate("XOAUTH2", lambda x: auth)
status, raw_list = m.list()
boxes = []
for line in raw_list:
attr, root, raw_name = GmailApi.list_re.search(line).groups()
if "Noselect" in attr:
continue
decoded_name = raw_name.replace("&","+").decode("utf-7")
boxes.append((raw_name, decoded_name))
noti = {}
for box in boxes:
raw_name, decoded_name = box
status, result = m.select(raw_name)
total = int(result[0])
status, result = m.search(None, "(UNSEEN)")
unseen = len(result[0].split())
if unseen > 0:
noti[decoded_name] = unseen
m.close()
m.logout()
return noti
def logout(self):
self.is_auth = False
self.token = ""
|
mit
| 1,942,986,103,054,837,800
| 28.94898
| 94
| 0.539353
| false
| 3.758003
| false
| false
| false
|
kashefy/nideep
|
nideep/iow/copy_lmdb.py
|
1
|
1826
|
'''
Created on May 30, 2016
@author: kashefy
'''
import lmdb
from lmdb_utils import IDX_FMT, MAP_SZ
def copy_samples_lmdb(path_lmdb, path_dst, keys, func_data=None):
"""
Copy select samples from an lmdb into another.
Can be used for sampling from an lmdb into another and generating a random shuffle
of lmdb content.
Parameters:
path_lmdb -- source lmdb
path_dst -- destination lmdb
keys -- list of keys or indices to sample from source lmdb
func_data -- optional callable applied to each stored value before it is written to the destination
"""
db = lmdb.open(path_dst, map_size=MAP_SZ)
key_dst = 0
with db.begin(write=True) as txn_dst:
with lmdb.open(path_lmdb, readonly=True).begin() as txn_src:
for key_src in keys:
if not isinstance(key_src, basestring):
key_src = IDX_FMT.format(key_src)
if func_data is None:
txn_dst.put(IDX_FMT.format(key_dst), txn_src.get(key_src))
else:
txn_dst.put(IDX_FMT.format(key_dst), func_data(txn_src.get(key_src)))
key_dst += 1
db.close()
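# Illustrative call (paths and keys are assumptions): copy the first three records into a new lmdb.
#
#     copy_samples_lmdb('data/train_lmdb', 'data/train_subset_lmdb', keys=[0, 1, 2])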
def concatenate_lmdb(paths_lmdb, path_dst):
"""
Concatenate the contents of several lmdbs into a single destination lmdb.
Parameters:
paths_lmdb -- list of source lmdbs to concatenate
path_dst -- destination lmdb
"""
db = lmdb.open(path_dst, map_size=MAP_SZ)
key_dst = 0
with db.begin(write=True) as txn_dst:
for p in paths_lmdb:
with lmdb.open(p, readonly=True).begin() as txn_src:
for _, value in txn_src.cursor():
txn_dst.put(IDX_FMT.format(key_dst), value)
key_dst += 1
db.close()
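# Illustrative call (paths are assumptions):
#
#     concatenate_lmdb(['data/part1_lmdb', 'data/part2_lmdb'], 'data/all_lmdb')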
|
bsd-2-clause
| -6,608,046,657,129,439,000
| 30.482759
| 89
| 0.594743
| false
| 3.478095
| false
| false
| false
|
leductan-nguyen/RaionPi
|
src/octoprint/plugins/softwareupdate/scripts/update-octoprint.py
|
1
|
6006
|
#!/bin/env python
from __future__ import absolute_import
__author__ = "Gina Haeussge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The RaionPi Project - Released under terms of the AGPLv3 License"
import errno
import subprocess
import sys
def _get_git_executables():
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
return GITS
def _git(args, cwd, hide_stderr=False, verbose=False, git_executable=None):
if git_executable is not None:
commands = [git_executable]
else:
commands = _get_git_executables()
for c in commands:
try:
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return p.returncode, stdout
def _python(args, cwd, python_executable, sudo=False):
command = [python_executable] + args
if sudo:
command = ["sudo"] + command
try:
p = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except:
return None, None
stdout = p.communicate()[0].strip()
if sys.version >= "3":
stdout = stdout.decode()
return p.returncode, stdout
def update_source(git_executable, folder, target, force=False):
print(">>> Running: git diff --shortstat")
returncode, stdout = _git(["diff", "--shortstat"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, \"git diff\" failed with returncode %d: %s" % (returncode, stdout))
if stdout and stdout.strip():
# we got changes in the working tree, maybe from the user, so we'll now rescue those into a patch
import time
import os
timestamp = time.strftime("%Y%m%d%H%M")
patch = os.path.join(folder, "%s-preupdate.patch" % timestamp)
print(">>> Running: git diff and saving output to %s" % timestamp)
returncode, stdout = _git(["diff"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, installation directory was dirty and state could not be persisted as a patch to %s" % patch)
with open(patch, "wb") as f:
f.write(stdout)
print(">>> Running: git reset --hard")
returncode, stdout = _git(["reset", "--hard"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, \"git reset --hard\" failed with returncode %d: %s" % (returncode, stdout))
print(">>> Running: git pull")
returncode, stdout = _git(["pull"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, \"git pull\" failed with returncode %d: %s" % (returncode, stdout))
print(stdout)
if force:
reset_command = ["reset"]
reset_command += [target]
print(">>> Running: git %s" % " ".join(reset_command))
returncode, stdout = _git(reset_command, folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Error while updating, \"git %s\" failed with returncode %d: %s" % (" ".join(reset_command), returncode, stdout))
print(stdout)
def install_source(python_executable, folder, user=False, sudo=False):
print(">>> Running: python setup.py clean")
returncode, stdout = _python(["setup.py", "clean"], folder, python_executable)
if returncode != 0:
print("\"python setup.py clean\" failed with returncode %d: %s" % (returncode, stdout))
print("Continuing anyways")
print(stdout)
print(">>> Running: python setup.py install")
args = ["setup.py", "install"]
if user:
args.append("--user")
returncode, stdout = _python(args, folder, python_executable, sudo=sudo)
if returncode != 0:
raise RuntimeError("Could not update, \"python setup.py install\" failed with returncode %d: %s" % (returncode, stdout))
print(stdout)
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(prog="update-octoprint.py")
parser.add_argument("--git", action="store", type=str, dest="git_executable",
help="Specify git executable to use")
parser.add_argument("--python", action="store", type=str, dest="python_executable",
help="Specify python executable to use")
parser.add_argument("--force", action="store_true", dest="force",
help="Set this to force the update to only the specified version (nothing newer)")
parser.add_argument("--sudo", action="store_true", dest="sudo",
help="Install with sudo")
parser.add_argument("--user", action="store_true", dest="user",
help="Install to the user site directory instead of the general site directory")
parser.add_argument("folder", type=str,
help="Specify the base folder of the RaionPi installation to update")
parser.add_argument("target", type=str,
help="Specify the commit or tag to which to update")
args = parser.parse_args()
return args
def main():
args = parse_arguments()
git_executable = None
if args.git_executable:
git_executable = args.git_executable
python_executable = sys.executable
if args.python_executable:
python_executable = args.python_executable
folder = args.folder
target = args.target
import os
if not os.access(folder, os.W_OK):
raise RuntimeError("Could not update, base folder is not writable")
update_source(git_executable, folder, target, force=args.force)
install_source(python_executable, folder, user=args.user, sudo=args.sudo)
if __name__ == "__main__":
main()
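# Illustrative invocation (paths and target are assumptions):
#
#     python update-octoprint.py --git=/usr/bin/git --python=/usr/bin/python /opt/RaionPi 1.2.0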
|
agpl-3.0
| 8,245,839,758,198,759,000
| 32.366667
| 135
| 0.670663
| false
| 3.445783
| false
| false
| false
|
sassoftware/pymaven
|
pymaven/artifact.py
|
1
|
4769
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The artifact module provides objects and functions for working with artifacts
in a maven repository
"""
import functools
import re
import sys
import six
from .errors import ArtifactParseError
from .versioning import VersionRange
if sys.version_info > (2,):
from .utils import cmp
MAVEN_COORDINATE_RE = re.compile(
r'(?P<group_id>[^:]+)'
r':(?P<artifact_id>[^:]+)'
r'(:(?P<type>[^:]+)(:(?P<classifier>[^:]+))?)?'
r':(?P<version>[^:]+)'
)
@functools.total_ordering
class Artifact(object):
"""Represents an artifact within a maven repository."""
__slots__ = ("group_id", "artifact_id", "version", "type", "classifier",
"contents")
def __init__(self, coordinate):
self.version = None
self.type = "jar"
self.classifier = None
self.contents = None
parts = coordinate.split(':')
length = len(parts)
if length < 2 or length > 5:
raise ArtifactParseError(
"Too many items in coordinate: '%s'" % coordinate)
self.group_id, self.artifact_id = parts[:2]
if length == 3:
self.version = parts[2]
elif length == 4:
self.type = parts[2]
self.version = parts[3]
elif length == 5:
self.type = parts[2]
self.classifier = parts[3]
self.version = parts[4]
if self.version:
self.version = VersionRange(self.version)
def __cmp__(self, other):
if self is other:
return 0
if not isinstance(other, Artifact):
if isinstance(other, six.string_types):
try:
return cmp(self, Artifact(other))
except ArtifactParseError:
pass
return 1
result = cmp(self.group_id, other.group_id)
if result == 0:
result = cmp(self.artifact_id, other.artifact_id)
if result == 0:
result = cmp(self.type, other.type)
if result == 0:
if self.classifier is None:
if other.classifier is not None:
result = 1
else:
if other.classifier is None:
result = -1
else:
result = cmp(self.classifier, other.classifier)
if result == 0:
result = cmp(self.version.version,
other.version.version)
return result
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __hash__(self):
return hash((self.group_id, self.artifact_id, self.version, self.type,
self.classifier))
def __str__(self):
s = ':'.join((self.group_id, self.artifact_id))
if self.version:
s += ':' + self.type
if self.classifier:
s += ':' + self.classifier
s += ':' + str(self.version.version if self.version.version
else self.version)
return s
def __repr__(self):
return "<pymaven.Artifact(%r)" % self.coordinate
@property
def coordinate(self):
coordinate = "%s:%s" % (self.group_id, self.artifact_id)
if self.type != "jar":
coordinate += ":%s" % self.type
if self.classifier is not None:
coordinate += ":%s" % self.classifier
if self.version is not None:
coordinate += ":%s" % self.version
return coordinate
@property
def path(self):
path = "%s/%s" % (self.group_id.replace('.', '/'), self.artifact_id)
if self.version and self.version.version:
version = self.version.version
path += "/%s/%s-%s" % (version, self.artifact_id, version)
if self.classifier:
path += "-%s" % self.classifier
path += ".%s" % self.type
return path
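# Coordinate formats accepted by Artifact(...), as parsed in __init__ above (illustrative):
#     "group:artifact"                    -> no version, default type 'jar'
#     "group:artifact:1.0"                -> version only
#     "group:artifact:war:1.0"            -> type + version
#     "group:artifact:jar:sources:1.0"    -> type + classifier + version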
|
apache-2.0
| 7,652,122,665,989,008,000
| 30.375
| 78
| 0.534284
| false
| 4.176007
| false
| false
| false
|
krzychb/rtd-test-bed
|
components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py
|
1
|
16713
|
#!/usr/bin/env python
from __future__ import print_function, division
import unittest
import struct
import csv
import sys
import subprocess
import tempfile
import os
import io
import re
try:
import gen_esp32part
except ImportError:
sys.path.append("..")
import gen_esp32part
SIMPLE_CSV = """
# Name,Type,SubType,Offset,Size,Flags
factory,0,2,65536,1048576,
"""
LONGER_BINARY_TABLE = b""
# type 0x00, subtype 0x00,
# offset 64KB, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x00\x00" + \
b"\x00\x00\x01\x00" + \
b"\x00\x00\x10\x00" + \
b"factory\0" + (b"\0" * 8) + \
b"\x00\x00\x00\x00"
# type 0x01, subtype 0x20,
# offset 0x110000, size 128KB
LONGER_BINARY_TABLE += b"\xAA\x50\x01\x20" + \
b"\x00\x00\x11\x00" + \
b"\x00\x02\x00\x00" + \
b"data" + (b"\0" * 12) + \
b"\x00\x00\x00\x00"
# type 0x10, subtype 0x00,
# offset 0x150000, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x10\x00" + \
b"\x00\x00\x15\x00" + \
b"\x00\x10\x00\x00" + \
b"second" + (b"\0" * 10) + \
b"\x00\x00\x00\x00"
# MD5 checksum
LONGER_BINARY_TABLE += b"\xEB\xEB" + b"\xFF" * 14
LONGER_BINARY_TABLE += b'\xf9\xbd\x06\x1b\x45\x68\x6f\x86\x57\x1a\x2c\xd5\x2a\x1d\xa6\x5b'
# empty partition
LONGER_BINARY_TABLE += b"\xFF" * 32
def _strip_trailing_ffs(binary_table):
"""
Strip all FFs down to the last 32 bytes (terminating entry)
"""
while binary_table.endswith(b"\xFF" * 64):
binary_table = binary_table[0:len(binary_table) - 32]
return binary_table
class Py23TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Py23TestCase, self).__init__(*args, **kwargs)
try:
self.assertRaisesRegex
except AttributeError:
# assertRaisesRegexp is deprecated in Python3 but assertRaisesRegex doesn't exist in Python2
# This fix is used in order to avoid using the alias from the six library
self.assertRaisesRegex = self.assertRaisesRegexp
class CSVParserTests(Py23TestCase):
def test_simple_partition(self):
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
self.assertEqual(len(table), 1)
self.assertEqual(table[0].name, "factory")
self.assertEqual(table[0].type, 0)
self.assertEqual(table[0].subtype, 2)
self.assertEqual(table[0].offset, 65536)
self.assertEqual(table[0].size, 1048576)
def test_require_type(self):
csv = """
# Name,Type, SubType,Offset,Size
ihavenotype,
"""
with self.assertRaisesRegex(gen_esp32part.InputError, "type"):
gen_esp32part.PartitionTable.from_csv(csv)
def test_type_subtype_names(self):
csv_magicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, 0, 0,, 0x100000
myota_0, 0, 0x10,, 0x100000
myota_1, 0, 0x11,, 0x100000
myota_15, 0, 0x1f,, 0x100000
mytest, 0, 0x20,, 0x100000
myota_status, 1, 0,, 0x100000
"""
csv_nomagicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, app, factory,, 0x100000
myota_0, app, ota_0,, 0x100000
myota_1, app, ota_1,, 0x100000
myota_15, app, ota_15,, 0x100000
mytest, app, test,, 0x100000
myota_status, data, ota,, 0x100000
"""
# make two equivalent partition tables, one using
# magic numbers and one using shortcuts. Ensure they match
magic = gen_esp32part.PartitionTable.from_csv(csv_magicnumbers)
magic.verify()
nomagic = gen_esp32part.PartitionTable.from_csv(csv_nomagicnumbers)
nomagic.verify()
self.assertEqual(nomagic["myapp"].type, 0)
self.assertEqual(nomagic["myapp"].subtype, 0)
self.assertEqual(nomagic["myapp"], magic["myapp"])
self.assertEqual(nomagic["myota_0"].type, 0)
self.assertEqual(nomagic["myota_0"].subtype, 0x10)
self.assertEqual(nomagic["myota_0"], magic["myota_0"])
self.assertEqual(nomagic["myota_15"], magic["myota_15"])
self.assertEqual(nomagic["mytest"], magic["mytest"])
self.assertEqual(nomagic["myota_status"], magic["myota_status"])
# self.assertEqual(nomagic.to_binary(), magic.to_binary())
def test_unit_suffixes(self):
csv = """
# Name, Type, Subtype, Offset, Size
one_megabyte, app, factory, 64k, 1M
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].offset, 64 * 1024)
self.assertEqual(t[0].size, 1 * 1024 * 1024)
def test_default_offsets(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory,, 1M
second, data, 0x15,, 1M
minidata, data, 0x40,, 32K
otherapp, app, factory,, 1M
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
# 'first'
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
# 'second'
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
# 'minidata'
self.assertEqual(t[2].offset, 0x210000)
# 'otherapp'
self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image
def test_negative_size_to_offset(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory, 0x10000, -2M
second, data, 0x15, , 1M
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
# 'first'
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
# 'second'
self.assertEqual(t[1].offset, 0x200000) # prev offset+size
def test_overlapping_offsets_fail(self):
csv = """
first, app, factory, 0x100000, 2M
second, app, ota_0, 0x200000, 1M
"""
with self.assertRaisesRegex(gen_esp32part.InputError, "overlap"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
def test_unique_name_fail(self):
csv = """
first, app, factory, 0x100000, 1M
first, app, ota_0, 0x200000, 1M
"""
with self.assertRaisesRegex(gen_esp32part.InputError, "Partition names must be unique"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
class BinaryOutputTests(Py23TestCase):
def test_binary_entry(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 64 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2]) # magic
self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype
eo, es = struct.unpack("<LL", tb[4:12])
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
self.assertEqual(b"\xEB\xEB" + b"\xFF" * 14, tb[32:48])
self.assertEqual(b'\x43\x03\x3f\x33\x40\x87\x57\x51\x69\x83\x9b\x40\x61\xb1\x27\x26', tb[48:64])
def test_multiple_entries(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
second,0x31, 0xEF, , 0x100000
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 96 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2])
self.assertEqual(b'\xAA\x50', tb[32:34])
def test_encrypted_flag(self):
csv = """
# Name, Type, Subtype, Offset, Size, Flags
first, app, factory,, 1M, encrypted
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
self.assertTrue(t[0].encrypted)
tb = _strip_trailing_ffs(t.to_binary())
tr = gen_esp32part.PartitionTable.from_binary(tb)
self.assertTrue(tr[0].encrypted)
class BinaryParserTests(Py23TestCase):
def test_parse_one_entry(self):
# type 0x30, subtype 0xee,
# offset 1MB, size 2MB
entry = b"\xAA\x50\x30\xee" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00" + \
b"\xFF" * 32
# verify that parsing 32 bytes as a table
# or as a single Definition are the same thing
t = gen_esp32part.PartitionTable.from_binary(entry)
self.assertEqual(len(t), 1)
t[0].verify()
e = gen_esp32part.PartitionDefinition.from_binary(entry[:32])
self.assertEqual(t[0], e)
e.verify()
self.assertEqual(e.type, 0x30)
self.assertEqual(e.subtype, 0xEE)
self.assertEqual(e.offset, 0x100000)
self.assertEqual(e.size, 0x200000)
self.assertEqual(e.name, "0123456789abc")
def test_multiple_entries(self):
t = gen_esp32part.PartitionTable.from_binary(LONGER_BINARY_TABLE)
t.verify()
self.assertEqual(3, len(t))
self.assertEqual(t[0].type, gen_esp32part.APP_TYPE)
self.assertEqual(t[0].name, "factory")
self.assertEqual(t[1].type, gen_esp32part.DATA_TYPE)
self.assertEqual(t[1].name, "data")
self.assertEqual(t[2].type, 0x10)
self.assertEqual(t[2].name, "second")
round_trip = _strip_trailing_ffs(t.to_binary())
self.assertEqual(round_trip, LONGER_BINARY_TABLE)
def test_bad_magic(self):
bad_magic = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00"
with self.assertRaisesRegex(gen_esp32part.InputError, "Invalid magic bytes"):
gen_esp32part.PartitionTable.from_binary(bad_magic)
def test_bad_length(self):
bad_length = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789"
with self.assertRaisesRegex(gen_esp32part.InputError, "32 bytes"):
gen_esp32part.PartitionTable.from_binary(bad_length)
class CSVOutputTests(Py23TestCase):
def _readcsv(self, source_str):
return list(csv.reader(source_str.split("\n")))
def test_output_simple_formatting(self):
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(True)
c = self._readcsv(as_csv)
# first two lines should start with comments
self.assertEqual(c[0][0][0], "#")
self.assertEqual(c[1][0][0], "#")
row = c[2]
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "0")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000") # reformatted as hex
self.assertEqual(row[4], "0x100000") # also hex
# round trip back to a PartitionTable and check is identical
roundtrip = gen_esp32part.PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
def test_output_smart_formatting(self):
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(False)
c = self._readcsv(as_csv)
# first two lines should start with comments
self.assertEqual(c[0][0][0], "#")
self.assertEqual(c[1][0][0], "#")
row = c[2]
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "app")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000")
self.assertEqual(row[4], "1M")
# round trip back to a PartitionTable and check is identical
roundtrip = gen_esp32part.PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
class CommandLineTests(Py23TestCase):
def test_basic_cmdline(self):
try:
binpath = tempfile.mktemp()
csvpath = tempfile.mktemp()
# copy binary contents to temp file
with open(binpath, 'wb') as f:
f.write(LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert binary file to CSV
output = subprocess.check_output([sys.executable, "../gen_esp32part.py",
binpath, csvpath], stderr=subprocess.STDOUT)
# reopen the CSV and check the generated binary is identical
self.assertNotIn(b"WARNING", output)
with open(csvpath, 'r') as f:
from_csv = gen_esp32part.PartitionTable.from_csv(f.read())
self.assertEqual(_strip_trailing_ffs(from_csv.to_binary()), LONGER_BINARY_TABLE)
            # run gen_esp32part.py to convert the CSV to binary again
output = subprocess.check_output([sys.executable, "../gen_esp32part.py",
csvpath, binpath], stderr=subprocess.STDOUT)
self.assertNotIn(b"WARNING", output)
# assert that file reads back as identical
with open(binpath, 'rb') as f:
binary_readback = f.read()
binary_readback = _strip_trailing_ffs(binary_readback)
self.assertEqual(binary_readback, LONGER_BINARY_TABLE)
finally:
for path in binpath, csvpath:
try:
os.remove(path)
except OSError:
pass
class VerificationTests(Py23TestCase):
def test_bad_alignment(self):
csv = """
# Name,Type, SubType,Offset,Size
app,app, factory, 32K, 1M
"""
with self.assertRaisesRegex(gen_esp32part.ValidationError, r"Offset.+not aligned"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
def test_warnings(self):
try:
sys.stderr = io.StringIO() # capture stderr
csv_1 = "app, 1, 2, 32K, 1M\n"
gen_esp32part.PartitionTable.from_csv(csv_1).verify()
self.assertIn("WARNING", sys.stderr.getvalue())
self.assertIn("partition type", sys.stderr.getvalue())
sys.stderr = io.StringIO()
csv_2 = "ota_0, app, ota_1, , 1M\n"
gen_esp32part.PartitionTable.from_csv(csv_2).verify()
self.assertIn("WARNING", sys.stderr.getvalue())
self.assertIn("partition subtype", sys.stderr.getvalue())
finally:
sys.stderr = sys.__stderr__
class PartToolTests(Py23TestCase):
def _run_parttool(self, csvcontents, args, info):
csvpath = tempfile.mktemp()
with open(csvpath, "w") as f:
f.write(csvcontents)
try:
output = subprocess.check_output([sys.executable, "../parttool.py"] + args.split(" ")
+ ["--partition-table-file", csvpath, "get_partition_info", "--info", info],
stderr=subprocess.STDOUT)
self.assertNotIn(b"WARNING", output)
m = re.search(b"0x[0-9a-fA-F]+", output)
return m.group(0) if m else ""
finally:
os.remove(csvpath)
def test_find_basic(self):
csv = """
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
factory, app, factory, 0x10000, 1M
"""
def rpt(args, info):
return self._run_parttool(csv, args, info)
self.assertEqual(
rpt("--partition-type=data --partition-subtype=nvs -q", "offset"), b"0x9000")
self.assertEqual(
rpt("--partition-type=data --partition-subtype=nvs -q", "size"), b"0x4000")
self.assertEqual(
rpt("--partition-name=otadata -q", "offset"), b"0xd000")
self.assertEqual(
rpt("--partition-boot-default -q", "offset"), b"0x10000")
def test_fallback(self):
csv = """
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
ota_0, app, ota_0, 0x30000, 1M
ota_1, app, ota_1, , 1M
"""
def rpt(args, info):
return self._run_parttool(csv, args, info)
self.assertEqual(
rpt("--partition-type=app --partition-subtype=ota_1 -q", "offset"), b"0x130000")
self.assertEqual(
rpt("--partition-boot-default -q", "offset"), b"0x30000") # ota_0
csv_mod = csv.replace("ota_0", "ota_2")
self.assertEqual(
self._run_parttool(csv_mod, "--partition-boot-default -q", "offset"),
b"0x130000") # now default is ota_1
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| 5,730,980,972,176,826,000
| 35.097192
| 121
| 0.585891
| false
| 3.233314
| true
| false
| false
|
moxgreen/partial_corr.py
|
partial_corr.py
|
1
|
3301
|
#!/usr/bin/env python
from sys import stdin, stderr
from optparse import OptionParser
import numpy as np
from scipy import stats, linalg
"""
Partial Correlation in Python (clone of Matlab's partialcorr)
This uses the linear regression approach to compute the partial
correlation (might be slow for a huge number of variables). The
algorithm is detailed here:
http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
Taking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},
the algorithm can be summarized as
1) perform a normal linear least-squares regression with X as the target and Z as the predictor
2) calculate the residuals in Step #1
3) perform a normal linear least-squares regression with Y as the target and Z as the predictor
4) calculate the residuals in Step #3
5) calculate the correlation coefficient between the residuals from Steps #2 and #4;
The result is the partial correlation between X and Y while controlling for the effect of Z
Date: Nov 2014
Author: Fabian Pedregosa-Izquierdo, f@bianp.net
Testing: Valentina Borghesani, valentinaborghesani@gmail.com
Date: March 2015:
Modified by: Ivan Molineris, ivan.molineris@gmail.com
"""
def partial_corr(C):
"""
Returns the sample linear partial correlation coefficients between pairs of variables in C, controlling
for the remaining variables in C.
Parameters
----------
C : array-like, shape (n, p)
Array with the different variables. Each column of C is taken as a variable
Returns
-------
P : array-like, shape (p, p)
P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
for the remaining variables in C.
"""
C = np.asarray(C)
p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
res_j = C[:, j] - C[:, idx].dot( beta_i)
res_i = C[:, i] - C[:, idx].dot(beta_j)
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
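def _example_partial_corr():
    """Illustrative sketch (added for clarity; not part of the original
    script): compute the partial correlation matrix of a small random data
    set. All names in this helper are hypothetical."""
    demo = np.random.randn(200, 4)   # 200 samples, 4 variables (columns)
    pcorr = partial_corr(demo)       # 4x4 symmetric matrix with unit diagonal
    # pcorr[i, j] is the correlation between the residuals of columns i and j
    # after both have been regressed on the remaining two columns (steps 1-5
    # of the algorithm described in the module docstring).
    return pcorr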
def main():
usage = '''%prog < STDIN
Returns the sample linear partial correlation coefficients between pairs of rows in the STDIN, controlling
for the remaining variables in STDIN.
The first column of each row of the input matrix is intended as row_id
'''
parser = OptionParser(usage=usage)
options, args = parser.parse_args()
if len(args) != 0:
exit('Unexpected argument number.')
cols_len=None
matrix=[]
row_ids=[]
for line in stdin:
cols = line.rstrip().split('\t')
row_ids.append(cols.pop(0))
cols = [float(c) for c in cols]
if cols_len is None:
cols_len = len(cols)
assert cols_len == len(cols)
matrix.append(cols)
matrix = np.asarray(matrix)
matrix = matrix.T
C=partial_corr(matrix)
for i,k in enumerate(row_ids):
for j,l in enumerate(row_ids):
if j>i:
                print(row_ids[i], row_ids[j], C[i, j])
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,374,982,293,936,116,000
| 27.704348
| 108
| 0.646471
| false
| 3.403093
| false
| false
| false
|
htcondor/htcondor
|
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/utils.py
|
1
|
17863
|
'''Utility functions and classes used internally by Skype4Py.
'''
import sys
import weakref
import threading
from new import instancemethod
def chop(s, n=1, d=None):
'''Chops initial words from a string and returns a list of them and the rest of the string.
@param s: String to chop from.
@type s: str or unicode
@param n: Number of words to chop.
@type n: int
    @param d: Optional delimiter. Any white-char by default.
@type d: str or unicode
@return: A list of n first words from the string followed by the rest of the string
(C{[w1, w2, ..., wn, rest_of_string]}).
@rtype: list of str or unicode
'''
spl = s.split(d, n)
if len(spl) == n:
spl.append(s[:0])
if len(spl) != n + 1:
raise ValueError('chop: Could not chop %d words from \'%s\'' % (n, s))
return spl
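# Illustrative example (added comment, not original Skype4Py code):
#   chop('CHAT dialog MEMBERS alice bob', 2)
#   # -> ['CHAT', 'dialog', 'MEMBERS alice bob']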
def args2dict(s):
'''Converts a string in 'ARG="value", ARG2="value2"' format into a dictionary.
@param s: Input string with comma-separated 'ARG="value"' strings.
@type s: str or unicode
@return: C{{'ARG': 'value'}} dictionary.
@rtype: dict
'''
d = {}
while s:
t, s = chop(s, 1, '=')
if s.startswith('"'):
i = 0
while True:
i = s.find('"', i+1)
# XXX How are the double-quotes escaped? The code below implements VisualBasic technique.
try:
if s[i+1] != '"':
break
else:
i += 1
except IndexError:
break
if i > 0:
d[t] = s[1:i]
s = s[i+1:]
else:
d[t] = s
break
else:
i = s.find(', ')
if i >= 0:
d[t] = s[:i]
s = s[i+2:]
else:
d[t] = s
break
return d
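# Illustrative examples (added comment, not original Skype4Py code):
#   args2dict('STATUS=ONLINE, TIMESTAMP=123')
#   # -> {'STATUS': 'ONLINE', 'TIMESTAMP': '123'}
#   args2dict('CAPABILITY="value with spaces"')
#   # -> {'CAPABILITY': 'value with spaces'}
# Unquoted values end at the next ', '; quoted values may contain commas.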
def quote(s, always=False):
'''Adds double-quotes to string if needed.
@param s: String to add double-quotes to.
@type s: str or unicode
@param always: If True, adds quotes even if the input string contains no spaces.
@type always: bool
@return: If the given string contains spaces or always=True, returns the string enclosed
in double-quotes (if it contained quotes too, they are preceded with a backslash).
    Otherwise returns the string unchanged.
@rtype: str or unicode
'''
if always or ' ' in s:
return '"%s"' % s.replace('"', '\\"')
return s
def esplit(s, d=None):
'''Splits a string into words.
@param s: String to split.
@type s: str or unicode
    @param d: Optional delimiter. Any white-char by default.
@type d: str or unicode
@return: A list of words or C{[]} if the string was empty.
@rtype: list of str or unicode
@note: This function works like C{s.split(d)} except that it always returns an
empty list instead of C{['']} for empty strings.
'''
if s:
return s.split(d)
return []
def cndexp(condition, truevalue, falsevalue):
'''Simulates a conditional expression known from C or Python 2.5+.
@param condition: Boolean value telling what should be returned.
@type condition: bool, see note
@param truevalue: Value returned if condition was True.
@type truevalue: any
@param falsevalue: Value returned if condition was False.
@type falsevalue: any
@return: Either truevalue or falsevalue depending on condition.
@rtype: same as type of truevalue or falsevalue
@note: The type of condition parameter can be anything as long as
C{bool(condition)} returns a bool value.
'''
if condition:
return truevalue
return falsevalue
class _WeakMethod(object):
'''Helper class for WeakCallableRef function (see below).
Don't use directly.
'''
def __init__(self, method, callback=None):
'''__init__.
@param method: Method to be referenced.
@type method: method
@param callback: Callback to be called when the method is collected.
@type callback: callable
'''
self.im_func = method.im_func
try:
self.weak_im_self = weakref.ref(method.im_self, self._dies)
except TypeError:
self.weak_im_self = None
self.im_class = method.im_class
self.callback = callback
def __call__(self):
if self.weak_im_self:
im_self = self.weak_im_self()
if im_self is None:
return None
else:
im_self = None
return instancemethod(self.im_func, im_self, self.im_class)
def __repr__(self):
obj = self()
objrepr = repr(obj)
if obj is None:
objrepr = 'dead'
return '<weakref at 0x%x; %s>' % (id(self), objrepr)
def _dies(self, ref):
# weakref to im_self died
self.im_func = self.im_class = None
if self.callback is not None:
self.callback(self)
def WeakCallableRef(c, callback=None):
'''Creates and returns a new weak reference to a callable object.
In contrast to weakref.ref() works on all kinds of callables.
Usage is same as weakref.ref().
@param c: A callable that the weak reference should point at.
@type c: callable
@param callback: Callback called when the callable is collected (freed).
@type callback: callable
@return: A weak callable reference.
@rtype: weakref
'''
try:
return _WeakMethod(c, callback)
except AttributeError:
return weakref.ref(c, callback)
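# Illustrative sketch (added comment, not original Skype4Py code): unlike a
# plain weakref.ref(), this also works for bound methods, e.g.
#   class Example(object):
#       def handler(self): pass
#   obj = Example()
#   ref = WeakCallableRef(obj.handler)
#   ref()   # -> bound method while 'obj' is alive, None once it is collected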
class _EventHandlingThread(threading.Thread):
def __init__(self, name=None):
'''__init__.
@param name: name
@type name: unicode
'''
threading.Thread.__init__(self, name='%s event handler' % name)
self.setDaemon(False)
self.lock = threading.Lock()
self.queue = []
def enqueue(self, target, args, kwargs):
'''enqueue.
@param target: Callable to be called.
@type target: callable
@param args: Positional arguments for the callable.
@type args: tuple
@param kwargs: Keyword arguments for the callable.
@type kwargs: dict
'''
self.queue.append((target, args, kwargs))
def run(self):
'''Executes all enqueued targets.
'''
while True:
try:
try:
self.lock.acquire()
h = self.queue[0]
del self.queue[0]
except IndexError:
break
finally:
self.lock.release()
h[0](*h[1], **h[2])
class EventHandlingBase(object):
'''This class is used as a base by all classes implementing event handlers.
Look at known subclasses (above in epydoc) to see which classes will allow you to
    attach your own callables (event handlers) to certain events occurring in them.
    Read the respective classes' documentation to learn what events are provided by them. The
    events are always defined in a class whose name consists of the name of the class it provides
    events for, followed by C{Events}. For example class L{ISkype} provides events defined in
L{ISkypeEvents}. The events class is always defined in the same submodule as the main class.
The events class is just informative. It tells you what events you can assign your event
    handlers to, when they occur and what argument lists your event handlers should
accept.
There are three ways of attaching an event handler to an event.
1. C{Events} object.
Use this method if you need to attach many event handlers to many events.
Write your event handlers as methods of a class. The superclass of your class
    doesn't matter, Skype4Py will just look for methods with appropriate names.
The names of the methods and their arguments lists can be found in respective
events classes (see above).
Pass an instance of this class as the C{Events} argument to the constructor of
a class whose events you are interested in. For example::
import Skype4Py
class MySkypeEvents:
def UserStatus(self, Status):
print 'The status of the user changed'
skype = Skype4Py.Skype(Events=MySkypeEvents())
The C{UserStatus} method will be called when the status of the user currently logged
into skype is changed.
2. C{On...} properties.
This method lets you use any callables as event handlers. Simply assign them to C{On...}
properties (where "C{...}" is the name of the event) of the object whose events you are
interested in. For example::
import Skype4Py
def user_status(Status):
print 'The status of the user changed'
skype = Skype4Py.Skype()
skype.OnUserStatus = user_status
The C{user_status} function will be called when the status of the user currently logged
into skype is changed.
The names of the events and their arguments lists should be taken from respective events
classes (see above). Note that there is no C{self} argument (which can be seen in the events
classes) simply because our event handler is a function, not a method.
3. C{RegisterEventHandler} / C{UnregisterEventHandler} methods.
    This method, like the second one, also lets you use any callables as event handlers. However,
    it additionally lets you assign many event handlers to a single event.
In this case, you use L{RegisterEventHandler} and L{UnregisterEventHandler} methods
of the object whose events you are interested in. For example::
import Skype4Py
def user_status(Status):
print 'The status of the user changed'
skype = Skype4Py.Skype()
skype.RegisterEventHandler('UserStatus', user_status)
The C{user_status} function will be called when the status of the user currently logged
into skype is changed.
The names of the events and their arguments lists should be taken from respective events
classes (see above). Note that there is no C{self} argument (which can be seen in the events
classes) simply because our event handler is a function, not a method.
B{Important notes!}
The event handlers are always called on a separate thread. At any given time, there is at most
one handling thread per event type. This means that when a lot of events of the same type are
generated at once, handling of an event will start only after the previous one is handled.
Handling of events of different types may happen simultaneously.
    In the case of the second and third methods, only weak references to the event handlers are stored. This
means that you must make sure that Skype4Py is not the only one having a reference to the callable
or else it will be garbage collected and silently removed from Skype4Py's handlers list. On the
other hand, it frees you from worrying about cyclic references.
'''
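    # Illustrative note (added comment, not original Skype4Py code): because
    # only weak references are kept for methods 2 and 3 above, the handler must
    # stay referenced elsewhere, e.g.
    #   def user_status(Status): ...
    #   skype.OnUserStatus = user_status   # keep 'user_status' alive yourself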
_EventNames = []
def __init__(self):
'''Initializes the object.
'''
self._EventHandlerObj = None
self._DefaultEventHandlers = {}
self._EventHandlers = {}
self._EventThreads = {}
for event in self._EventNames:
self._EventHandlers[event] = []
def _CallEventHandler(self, Event, *args, **kwargs):
'''Calls all event handlers defined for given Event (str), additional parameters
will be passed unchanged to event handlers, all event handlers are fired on
separate threads.
'''
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
handlers = filter(None, handlers.values())
# try the On... handlers
try:
h = self._DefaultEventHandlers[Event]()
if h:
handlers.append(h)
except KeyError:
pass
# try the object handlers
try:
handlers.append(getattr(self._EventHandlerObj, Event))
except AttributeError:
pass
# if no handlers, leave
if not handlers:
return
# initialize event handling thread if needed
if Event in self._EventThreads:
t = self._EventThreads[Event]
t.lock.acquire()
if not self._EventThreads[Event].isAlive():
t = self._EventThreads[Event] = _EventHandlingThread(Event)
else:
t = self._EventThreads[Event] = _EventHandlingThread(Event)
# enqueue handlers in thread
for h in handlers:
t.enqueue(h, args, kwargs)
# start serial event processing
try:
t.lock.release()
except:
t.start()
def RegisterEventHandler(self, Event, Target):
'''Registers any callable as an event handler.
@param Event: Name of the event. For event names, see the respective C{...Events} class.
@type Event: str
@param Target: Callable to register as the event handler.
@type Target: callable
@return: True is callable was successfully registered, False if it was already registered.
@rtype: bool
@see: L{EventHandlingBase}
'''
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
if Target in handlers.values():
return False
self._EventHandlers[Event].append(WeakCallableRef(Target))
return True
def UnregisterEventHandler(self, Event, Target):
'''Unregisters a previously registered event handler (a callable).
@param Event: Name of the event. For event names, see the respective C{...Events} class.
@type Event: str
@param Target: Callable to unregister.
@type Target: callable
@return: True if callable was successfully unregistered, False if it wasn't registered first.
@rtype: bool
@see: L{EventHandlingBase}
'''
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
for wref, trg in handlers.items():
if trg == Target:
self._EventHandlers[Event].remove(wref)
return True
return False
def _SetDefaultEventHandler(self, Event, Target):
if Target:
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
self._DefaultEventHandlers[Event] = WeakCallableRef(Target)
else:
try:
del self._DefaultEventHandlers[Event]
except KeyError:
pass
def _GetDefaultEventHandler(self, Event):
try:
return self._DefaultEventHandlers[Event]()
except KeyError:
pass
def _SetEventHandlerObj(self, Obj):
'''Registers an object (Obj) as event handler, object should contain methods with names
corresponding to event names, only one obj is allowed at a time.
'''
self._EventHandlerObj = Obj
@staticmethod
def __AddEvents_make_event(Event):
# TODO: rework to make compatible with cython
return property(lambda self: self._GetDefaultEventHandler(Event),
lambda self, value: self._SetDefaultEventHandler(Event, value))
@classmethod
def _AddEvents(cls, klass):
'''Adds events to class based on 'klass' attributes.'''
for event in dir(klass):
if not event.startswith('_'):
setattr(cls, 'On%s' % event, cls.__AddEvents_make_event(event))
cls._EventNames.append(event)
class Cached(object):
'''Base class for all cached objects.
Every object is identified by an Id specified as first parameter of the constructor.
Trying to create two objects with same Id yields the same object. Uses weak references
to allow the objects to be deleted normally.
@warning: C{__init__()} is always called, don't use it to prevent initializing an already
initialized object. Use C{_Init()} instead, it is called only once.
'''
_cache_ = weakref.WeakValueDictionary()
def __new__(cls, Id, *args, **kwargs):
h = cls, Id
try:
return cls._cache_[h]
except KeyError:
o = object.__new__(cls)
cls._cache_[h] = o
if hasattr(o, '_Init'):
o._Init(Id, *args, **kwargs)
return o
def __copy__(self):
return self
|
apache-2.0
| -8,032,516,272,118,471,000
| 34.869478
| 105
| 0.60477
| false
| 4.369618
| false
| false
| false
|
bhrzslm/uncertainty-reasoning
|
my_engine/others/pbnt/examples/ExampleModels.py
|
1
|
3873
|
# PBNT: Python Bayes Network Toolbox
#
# Copyright (c) 2005, Elliot Cohen
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * The name "Elliot Cohen" may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import sys
from numarray import *
sys.path.append('../lib')
import numarray.objects as obj
from pbnt.Graph import *
from pbnt.Distribution import *
from pbnt.Node import *
def water():
""" This is an example of how to implement the basic water network (4 nodes, cloudy, sprinkler, rain, and wetgrass. sprinkler and rain are children of cloudy, and wetgrass is a child of both sprinkler and rain).
"""
#testing basic bayes net class implementation
numberOfNodes = 4
#name the nodes
cloudy = 0
sprinkler = 1
rain = 2
wetgrass = 3
cNode = BayesNode(0, 2, name="cloudy")
sNode = BayesNode(1, 2, name="sprinkler")
rNode = BayesNode(2, 2, name="rain")
wNode = BayesNode(3, 2, name="wetgrass")
#cloudy
cNode.add_child(sNode)
cNode.add_child(rNode)
#sprinkler
sNode.add_parent(cNode)
sNode.add_child(wNode)
#rain
rNode.add_parent(cNode)
rNode.add_child(wNode)
#wetgrass
wNode.add_parent(sNode)
wNode.add_parent(rNode)
nodes = [cNode, sNode, rNode, wNode]
#create distributions
#cloudy distribution
cDistribution = DiscreteDistribution(cNode)
index = cDistribution.generate_index([],[])
cDistribution[index] = 0.5
cNode.set_dist(cDistribution)
#sprinkler
dist = zeros([cNode.size(),sNode.size()], type=Float32)
dist[0,] = 0.5
dist[1,] = [0.9,0.1]
sDistribution = ConditionalDiscreteDistribution(nodes=[cNode, sNode], table=dist)
sNode.set_dist(sDistribution)
#rain
dist = zeros([cNode.size(), rNode.size()], type=Float32)
dist[0,] = [0.8,0.2]
dist[1,] = [0.2,0.8]
rDistribution = ConditionalDiscreteDistribution(nodes=[cNode, rNode], table=dist)
rNode.set_dist(rDistribution)
#wetgrass
dist = zeros([sNode.size(), rNode.size(), wNode.size()], type=Float32)
dist[0,0,] = [1.0,0.0]
dist[1,0,] = [0.1,0.9]
dist[0,1,] = [0.1,0.9]
dist[1,1,] = [0.01,0.99]
wgDistribution = ConditionalDiscreteDistribution(nodes=[sNode, rNode, wNode], table=dist)
wNode.set_dist(wgDistribution)
#create bayes net
bnet = BayesNet(nodes)
return bnet
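# Illustrative summary (added comment, not original PBNT code) of the graph
# built above:
#
#        cloudy
#        /    \
#   sprinkler  rain
#        \    /
#       wetgrass
#
# e.g. bnet = water() returns a BayesNet over these four binary nodes.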
|
mit
| 3,967,815,240,085,659,600
| 32.580357
| 216
| 0.67338
| false
| 3.433511
| false
| false
| false
|
fungos/gemuo
|
src/gemuo/data.py
|
1
|
8601
|
#
# GemUO
#
# (c) 2005-2012 Max Kellermann <max@duempel.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Loader for the UO client data files.
import os
import struct
FLAG_IMPASSABLE = 0x40
FLAG_SURFACE = 0x200
class TileData:
def __init__(self, path):
f = file(path)
# detect file format
f.seek(36)
x = f.read(20).rstrip('\0')
f.seek(0)
if x.find('\0') == -1:
# old file format
read_flags = lambda f: struct.unpack('<I', f.read(4))[0]
item_count = 0x200
else:
# new file format (>= 7.0)
read_flags = lambda f: struct.unpack('<Q', f.read(8))[0]
item_count = 0x400
self.land_flags = []
for a in range(0x200):
f.seek(4, 1)
for b in range(0x20):
self.land_flags.append(read_flags(f))
f.seek(22, 1) # skip texture and name
assert len(self.land_flags) == 0x4000
self.item_flags = []
for a in range(item_count):
f.seek(4, 1)
for b in range(0x20):
self.item_flags.append(read_flags(f))
f.seek(33, 1)
assert len(self.item_flags) == item_count * 0x20
def land_passable(self, id):
assert id >= 0 and id < len(self.land_flags)
return (self.land_flags[id] & FLAG_IMPASSABLE) == 0
def item_passable(self, id):
assert id >= 0 and id < len(self.item_flags)
return (self.item_flags[id] & FLAG_IMPASSABLE) == 0
def item_surface(self, id):
assert id >= 0 and id < len(self.item_flags)
return (self.item_flags[id] & FLAG_SURFACE) == 0
class LandBlock:
def __init__(self, data):
assert len(data) == 192
self.data = data
def get_id(self, x, y):
assert x >= 0 and x < 8
assert y >= 0 and y < 8
i = (y * 8 + x) * 3
return struct.unpack_from('<H', self.data, i)[0]
def get_height(self, x, y):
assert x >= 0 and x < 8
assert y >= 0 and y < 8
i = (y * 8 + x) * 3
return ord(self.data[i + 2])
class LandLoader:
def __init__(self, path, width, height):
self.file = file(path)
self.width = width
self.height = height
def load_block(self, x, y):
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
self.file.seek(((x * self.height) + y) * 196 + 4)
return LandBlock(self.file.read(192))
class IndexLoader:
def __init__(self, path, width, height):
self.file = file(path)
self.width = width
self.height = height
def load_block(self, x, y):
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
self.file.seek(((x * self.height) + y) * 12)
data = self.file.read(8)
offset, length = struct.unpack('<ii', data)
if offset < 0 or length <= 0:
return None, 0
return offset, length
class Static:
def __init__(self, id, x, y, z, hue=None):
self.id = id
self.x = x
self.y = y
self.z = z
self.hue = hue
class StaticsList:
def __init__(self, data):
self.data = data
self.passable = None # bit field, see _build_passable()
self.surface = None
def __iter__(self):
i = 0
while i < len(self.data):
id, x, y, z, hue = struct.unpack_from('<HBBbH', self.data, i)
yield id, x, y, z, hue
i += 7
def iter_at(self, x, y):
for id, ix, iy, z, hue in self:
if ix == x and iy == y:
yield id, z, hue
def _build_passable(self, tile_data):
# each of the 64 bits tells whether the position is passable
passable = 0xffffffffffffffffL
for id, x, y, z, hue in self:
if not tile_data.item_passable(id):
bit = x * 8 + y
passable &= ~(1 << bit)
self.passable = passable
def is_passable(self, tile_data, x, y, z):
if self.passable is None:
self._build_passable(tile_data)
bit = x * 8 + y
return (self.passable & (1 << bit)) != 0
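    # Illustrative note (added comment, not original GemUO code): the 8x8 cell
    # block is flattened into a 64-bit mask with bit = x * 8 + y, so e.g. the
    # cell (x=2, y=5) corresponds to bit 21 and can be tested with
    #   passable & (1 << 21)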
def _build_surface(self, tile_data):
# each of the 64 bits tells whether the position is surface
surface = 0L
for id, x, y, z, hue in self:
if not tile_data.item_surface(id):
bit = x * 8 + y
surface |= 1 << bit
self.surface = surface
def is_surface(self, tile_data, x, y):
if self.surface is None:
self._build_surface(tile_data)
bit = x * 8 + y
return (self.surface & (1 << bit)) != 0
class StaticsLoader:
def __init__(self, path):
self.file = file(path)
def load_block(self, offset, length):
self.file.seek(offset)
return StaticsList(self.file.read(length))
class StaticsGlue:
def __init__(self, index, static):
self.index = index
self.static = static
def load_block(self, x, y):
offset, length = self.index.load_block(x, y)
if length == 0: return None
return self.static.load_block(offset, length)
class MapGlue:
def __init__(self, tile_data, map_path, index_path, statics_path, width, height):
self.tile_data = tile_data
self.land = LandLoader(map_path, width, height)
self.statics = StaticsGlue(IndexLoader(index_path, width, height),
StaticsLoader(statics_path))
def land_tile_id(self, x, y):
block = self.land.load_block(x / 8, y / 8)
return block.get_id(x % 8, y % 8)
def land_tile_flags(self, x, y):
return self.tile_data.land_flags[self.land_tile_id(x, y)]
def land_tile_height(self, x, y):
block = self.land.load_block(x / 8, y / 8)
return block.get_height(x % 8, y % 8)
def statics_at(self, x, y):
block = self.statics.load_block(x / 8, y / 8)
if block is None: return iter(())
return block.iter_at(x % 8, y %8)
def is_passable(self, x, y, z):
statics = self.statics.load_block(x / 8, y / 8)
if statics is not None and not statics.is_passable(self.tile_data, x % 8, y % 8, z):
return False
# even if land is impassable, there may be statics that build
# a "surface" to walk on
block = self.land.load_block(x / 8, y / 8)
if not self.tile_data.land_passable(block.get_id(x % 8, y % 8)) and \
(statics is None or not statics.is_surface(self.tile_data, x % 8, y % 8)):
return False
#bz = block.get_height(x % 8, y % 8)
#if bz > z: return False
return True
def surface_at(self, x, y):
for id, z, hue in self.statics_at(x, y):
if self.tile_data.item_surface(id):
return Static(id, x, y, z, hue)
return None
def flush_cache(self):
# not implemented in this base class
pass
class BlockCache:
def __init__(self, loader):
self._loader = loader
self._cache = dict()
def load_block(self, x, y):
i = x * 65536 + y
try:
return self._cache[i]
except KeyError:
b = self._loader.load_block(x, y)
self._cache[i] = b
return b
class CachedMapGlue(MapGlue):
def __init__(self, *args, **keywords):
MapGlue.__init__(self, *args, **keywords)
self.land = BlockCache(self.land)
self.statics = BlockCache(self.statics)
class TileCache:
def __init__(self, path):
self._path = path
self._tile_data = TileData(os.path.join(self._path, 'tiledata.mul'))
self._maps = {}
def get_map(self, i):
if i in self._maps:
return self._maps[i]
m = CachedMapGlue(self._tile_data,
os.path.join(self._path, 'map%u.mul' % i),
os.path.join(self._path, 'staidx%u.mul' % i),
os.path.join(self._path, 'statics%u.mul' % i),
768, 512)
self._maps[i] = m
return m
|
gpl-2.0
| -6,442,318,921,544,303,000
| 29.938849
| 92
| 0.537379
| false
| 3.346693
| false
| false
| false
|
gonicus/gosa
|
backend/src/gosa/backend/objects/filter/strings.py
|
1
|
10619
|
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import json
import logging
import re
from gosa.backend.objects.filter import ElementFilter
import datetime
from gosa.common.gjson import loads, dumps
class SplitString(ElementFilter):
"""
splits a string by the given separator
=========== ===========================
Key Description
=========== ===========================
glue The separator string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>SplitString</Name>
>>> <Param>,</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(SplitString, self).__init__(obj)
def process(self, obj, key, valDict, glue=", "):
        if valDict[key]['value'] is not None and len(valDict[key]['value']):
tmp = valDict[key]['value'][0].split(glue)
new_val = [n for n in tmp if n != ""]
valDict[key]['value'] = new_val
return key, valDict
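# Illustrative example (added comment, not original GOsa code) of how a filter
# transforms the per-attribute value dictionary; 'obj' and the attribute name
# below are hypothetical:
#   valDict = {'member': {'value': ['a,b,,c']}}
#   key, valDict = SplitString(obj).process(obj, 'member', valDict, glue=',')
#   # valDict['member']['value'] is now ['a', 'b', 'c'] (empty parts dropped)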
class JoinArray(ElementFilter):
"""
Joins an array into a single string using the given separator
=========== ===========================
Key Description
=========== ===========================
glue The joining string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>JoinArray</Name>
>>> <Param>,</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(JoinArray, self).__init__(obj)
def process(self, obj, key, valDict, glue=", "):
        if valDict[key]['value'] is not None:
new_val = glue.join(valDict[key]['value'])
if not new_val:
valDict[key]['value'] = []
else:
valDict[key]['value'] = [new_val]
return key, valDict
class ConcatString(ElementFilter):
"""
Concatenate a string to the current value.
=========== ===========================
Key Description
=========== ===========================
appstr The string to concatenate
    position The side ('left' or 'right') on which to concatenate the string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>ConcatString</Name>
>>> <Param>Hello Mr. </Param>
>>> <Param>left</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(ConcatString, self).__init__(obj)
def process(self, obj, key, valDict, appstr, position):
        if valDict[key]['value'] is not None:
if position == "right":
new_val = list(map(lambda x: x + appstr, valDict[key]['value']))
else:
new_val = list(map(lambda x: appstr + x, valDict[key]['value']))
valDict[key]['value'] = new_val
return key, valDict
class Replace(ElementFilter):
"""
    Perform a replacement using a regular expression.
=========== ===========================
Key Description
=========== ===========================
regex The regular expression to use
replacement The replacement string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>Replace</Name>
>>> <Param>^{([^}]*)}.*$</Param>
>>> <Param>Result: \1</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(Replace, self).__init__(obj)
def process(self, obj, key, valDict, regex, replacement):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: re.sub(regex, str(replacement), x), valDict[key]['value']))
return key, valDict
class DateToString(ElementFilter):
"""
Converts a datetime object into a string.
=========== ===========================
Key Description
=========== ===========================
fmt The outgoing format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>DateToString</Name>
>>> <Param>%Y-%m-%d</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(DateToString, self).__init__(obj)
def process(self, obj, key, valDict, fmt="%Y%m%d%H%M%SZ"):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: x.strftime(fmt), valDict[key]['value']))
return key, valDict
class TimeToString(DateToString):
"""
Converts a datetime object into a string.
=========== ===========================
Key Description
=========== ===========================
fmt The outgoing format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>DateToString</Name>
>>> <Param>%Y-%m-%d</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(TimeToString, self).__init__(obj)
class StringToDate(ElementFilter):
"""
Converts a string object into a datetime.date object..
=========== ===========================
Key Description
=========== ===========================
fmt The format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToDate</Name>
>>> <Param>%Y-%m-%d</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToDate, self).__init__(obj)
def process(self, obj, key, valDict, fmt="%Y%m%d%H%M%SZ"):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: datetime.datetime.strptime(x, fmt).date(), valDict[key]['value']))
return key, valDict
class StringToTime(ElementFilter):
"""
Converts a string object into a datetime.datetime object..
=========== ===========================
Key Description
=========== ===========================
fmt The format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToTime</Name>
>>> <Param>%Y%m%d%H%M%SZ</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToTime, self).__init__(obj)
def process(self, obj, key, valDict, fmt="%Y%m%d%H%M%SZ"):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: datetime.datetime.strptime(x, fmt), valDict[key]['value']))
return key, valDict
class IdnaToUnicode(ElementFilter):
"""
Converts a idna object into a unicode object..
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>IdnaToUnicode</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(IdnaToUnicode, self).__init__(obj)
def process(self, obj, key, valDict):
valDict[key]['value'] = list(map(lambda x: x.encode('ascii').decode('idna'), valDict[key]['value']))
valDict[key]['backend_type'] = 'UnicodeString'
return key, valDict
class UnicodeToIdna(ElementFilter):
"""
Converts an unicode object into a idna value ...
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>UnicodeToIdna</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(UnicodeToIdna, self).__init__(obj)
def process(self, obj, key, valDict):
valDict[key]['value'] = list(map(lambda x: x.encode('idna'), valDict[key]['value']))
valDict[key]['backend_type'] = 'String'
return key, valDict
class StringToJson(ElementFilter):
"""
Parses a string with the json parser.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToJson</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToJson, self).__init__(obj)
self.log = logging.getLogger(__name__)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
try:
valDict[key]['value'] = list(map(lambda x: loads(x), valDict[key]['value']))
except json.decoder.JSONDecodeError as e:
self.log.error("invalid JSON value property %s [DN=%s]: %s" % (key, obj.dn if obj is not None else '', valDict[key]['value']))
return key, valDict
class JsonToString(ElementFilter):
"""
Serializes an object to a json string.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>JsonToString</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(JsonToString, self).__init__(obj)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: dumps(x), valDict[key]['value']))
return key, valDict
class IntegerToString(ElementFilter):
"""
Converts a integer into a string.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>IntegerToString</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(IntegerToString, self).__init__(obj)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = [str(i) for i in valDict[key]['value']]
return key, valDict
class StringToInteger(ElementFilter):
"""
Converts a string into an integer.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToInteger</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToInteger, self).__init__(obj)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = [int(i) for i in valDict[key]['value']]
return key, valDict
|
lgpl-2.1
| -5,871,137,458,832,482,000
| 26.371134
| 142
| 0.486957
| false
| 3.733826
| false
| false
| false
|
takkaneko/netengtools
|
boreasprov.py
|
1
|
2030
|
#!/usr/bin/env python3
# boreasprov.py
import hafwl2l3prov
import fwl2l3prov
import halbl2l3prov
import lbl2l3prov
import ipsprov
import hafwiloprov
import fwiloprov
def main():
title ='\n****************************************\n'
title += 'BOREAS NETWORK DEVICE PROVISIONING SUITE\n'
title += '****************************************\n\n'
title += 'Please select your provisioning task from the following menus:\n'
print(title)
options = '1. HA firewall (CheckPoint or ASA5500 series)\n'
options += '2. Stand-alone firewall (CheckPoint or ASA5500 series)\n'
options += '3. HA Alteon4408\n'
options += '4. Stand-alone Alteon4408\n'
options += '5. IPS only (To monitor multiple devices or an HA *standby*)\n'
options += '6. HA firewall SecNet iLO segment (Not common)\n'
options += '7. Stand-alone firewall SecNet iLO segment (Not common)\n'
print(options)
while True:
try:
choice = int(input('Type your selection then hit Enter: '))
if 1 <= choice <=7:
break
else:
print('ERROR: DATA OUT OF RANGE\n')
except ValueError:
print('ERROR: INVALID DATA PROVIDED\n')
if choice == 1:
print('Starting HA firewall provisioning...\n')
hafwl2l3prov.main()
if choice == 2:
print('Starting stand-alone firewall provisioning...\n')
fwl2l3prov.main()
if choice == 3:
print('Starting HA Alteon4408 provisioning...\n')
halbl2l3prov.main()
if choice == 4:
print('Starting stand-alone Alteon4408 provisioning...\n')
lbl2l3prov.main()
if choice == 5:
print('Starting IPS provisioning...\n')
ipsprov.main()
if choice == 6:
print('Starting HA firewall SecNet iLO provisioning...\n')
hafwiloprov.main()
if choice == 7:
print('Starting stand-alone firewall SecNet iLO provisioning...\n')
fwiloprov.main()
if __name__ == '__main__':
main()
|
mit
| 8,231,743,044,508,089,000
| 32.833333
| 79
| 0.587192
| false
| 3.690909
| false
| false
| false
|
cdemulde/wwdata
|
wwdata/Class_HydroData.py
|
1
|
84432
|
# -*- coding: utf-8 -*-
"""
Class_HydroData provides functionalities for handling data obtained in the context of (waste)water treatment.
Copyright (C) 2016 Chaim De Mulder
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
"""
#import sys
import os
#from os import listdir
import pandas as pd
import scipy as sp
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt #plotten in python
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import warnings as wn
import wwdata.data_reading_functions
class HydroData():
"""
Attributes
----------
timedata_column : str
name of the column containing the time data
data_type : str
type of data provided
experiment_tag : str
A tag identifying the experiment; can be a date or a code used by
the producer/owner of the data.
time_unit : str
The time unit in which the time data is given
units : dict
The units of the variables in the columns
"""
def __init__(self,data,timedata_column='index',data_type='WWTP',
experiment_tag='No tag given',time_unit=None,
units={}):
"""
initialisation of a HydroData object.
"""
if isinstance(data, pd.DataFrame):
self.data = data.copy()
else:
try:
self.data = pd.DataFrame(data.copy())
except:
raise Exception("Input data not convertable to DataFrame.")
if timedata_column == 'index':
self.timename = 'index'
self.time = self.data.index
else:
self.timename = timedata_column
self.time = self.data[timedata_column].values.ravel()
self.columns = np.array(self.data.columns)
self.data_type = data_type
self.tag = experiment_tag
self.time_unit = time_unit
self.meta_valid = pd.DataFrame(index=self.data.index)
self.units = units
#self.highs = pd.DataFrame(data=0,columns=['highs'],index=self.data.index)
#wn.warn('WARNING: Some functions in the OnlineSensorBased Class assume ' + \
#'equidistant data!!! This is primarily of importance when indexes are ' + \
#'missing!')
def set_tag(self,tag):
"""
Sets the tag element of the HydroData object to the given tag
Returns
-------
None
"""
self.tag = tag
def set_units(self,units):
"""
Set the units element of the HydroData object to a given dataframe
"""
if isinstance(units, pd.DataFrame):
self.units = units.copy()
else:
try:
self.units = pd.DataFrame(units.copy())
except:
raise Exception("Unit data not convertable to DataFrame type.")
def set_time_unit(self,unit):
"""
Sets the time_unit element of the HydroData object to a given unit
Returns
-------
None
"""
self.time_unit = unit
def head(self, n=5):
"""piping pandas head function, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html for documentation"""
return self.data.head(n)
def tail(self, n=5):
"""piping pandas tail function, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.tail.html for documentation"""
return self.data.tail(n)
def index(self):
"""piping pandas index function, see http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.Index.html for documentation"""
return self.data.index
#####################
### FORMATTING
#####################
def fill_index(self,arange,index_type='float'):
"""
function to fill in missing index values
"""
wn.warn('This function assumes equidistant data and fills the indexes '+\
'accordingly')
first_part = self.data[self.data.index < arange[0]]
if isinstance(self.data.index[0],dt.datetime):
delta_time = self.data.index[1]-self.data.index[0]
index = [arange[0] + delta_time * x for x in range(0, int((arange[1]-arange[0])/delta_time))]
elif isinstance(self.data.index[0],float):
day_length = float(len(self.data[0:1]))
index = np.arange(arange[0],arange[1],(arange[1]-arange[0])/day_length)
fill_part = pd.DataFrame(index=index,columns=self.data.columns)
last_part = self.data[self.data.index > arange[1]]
self.data = first_part.append(fill_part).append(last_part)
self._update_time()
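    # Illustrative sketch (added comment, not original wwdata code), assuming
    # an equidistant float index; 'dataset' is hypothetical:
    #   dataset.fill_index([5.0, 7.0])
    #   # re-creates the index positions between 5.0 and 7.0 as empty (NaN)
    #   # rows so that the index is equidistant and continuous again.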
def _reset_meta_valid(self,data_name=None):
"""
reset the meta dataframe, possibly for only a certain data series,
should wrong labels have been assigned at some point
"""
if data_name == None:
self.meta_valid = pd.DataFrame(index=self.data.index)
else:
try:
self.meta_valid[data_name] = pd.Series(['original']*len(self.meta_valid),index=self.index())
#self.meta_valid.drop(data_name,axis=1)
except:
pass
#wn.warn(data_name + ' is not contained in self.meta_valid yet, so cannot\
#be removed from it!')
def drop_index_duplicates(self):
"""
drop rows with a duplicate index. Also updates the meta_valid dataframe
Note
----
        It is assumed that the dropped rows contain the same data as their index-
based duplicate, i.e. that no data is lost using the function.
"""
#len_orig = len(self.data)
self.data = self.data.groupby(self.index()).first()
self.meta_valid = self.meta_valid.groupby(self.meta_valid.index).first()
self._update_time()
if isinstance(self.index()[1],str):
wn.warn('Rows may change order using this function based on '+ \
'string values. Convert to datetime, int or float and use '+ \
                    '.sort_index() or .sort_values() to avoid. (see also ww.to_datetime())')
def replace(self,to_replace,value,inplace=False):
"""piping pandas replace function, see http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.replace.html for documentation"""
if inplace == False:
return self.__class__(self.data.replace(to_replace,value,inplace=False),
                                  self.timename,self.data_type,
self.tag,self.time_unit)
elif inplace == True:
return self.data.replace(to_replace,value,inplace=inplace)
def set_index(self,keys,key_is_time=False,drop=True,inplace=False,
verify_integrity=False,save_prev_index=True):
"""
piping and extending pandas set_index function, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.set_index.html for documentation
Notes
----------
key_is_time : bool
when true, the new index will we known as the time data from here on
(other arguments cfr pd.set_index)
Returns
-------
HydroData object (if inplace=False)
None (if inplace=True)
"""
if save_prev_index:
self.prev_index = self.data.index
if not inplace:
if key_is_time:
if isinstance(self.time[0],str):
raise ValueError('Time values of type "str" can not be used as index')
timedata_column = 'index'
elif key_is_time == False:
timedata_column = self.timename
data = self.data.set_index(keys,drop=drop,inplace=False,
verify_integrity=verify_integrity)
return self.__class__(pd.DataFrame(data),timedata_column=timedata_column,
data_type=self.data_type,experiment_tag=self.tag,
time_unit=self.time_unit)
elif inplace:
if key_is_time:
if self.timename == 'index':
raise IndexError('There already is a timeseries in the dataframe index!')
if isinstance(self.time[0],str):
raise ValueError('Time values of type "str" can not be used as index')
self.data.set_index(keys,drop=drop,inplace=True,
verify_integrity=verify_integrity)
self.columns = np.array(self.data.columns)
self._update_meta_valid_index()
if key_is_time:
self.timename = 'index'
self.time = self.data.index
def _update_time(self):
"""
adjust the value of self.time, needed in some functions
"""
if self.timename == 'index':
self.time = self.index()
else:
self.time = self.data[self.timename]
def _update_meta_valid_index(self):
"""
update the index of the meta_valid dataframe to be the same as the one of the dataframe
with the data
"""
self.meta_valid.index = self.index()
def to_float(self,columns='all'):
"""
convert values in given columns to float values
Parameters
---------
columns : array of strings
column names of the columns where values need to be converted to floats
"""
if columns == 'all':
columns = self.columns#.levels[0]
for column in columns:
try:
self.data[column] = self.data[column].astype(float)
except TypeError:
print('Data type of column '+ str(column) + ' not convertible to float')
self._update_time()
def to_datetime(self,time_column='index',time_format='%dd-%mm-%yy',
unit='D'):
"""
Piping and modifying pandas to_datetime function
Parameters
---------
time_column : str
column name of the column where values need to be converted to date-
time values. Default 'index' converts index values to datetime
time_format : str
the format to use by to_datetime function to convert strings to
datetime format
unit : str
unit to use by to_datetime function to convert int or float values
to datetime format
"""
if time_column == 'index':
if isinstance(self.time[0],int) or isinstance(self.time[0],float):
self.data.index = pd.to_datetime(self.time,unit=unit)
self.data.sort_index(inplace=True)
elif isinstance(self.time[0],str):
self.data.index = pd.to_datetime(self.time,format=time_format)
self.data.sort_index(inplace=True)
else:
if isinstance(self.time[0],int) or isinstance(self.time[0],float):
self.data.index = pd.to_datetime(self.data[time_column],unit=unit)
                self.data.sort_index(inplace=True)
elif isinstance(self.time[0],str):
self.data[time_column] = pd.to_datetime(self.data[time_column].values.ravel(),
format=time_format)
self.data.sort_values(time_column,inplace=True)
self._update_time()
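    # Usage sketch (hedged; the object 'ds' and the column name 'TimeStamp' are
    # assumed): convert a string time column to datetime values and sort on it.
    #
    #   ds.to_datetime(time_column='TimeStamp', time_format='%d-%m-%Y %H:%M:%S')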
def absolute_to_relative(self,time_data='index',unit='d',inplace=True,
save_abs=True,decimals=5):
"""
converts a pandas series with datetime timevalues to relative timevalues
in the given unit, starting from 0
Parameters
----------
time_data : str
name of the column containing the time data. If this is the index
column, just give 'index' (also default)
unit : str
unit to which to convert the time values (sec, min, hr or d)
Returns
-------
None if inplace is True
HydroData object if inplace it False
"""
if time_data == 'index':
timedata = self.time
else:
timedata = self.data[time_data]
time_delta = timedata - timedata[0]
relative = time_delta.map(total_seconds)
if unit == 'sec':
relative = np.array(relative)
elif unit == 'min':
relative = np.array(relative) / (60)
elif unit == 'hr':
relative = np.array(relative) / (60*60)
elif unit == 'd':
relative = np.array(relative) / (60*60*24)
self.time_unit = unit
if inplace == False:
data = self.data.copy()
data['time_rel'] = relative.round(decimals)
return self.__class__(data,self.timename)
elif inplace == True:
if save_abs == True:
self.data['time_abs'] = timedata
self.columns = np.array(self.data.columns)
if time_data == 'index':
self.data.index = relative.round(decimals)
self._update_time()
self.columns = np.array(self.data.columns)
return None
else:
self.data[time_data] = relative.round(decimals)
return None
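    # Usage sketch (assumed object 'ds' with a datetime index): convert absolute
    # timestamps to relative days starting at 0, keeping a copy of the absolute
    # times in a 'time_abs' column.
    #
    #   ds.absolute_to_relative(time_data='index', unit='d', inplace=True)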
def write(self,filename,filepath=os.getcwd(),method='all'):
"""
Parameters
----------
filepath : str
the path the output file should be saved to
filename : str
the name of the output file
method : str (all,filtered,filled)
depending on the method choice, different values will be written out:
all values, only the filtered values or the filled values
for_WEST : bool
include_units : bool
Returns
-------
None; write an output file
"""
if method == 'all':
self.data.to_csv(os.path.join(filepath,filename),sep='\t')
elif method == 'filtered':
to_write = self.data.copy()
for column in self.meta_valid.columns:
to_write[column] = self.data[column][self.meta_valid[column]=='original']
to_write.to_csv(os.path.join(filepath,filename),sep='\t')
elif method == 'filled':
self.filled.to_csv(os.path.join(filepath,filename),sep='\t')
#######################
### DATA EXPLORATION
#######################
def get_avg(self,name=None,only_checked=True):
"""
Gets the averages of all or certain columns in a dataframe
Parameters
----------
        name : array of str
            name(s) of the column(s) containing the data to be averaged;
            defaults to None, in which case averages are calculated for
            every column
        Returns
        -------
        pd.Series, float or list
            the average(s) of all or certain columns
"""
mean = []
if only_checked:
df = self.data.copy()
df[self.meta_valid == 'filtered']=np.nan
if name == None:
mean = df.mean()
elif isinstance(name,str):
mean = df[name].mean()
else:
for i in name:
                    mean.append(df[i].mean())
else:
if name == None:
mean = self.data.mean()
elif isinstance(name,str):
mean = self.data[name].mean()
else:
for i in name:
                    mean.append(self.data[i].mean())
return mean
def get_std(self,name=None,only_checked=True):
"""
Gets the standard deviations of all or certain columns in a dataframe
Parameters
----------
        name : array of str
            name(s) of the column(s) containing the data to calculate the
            standard deviation for; defaults to None, in which case the
            standard deviation is calculated for every column
        Returns
        -------
        pd.Series, float or list
            the standard deviation(s) of all or certain columns
"""
std=[]
if only_checked:
df = self.data.copy()
df[self.meta_valid == 'filtered']=np.nan
if name == None:
std = df.std()
elif isinstance(name,str):
std = df[name].std()
else:
for i in name:
                    std.append(df[i].std())
else:
if name == None:
std = self.data.std()
elif isinstance(name,str):
std = self.data[name].std()
else:
for i in name:
                    std.append(self.data[i].std())
return std
def get_highs(self,data_name,bound_value,arange,method='percentile',plot=False):
"""
creates a dataframe with tags indicating what indices have data-values
higher than a certain value; example: the definition/tagging of rain
events.
Parameters
----------
data_name : str
name of the column to execute the function on
bound_value : float
the boundary value above which points will be tagged
arange : array of two values
the range within which high values need to be tagged
method: str (value or percentile)
when percentile, the bound value is a given percentile above which
data points will be tagged, when value, bound_values is used directly
to tag data points.
Returns
-------
None
"""
self._reset_highs()
try:
data_to_use = self.data[data_name][arange[0]:arange[1]].copy()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
        # get indexes where the data is higher than bound_value
        if method == 'value':
            bound_value = bound_value
        elif method == 'percentile':
bound_value = data_to_use.dropna().quantile(bound_value)
indexes = data_to_use.loc[data_to_use > bound_value].index
self.highs['highs'].loc[indexes] = 1
if plot:
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
ax.plot(data_to_use[self.highs['highs']==0].index,
data_to_use[self.highs['highs']==0],
'-g')
ax.plot(data_to_use[self.highs['highs']==1].index,
data_to_use[self.highs['highs']==1],
'.b',label='high')
ax.legend(fontsize=17)
ax.tick_params(labelsize=15)
ax.set_ylabel(data_name,size=17)
ax.set_xlabel('Time',size=17)
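    # Usage sketch (hypothetical column name 'flow'): tag the highest 10% of
    # flow values within the first ten days as 'highs', e.g. to mark rain events.
    #
    #   ds.get_highs('flow', bound_value=0.9, arange=[0, 10], method='percentile')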
def _reset_highs(self):
"""
"""
self.highs = pd.DataFrame(data=0,columns=['highs'],index=self.index())
##############
### FILTERING
##############
def add_to_meta_valid(self,column_names):
"""
        Adds (a) column(s) with the given column_name(s) to the self.meta_valid
DataFrame, where all tags are set to 'original'. This makes sure that
also data that already is very reliable can be used further down the
process (e.g. filling etc.)
Parameters
----------
column_names : array
            array containing the names of the columns to add to the meta_valid
dataframe
"""
self._plot = 'valid'
        # Create/adjust self.meta_valid
self.meta_valid = self.meta_valid.reindex(self.index())
for column in column_names:
if not column in self.meta_valid.columns:
self.meta_valid[column] = 'original'
            else:
                wn.warn('self.meta_valid already contains a column named ' +
                        column + '. The original column was kept.')
def tag_nan(self,data_name,arange=None,clear=False):
"""
adds a tag 'filtered' in self.meta_valid for every NaN value in the given
column
Parameters
----------
data_name : str
column name of the column to apply the function to
arange : array of two values
the range within which nan values need to be tagged
clear : bool
when true, resets the tags in meta_valid for the data in column
data_name
Returns
-------
None
"""
self._plot='valid'
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
if not data_name in self.meta_valid.columns:
# if the data_name column doesn't exist yet in the meta_valid dataset,
# add it
self.add_to_meta_valid([data_name])
if arange == None:
len_orig = len(self.data[data_name])
self.meta_valid[data_name] = np.where(pd.isnull(self.data[data_name]),
'filtered','original')
len_new = self.data[data_name].count()
else:
# check if arange has the right type
try:
len_orig = len(self.data[data_name][arange[0]:arange[1]])
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange "+\
"argument type " + str(type(arange[0])) + " or " +\
str(type(arange[1])) + ". Try changing the type "+\
"of the arange values to one compatible with " + \
str(type(self.data.index[0])) + " slicing.")
self.meta_valid[data_name][arange[0]:arange[1]] = np.where(pd.isnull(self.data[data_name][arange[0]:arange[1]]),
'filtered','original')
len_new = self.data[data_name][arange[0]:arange[1]].count()
_print_removed_output(len_orig,len_new,'NaN tagging')
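    # Usage sketch (the column name 'NH4' is assumed): tag all NaN values of one
    # sensor signal as 'filtered' in self.meta_valid before further processing.
    #
    #   ds.tag_nan('NH4', arange=[0, 10], clear=True)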
def tag_doubles(self,data_name,bound,arange=None,clear=False,inplace=False,log_file=None,
plot=False,final=False):
'''
tags double values that subsequently occur in a measurement series.
This is relevant in case a sensor has failed and produces a constant
signal. A band is provided within which the signal can vary and still
be filtered out
Parameters
----------
data_name : str
column name of the column from which double values will be sought
bound : float
boundary value of the band to use. When the difference between a
point and the next one is smaller then the bound value, the latter
datapoint is tagged as 'filtered'.
arange : array of two values
the range within which double values need to be tagged
clear : bool
if True, the tags added to datapoints before will be removed and put
back to 'original'.
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned). (This argument only comes into play when the 'final'
argument is True)
log_file : str
string containing the directory to a log file to be written out
when using this function
plot : bool
whether or not to make a plot of the newly tagged data points
final : bool
if true, the values are actually replaced with nan values (either
inplace or in a new wwdata object)
Returns
-------
HydroData object (if inplace=False)
the dataframe from which the double values of 'data' are removed or
replaced
None (if inplace=True)
'''
self._plot = 'valid'
len_orig = self.data[data_name].count()
# Make temporary object for operations
df_temp = self.__class__(self.data.copy(),timedata_column=self.timename,
data_type=self.data_type,experiment_tag=self.tag,
time_unit=self.time_unit)
# Make a mask with False values for double values to be dropped
bound_mask = abs(self.data[data_name].dropna().diff()) >= bound
# Make sure the indexes are still the same in the mask and df_temp, so the
# tagging can happen
bound_mask = bound_mask.reindex(df_temp.index()).fillna(True)
# Make a mask with False values where data needs to be filtered
if arange == None:
mask = bound_mask
else:
try:
range_mask = (self.index() < arange[0]) | (arange[1] < self.index())
mask = bound_mask | range_mask
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange "+\
"argument type " + str(type(arange[0])) + " or " +\
str(type(arange[1])) + ". Try changing the type "+\
"of the arange values to one compatible with " + \
str(type(self.data.index[0])) + " slicing.")
# Update the index of self.meta_valid
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
if not data_name in self.meta_valid.columns:
# if the data_name column doesn't exist yet in the meta_valid dataset,
# add it
self.add_to_meta_valid([data_name])
# Do the actual filtering, based on the mask
df_temp.data[data_name] = df_temp.data[data_name].drop(df_temp.data[mask==False].index)
len_new = df_temp.data[data_name].count()
if log_file == None:
_print_removed_output(len_orig,len_new,'double value tagging')
elif type(log_file) == str:
_log_removed_output(log_file,len_orig,len_new,'filtered')
else:
raise TypeError('Provide the location of the log file \
as a string type, or drop the argument if \
no log file is needed.')
self.meta_valid[data_name][mask==False] = 'filtered'
# Create new temporary object, where the dropped datapoints are replaced
# by nan values (by assigning a new column to the original dataframe)
#df_temp_2 = self.__class__(self.data.copy(),timedata_column=self.timename,
# experiment_tag=self.tag,time_unit=self.time_unit)
#df_temp_2.data[data_name] = df_temp.data[data_name]
#df_temp_2._update_time()
# Update the self.meta_valid dataframe, to contain False values for dropped
# datapoints. This is done by tracking the nan values in df_temp_2
#if data_name in self.meta_valid.columns:
# temp_1 = self.meta_valid[data_name].isin(['filtered'])
# temp_2 = pd.DataFrame(np.where(np.isnan(df_temp_2.data[data_name]),True,False))
# temp_3 = temp_1 | temp_2
# self.meta_valid[data_name] = np.where(temp_3,'filtered','original')
#else:
# self.meta_valid[data_name] = np.isnan(df_temp_2.data[data_name])
# self.meta_valid[data_name] = np.where(self.meta_valid[data_name],'filtered','original')
if plot == True:
self.plot_analysed(data_name)
if final:
if inplace:
self.data[data_name] = df_temp.data[data_name]
self._update_time()
elif not inplace:
return df_temp
if not final:
return None
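    # Usage sketch (assumed column 'conductivity'): tag near-constant sensor
    # readings (successive differences smaller than 0.05) as 'filtered',
    # without yet replacing them by NaN values.
    #
    #   ds.tag_doubles('conductivity', bound=0.05, arange=[0, 10], plot=False)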
def tag_extremes(self,data_name,arange=None,limit=0,method='below',
clear=False,plot=False):
"""
Tags values above or below a given limit.
Parameters
----------
data_name : str
name of the column containing the data to be tagged
arange : array of two values
the range within which extreme values need to be tagged
limit : int/float
limit below or above which values need to be tagged
method : 'below' or 'above'
below tags all the values below the given limit, above tags
the values above the limit
clear : bool
if True, the tags added before will be removed and put
back to 'original'.
plot : bool
whether or not to make a plot of the newly tagged data points
Returns
-------
None;
"""
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
if not data_name in self.meta_valid.columns:
# if the data_name column doesn't exist yet in the meta_valid dataset,
# add it
self.add_to_meta_valid([data_name])
if arange == None:
len_orig = len(self.data[data_name])
mask_valid = np.where(self.meta_valid[data_name] == 'filtered',True,False)
if method == 'below':
mask_tagging = np.where(self.data[data_name]<limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name] = np.where(mask,'filtered','original')
elif method == 'above':
mask_tagging = np.where(self.data[data_name]>limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name] = np.where(mask,'filtered','original')
else:
# check if arange has the right type
try:
len_orig = len(self.data[data_name][arange[0]:arange[1]])
mask_valid = np.where(self.meta_valid[data_name][arange[0]:arange[1]] == 'filtered',True,False)
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange "+\
"argument type " + str(type(arange[0])) + " or " +\
str(type(arange[1])) + ". Try changing the type "+\
"of the arange values to one compatible with " + \
str(type(self.data.index[0])) + " slicing.")
if method == 'below':
mask_tagging = np.where(self.data[data_name][arange[0]:arange[1]]<limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name][arange[0]:arange[1]] = np.where(mask,'filtered','original')
elif method == 'above':
mask_tagging = np.where(self.data[data_name][arange[0]:arange[1]]>limit,True,False)
mask = pd.DataFrame(np.transpose([mask_tagging,mask_valid])).any(axis=1)
self.meta_valid[data_name][arange[0]:arange[1]] = np.where(mask,'filtered','original')
len_new = len_orig - mask_tagging.sum()
_print_removed_output(len_orig,len_new,'tagging of extremes ('+method+')')
if plot == True:
self.plot_analysed(data_name)
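    # Usage sketch (assumed column 'DO'): tag physically impossible negative
    # values, i.e. everything below 0.
    #
    #   ds.tag_extremes('DO', arange=[0, 10], limit=0, method='below')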
def calc_slopes(self,xdata,ydata,time_unit=None,slope_range=None):
"""
Calculates slopes for given xdata and data_name; if a time unit is given as
an argument, the time values (xdata) will first be converted to this
unit, which will then be used to calculate the slopes with.
Parameters
----------
xdata : str
name of the column containing the xdata for slope calculation
(e.g. time). If 'index', the index is used as xdata. If datetime
objects, a time_unit is expected to calculate the slopes.
data_name : str
name of the column containing the data_name for slope calculation
time_unit : str
time unit to be used for the slope calculation (in case this is
based on time); if None, slopes are simply calculated based on the
values given
!! This value has no impact if the xdata column is the index and is
not a datetime type. If that is the case, it is assumed that the
user knows the unit of the xdata !!
Returns
-------
pd.Series
pandas Series object containing the slopes calculated for the
chosen variable
"""
slopes = pd.DataFrame()
if xdata == 'index':
self.data[xdata] = self.data.index
date_time = isinstance(self.data[xdata][0],np.datetime64) or \
isinstance(self.data[xdata][0],dt.datetime) or \
isinstance(self.data[xdata][0],pd.Timestamp)
if time_unit == None or date_time == False:
try:
slopes = self.data[ydata].diff() / self.data[xdata].diff()
self.time_unit = time_unit
except TypeError:
raise TypeError('Slope calculation cannot be executed, probably due to a \
non-handlable datatype. Either use the time_unit argument or \
use timedata of type np.datetime64, dt.datetime or pd.Timestamp.')
return None
elif time_unit == 'sec':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.seconds)
elif time_unit == 'min':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.seconds / 60)
elif time_unit == 'hr':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.seconds / 3600)
elif time_unit == 'd':
slopes = self.data[ydata].diff()/ \
(self.data[xdata].diff().dt.days + \
self.data[xdata].diff().dt.seconds / 3600 / 24)
else :
raise ValueError('Could not calculate slopes. If you are using \
time-units to calculate slopes, please make sure you entered a \
valid time unit for slope calculation (sec, min, hr or d)')
if xdata == 'index':
self.data.drop(xdata,axis=1,inplace=True)
return slopes
def moving_slope_filter(self,xdata,data_name,cutoff,arange,time_unit=None,
clear=False,inplace=False,log_file=None,plot=False,
final=False):
"""
Filters out datapoints based on the difference between the slope in one
point and the next (sudden changes like noise get filtered out), based
on a given cut off value. Replaces the dropped values with NaN values.
Parameters
----------
xdata : str
name of the column containing the xdata for slope calculation
(e.g. time). If 'index', the index is used as xdata. If datetime
objects, a time_unit is expected to calculate the slopes.
data_name : str
name of the column containing the data that needs to be filtered
cutoff: int
the cutoff value to compare the slopes with to apply the filtering.
arange : array of two values
the range within which the moving slope filter needs to be applied
time_unit : str
time unit to be used for the slope calculation (in case this is
based on time); if None, slopes are calculated based on the values
given
clear : bool
if True, the tags added to datapoints before will be removed and put
back to 'original'.
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
log_file : str
string containing the directory to a log file to be written out
when using this function
plot : bool
if true, a plot is made, comparing the original dataset with the
new, filtered dataset
final : bool
if true, the values are actually replaced with nan values (either
inplace or in a new wwdata object)
Returns
-------
HydroData object (if inplace=False)
the dataframe from which the double values of 'data' are removed
None (if inplace=True)
Creates
-------
A new column in the self.meta_valid dataframe, containing a mask indicating
what values are filtered
"""
self._plot = 'valid'
try:
len_orig = self.data[data_name][arange[0]:arange[1]].count()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
#if plot == True:
# original = self.__class__(self.data.copy(),timedata_column=self.timename,
# experiment_tag=self.tag,time_unit=self.time_unit)
# Make temporary object for operations
df_temp = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
# Update the index of self.meta_valid
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value='!!')
# Calculate slopes and drop values in temporary object
slopes = df_temp.calc_slopes(xdata,data_name,time_unit=time_unit)
if slopes is None:
return None
while abs(slopes).max() > cutoff:
df_temp.data[data_name] = df_temp.data[data_name].drop(slopes[abs(slopes) > cutoff].index)
slopes = df_temp.calc_slopes(xdata,data_name,time_unit=time_unit)
len_new = df_temp.data[data_name].count()
if log_file == None:
_print_removed_output(len_orig,len_new,'moving slope filter')
elif type(log_file) == str:
_log_removed_output(log_file,len_orig,len_new,'filtered')
else :
raise TypeError('Please provide the location of the log file as '+ \
'a string type, or leave the argument if no log '+ \
'file is needed.')
# Create new temporary object, where the dropped datapoints are replaced
# by nan values
df_temp_2 = self.__class__(self.data.copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
df_temp_2.data[data_name] = df_temp.data[data_name]
df_temp_2._update_time()
# Update the self.meta_valid dataframe, to contain False values for dropped
# datapoints and for datapoints already filtered. This is done by
# tracking the nan values in df_temp_2
if data_name in self.meta_valid.columns:
temp_1 = self.meta_valid[data_name].isin(['filtered'])
temp_2 = np.where(pd.isnull(df_temp_2.data[data_name]),True,False)
temp_3 = temp_1 | temp_2
self.meta_valid[data_name] = np.where(temp_3,'filtered','original')
else:
self.meta_valid[data_name] = pd.isnull(df_temp_2.data[data_name])
self.meta_valid[data_name] = np.where(self.meta_valid[data_name],'filtered','original')
if plot == True:
self.plot_analysed(data_name)
if final:
if inplace:
self.data[data_name] = df_temp_2.data[data_name]
self._update_time()
elif not inplace:
return df_temp_2
if not final:
return None
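    # Usage sketch (assumed column 'turbidity', relative time index in days):
    # drop points whose slope towards the next point exceeds 500 units per day.
    #
    #   ds.moving_slope_filter('index', 'turbidity', cutoff=500, arange=[0, 10])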
def simple_moving_average(self,arange,window,data_name=None,inplace=False,
plot=True):
"""
Calculate the Simple Moving Average of a dataseries from a dataframe,
using a window within which the datavalues are averaged.
Parameters
----------
arange : array of two values
the range within which the moving average needs to be calculated
window : int
the number of values from the dataset that are used to take the
average at the current point. Defaults to 10
data_name : str or array of str
name of the column(s) containing the data that needs to be
smoothened. If None, smoothened data is computed for the whole
dataframe. Defaults to None
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
plot : bool
if True, a plot is given for comparison between original and smooth
data
Returns
-------
HydroData (or subclass) object
either a new object (inplace=False) or an adjusted object, con-
taining the smoothened data values
"""
try:
original = self.data[arange[0]:arange[1]].copy()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
if len(original) < window:
raise ValueError("Window width exceeds number of datapoints!")
if plot == True:
original = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
if inplace == False:
df_temp = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename, experiment_tag=self.tag,
time_unit=self.time_unit)
if data_name == None:
df_temp = self.data.rolling(window=window,center=True).mean()
elif isinstance(data_name,str):
df_temp.data[data_name] = self.data[data_name].interpolate().\
rolling(window=window,center=True).mean()
else:
for name in data_name:
df_temp.data[name] = self.data[name].interpolate().\
rolling(window=window,center=True).mean()
elif inplace == True:
if data_name == None:
self.data = self.data.rolling(window=window,center=True).mean()
elif isinstance(data_name,str):
self.data[data_name] = self.data[data_name].interpolate().\
rolling(window=window,center=True).mean()
else:
for name in data_name:
self.data[name] = self.data[name].interpolate().\
rolling(window=window,center=True).mean()
if plot == True:
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
ax.plot(original.time,original.data[data_name],'r--',label='original data')
if inplace == False:
ax.plot(df_temp.time,df_temp.data[data_name],'b-',label='averaged data')
elif inplace is True:
ax.plot(self.time,self.data[data_name],'b-',label='averaged data')
ax.legend(fontsize=16)
ax.set_xlabel(self.timename,fontsize=14)
ax.set_ylabel(data_name,fontsize=14)
ax.tick_params(labelsize=15)
if inplace == False:
return df_temp
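    # Usage sketch (assumed column 'NO3'): smooth with a centred 10-point
    # moving average and return a new object, leaving the original untouched.
    #
    #   smooth = ds.simple_moving_average([0, 10], window=10, data_name='NO3',
    #                                     inplace=False, plot=False)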
def moving_average_filter(self,data_name,window,cutoff_frac,arange,clear=False,
inplace=False,log_file=None,plot=False,final=False):
"""
Filters out the peaks/outliers in a dataset by comparing its values to a
smoothened representation of the dataset (Moving Average Filtering). The
filtered values are replaced by NaN values.
Parameters
----------
data_name : str
name of the column containing the data that needs to be filtered
window : int
the number of values from the dataset that are used to take the
average at the current point.
cutoff_frac: float
the cutoff value (in fraction 0-1) to compare the data and smoothened
data: a deviation higher than a certain percentage drops the data-
point.
arange : array of two values
the range within which the moving average filter needs to be applied
clear : bool
if True, the tags added to datapoints before will be removed and put
back to 'original'.
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
log_file : str
string containing the directory to a log file to be written out
when using this function
plot : bool
if true, a plot is made, comparing the original dataset with the
new, filtered dataset
final : bool
if true, the values are actually replaced with nan values (either
inplace or in a new wwdata object)
Returns
-------
HydroData object (if inplace=False)
the dataframe from which the double values of 'data' are removed
None (if inplace=True)
"""
self._plot = 'valid'
try:
len_orig = self.data[data_name][arange[0]:arange[1]].count()
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
#if plot == True:
# original = self.__class__(self.data.copy(),timedata_column=self.timename,
# experiment_tag=self.tag,time_unit=self.time_unit)
# Make temporary object for operations
df_temp = self.__class__(self.data[arange[0]:arange[1]].copy(),
timedata_column=self.timename,experiment_tag=self.tag,
time_unit=self.time_unit)
# Make a hydropy object with the smoothened data
smooth_data = self.simple_moving_average(arange,window,data_name,inplace=False,
plot=False)
# Make a mask by comparing smooth and original data, using the given
# cut-off percentage
mask = (abs(smooth_data.data[data_name][arange[0]:arange[1]] - self.data[data_name][arange[0]:arange[1]])/\
smooth_data.data[data_name][arange[0]:arange[1]]) < cutoff_frac
# Update the index of self.meta_valid
if clear:
self._reset_meta_valid(data_name)
self.meta_valid = self.meta_valid.reindex(self.index(),fill_value=True)
# Do the actual filtering, based on the mask
df_temp.data[data_name] = df_temp.data[data_name].drop(df_temp.data[mask==False].index)
len_new = df_temp.data[data_name].count()
if log_file == None:
_print_removed_output(len_orig,len_new,'moving average filter')
elif type(log_file) == str:
_log_removed_output(log_file,len_orig,len_new,'filtered')
else :
raise TypeError('Please provide the location of the log file as \
a string type, or leave the argument if no log \
file is needed.')
# Create new temporary object, where the dropped datapoints are replaced
# by nan values (by assigning a new column to the original dataframe)
df_temp_2 = self.__class__(self.data.copy(),timedata_column=self.timename,
experiment_tag=self.tag,time_unit=self.time_unit)
df_temp_2.data[data_name] = df_temp.data[data_name]
df_temp_2._update_time()
# Update the self.meta_valid dataframe, to contain False values for dropped
# datapoints. This is done by tracking the nan values in df_temp_2
if data_name in self.meta_valid.columns:
temp_1 = self.meta_valid[data_name].isin(['filtered'])
temp_2 = np.where(pd.isnull(df_temp_2.data[data_name]),True,False)
temp_3 = temp_1 | temp_2
self.meta_valid[data_name] = np.where(temp_3,'filtered','original')
else:
self.meta_valid[data_name] = pd.isnull(df_temp_2.data[data_name])
self.meta_valid[data_name] = np.where(self.meta_valid[data_name],'filtered','original')
if plot:
self.plot_analysed(data_name)
if final:
if inplace:
self.data[data_name] = df_temp_2.data[data_name]
self._update_time()
elif not inplace:
return df_temp_2
if not final:
return None
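    # Usage sketch (assumed column 'NO3'): tag points deviating more than 20%
    # from a 10-point moving average as 'filtered'.
    #
    #   ds.moving_average_filter('NO3', window=10, cutoff_frac=0.2, arange=[0, 10])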
def savgol(self,data_name,window=55,polyorder=2,plot=False,inplace=False):
"""
Uses the scipy.signal Savitzky-Golay filter to smoothen the data of a column;
The values are either replaced or a new dataframe is returned.
Parameters
----------
data_name : str
name of the column containing the data that needs to be filtered
window : int
the length of the filter window; default to 55
polyorder : int
The order of the polynomial used to fit the samples.
            polyorder must be less than window; defaults to 2
plot : bool
if true, a plot is made, comparing the original dataset with the
new, filtered dataset
inplace : bool
indicates whether a new dataframe is created and returned or whether
the operations are executed on the existing dataframe (nothing is
returned)
Returns
-------
HydroData object (if inplace=False)
None (if inplace=True)
"""
from scipy import signal
df_temp = self.__class__(self.data.copy(),timedata_column=self.timename,
experiment_tag=self.tag,time_unit=self.time_unit)
        df_temp.data[data_name] = signal.savgol_filter(self.data[data_name],
                                                       window, polyorder)
if plot:
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
ax.plot(self.time,self.data[data_name],'g--',label='original data')
ax.plot(self.time,df_temp.data[data_name],'b-',label='filtered data')
ax.legend(fontsize=16)
ax.set_xlabel(self.timename,fontsize=20)
ax.set_ylabel(data_name,fontsize=20)
ax.tick_params(labelsize=15)
if inplace:
self.data[data_name] = df_temp.data[data_name]
else:
return df_temp
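    # Usage sketch (assumed column 'NH4'): Savitzky-Golay smoothing with a
    # 55-point window and a second-order polynomial, returned as a new object.
    #
    #   smooth = ds.savgol('NH4', window=55, polyorder=2, plot=True)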
#==============================================================================
# DATA (COR)RELATION
#==============================================================================
def calc_ratio(self,data_1,data_2,arange,only_checked=False):
"""
Given two datasets or -columns, calculates the average ratio between
the first and second dataset, within the given range. Also the standard
deviation on this is calculated
Parameters
----------
data_1 : str
name of the data column containing the data to be in the numerator
of the ratio calculation
data_2 : str
name of the data column containing the data to be in the denominator
of the ratio calculation
arange : array of two values
the range within which the ratio needs to be calculated
only_checked : bool
if 'True', filtered values are excluded; default to 'False'
Returns
-------
The average ratio of the first data column over the second one within
the given range and including the standard deviation
"""
# If indexes are in datetime format, convert the arange array to date-
# time values
#if isinstance(self.data.index[0],pd.Timestamp):
# arange = [(self.data.index[0] + dt.timedelta(arange[0]-1)),
# (self.data.index[0] + dt.timedelta(arange[1]-1))]
try:
self.data.loc[arange[0]:arange[1]]
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " + \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
if arange[0] < self.index()[0] or arange[1] > self.index()[-1]:
raise IndexError('Index out of bounds. Check whether the values of ' + \
'"arange" are within the index range of the data.')
if only_checked == True:
#create new pd.Dataframes for original values in range,
#merge only rows in which both values are original
data_1_checked = pd.DataFrame(self.data[arange[0]:arange[1]][data_1][self.meta_valid[data_1]=='original'].values,
index=self.data[arange[0]:arange[1]][data_1][self.meta_valid[data_1]=='original'].index)
data_2_checked = pd.DataFrame(self.data[arange[0]:arange[1]][data_2][self.meta_valid[data_2]=='original'].values, \
index=self.data[data_2][arange[0]:arange[1]][self.meta_valid[data_2]=='original'].index)
ratio_data = pd.merge(data_1_checked,data_2_checked,left_index=True, right_index=True, how = 'inner')
ratio_data.columns = data_1,data_2
mean = (ratio_data[data_1]/ratio_data[data_2])\
.replace(np.inf,np.nan).mean()
std = (ratio_data[data_1]/ratio_data[data_2])\
.replace(np.inf,np.nan).std()
else:
mean = (self.data[arange[0]:arange[1]][data_1]/self.data[arange[0]:arange[1]][data_2])\
.replace(np.inf,np.nan).mean()
std = (self.data[arange[0]:arange[1]][data_1]/self.data[arange[0]:arange[1]][data_2])\
.replace(np.inf,np.nan).std()
#print('mean : '+str(mean)+ '\n' +'standard deviation : '+str(std))
return mean,std
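    # Usage sketch (assumed columns 'NH4' and 'Ntot'): average ratio and its
    # standard deviation between two signals over the first five days, using
    # only data points that were not tagged as filtered.
    #
    #   avg, std = ds.calc_ratio('NH4', 'Ntot', arange=[0, 5], only_checked=True)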
def compare_ratio(self,data_1,data_2,arange,only_checked=False):
"""
Compares the average ratios of two datasets in multiple different ranges
and returns the most reliable one, based on the standard deviation on
the ratio values
Parameters
----------
data_1 : str
name of the data column containing the data to be in the numerator
of the ratio calculation
data_2 : str
name of the data column containing the data to be in the denominator
of the ratio calculation
arange : int
the range (in days) for which the ratios need to be calculated and
compared
only_checked : bool
if 'True', filtered values are excluded; default to 'False'
Returns
-------
The average ratio within the range that has been found to be the most
reliable one
"""
# Make the array with ranges within which to compute ratios, based on
# arange, indicating what the interval should be.
if isinstance(self.data.index[0],pd.Timestamp):
days = [self.index()[0] + dt.timedelta(arange) * x for x in \
range(0, int((self.index()[-1]-self.index()[0]).days/arange))]
starts = [[y] for y in days]
ends = [[x + dt.timedelta(arange)] for x in days]
#end = (self.data.index[-1] - self.data.index[0]).days+1
elif isinstance(self.data.index[0],float):
end = int(self.index()[-1]+1) # +1 because int rounds downwards
starts = [[y] for y in range(0,end)]
ends = [[x] for x in range(arange,end+arange)]
ranges = np.append(starts,ends,1)
rel_std = np.inf
for r in range(0,len(ranges)):
average,stdev = self.calc_ratio(data_1,data_2,ranges[r],only_checked)
try:
relative_std = stdev/average
if relative_std < rel_std:
std = stdev
avg = average
index = r
rel_std = std/avg
except (ZeroDivisionError):
pass
print('Best ratio (' + str(avg) + ' ± ' + str(std) + \
') was found in the range: ' + str(ranges[index]))
return avg,std
def get_correlation(self,data_1,data_2,arange,zero_intercept=False,
only_checked=False,plot=False):
"""
Calculates the linear regression coefficients that relate data_1 to
data_2
Parameters
----------
data_1 and data_2 : str
names of the data columns containing the data between which the
correlation will be calculated. data_1: independent data; data_2:
dependent data
arange : array
array containing the beginning and end value between which the
correlation needs to be calculated
zero_intercept : bool
indicates whether or not to assume a zero-intercept
only_checked: bool
if 'True', filtered values are excluded from calculation and plotting;
default to 'False'
if a value in one column is filtered, the corresponding value in the second
column also gets excluded!
Returns
-------
the linear regression coefficients of the correlation, as well as the
r-squared -value
"""
# If indexes are in datetime format, and arange values are not,
# convert the arange array to datetime values
        if isinstance(self.data.index[0],pd.Timestamp) and \
        (isinstance(arange[0],int) or isinstance(arange[0],float)):
wn.warn('Replacing arange values, assumed to be relative time' + \
' values, with absolute values of type dt.datetime')
arange = [(self.data.index[0] + dt.timedelta(arange[0]-1)),
(self.data.index[0] + dt.timedelta(arange[1]-1))]
#if arange[0] < self.time[0] or arange[1] > self.time[-1]:
# raise IndexError('Index out of bounds. Check whether the values of '+ \
# '"arange" are within the index range of the data.')
self.data = self.data.sort_index()
if only_checked:
#create new pd.Dataframes for original values in range,
#merge only rows in which both values are original
data_1_checked = pd.DataFrame(self.data[data_1][arange[0]:arange[1]][self.meta_valid[data_1]=='original'].values,
index=self.data[data_1][arange[0]:arange[1]][self.meta_valid[data_1]=='original'].index)
data_2_checked = pd.DataFrame(self.data[data_2][arange[0]:arange[1]][self.meta_valid[data_2]=='original'].values,
index=self.data[data_2][arange[0]:arange[1]][self.meta_valid[data_2]=='original'].index)
corr_data = pd.merge(data_1_checked,data_2_checked,left_index=True, right_index=True, how = 'inner')
else:
corr_data = pd.DataFrame(self.data[[data_1,data_2]][arange[0]:arange[1]])
corr_data.columns = data_1,data_2
corr_data = corr_data[[data_1,data_2]].dropna()
import statsmodels.api as sm
X = corr_data[data_1]
Y = corr_data[data_2]
if zero_intercept == False:
X = sm.add_constant(X)
model = sm.OLS(Y,X)
results = model.fit()
slope = results.params[data_1]
r_sq = results.rsquared
if zero_intercept:
intercept = 0
else:
intercept = results.params['const']
if plot:
x = corr_data[data_1].copy().sort_values(inplace=False)
#x = np.arange(self.data[data_2][arange[0]:arange[1]].min(),
# self.data[data_2][arange[0]:arange[1]].max())
#y = slope * x + intercept
if zero_intercept:
y = results.predict(x)
exog = x
else:
x2 = sm.add_constant(x)
y = results.predict(x2)
exog = x2
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
# plot data
ax.plot(corr_data[data_1],corr_data[data_2],'o',markerfacecolor=None,
markeredgewidth=1,markeredgecolor='b',markersize=4,label='Data')
# plot predictions
ax.plot(x,y,'k',label='Linear fit')
# plot prediction intervals
from statsmodels.stats.outliers_influence import summary_table
st, data, ss2 = summary_table(results, alpha=0.05)
lower = data[:,6]
lower.sort()
upper = data[:,7]
upper.sort()
ax.fill_between(x.astype(float), lower, upper, color='k', alpha=0.2,
label='Prediction interval (95%)')
ax.legend(fontsize=15)
ax.tick_params(labelsize=15)
            ax.set_ylabel(data_2,size=17)
            ax.set_xlabel(data_1,size=17)
fig.tight_layout()
print('slope: ' + str(slope) + ' intercept: ' + str(intercept) + ' R2: ' + str(r_sq))
return fig, ax
return slope,intercept,r_sq
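    # Usage sketch (assumed columns 'conductivity' and 'Ntot'): linear
    # regression of Ntot on conductivity in a chosen range, forced through zero.
    #
    #   slope, intercept, r_sq = ds.get_correlation('conductivity', 'Ntot',
    #                                               arange=[0, 10],
    #                                               zero_intercept=True)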
#==============================================================================
# DAILY PROFILE CALCULATION
#==============================================================================
def calc_daily_profile(self,column_name,arange,quantile=0.9,plot=False,
plot_method='quantile',clear=False,only_checked=False):
"""
Calculates a typical daily profile based on data from the indicated
consecutive days. Also saves this average day, along with standard
deviation and lower and upper percentiles as given in the arguments.
Plotting is possible.
Parameters
----------
column_name : str
name of the column containing the data to calculate an average day
for
arange : 2-element array of ints
contains the beginning and end day of the period to use for average
day calculation
quantile : float between 0 and 1
value to use for the calculation of the quantiles
plot : bool
plot or not
plot_method : str
method to use for plotting. Available: "quantile" or "stdev"
clear : bool
            whether or not to clear the key in the self.daily_profile dictionary
that is already present
Returns
-------
None
creates a dictionary self.daily_profile containing information
on the average day as calculated.
"""
# several checks to make sure the right types, columns... are used
try:
if not isinstance(self.daily_profile,dict):
self.daily_profile = {}
except AttributeError:
self.daily_profile = {}
if clear:
try:
self.daily_profile.pop(column_name, None)
except KeyError:
pass
if column_name in self.daily_profile.keys():
raise KeyError('self.daily_profile dictionary already contains a ' +\
'key ' + column_name + '. Set argument "clear" to True to erase the ' + \
'key and create a new one.')
# Give warning when replacing data from rain events and at the same time
# check if arange has the right type
try:
rain = (self.data_type == 'WWTP') and \
(self.highs['highs'].loc[arange[0]:arange[1]].sum() > 1)
except TypeError:
raise TypeError("Slicing not possible for index type " + \
str(type(self.data.index[0])) + " and arange argument type " + \
str(type(arange[0])) + ". Try changing the type of the arange " \
"values to one compatible with " + str(type(self.data.index[0])) + \
" slicing.")
except AttributeError:
raise AttributeError('OnlineSensorBased instance has no attribute "highs". '\
'run .get_highs to tag the peaks in the dataset.')
if rain :
wn.warn("Data points obtained during a rain event will be used for" \
" the calculation of an average day. This might lead to a not-" \
"representative average day and/or high standard deviations.")
daily_profile = pd.DataFrame()
if not isinstance(arange[0],int) and not isinstance(arange[0],dt.datetime):
raise TypeError('The values of arange must be of type int or dt.datetime')
if isinstance(self.data.index[0],dt.datetime):
range_days = pd.date_range(arange[0],arange[1])
indexes = [self.data.index[0],self.data.index[0]+dt.timedelta(1)]
else :
range_days = range(arange[0],arange[1])
indexes = [0,1]
#if isinstance(arange[0],dt.datetime):
# range_days = pd.date_range(arange[0],arange[1])
#if only_checked:
# for i in range_days:
# daily_profile = pd.merge(daily_profile,
# pd.DataFrame(self.data[column_name][i:i+1]\
# [self.meta_valid[column_name]=='original'].values),
# left_index=True, right_index=True,how='outer')
# mean_day = pd.DataFrame(index=daily_profile.index)
# self.data.loc[indexes[0]:indexes[1]].index)#\
# [self.meta_valid[column_name]=='original'].index)
# if isinstance(self.data.index[0],dt.datetime):
# mean_day.index = mean_day.index.time
#else:
if only_checked and column_name in self.meta_valid:
for i in range_days:
if isinstance(i,dt.datetime) or isinstance(i,np.datetime64) or isinstance(i,pd.Timestamp):
name = str(i.month) + '-' + str(i.day)
else:
name = str(i)
mask_valid = pd.DataFrame((self.meta_valid[column_name][i:i+1] == 'original').values,columns=[name])
daily_profile = pd.merge(daily_profile,
pd.DataFrame(self.data[column_name][i:i+1].values,
columns=[name]).where(mask_valid),
left_index=True, right_index=True,how='outer')
else:
if only_checked:
wn.warn('No values of selected column were filtered yet. All values '+ \
'will be displayed.')
for i in range_days:
if isinstance(i,dt.datetime) or isinstance(i,np.datetime64) or isinstance(i,pd.Timestamp):
name = str(i.month) + '-' + str(i.day)
else:
name = str(i)
daily_profile = pd.merge(daily_profile,
pd.DataFrame(self.data[column_name][i:i+1*i.freq].values,
columns=[name]),
left_index=True, right_index=True,how='outer')
daily_profile['index'] = self.data.loc[indexes[0]:indexes[1]].index.time
daily_profile = daily_profile.drop_duplicates(subset='index', keep='first')\
.set_index('index').sort_index()
mean_day = pd.DataFrame(index=daily_profile.index.values)
mean_day['avg'] = daily_profile.mean(axis=1).values
mean_day['std'] = daily_profile.std(axis=1).values
mean_day['Qupper'] = daily_profile.quantile(quantile,axis=1).values
mean_day['Qlower'] = daily_profile.quantile(1-quantile,axis=1).values
self.daily_profile[column_name] = mean_day
if plot:
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
ax.plot(mean_day.index,mean_day['avg'],'g')
if plot_method == 'quantile':
ax.plot(mean_day.index,mean_day['Qupper'],'b',alpha=0.5)
ax.plot(mean_day.index,mean_day['Qlower'],'b',alpha=0.5)
ax.fill_between(mean_day.index,mean_day['avg'],mean_day['Qupper'],
color='grey', alpha=0.3)
ax.fill_between(mean_day.index,mean_day['avg'],mean_day['Qlower'],
color='grey', alpha=0.3)
elif plot_method == 'stdev':
ax.plot(mean_day.index,mean_day['avg']+mean_day['std'],'b',alpha=0.5)
ax.plot(mean_day.index,mean_day['avg']-mean_day['std'],'b',alpha=0.5)
ax.fill_between(mean_day.index,mean_day['avg'],
mean_day['avg']+mean_day['std'],
color='grey', alpha=0.3)
ax.fill_between(mean_day.index,mean_day['avg'],
mean_day['avg']-mean_day['std'],
color='grey', alpha=0.3)
ax.tick_params(labelsize=15)
ax.set_xlim(mean_day.index[0],mean_day.index[-1])
ax.set_ylabel(column_name,size=17)
ax.set_xlabel('Time',size=17)
return fig,ax
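    # Usage sketch (assumed column 'flow'): compute a typical daily flow
    # profile from days 2-9 and plot it with 90% quantile bands.
    #
    #   ds.calc_daily_profile('flow', arange=[2, 9], quantile=0.9, plot=True)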
##############
### PLOTTING
##############
def plot_analysed(self,data_name,time_range='default',only_checked = False):
"""
        plots the values and their types (original, filtered, filled)
of a given column in the given time range.
Parameters
----------
data_name : str
name of the column containing the data to plot
time_range : array of two values
the range within which the values are plotted; default is all
only_checked : bool
if 'True', filtered values are excluded; default to 'False'
Returns
-------
Plot
"""
# time range settings
if time_range == 'default':
if isinstance(self.time[0],float):
time_range = [int(self.time[0]),int(self.time[-1])+1]
elif isinstance(self.time[0],dt.datetime):
time_range = [self.time[0],self.time[-1]]
else:
if not isinstance(time_range[0],type(self.time[0])) or not \
isinstance(time_range[1],type(self.time[-1])):
raise TypeError('The value type of the values in time_range must ' + \
'be the same as the value type of index values')
if time_range[0] < self.time[0] or time_range[1] > int(self.time[-1]):
raise IndexError('Index out of bounds. Check whether the values of '+\
'"time_range" are within the index range of the data.')
fig = plt.figure(figsize=(16,6))
ax = fig.add_subplot(111)
#create new object with only the values within the given time range
df = self.__class__(self.data[time_range[0]:time_range[1]].copy(),timedata_column=self.timename,
experiment_tag=self.tag,time_unit=self.time_unit)
if self._plot == 'filled':
df.meta_filled = self.meta_filled[time_range[0]:time_range[1]].copy()
df.filled = self.filled[time_range[0]:time_range[1]].copy()
ax.plot(df.time[df.meta_filled[data_name]=='original'],
df.filled[data_name][df.meta_filled[data_name]=='original'],
'.g',label='original')
if only_checked == False:
if (df.meta_filled[data_name]=='filtered').any():
ax.plot(df.time[df.meta_filled[data_name]=='filtered'],
df.data[data_name][df.meta_filled[data_name]=='filtered'],
'.r',label='filtered')
if (df.meta_filled[data_name]=='filled_interpol').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_interpol'],
df.filled[data_name][df.meta_filled[data_name]=='filled_interpol'],
'.b',label='filled (interpolation)')
if (df.meta_filled[data_name]=='filled_ratio').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_ratio'],
df.filled[data_name][df.meta_filled[data_name]=='filled_ratio'],
'.m',label='filled (ratio-based)')
if (df.meta_filled[data_name]=='filled_correlation').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_correlation'],
df.filled[data_name][df.meta_filled[data_name]=='filled_correlation'],
'.k',label='filled (correlation-based)')
if (df.meta_filled[data_name]=='filled_average_profile').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_average_profile'],
df.filled[data_name][df.meta_filled[data_name]=='filled_average_profile'],
'.y',label='filled (typical day)')
if (df.meta_filled[data_name]=='filled_infl_model').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_infl_model'],
df.filled[data_name][df.meta_filled[data_name]=='filled_infl_model'],
'.c',label='filled (influent model)')
if (df.meta_filled[data_name]=='filled_profile_day_before').any():
ax.plot(df.time[df.meta_filled[data_name]=='filled_profile_day_before'],
df.filled[data_name][df.meta_filled[data_name]=='filled_profile_day_before'],
'.',label='filled (previous day)')
#if (df.meta_filled[data_name]=='filled_savitzky_golay').any():
# ax.plot(df.time[df.meta_filled[data_name]=='filled_savitzky_golay'],
# df.filled[data_name][df.meta_filled[data_name]=='filled_savitzky_golay'],
# '.m',label='filled (Savitzky-Golay filter)')
elif self._plot == 'valid':
df.meta_valid = self.meta_valid[time_range[0]:time_range[1]].copy()
ax.plot(df.time[self.meta_valid[data_name]=='original'],
df.data[data_name][df.meta_valid[data_name]=='original'],
'.g',label='original')
if only_checked == False:
if (df.meta_valid[data_name]=='filtered').any():
if data_name in df.filled.columns:
ax.plot(df.time[df.meta_valid[data_name]=='filtered'],
df.filled[data_name][df.meta_valid[data_name]=='filtered'],
'.r',label='filtered')
else:
ax.plot(df.time[df.meta_valid[data_name]=='filtered'],
df.data[data_name][df.meta_valid[data_name]=='filtered'],
'.r',label='filtered')
print (str(float(df.meta_valid.groupby(data_name).size()['original']*100)/ \
float(df.meta_valid[data_name].count())) + \
'% datapoints are left over from the original ' + \
str(float(df.meta_valid[data_name].count())))
ax.legend(bbox_to_anchor=(1.05,1),loc=2,fontsize=16)
ax.set_xlabel(self.timename,fontsize=20)
ax.set_xlim(time_range[0],time_range[1])
ax.set_ylabel(data_name,fontsize=20)
ax.tick_params(labelsize=14)
return fig, ax
# def plot_analysed(self,data_name):
# """
#
# """
# fig = plt.figure(figsize=(16,6))
# ax = fig.add_subplot(111)
#
# if not self._plot == 'filled' or self._plot == 'valid':
# ValueError('No filtering or filling of the current dataset has been done.\
# Run any filter or filling function to start the data analysis.')
#
# if self._plot == 'filled':
# ax.plot(self.time[self.meta_filled[data_name]=='original'],
# self.data[data_name][self.meta_filled[data_name]=='original'],
# '.g',label='original')
# if (self.meta_filled[data_name]=='filtered').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filtered'],
# self.data[data_name][self.meta_filled[data_name]=='filtered'],
# '.r',label='filtered')
# if (self.meta_filled[data_name]=='filled_interpol').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_interpol'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_interpol'],
# '.b',label='filled (interpolation)')
# if (self.meta_filled[data_name]=='filled_ratio').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_ratio'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_ratio'],
# '.m',label='filled (ratio-based)')
# if (self.meta_filled[data_name]=='filled_correlation').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_correlation'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_correlation'],
# '.k',label='filled (correlation-based)')
# if (self.meta_filled[data_name]=='filled_average_profile').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_average_profile'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_average_profile'],
# '.y',label='filled (typical day)')
# if (self.meta_filled[data_name]=='filled_infl_model').any():
# ax.plot(self.time[self.meta_filled[data_name]=='filled_infl_model'],
# self.filled[data_name][self.meta_filled[data_name]=='filled_infl_model'],
# '.c',label='filled (influent model)')
#
# elif self._plot == 'valid':
# ax.plot(self.time[self.meta_valid[data_name]=='original'],
# self.data[data_name][self.meta_valid[data_name]=='original'],
# '.g',label='original')
# if (self.meta_valid[data_name]=='filtered').any():
# if data_name in self.filled.columns:
# ax.plot(self.time[self.meta_valid[data_name]=='filtered'],
# self.filled[data_name][self.meta_valid[data_name]=='filtered'],
# '.r',label='filtered')
# else:
# ax.plot(self.time[self.meta_valid[data_name]=='filtered'],
# self.data[data_name][self.meta_valid[data_name]=='filtered'],
# '.r',label='filtered')
#
# ax.legend(fontsize=16)
# ax.set_xlabel(self.timename,fontsize=14)
# ax.set_ylabel(data_name,fontsize=14)
# ax.tick_params(labelsize=14)
#
# print str(float(self.meta_valid.groupby(data_name).size()['original']*100)/ \
# float(self.meta_valid[data_name].count())) + \
# '% datapoints are left over from the original ' + \
# str(float(self.meta_valid[data_name].count()))
# return fig, ax
##############################
### NON-CLASS FUNCTIONS ###
##############################
def total_seconds(timedelta_value):
return timedelta_value.total_seconds()
def _print_removed_output(original,new,function):
"""
function printing the output of functions that tag datapoints.
Parameters
----------
original : int
original length of the dataset
new : int
length of the new dataset
function : str
info on the function used to filter the data
"""
print(str(original-new) + ' values detected and tagged as filtered by function ' + function)
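# Hedged usage sketch (illustrative only, not part of the original module): the
# datapoint counts and the filter name below are hypothetical.
def _example_print_removed_output():
    # e.g. 1440 datapoints before tagging, 1380 afterwards, tagged by a
    # hypothetical 'moving_average_filter' function
    _print_removed_output(1440, 1380, 'moving_average_filter')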
def _log_removed_output(log_file,original,new,type_):
"""
function writing the output of functions that remove datapoints to a log file.
Parameters
----------
log_file : str
string containing the directory to the log file to be written out
original : int
original length of the dataset
new : int
length of the new dataset
type_ : str
'removed' or 'dropped'
"""
log_file = open(log_file,'a')
    log_file.write('\nOriginal dataset: ' + str(original) + ' datapoints; new dataset: ' +
                   str(new) + ' datapoints; ' + str(original - new) + ' datapoints ' + type_)
log_file.close()
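# Hedged usage sketch (illustrative only, not part of the original module): the
# log file name and the counts are hypothetical; type_ must be 'removed' or
# 'dropped' as documented above.
def _example_log_removed_output():
    _log_removed_output('cleaning.log', 1440, 1380, 'removed')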
# Prepends a WEST-header to read-in text files, to make them WEST compatible
def _prepend_WEST_header(filepath,sep,column_names,outputfilename,
comment='no comments'):
"""
"""
f = open(filepath,'r')
columns = f.readlines()
temp = f.readlines()[1:]
f.close()
f = open(outputfilename, 'w')
#f.write("%%Version3.3\ %%BeginComment\ ")
#f.write(comment)
#f.write("%%EndComment\ %%BeginHeader\ ")
#f.write(str())#write the names
#f.write(str())#write the units
f.write(temp)
f.close()
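# Hedged usage sketch (illustrative only, not part of the original module): the
# file names, separator and column names below are hypothetical placeholders.
def _example_prepend_WEST_header():
    _prepend_WEST_header('influent.txt', '\t', ['time', 'flow', 'CODt'],
                         'influent_WEST.txt', comment='raw influent data')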
|
agpl-3.0
| -375,071,956,958,271,900
| 42.792012
| 164
| 0.55373
| false
| 4.1518
| false
| false
| false
|
54lihaoxin/leetcode_python
|
src/StringToInteger/solution.py
|
1
|
2389
|
# String to Integer (atoi)
#
# Implement atoi to convert a string to an integer.
#
# Hint: Carefully consider all possible input cases. If you want a challenge, please do not see below and ask yourself what are the possible input cases.
#
# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs). You are responsible to gather all the input requirements up front.
#
# spoilers alert... click to show requirements for atoi.
#
# Requirements for atoi:
# The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
#
# The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
#
# If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
#
# If no valid conversion could be performed, a zero value is returned. If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned.
debug = True
debug = False
from CommonClasses import *
class Solution:
# @return an integer
def atoi(self, str):
if str == None or len(str) == 0:
return 0
newStr = ''
sign = 1
        while len(str) > 0 and str[0] == ' ':
            str = str[1:]
if len(str) > 1 and str[0] in '-+':
if str[0] == '-':
sign = -1
str = str[1:]
if len(str) == 0:
return 0
for c in str:
if c in '1234567890':
newStr += c
else:
break
if len(newStr) == 0:
return 0
if sign == 1:
# hxl: OJ doesn't allow sys.maxint... so hard code max of int as 2147483647 here
return min(2147483647, long(newStr))
else:
return max(-2147483648, -long(newStr))
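# Hedged usage sketch (not part of the original solution): the concrete strings
# below are illustrative, they are not taken from the problem statement.
def _example_atoi():
    s = Solution()
    assert s.atoi("   42") == 42                  # leading whitespace is skipped
    assert s.atoi("  -0012a42") == -12            # parsing stops at the first non-digit
    assert s.atoi("9999999999") == 2147483647     # clamped to INT_MAX
    assert s.atoi("words 987") == 0               # no leading integral part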
|
apache-2.0
| -8,124,587,870,390,814,000
| 35.359375
| 296
| 0.60653
| false
| 4.304505
| false
| false
| false
|
foer/linuxmuster-client-unity
|
tests/autopilot/unity/emulators/panel.py
|
1
|
11333
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
# Author: Marco Trevisan (Treviño)
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
from __future__ import absolute_import
import logging
from time import sleep
from autopilot.input import Mouse
from autopilot.keybindings import KeybindingsHelper
from unity.emulators import UnityIntrospectionObject
logger = logging.getLogger(__name__)
class PanelController(UnityIntrospectionObject):
"""The PanelController class."""
def get_panel_for_monitor(self, monitor_num):
"""Return an instance of panel for the specified monitor, or None."""
panels = self.get_children_by_type(UnityPanel, monitor=monitor_num)
assert(len(panels) == 1)
return panels[0]
def get_active_panel(self):
"""Return the active panel, or None."""
panels = self.get_children_by_type(UnityPanel, active=True)
assert(len(panels) == 1)
return panels[0]
def get_active_indicator(self):
for panel in self.get_panels:
active = panel.get_active_indicator()
if active:
return active
return None
@property
def get_panels(self):
"""Return the available panels, or None."""
return self.get_children_by_type(UnityPanel)
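# Hedged usage sketch (not part of the original emulator module): given a
# PanelController instance obtained from an autopilot test's introspection tree,
# this shows how the accessors above are meant to be combined. The monitor
# index 0 is an assumption for illustration.
def _example_panel_lookup(panel_controller):
    panel = panel_controller.get_panel_for_monitor(0)
    active = panel_controller.get_active_indicator()
    return panel, active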
class UnityPanel(UnityIntrospectionObject, KeybindingsHelper):
"""An individual panel for a monitor."""
def __init__(self, *args, **kwargs):
super(UnityPanel, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def __get_menu_view(self):
"""Return the menu view."""
menus = self.get_children_by_type(MenuView)
assert(len(menus) == 1)
return menus[0]
def __get_window_buttons(self):
"""Return the window buttons view."""
buttons = self.menus.get_children_by_type(WindowButtons)
assert(len(buttons) == 1)
return buttons[0]
def __get_grab_area(self):
"""Return the panel grab area."""
grab_areas = self.menus.get_children_by_type(GrabArea)
assert(len(grab_areas) == 1)
return grab_areas[0]
def __get_indicators_view(self):
"""Return the menu view."""
indicators = self.get_children_by_type(Indicators)
assert(len(indicators) == 1)
return indicators[0]
def move_mouse_below_the_panel(self):
"""Places the mouse to bottom of this panel."""
(x, y, w, h) = self.geometry
target_x = x + w / 2
target_y = y + h + 10
logger.debug("Moving mouse away from panel.")
self._mouse.move(target_x, target_y)
def move_mouse_over_menus(self):
"""Move the mouse over the menu area for this panel."""
(x, y, w, h) = self.menus.geometry
target_x = x + w / 2
target_y = y + h / 2
# The menu view has bigger geometry than the real layout
menu_entries = self.menus.get_entries()
if len(menu_entries) > 0:
first_x = menu_entries[0].x
last_x = menu_entries[-1].x + menu_entries[-1].width / 2
target_x = first_x + (last_x - first_x) / 2
logger.debug("Moving mouse to center of menu area.")
self._mouse.move(target_x, target_y)
def move_mouse_over_grab_area(self):
"""Move the mouse over the grab area for this panel."""
(x, y, w, h) = self.grab_area.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of grab area.")
self._mouse.move(target_x, target_y)
def move_mouse_over_window_buttons(self):
"""Move the mouse over the center of the window buttons area for this panel."""
(x, y, w, h) = self.window_buttons.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of the window buttons.")
self._mouse.move(target_x, target_y)
def move_mouse_over_indicators(self):
"""Move the mouse over the center of the indicators area for this panel."""
(x, y, w, h) = self.indicators.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of the indicators area.")
self._mouse.move(target_x, target_y)
def get_indicator_entries(self, visible_only=True, include_hidden_menus=False):
"""Returns a list of entries for this panel including both menus and indicators"""
entries = []
if include_hidden_menus or self.menus_shown:
entries = self.menus.get_entries()
entries += self.indicators.get_ordered_entries(visible_only)
return entries
def get_active_indicator(self):
"""Returns the indicator entry that is currently active"""
entries = self.get_indicator_entries(False, True)
entries = filter(lambda e: e.active == True, entries)
assert(len(entries) <= 1)
return entries[0] if entries else None
def get_indicator_entry(self, entry_id):
"""Returns the indicator entry for the given ID or None"""
entries = self.get_indicator_entries(False, True)
entries = filter(lambda e: e.entry_id == entry_id, entries)
assert(len(entries) <= 1)
return entries[0] if entries else None
@property
def title(self):
return self.menus.panel_title
@property
def desktop_is_active(self):
return self.menus.desktop_active
@property
def menus_shown(self):
return self.active and self.menus.draw_menus
@property
def window_buttons_shown(self):
return self.menus.draw_window_buttons
@property
def window_buttons(self):
return self.__get_window_buttons()
@property
def menus(self):
return self.__get_menu_view()
@property
def grab_area(self):
return self.__get_grab_area()
@property
def indicators(self):
return self.__get_indicators_view()
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current panel."""
return (self.x, self.y, self.width, self.height)
class MenuView(UnityIntrospectionObject):
"""The Menu View class."""
def get_entries(self):
"""Return a list of menu entries"""
entries = self.get_children_by_type(IndicatorEntry)
        # We need to filter out empty entries, which are separators - those
# are not valid, visible and working entries
# For instance, gedit adds some of those, breaking our tests
entries = [e for e in entries if (e.label != "")]
return entries
def get_menu_by_label(self, entry_label):
"""Return the first indicator entry found with the given label"""
indicators = self.get_children_by_type(IndicatorEntry, label=entry_label)
return indicators[0] if indicators else None
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current menu view."""
return (self.x, self.y, self.width, self.height)
class WindowButtons(UnityIntrospectionObject):
"""The window buttons class"""
def get_buttons(self, visible_only=True):
"""Return a list of window buttons"""
if visible_only:
return self.get_children_by_type(WindowButton, visible=True)
else:
return self.get_children_by_type(WindowButton)
def get_button(self, type):
buttons = self.get_children_by_type(WindowButton, type=type)
assert(len(buttons) == 1)
return buttons[0]
@property
def visible(self):
return len(self.get_buttons()) != 0
@property
def close(self):
return self.get_button("Close")
@property
def minimize(self):
return self.get_button("Minimize")
@property
def unmaximize(self):
return self.get_button("Unmaximize")
@property
def maximize(self):
return self.get_button("Maximize")
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current panel."""
return (self.x, self.y, self.width, self.height)
class WindowButton(UnityIntrospectionObject):
"""The Window WindowButton class."""
def __init__(self, *args, **kwargs):
super(WindowButton, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def mouse_move_to(self):
target_x = self.x + self.width / 2
target_y = self.y + self.height / 2
self._mouse.move(target_x, target_y, rate=20, time_between_events=0.005)
def mouse_click(self):
self.mouse_move_to()
sleep(.2)
self._mouse.click(press_duration=.1)
sleep(.01)
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the window button."""
return (self.x, self.y, self.width, self.height)
class GrabArea(UnityIntrospectionObject):
"""The grab area class"""
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the grab area."""
return (self.x, self.y, self.width, self.height)
class Indicators(UnityIntrospectionObject):
"""The Indicators View class."""
def get_ordered_entries(self, visible_only=True):
"""Return a list of indicators, ordered by their priority"""
if visible_only:
entries = self.get_children_by_type(IndicatorEntry, visible=True)
else:
entries = self.get_children_by_type(IndicatorEntry)
return sorted(entries, key=lambda entry: entry.priority)
def get_indicator_by_name_hint(self, name_hint):
"""Return the IndicatorEntry with the name_hint"""
indicators = self.get_children_by_type(IndicatorEntry, name_hint=name_hint)
assert(len(indicators) == 1)
return indicators[0]
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the indicators area."""
return (self.x, self.y, self.width, self.height)
class IndicatorEntry(UnityIntrospectionObject):
"""The IndicatorEntry View class."""
def __init__(self, *args, **kwargs):
super(IndicatorEntry, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def mouse_move_to(self):
target_x = self.x + self.width / 2
target_y = self.y + self.height / 2
self._mouse.move(target_x, target_y, rate=20, time_between_events=0.005)
def mouse_click(self, button=1):
self.mouse_move_to()
sleep(.2)
assert(self.visible)
self._mouse.click(press_duration=.1)
sleep(.01)
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the indicator entry."""
return (self.x, self.y, self.width, self.height)
@property
def menu_geometry(self):
"""Returns a tuple of (x,y,w,h) for the opened menu geometry."""
return (self.menu_x, self.menu_y, self.menu_width, self.menu_height)
def __repr__(self):
with self.no_automatic_refreshing():
return "<IndicatorEntry 0x%x (%s)>" % (id(self), self.label)
class Tray(UnityIntrospectionObject):
"""A panel tray object."""
|
gpl-3.0
| -4,977,705,003,350,863,000
| 31.193182
| 90
| 0.618602
| false
| 3.709329
| false
| false
| false
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
src/device.py
|
1
|
7566
|
import xml.dom.minidom as dom
from math import *
class Element:
x = 0
y = 0
def __eq__(self,other):
if(isinstance(other,Element)):
if( self.x == other.x and self.y == other.y):
return True
else:
return False
return NotImplemented
def __ne__(self,other):
if(isinstance(other,Element)):
if(self.x != other.x or self.y != other.y):
return True
else:
return False
return NotImplemented
def __init__(self, xpos, ypos):
self.x = xpos
self.y = ypos
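# Hedged usage sketch (illustrative coordinates, not part of the original
# module): Element instances compare by their (x, y) coordinates, and the rest
# of the module sorts them with the same (x, y) key.
def _example_element_usage():
    a = Element(3, 5)
    b = Element(3, 5)
    c = Element(4, 1)
    assert a == b and a != c
    ordered = sorted([c, a], key=lambda obj: (obj.x, obj.y))
    return ordered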
class Device:
__columns = 0
__rows = 0
__occupiedSpace = list()
__freeSpace = list()
__firstUnit = 0
def __init__(self):
pass
def getColumns(self):
return self.__columns
def getRows(self):
return self.__rows
def getOccupiedSpace(self):
return self.__occupiedSpace
def getFreeSpace(self):
return self.__freeSpace
def setFreeSpaceFromFile(self, xmlDocument):
self.setOccupiedSpaceFromFile(xmlDocument)
occ = self.getOccupiedSpace()
oldY = occ[0].y
freeList = list()
for element in occ:
diff = element.y - oldY
if(diff > 1):
for i in range(1,diff):
newElement = Element(element.x, oldY + i)
freeList.append(newElement)
oldY = element.y
sortedFreeList = sorted(freeList, key= lambda obj: (obj.x, obj.y))
self.__freeSpace = sortedFreeList
def setDeviceSizeFromFile(self,xmlDocument):
size = xmlDocument.getElementsByTagName("size")
size = size[0]
self.__columns = size.getAttribute("cols")
self.__rows = size.getAttribute("rows")
def setOccupiedSpaceFromFile(self,xmlDocument):
obstacles = xmlDocument.getElementsByTagName("obstacle")
units = xmlDocument.getElementsByTagName("unit")
self.getFirstUnitOccurence(units)
occupied = obstacles + units
occ = list()
for element in occupied:
x = element.getAttribute("x")
y = element.getAttribute("y")
newElement = Element(int(x),int(y))
occ.append(newElement)
sortedOccupied = sorted(occ, key= lambda obj: (obj.x, obj.y))
self.__occupiedSpace = sortedOccupied
def generateLinearThermometers(self,xmlOutputDocument, thermNumber):
root = xmlOutputDocument.getElementsByTagName("board")
root = root[0]
oldY = 0
thermID = 0
occList = self.getOccupiedSpace()
for occ in occList:
col = occ.x
row = occ.y
diff = row - oldY
if(diff > 1):
for i in range(1,diff):
newTherm = xmlOutputDocument.createElement("thermometer")
newTherm.setAttribute("name", "t{}".format(thermID))
newTherm.setAttribute("type", "RO7")
newTherm.setAttribute("col", str(col))
newTherm.setAttribute("row", str(oldY + i))
root.appendChild(newTherm)
thermID = thermID + 1
if(thermID > int(thermNumber) - 1):
return xmlOutputDocument
oldY = row
return xmlOutputDocument
def getFreeRowLenList(self,freeList):
rowsLen = list()
freeList = self.getFreeSpace()
oldRowLen = freeList[0].x
#make a list of rows length
for element in freeList:
diff = element.x - oldRowLen
if(diff < 0):
rowsLen.append(int(oldRowLen + 1))
elif(freeList[-1] is element):
rowsLen.append(int(element.x + 1))
oldRowLen = element.x
return rowsLen
def getFirstUnitOccurence(self,units):
unitsList = list()
for unit in units:
x = unit.getAttribute("x")
y = unit.getAttribute("y")
newElement = Element(int(x),int(y))
unitsList.append(newElement)
        firstElement = unitsList[0]
self.__firstUnit = firstElement
print("First Unit x: {} y: {}".format(firstElement.x,firstElement.y))
def getFreeColumnLenList(self,freeList):
colsLen = list()
oldColLen = freeList[0].y
for element in freeList:
diff = element.y - oldColLen
if(diff < 0):
colsLen.append(int(oldColLen + 1))
elif(freeList[-1] is element):
                colsLen.append(int(element.y + 1))
            oldColLen = element.y
        return colsLen
def getFreeRowLen(self,sortedFreeList):
maximum = -1
l = 0
listLen = len(sortedFreeList)
colLen = self.getFreeColLen(sortedFreeList)
for i in range(0,listLen,colLen):
            if(sortedFreeList[i].x > maximum):
maximum = sortedFreeList[i].x
l = l + 1
else:
break
return l
def getFreeColLen(self,sortedFreeList):
maximum = -1
l = 0
for i in sortedFreeList:
if(i.y > maximum):
maximum = i.y
l = l + 1
else:
break
return l
def getFreeSingleRow(self,freeList,index):
singleColumnList = list()
for item in freeList:
if(item.y == index):
singleColumnList.append(item.x)
return singleColumnList
def getFreeSingleColumn(self, freeList, index):
singleRowList = list()
for item in freeList:
if(item.x == index):
singleRowList.append(item.y)
elif(item.x > index):
break
return singleRowList
def generateCoords(self, coordList, termNumber):
coordLen = len(coordList)
posList = list()
for i in range(1,coordLen):
termsLeft = termNumber
newList = list()
for item in range(0,coordLen,i):
newList.append(coordList[item])
termsLeft = termsLeft - 1
if(termsLeft < 0 or termsLeft == 0):
break
if(termsLeft == 0):
posList = newList
return posList
def generateThermometersNet(self, xmlOutDocument,thermsInRow, rowsNumber):
xmlList = xmlOutDocument.getElementsByTagName("board")
root = xmlList[0]
freeList = self.getFreeSpace()
row = self.getFreeSingleRow(freeList,6)
column = self.getFreeSingleColumn(freeList,38)
colsCoords = self.generateCoords(row,int(thermsInRow))
rowsCoords = self.generateCoords(column, int(rowsNumber))
thermID = 0
for row in rowsCoords:
for col in colsCoords:
newElement = xmlOutDocument.createElement("thermometer")
newElement.setAttribute("type","RO7")
newElement.setAttribute("name","T{}".format(str(thermID)))
thermID = thermID + 1
newElement.setAttribute("col",str(col))
newElement.setAttribute("row",str(row))
root.appendChild(newElement)
return xmlOutDocument
def generateXmlHeader(self, xmlOutputDocument, ncdFile):
root = xmlOutputDocument.createElement("board")
root.setAttribute("device", "Virtex5")
root.setAttribute("mode", "emulation")
root.setAttribute("version", "0.1")
xmlOutputDocument.appendChild(root)
inputComponent = xmlOutputDocument.createElement("input")
outputComponent = xmlOutputDocument.createElement("output")
inputComponent.setAttribute("name", str(ncdFile))
ncdName = str(ncdFile).rsplit(".")
ncdName = ncdName[0]
outputComponent.setAttribute("name", "{}_new.ncd".format(ncdName))
root.appendChild(inputComponent)
root.appendChild(outputComponent)
return xmlOutputDocument
|
mit
| 7,512,026,252,719,605,000
| 23.888158
| 85
| 0.597145
| false
| 3.582386
| false
| false
| false
|
beeftornado/sentry
|
tests/sentry/lang/native/test_utils.py
|
1
|
1580
|
from __future__ import absolute_import
from sentry.lang.native.utils import get_sdk_from_event, is_minidump_event
def test_get_sdk_from_event():
sdk_info = get_sdk_from_event(
{
"debug_meta": {
"sdk_info": {
"sdk_name": "iOS",
"version_major": 9,
"version_minor": 3,
"version_patchlevel": 0,
}
}
}
)
assert sdk_info["sdk_name"] == "iOS"
assert sdk_info["version_major"] == 9
assert sdk_info["version_minor"] == 3
assert sdk_info["version_patchlevel"] == 0
sdk_info = get_sdk_from_event(
{"contexts": {"os": {"type": "os", "name": "iOS", "version": "9.3.1.1234"}}}
)
assert sdk_info["sdk_name"] == "iOS"
assert sdk_info["version_major"] == 9
assert sdk_info["version_minor"] == 3
assert sdk_info["version_patchlevel"] == 1
def test_is_minidump():
assert is_minidump_event({"exception": {"values": [{"mechanism": {"type": "minidump"}}]}})
assert not is_minidump_event({"exception": {"values": [{"mechanism": {"type": "other"}}]}})
assert not is_minidump_event({"exception": {"values": [{"mechanism": {"type": None}}]}})
assert not is_minidump_event({"exception": {"values": [{"mechanism": None}]}})
assert not is_minidump_event({"exception": {"values": [None]}})
assert not is_minidump_event({"exception": {"values": []}})
assert not is_minidump_event({"exception": {"values": None}})
assert not is_minidump_event({"exception": None})
|
bsd-3-clause
| 1,585,955,638,993,456,400
| 36.619048
| 95
| 0.555063
| false
| 3.419913
| false
| false
| false
|
YuxuanLing/trunk
|
trunk/code/study/python/core_python_appilication/ch04/mtsleepF.py
|
1
|
1118
|
#!/usr/bin/env python
from atexit import register
from random import randrange
from threading import Thread, Lock, currentThread
from time import sleep, ctime
class CleanOutputSet(set):
def __str__(self):
return ', '.join(x for x in self)
lock = Lock()
loops = (randrange(2, 5) for x in xrange(randrange(3, 7)))
remaining = CleanOutputSet()
def loop(nsec):
myname = currentThread().name
lock.acquire()
remaining.add(myname)
print '[%s] Started %s' % (ctime(), myname) #print '[{0}] Started {1}'.format(ctime(), myname)
lock.release()
sleep(nsec)
lock.acquire()
remaining.remove(myname)
print '[%s] Completed %s (%d secs)' % ( #print '[{0}] Completed {1} ({2} secs)'.format(
ctime(), myname, nsec)
print ' (remaining: %s)' % (remaining or 'NONE') #print ' (remaining: {0})'.format(remaining or 'NONE')
lock.release()
def _main():
for pause in loops:
Thread(target=loop, args=(pause,)).start()
@register
def _atexit():
print 'all DONE at:', ctime()
if __name__ == '__main__':
_main()
|
gpl-3.0
| 8,837,571,305,694,491,000
| 26.666667
| 113
| 0.595707
| false
| 3.317507
| false
| false
| false
|
kaizentech/skeleton
|
urls.py
|
1
|
1052
|
"""temp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
url(r'^admin/', admin.site.urls),
]
# Change to whatever you like
admin.site.site_title = '{{ project_name }} Administration'
admin.site.index_title = '{{ project_name }} Administration'
admin.site.site_header = '{{ project_name }} Administration'
|
apache-2.0
| 4,995,287,408,869,362,000
| 37.962963
| 79
| 0.701521
| false
| 3.44918
| false
| false
| false
|
habibmasuro/django-wiki
|
wiki/urls.py
|
1
|
8204
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from wiki.conf import settings
from wiki.core.plugins import registry
from wiki.views import article, accounts
from wiki.core.utils import get_class_from_str
from django.contrib.auth.views import password_reset
class WikiURLPatterns(object):
'''
configurator for wiki urls.
To customize, you can define your own subclass, either overriding
the view providers, or overriding the functions that collect
views.
'''
# basic views
article_view_class = article.ArticleView
article_create_view_class = article.Create
article_delete_view_class = article.Delete
article_deleted_view_class = article.Deleted
article_dir_view_class = article.Dir
article_edit_view_class = article.Edit
article_preview_view_class = article.Preview
article_history_view_class = article.History
article_settings_view_class = article.Settings
article_source_view_class = article.Source
article_plugin_view_class = article.Plugin
revision_change_view = article.ChangeRevisionView
revision_merge_view = 'wiki.views.article.merge'
search_view_class = settings.SEARCH_VIEW
article_diff_view = 'wiki.views.article.diff'
# account views
signup_view_class = accounts.Signup
login_view_class = accounts.Login
logout_view_class = accounts.Logout
def get_urls(self):
urlpatterns = self.get_root_urls()
urlpatterns += self.get_accounts_urls()
urlpatterns += self.get_revision_urls()
urlpatterns += self.get_article_urls()
urlpatterns += self.get_plugin_urls()
# This ALWAYS has to be the last of all the patterns since
# the paths in theory could wrongly match other targets.
urlpatterns += self.get_article_path_urls()
return urlpatterns
def get_root_urls(self):
urlpatterns = patterns('',
url('^$', self.article_view_class.as_view(), name='root', kwargs={'path': ''}),
url('^create-root/$', article.CreateRootView.as_view(), name='root_create'),
url('^missing-root/$', article.MissingRootView.as_view(), name='root_missing'),
url('^_search/$', get_class_from_str(self.search_view_class).as_view(), name='search'),
url('^_revision/diff/(?P<revision_id>\d+)/$', self.article_diff_view, name='diff'),
)
return urlpatterns
def get_accounts_urls(self):
urlpatterns = patterns('',
url('^_accounts/sign-up/$', self.signup_view_class.as_view(), name='signup'),
url('^_accounts/logout/$', self.logout_view_class.as_view(), name='logout'),
url('^_accounts/login/$', self.login_view_class.as_view(), name='login'),
url(r'^accounts/password/reset$', 'django.contrib.auth.views.password_reset', {'template_name': 'wiki/registration/password_reset_form.html'}),
)
return urlpatterns
def get_revision_urls(self):
urlpatterns = patterns('',
            # This one doesn't work because it doesn't know where to redirect after...
url('^_revision/change/(?P<article_id>\d+)/(?P<revision_id>\d+)/$', self.revision_change_view.as_view(), name='change_revision'),
url('^_revision/preview/(?P<article_id>\d+)/$', self.article_preview_view_class.as_view(), name='preview_revision'),
url('^_revision/merge/(?P<article_id>\d+)/(?P<revision_id>\d+)/preview/$', self.revision_merge_view, name='merge_revision_preview', kwargs={'preview': True}),
)
return urlpatterns
def get_article_urls(self):
urlpatterns = patterns('',
# Paths decided by article_ids
url('^(?P<article_id>\d+)/$', self.article_view_class.as_view(), name='get'),
url('^(?P<article_id>\d+)/delete/$', self.article_delete_view_class.as_view(), name='delete'),
url('^(?P<article_id>\d+)/deleted/$', self.article_deleted_view_class.as_view(), name='deleted'),
url('^(?P<article_id>\d+)/edit/$', self.article_edit_view_class.as_view(), name='edit'),
url('^(?P<article_id>\d+)/preview/$', self.article_preview_view_class.as_view(), name='preview'),
url('^(?P<article_id>\d+)/history/$', self.article_history_view_class.as_view(), name='history'),
url('^(?P<article_id>\d+)/settings/$', self.article_settings_view_class.as_view(), name='settings'),
url('^(?P<article_id>\d+)/source/$', self.article_source_view_class.as_view(), name='source'),
url('^(?P<article_id>\d+)/revision/change/(?P<revision_id>\d+)/$', self.revision_change_view.as_view(), name='change_revision'),
url('^(?P<article_id>\d+)/revision/merge/(?P<revision_id>\d+)/$', self.revision_merge_view, name='merge_revision'),
url('^(?P<article_id>\d+)/plugin/(?P<slug>\w+)/$', self.article_plugin_view_class.as_view(), name='plugin'),
)
return urlpatterns
def get_article_path_urls(self):
urlpatterns = patterns('',
# Paths decided by URLs
url('^(?P<path>.+/|)_create/$', self.article_create_view_class.as_view(), name='create'),
url('^(?P<path>.+/|)_delete/$', self.article_delete_view_class.as_view(), name='delete'),
url('^(?P<path>.+/|)_deleted/$', self.article_deleted_view_class.as_view(), name='deleted'),
url('^(?P<path>.+/|)_edit/$', self.article_edit_view_class.as_view(), name='edit'),
url('^(?P<path>.+/|)_preview/$', self.article_preview_view_class.as_view(), name='preview'),
url('^(?P<path>.+/|)_history/$', self.article_history_view_class.as_view(), name='history'),
url('^(?P<path>.+/|)_dir/$', self.article_dir_view_class.as_view(), name='dir'),
url('^(?P<path>.+/|)_settings/$', self.article_settings_view_class.as_view(), name='settings'),
url('^(?P<path>.+/|)_source/$', self.article_source_view_class.as_view(), name='source'),
url('^(?P<path>.+/|)_revision/change/(?P<revision_id>\d+)/$', self.revision_change_view.as_view(), name='change_revision'),
url('^(?P<path>.+/|)_revision/merge/(?P<revision_id>\d+)/$', self.revision_merge_view, name='merge_revision'),
url('^(?P<path>.+/|)_plugin/(?P<slug>\w+)/$', self.article_plugin_view_class.as_view(), name='plugin'),
# This should always go last!
url('^(?P<path>.+/|)$', self.article_view_class.as_view(), name='get'),
)
return urlpatterns
def get_plugin_urls(self):
urlpatterns = patterns('',)
for plugin in registry.get_plugins().values():
slug = getattr(plugin, 'slug', None)
if slug:
article_urlpatterns = plugin.urlpatterns.get('article', [])
urlpatterns += patterns('',
url('^(?P<article_id>\d+)/plugin/' + slug + '/', include(article_urlpatterns)),
url('^(?P<path>.+/|)_plugin/' + slug + '/', include(article_urlpatterns)),
)
root_urlpatterns = plugin.urlpatterns.get('root', [])
urlpatterns += patterns('',
url('^_plugin/' + slug + '/', include(root_urlpatterns)),
)
return urlpatterns
def get_pattern(app_name="wiki", namespace="wiki", url_config_class=None):
"""Every url resolution takes place as "wiki:view_name".
You should not attempt to have multiple deployments of the wiki in a
single Django project.
https://docs.djangoproject.com/en/dev/topics/http/urls/#topics-http-reversing-url-namespaces
"""
if url_config_class is None:
url_config_classname=getattr(settings, 'URL_CONFIG_CLASS', None)
if url_config_classname is None:
url_config_class = WikiURLPatterns
else:
url_config_class = get_class_from_str(url_config_classname)
urlpatterns = url_config_class().get_urls()
return urlpatterns, app_name, namespace
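# Hedged usage sketch (not part of the original module): a project could mount
# the wiki in its ROOT_URLCONF roughly like this; MyArticleView and the URL
# prefix are hypothetical.
#
# from wiki.urls import get_pattern as get_wiki_pattern
#
# class MyWikiURLPatterns(WikiURLPatterns):
#     article_view_class = MyArticleView      # override one of the view providers
#
# urlpatterns += patterns('',
#     url(r'^wiki/', get_wiki_pattern(url_config_class=MyWikiURLPatterns)),
# )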
######################
# PLUGINS
######################
from wiki.core.plugins.loader import load_wiki_plugins
load_wiki_plugins()
|
gpl-3.0
| 7,259,859,571,351,220,000
| 50.597484
| 175
| 0.60312
| false
| 3.777164
| true
| false
| false
|
insomnia-lab/calibre
|
src/calibre/ebooks/metadata/book/render.py
|
1
|
8008
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from functools import partial
from calibre import prepare_string_for_xml, force_unicode
from calibre.ebooks.metadata import fmt_sidx
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
from calibre.constants import filesystem_encoding
from calibre.library.comments import comments_to_html
from calibre.utils.icu import sort_key
from calibre.utils.formatter import EvalFormatter
from calibre.utils.date import is_date_undefined
from calibre.utils.localization import calibre_langcode_to_name
default_sort = ('title', 'title_sort', 'authors', 'author_sort', 'series', 'rating', 'pubdate', 'tags', 'publisher', 'identifiers')
def field_sort(mi, name):
try:
title = mi.metadata_for_field(name)['name']
except:
title = 'zzz'
return {x:(i, None) for i, x in enumerate(default_sort)}.get(name, (10000, sort_key(title)))
def displayable_field_keys(mi):
for k in mi.all_field_keys():
try:
m = mi.metadata_for_field(k)
except:
continue
if (
m is not None and m['kind'] == 'field' and m['datatype'] is not None and
k not in ('au_map', 'marked', 'ondevice', 'cover', 'series_sort') and
not k.endswith('_index')
):
yield k
def get_field_list(mi):
for field in sorted(displayable_field_keys(mi), key=partial(field_sort, mi)):
yield field, True
def mi_to_html(mi, field_list=None, default_author_link=None, use_roman_numbers=True, rating_font='Liberation Serif'):
if field_list is None:
field_list = get_field_list(mi)
ans = []
comment_fields = []
isdevice = not hasattr(mi, 'id')
row = u'<td class="title">%s</td><td class="value">%s</td>'
p = prepare_string_for_xml
a = partial(prepare_string_for_xml, attribute=True)
for field in (field for field, display in field_list if display):
try:
metadata = mi.metadata_for_field(field)
except:
continue
if not metadata:
continue
if field == 'sort':
field = 'title_sort'
if metadata['datatype'] == 'bool':
isnull = mi.get(field) is None
else:
isnull = mi.is_null(field)
if isnull:
continue
name = metadata['name']
if not name:
name = field
name += ':'
if metadata['datatype'] == 'comments' or field == 'comments':
val = getattr(mi, field)
if val:
val = force_unicode(val)
comment_fields.append(comments_to_html(val))
elif metadata['datatype'] == 'rating':
val = getattr(mi, field)
if val:
val = val/2.0
ans.append((field,
u'<td class="title">%s</td><td class="rating value" '
'style=\'font-family:"%s"\'>%s</td>'%(
name, rating_font, u'\u2605'*int(val))))
elif metadata['datatype'] == 'composite' and \
metadata['display'].get('contains_html', False):
val = getattr(mi, field)
if val:
val = force_unicode(val)
ans.append((field,
row % (name, comments_to_html(val))))
elif field == 'path':
if mi.path:
path = force_unicode(mi.path, filesystem_encoding)
scheme = u'devpath' if isdevice else u'path'
url = prepare_string_for_xml(path if isdevice else
unicode(mi.id), True)
pathstr = _('Click to open')
extra = ''
if isdevice:
durl = url
if durl.startswith('mtp:::'):
durl = ':::'.join((durl.split(':::'))[2:])
extra = '<br><span style="font-size:smaller">%s</span>'%(
prepare_string_for_xml(durl))
link = u'<a href="%s:%s" title="%s">%s</a>%s' % (scheme, url,
prepare_string_for_xml(path, True), pathstr, extra)
ans.append((field, row % (name, link)))
elif field == 'formats':
if isdevice:
continue
path = ''
if mi.path:
h, t = os.path.split(mi.path)
path = '/'.join((os.path.basename(h), t))
data = ({
'fmt':x, 'path':a(path or ''), 'fname':a(mi.format_files.get(x, '')),
'ext':x.lower(), 'id':mi.id
} for x in mi.formats)
fmts = [u'<a title="{path}/{fname}.{ext}" href="format:{id}:{fmt}">{fmt}</a>'.format(**x) for x in data]
ans.append((field, row % (name, u', '.join(fmts))))
elif field == 'identifiers':
urls = urls_from_identifiers(mi.identifiers)
links = [u'<a href="%s" title="%s:%s">%s</a>' % (a(url), a(id_typ), a(id_val), p(name))
for name, id_typ, id_val, url in urls]
links = u', '.join(links)
if links:
ans.append((field, row % (_('Ids')+':', links)))
elif field == 'authors' and not isdevice:
authors = []
formatter = EvalFormatter()
for aut in mi.authors:
link = ''
if mi.author_link_map[aut]:
link = mi.author_link_map[aut]
elif default_author_link:
vals = {'author': aut.replace(' ', '+')}
try:
vals['author_sort'] = mi.author_sort_map[aut].replace(' ', '+')
except:
vals['author_sort'] = aut.replace(' ', '+')
link = formatter.safe_format(
default_author_link, vals, '', vals)
aut = p(aut)
if link:
authors.append(u'<a calibre-data="authors" title="%s" href="%s">%s</a>'%(a(link), a(link), aut))
else:
authors.append(aut)
ans.append((field, row % (name, u' & '.join(authors))))
elif field == 'languages':
if not mi.languages:
continue
names = filter(None, map(calibre_langcode_to_name, mi.languages))
ans.append((field, row % (name, u', '.join(names))))
else:
val = mi.format_field(field)[-1]
if val is None:
continue
val = p(val)
if metadata['datatype'] == 'series':
sidx = mi.get(field+'_index')
if sidx is None:
sidx = 1.0
val = _('Book %(sidx)s of <span class="series_name">%(series)s</span>')%dict(
sidx=fmt_sidx(sidx, use_roman=use_roman_numbers),
series=p(getattr(mi, field)))
elif metadata['datatype'] == 'datetime':
aval = getattr(mi, field)
if is_date_undefined(aval):
continue
ans.append((field, row % (name, val)))
dc = getattr(mi, 'device_collections', [])
if dc:
dc = u', '.join(sorted(dc, key=sort_key))
ans.append(('device_collections',
row % (_('Collections')+':', dc)))
def classname(field):
try:
dt = mi.metadata_for_field(field)['datatype']
except:
dt = 'text'
return 'datatype_%s'%dt
ans = [u'<tr id="%s" class="%s">%s</tr>'%(field.replace('#', '_'),
classname(field), html) for field, html in ans]
# print '\n'.join(ans)
return u'<table class="fields">%s</table>'%(u'\n'.join(ans)), comment_fields
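# Hedged usage sketch (not part of the original module): mi is assumed to be a
# calibre Metadata object already populated elsewhere; the author-link template
# is a hypothetical example value.
def _example_mi_to_html(mi):
    table_html, comment_blocks = mi_to_html(
        mi,
        default_author_link='https://en.wikipedia.org/w/index.php?search={author}',
        use_roman_numbers=True)
    return table_html, comment_blocks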
|
gpl-3.0
| 1,372,773,711,632,978,400
| 39.649746
| 131
| 0.49975
| false
| 3.87234
| false
| false
| false
|
pferreir/indico-backup
|
indico/web/flask/blueprints/legacy.py
|
1
|
21964
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico. If not, see <http://www.gnu.org/licenses/>.
import MaKaC.webinterface.rh.xmlGateway as mod_rh_xmlGateway
from indico.web.flask.wrappers import IndicoBlueprint
legacy = IndicoBlueprint('legacy', __name__)
# Routes for xmlGateway.py
legacy.add_url_rule('/xmlGateway.py',
'xmlGateway',
mod_rh_xmlGateway.RHLoginStatus,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/getCategoryInfo',
'xmlGateway-getCategoryInfo',
mod_rh_xmlGateway.RHCategInfo,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/getStatsIndico',
'xmlGateway-getStatsIndico',
mod_rh_xmlGateway.RHStatsIndico,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/loginStatus',
'xmlGateway-loginStatus',
mod_rh_xmlGateway.RHLoginStatus,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/signIn',
'xmlGateway-signIn',
mod_rh_xmlGateway.RHSignIn,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/signOut',
'xmlGateway-signOut',
mod_rh_xmlGateway.RHSignOut,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/webcastForthcomingEvents',
'xmlGateway-webcastForthcomingEvents',
mod_rh_xmlGateway.RHWebcastForthcomingEvents,
methods=('GET', 'POST'))
legacy.add_url_rule('/xmlGateway.py/webcastOnAir',
'xmlGateway-webcastOnAir',
mod_rh_xmlGateway.RHWebcastOnAir,
methods=('GET', 'POST'))
# Legacy endpoints defined in htdocs/*.py files (which need compatibility routes)
# Note: When removing/renaming endpoints, feel free to remove them in here, too, but
# it's not absolutely necessary - if there's no non-legacy endpoint with that name
# the entry in here simply does nothing.
legacy_endpoints = {
'about', 'abstractDisplay', 'abstractDisplay-getAttachedFile', 'abstractDisplay-pdf', 'abstractManagment',
'abstractManagment-abstractToPDF', 'abstractManagment-accept', 'abstractManagment-acceptMultiple',
'abstractManagment-backToSubmitted', 'abstractManagment-changeTrack', 'abstractManagment-comments',
'abstractManagment-directAccess', 'abstractManagment-editComment', 'abstractManagment-editData',
'abstractManagment-markAsDup', 'abstractManagment-mergeInto', 'abstractManagment-newComment',
'abstractManagment-notifLog', 'abstractManagment-orderByRating', 'abstractManagment-propToAcc',
'abstractManagment-propToRej', 'abstractManagment-reject', 'abstractManagment-rejectMultiple',
'abstractManagment-remComment', 'abstractManagment-trackProposal', 'abstractManagment-unMarkAsDup',
'abstractManagment-unmerge', 'abstractManagment-withdraw', 'abstractManagment-xml', 'abstractModify',
'abstractReviewing-notifTpl', 'abstractReviewing-notifTplCondNew', 'abstractReviewing-notifTplCondRem',
'abstractReviewing-notifTplDisplay', 'abstractReviewing-notifTplDown', 'abstractReviewing-notifTplEdit',
'abstractReviewing-notifTplNew', 'abstractReviewing-notifTplPreview', 'abstractReviewing-notifTplRem',
'abstractReviewing-notifTplUp', 'abstractReviewing-reviewingSetup', 'abstractReviewing-reviewingTeam',
'abstractsManagment', 'abstractsManagment-abstractsActions', 'abstractsManagment-mergeAbstracts',
'abstractsManagment-newAbstract', 'abstractsManagment-participantList', 'abstractSubmission',
'abstractSubmission-confirmation', 'abstractTools', 'abstractTools-delete', 'abstractWithdraw',
'abstractWithdraw-recover', 'adminAnnouncement', 'adminAnnouncement-save', 'adminConferenceStyles', 'adminLayout',
'adminLayout-addStyle', 'adminLayout-deleteStyle', 'adminLayout-saveSocial', 'adminLayout-saveTemplateSet',
'adminLayout-setDefaultPDFOptions', 'adminLayout-styles', 'adminList',
'adminList-switchNewsActive', 'adminMaintenance', 'adminMaintenance-pack', 'adminMaintenance-performPack',
'adminMaintenance-performTmpCleanup', 'adminMaintenance-tmpCleanup', 'adminPlugins', 'adminPlugins-clearAllInfo',
'adminPlugins-reload', 'adminPlugins-reloadAll', 'adminPlugins-saveOptionReloadAll',
'adminPlugins-savePluginOptions', 'adminPlugins-savePluginTypeOptions', 'adminPlugins-toggleActive',
'adminPlugins-toggleActivePluginType', 'adminProtection', 'adminServices-analytics', 'adminServices-apiKeys',
'adminServices-apiOptions', 'adminServices-apiOptionsSet', 'adminServices-ipbasedacl',
'adminServices-ipbasedacl_fagrant', 'adminServices-ipbasedacl_farevoke', 'adminServices-oauthAuthorized',
'adminServices-oauthConsumers', 'adminServices-saveAnalytics', 'adminServices-webcast',
'adminServices-webcastAddChannel', 'adminServices-webcastAddOnAir', 'adminServices-webcastAddStream',
'adminServices-webcastAddWebcast', 'adminServices-webcastArchive', 'adminServices-webcastArchiveWebcast',
'adminServices-webcastManualSynchronization', 'adminServices-webcastModifyChannel',
'adminServices-webcastMoveChannelDown', 'adminServices-webcastMoveChannelUp', 'adminServices-webcastRemoveChannel',
'adminServices-webcastRemoveFromAir', 'adminServices-webcastRemoveStream', 'adminServices-webcastRemoveWebcast',
'adminServices-webcastSaveWebcastSynchronizationURL', 'adminServices-webcastSetup',
'adminServices-webcastSwitchChannel', 'adminServices-webcastUnArchiveWebcast', 'adminSystem', 'adminSystem-modify',
'adminUpcomingEvents', 'assignContributions', 'assignContributions-downloadAcceptedPapers', 'badgeTemplates',
'badgeTemplates-badgeDesign', 'badgeTemplates-badgePrinting', 'categoryAC', 'categoryAC-setVisibility',
'categoryConfCreationControl-setCreateConferenceControl', 'categoryConfCreationControl-setNotifyCreation',
'categoryCreation', 'categoryCreation-create', 'categoryDataModification', 'categoryDataModification-modify',
'categoryDataModification-tasksOption', 'categoryDisplay', 'categoryDisplay-atom', 'categoryDisplay-getIcon',
'categoryDisplay-ical', 'categoryDisplay-rss', 'categoryFiles', 'categoryFiles-addMaterial', 'categoryMap',
'categoryModification', 'categoryModification-actionConferences', 'categoryModification-actionSubCategs',
'categoryStatistics', 'categoryTasks', 'categoryTasks-taskAction', 'categoryTools', 'categoryTools-delete',
'categOverview', 'categOverview-rss', 'changeLang', 'confAbstractBook', 'confAuthorIndex', 'confDisplayEvaluation',
'confDisplayEvaluation-display', 'confDisplayEvaluation-modif', 'confDisplayEvaluation-signIn',
'confDisplayEvaluation-submit', 'confDisplayEvaluation-submitted', 'conferenceCFA', 'conferenceCreation',
'conferenceCreation-createConference', 'conferenceDisplay', 'conferenceDisplay-abstractBook',
'conferenceDisplay-abstractBookLatex', 'conferenceDisplay-accessKey', 'conferenceDisplay-getCSS',
'conferenceDisplay-getLogo', 'conferenceDisplay-getPic', 'conferenceDisplay-ical', 'conferenceDisplay-marcxml',
'conferenceDisplay-matPkg', 'conferenceDisplay-next', 'conferenceDisplay-performMatPkg', 'conferenceDisplay-prev',
'conferenceDisplay-xml', 'conferenceModification', 'conferenceModification-addContribType',
'conferenceModification-close', 'conferenceModification-closeModifKey', 'conferenceModification-data',
'conferenceModification-dataPerform', 'conferenceModification-editContribType',
'conferenceModification-managementAccess', 'conferenceModification-materialsAdd',
'conferenceModification-materialsShow', 'conferenceModification-modifKey', 'conferenceModification-open',
'conferenceModification-removeContribType',
'conferenceModification-screenDates', 'conferenceOtherViews', 'conferenceProgram', 'conferenceProgram-pdf',
'conferenceTimeTable', 'conferenceTimeTable-customizePdf', 'conferenceTimeTable-pdf', 'confListContribToJudge',
'confListContribToJudge-asEditor', 'confListContribToJudge-asReviewer', 'confLogin', 'confLogin-active',
'confLogin-disabledAccount', 'confLogin-sendActivation', 'confLogin-sendLogin', 'confLogin-unactivatedAccount',
'confModBOA', 'confModBOA-toogleShowIds', 'confModifAC', 'confModifAC-grantModificationToAllConveners',
'confModifAC-grantSubmissionToAllSpeakers', 'confModifAC-modifySessionCoordRights',
'confModifAC-removeAllSubmissionRights', 'confModifAC-setVisibility', 'confModifCFA', 'confModifCFA-absFieldDown',
'confModifCFA-absFieldUp', 'confModifCFA-abstractFields', 'confModifCFA-changeStatus',
'confModifCFA-makeTracksMandatory', 'confModifCFA-modifyData', 'confModifCFA-performModifyData',
'confModifCFA-preview', 'confModifCFA-removeAbstractField', 'confModifCFA-switchAttachFiles',
'confModifCFA-switchMultipleTracks', 'confModifCFA-switchSelectSpeakerMandatory',
'confModifCFA-switchShowAttachedFiles', 'confModifCFA-switchShowSelectSpeaker', 'confModifContribList',
'confModifContribList-contribQuickAccess', 'confModifContribList-contribsActions',
'confModifContribList-contribsToPDFMenu', 'confModifContribList-matPkg', 'confModifContribList-moveToSession',
'confModifContribList-participantList', 'confModifContribList-proceedings', 'confModifDisplay',
'confModifDisplay-addLink', 'confModifDisplay-addPage', 'confModifDisplay-addSpacer',
'confModifDisplay-confHeader', 'confModifDisplay-custom', 'confModifDisplay-downLink',
'confModifDisplay-formatTitleBgColor', 'confModifDisplay-formatTitleTextColor', 'confModifDisplay-menu',
'confModifDisplay-modifyData', 'confModifDisplay-modifySystemData', 'confModifDisplay-previewCSS',
'confModifDisplay-removeCSS', 'confModifDisplay-removeLink', 'confModifDisplay-removeLogo',
'confModifDisplay-resources', 'confModifDisplay-saveCSS', 'confModifDisplay-saveLogo', 'confModifDisplay-savePic',
'confModifDisplay-tickerTapeAction', 'confModifDisplay-toggleHomePage', 'confModifDisplay-toggleLinkStatus',
'confModifDisplay-toggleNavigationBar', 'confModifDisplay-toggleSearch', 'confModifDisplay-upLink',
'confModifDisplay-useCSS', 'confModifEpayment', 'confModifEpayment-changeStatus', 'confModifEpayment-dataModif',
'confModifEpayment-enableSection', 'confModifEpayment-modifModule', 'confModifEpayment-performDataModif',
'confModifEvaluation', 'confModifEvaluation-changeStatus', 'confModifEvaluation-dataModif',
'confModifEvaluation-edit', 'confModifEvaluation-editPerformChanges', 'confModifEvaluation-performDataModif',
'confModifEvaluation-preview', 'confModifEvaluation-results', 'confModifEvaluation-resultsOptions',
'confModifEvaluation-resultsSubmittersActions', 'confModifEvaluation-setup', 'confModifEvaluation-specialAction',
'confModifListings-allSpeakers', 'confModifListings-allSpeakersAction', 'confModifLog', 'confModifParticipants',
'confModifParticipants-action', 'confModifParticipants-declinedParticipants', 'confModifParticipants-invitation',
'confModifParticipants-pendingParticipants', 'confModifParticipants-refusal', 'confModifParticipants-setup',
'confModifParticipants-statistics', 'confModifPendingQueues', 'confModifPendingQueues-actionConfSubmitters',
'confModifPendingQueues-actionCoordinators', 'confModifPendingQueues-actionManagers',
'confModifPendingQueues-actionSubmitters', 'confModifProgram', 'confModifProgram-addTrack',
'confModifProgram-deleteTracks', 'confModifProgram-moveTrackDown', 'confModifProgram-moveTrackUp',
'confModifProgram-performAddTrack', 'confModifRegistrants', 'confModifRegistrants-action',
'confModifRegistrants-getAttachedFile', 'confModifRegistrants-modification',
'confModifRegistrants-modifyAccommodation', 'confModifRegistrants-modifyMiscInfo',
'confModifRegistrants-modifyReasonParticipation', 'confModifRegistrants-modifySessions',
'confModifRegistrants-modifySocialEvents', 'confModifRegistrants-modifyStatuses',
'confModifRegistrants-modifyTransaction', 'confModifRegistrants-newRegistrant',
'confModifRegistrants-peformModifyTransaction', 'confModifRegistrants-performModifyAccommodation',
'confModifRegistrants-performModifyMiscInfo', 'confModifRegistrants-performModifyReasonParticipation',
'confModifRegistrants-performModifySessions', 'confModifRegistrants-performModifySocialEvents',
'confModifRegistrants-performModifyStatuses', 'confModifRegistrants-remove', 'confModifRegistrationForm',
'confModifRegistrationForm-actionStatuses', 'confModifRegistrationForm-changeStatus',
'confModifRegistrationForm-dataModif', 'confModifRegistrationForm-modifStatus',
'confModifRegistrationForm-performDataModif', 'confModifRegistrationForm-performModifStatus',
'confModifRegistrationPreview', 'confModifReviewing-access', 'confModifReviewing-downloadTemplate',
'confModifReviewing-paperSetup', 'confModifReviewing-setTemplate', 'confModifReviewingControl',
'confModifSchedule', 'confModifSchedule-edit', 'confModifSchedule-reschedule', 'confModifTools',
'confModifTools-addAlarm', 'confModifTools-allSessionsConveners', 'confModifTools-allSessionsConvenersAction',
'confModifTools-badgeDesign', 'confModifTools-badgeGetBackground', 'confModifTools-badgePrinting',
'confModifTools-badgePrintingPDF', 'confModifTools-badgeSaveBackground', 'confModifTools-clone',
'confModifTools-delete', 'confModifTools-deleteAlarm', 'confModifTools-displayAlarm', 'confModifTools-matPkg',
'confModifTools-modifyAlarm', 'confModifTools-offline', 'confModifTools-performCloning',
'confModifTools-performMatPkg', 'confModifTools-posterDesign', 'confModifTools-posterGetBackground',
'confModifTools-posterPrinting', 'confModifTools-posterPrintingPDF', 'confModifTools-posterSaveBackground',
'confModifTools-saveAlarm', 'confModifTools-sendAlarmNow', 'confModifUserCompetences',
'confRegistrantsDisplay-list', 'confRegistrationFormDisplay', 'confRegistrationFormDisplay-conditions',
'confRegistrationFormDisplay-confirmBooking', 'confRegistrationFormDisplay-confirmBookingDone',
'confRegistrationFormDisplay-creation', 'confRegistrationFormDisplay-creationDone',
'confRegistrationFormDisplay-display', 'confRegistrationFormDisplay-modify',
'confRegistrationFormDisplay-performModify', 'confRegistrationFormDisplay-signIn', 'confSpeakerIndex', 'confUser',
'confUser-created', 'confUser-userExists', 'contact', 'contribAuthorDisplay', 'contributionAC',
'contributionAC-setVisibility', 'contributionDisplay', 'contributionDisplay-ical', 'contributionDisplay-marcxml',
'contributionDisplay-pdf', 'contributionDisplay-xml', 'contributionEditingJudgement', 'contributionGiveAdvice',
'contributionListDisplay', 'contributionListDisplay-contributionsToPDF', 'contributionModification',
'contributionModification-browseMaterial', 'contributionModification-data', 'contributionModification-materials',
'contributionModification-materialsAdd', 'contributionModification-modifData', 'contributionModification-pdf',
'contributionModification-setSession', 'contributionModification-setTrack', 'contributionModification-withdraw',
'contributionModification-xml', 'contributionModifSubCont', 'contributionModifSubCont-actionSubContribs',
'contributionModifSubCont-add', 'contributionModifSubCont-create', 'contributionReviewing',
'contributionReviewing-assignEditing', 'contributionReviewing-assignReferee',
'contributionReviewing-assignReviewing', 'contributionReviewing-contributionReviewingJudgements',
'contributionReviewing-contributionReviewingMaterials', 'contributionReviewing-removeAssignEditing',
'contributionReviewing-removeAssignReferee', 'contributionReviewing-removeAssignReviewing',
'contributionReviewing-reviewingHistory', 'contributionTools', 'contributionTools-delete', 'domainCreation',
'domainCreation-create', 'domainDataModification', 'domainDataModification-modify', 'domainDetails', 'domainList',
'EMail', 'EMail-send', 'EMail-sendcontribparticipants', 'EMail-sendconvener', 'EMail-sendreg', 'errors',
'generalInfoModification', 'generalInfoModification-update', 'getConvertedFile', 'getFile-access',
'getFile-accessKey', 'getFile-flash', 'getFile-offlineEvent', 'getFile-wmv', 'groupDetails', 'groupList',
'groupModification', 'groupModification-update', 'groupRegistration', 'groupRegistration-update', 'help',
'identityCreation', 'identityCreation-changePassword', 'identityCreation-create', 'identityCreation-remove',
'index', 'internalPage', 'JSContent-getVars', 'logOut', 'materialDisplay', 'materialDisplay-accessKey',
'myconference', 'myconference-myContributions', 'myconference-mySessions', 'myconference-myTracks', 'news',
'oauth-access_token', 'oauth-authorize', 'oauth-authorize_consumer', 'oauth-request_token', 'oauth-thirdPartyAuth',
'oauth-userThirdPartyAuth', 'paperReviewingDisplay', 'paperReviewingDisplay-downloadTemplate',
'paperReviewingDisplay-uploadPaper', 'payment', 'posterTemplates', 'posterTemplates-posterDesign',
'posterTemplates-posterPrinting', 'resetSessionTZ', 'roomBooking', 'roomBooking-acceptBooking',
'roomBooking-admin', 'roomBooking-adminLocation', 'roomBooking-bookingDetails',
'roomBooking-bookingForm', 'roomBooking-bookingList', 'roomBooking-bookRoom', 'roomBooking-cancelBooking',
'roomBooking-cancelBookingOccurrence', 'roomBooking-cloneBooking',
'roomBooking-deleteBooking', 'roomBooking-deleteCustomAttribute', 'roomBooking-deleteEquipment',
'roomBooking-deleteLocation', 'roomBooking-mapOfRooms', 'roomBooking-mapOfRoomsWidget',
'roomBooking-rejectBooking', 'roomBooking-rejectBookingOccurrence',
'roomBooking-roomDetails', 'roomBooking-roomList', 'roomBooking-roomStats',
'roomBooking-saveBooking', 'roomBooking-saveCustomAttributes', 'roomBooking-saveEquipment',
'roomBooking-saveLocation', 'roomBooking-search4Bookings', 'roomBooking-search4Rooms',
'roomBooking-setDefaultLocation', 'roomBooking-statement', 'roomBookingPluginAdmin',
'roomBookingPluginAdmin-zodbSave', 'roomMapper',
'roomMapper-creation', 'roomMapper-details', 'roomMapper-modify', 'roomMapper-performCreation',
'roomMapper-performModify', 'sessionDisplay', 'sessionDisplay-ical', 'sessionModifAC',
'sessionModifAC-setVisibility', 'sessionModifComm', 'sessionModifComm-edit', 'sessionModification',
'sessionModification-addContribs', 'sessionModification-close', 'sessionModification-contribAction',
'sessionModification-contribList', 'sessionModification-contribQuickAccess', 'sessionModification-contribsToPDF',
'sessionModification-editContrib', 'sessionModification-materials', 'sessionModification-materialsAdd',
'sessionModification-modify', 'sessionModification-open', 'sessionModification-participantList',
'sessionModifSchedule', 'sessionModifSchedule-fitSlot', 'sessionModifSchedule-slotCalc', 'sessionModifTools',
'sessionModifTools-delete', 'signIn', 'signIn-active', 'signIn-disabledAccount', 'signIn-sendActivation',
'signIn-sendLogin', 'signIn-sso', 'signIn-unactivatedAccount', 'subContributionDisplay',
'subContributionDisplay-marcxml', 'subContributionModification', 'subContributionModification-data',
'subContributionModification-materials', 'subContributionModification-materialsAdd',
'subContributionModification-modifData', 'subContributionTools', 'subContributionTools-delete', 'taskManager',
'trackAbstractModif', 'trackAbstractModif-abstractAction', 'trackAbstractModif-abstractToPDF',
'trackAbstractModif-accept', 'trackAbstractModif-commentEdit', 'trackAbstractModif-commentNew',
'trackAbstractModif-commentRem', 'trackAbstractModif-comments', 'trackAbstractModif-directAccess',
'trackAbstractModif-markAsDup', 'trackAbstractModif-proposeForOtherTracks', 'trackAbstractModif-proposeToBeAcc',
'trackAbstractModif-proposeToBeRej', 'trackAbstractModif-reject', 'trackAbstractModif-unMarkAsDup',
'trackModContribList', 'trackModContribList-contribAction', 'trackModContribList-contribQuickAccess',
'trackModContribList-contribsToPDF', 'trackModContribList-participantList', 'trackModifAbstracts',
'trackModifCoordination', 'trackModification', 'trackModification-modify', 'trackModification-performModify',
'updateNews', 'userAbstracts', 'userAbstracts-pdf', 'userAPI', 'userAPI-block', 'userAPI-create', 'userAPI-delete',
'userBaskets', 'userDashboard', 'userDetails', 'userList', 'userManagement',
'userManagement-switchAuthorisedAccountCreation', 'userManagement-switchModerateAccountCreation',
'userManagement-switchNotifyAccountCreation', 'userMerge', 'userPreferences', 'userRegistration',
'userRegistration-active', 'userRegistration-created', 'userRegistration-disable', 'userRegistration-UserExist',
'wcalendar', 'wcalendar-select', 'xmlGateway', 'xmlGateway-getCategoryInfo', 'xmlGateway-getStatsIndico',
'xmlGateway-getStatsRoomBooking', 'xmlGateway-loginStatus', 'xmlGateway-signIn', 'xmlGateway-signOut',
'xmlGateway-webcastForthcomingEvents', 'xmlGateway-webcastOnAir'
}
|
gpl-3.0
| -6,249,722,430,769,871,000
| 83.153257
| 119
| 0.785103
| false
| 3.743013
| false
| false
| false
|
merc-devel/merc
|
merc/features/rfc1459/nick.py
|
1
|
1773
|
import regex

from merc import errors
from merc import feature
from merc import message

MAX_NICKNAME_LENGTH = 12
NICKNAME_REGEX = regex.compile(r"^[\p{L}\p{So}_\[\]\\^{}|`][\p{L}\p{So}\p{N}_\[\]\\^{}|`-]*$")


class NickFeature(feature.Feature):
    NAME = __name__


install = NickFeature.install


class _Nick(message.Command):
    def handle_for(self, app, user, prefix):
        target = self.get_target(app, user)
        old_hostmask = target.hostmask

        if NICKNAME_REGEX.match(self.nickname) is None or \
                len(self.nickname) > MAX_NICKNAME_LENGTH:
            raise errors.ErroneousNickname

        app.users.rename(target, self.nickname)

        if target.is_registered:
            app.network.user_broadcast(target, old_hostmask, Nick(self.nickname))
            target.send(old_hostmask, Nick(self.nickname))
        else:
            target.registration_latch.decrement()


@NickFeature.register_user_command
class Nick(_Nick):
    NAME = "NICK"
    MIN_ARITY = 1

    def __init__(self, nickname, *args):
        self.nickname = nickname

    def as_command_params(self):
        return [self.nickname]

    def get_target(self, app, user):
        return user


@NickFeature.register_user_command
class SANick(_Nick):
    NAME = "SANICK"
    MIN_ARITY = 2

    def __init__(self, target, nickname, *args):
        self.target = target
        self.nickname = nickname

    def get_target(self, app, user):
        return app.users.get(self.target)

    @message.Command.requires_registration
    def handle_for(self, app, user, prefix):
        user.check_is_irc_operator()
        super().handle_for(app, user, prefix)


@NickFeature.hook("server.isupport.modify")
def modify_isupport(app, isupport):
    isupport["NICKLEN"] = MAX_NICKNAME_LENGTH


@NickFeature.hook("user.connect")
def on_connect(app, user):
    user.registration_latch.increment()
|
mit
| 8,278,732,483,026,297,000
| 22.025974
| 94
| 0.684715
| false
| 3.110526
| false
| false
| false
|
yang0110/comPy
|
modulation.py
|
1
|
10528
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import preprocessing
import pylab
import itertools
from line_coding import polar_nrz
import math
from scipy import signal
from scipy.integrate import simps
import numpy.matlib
from compy.bin2gray import *
# m: order of modulation
# input_bits_array: np.array of binary bits
__all__=['constellation','bpsk_mod','mpsk_mod','mpsk_ref_symbol','qam_ref_symbol','pam_ref_symbol','qam_mod','mpsk_dem',
'qam_dem','pam_mod','pam_dem','spatial_modulation_qam','sm_constellation','generalized_spatial_modulation_qam','gsm_ref_symbol_combination',
'gsm_look_up_table','mimo_look_up_table','ncr','Ber']
def constellation(data):
re=np.real(data)
im=np.imag(data)
plt.scatter(re,im,s=50)
plt.xlim(min(re)-1,max(re)+1)
plt.ylim(min(im)-1,max(im)+1)
plt.title('qam_%s'%(len(data)))
plt.show()
def bpsk_mod(input_bits_array):
bpsk=2*np.round(input_bits_array)-1
return bpsk
# output bits array [-1,1....]
def mpsk_mod(input_bits_array,m):
# m_array=[2.0,4.0,8.0,16.0]
m=float(m)
input_ints=bits_to_binary_to_int(input_bits_array,m)
I=np.cos(input_ints/m*2*np.pi+np.pi/4.0)
Q=np.sin(input_ints/m*2*np.pi+np.pi/4.0)
mpsk=I+1j*Q
return mpsk
def mpsk_ref_symbol(m):
m=float(m)
ref_bits=np.arange(m)
s_i=np.cos(ref_bits/m*2*np.pi+np.pi/4.0)
s_q=np.sin(ref_bits/m*2*np.pi+np.pi/4.0)
mpsk_ref_symbol=s_i+1j*s_q
return mpsk_ref_symbol
def qam_ref_symbol(m):
if m==8:
m=16
m=float(m)
ref_values=np.arange(1,np.sqrt(m))
ref_values=ref_values[0::2]
v1=ref_values
v2=ref_values*(-1)
ref=np.hstack((v1,v2))
ref_com=np.array(list(itertools.product(ref,repeat=2)))
ref_symbol=ref_com[:,0]+1j*ref_com[:,1]
qam=ref_symbol[np.where(abs(np.imag(ref_symbol))<=1)]
elif m==32:
m=64
m=float(m)
ref_values=np.arange(1,np.sqrt(m))
ref_values=ref_values[0::2]
v1=ref_values
v2=ref_values*(-1)
ref=np.hstack((v1,v2))
ref_com=np.array(list(itertools.product(ref,repeat=2)))
ref_symbol=ref_com[:,0]+1j*ref_com[:,1]
qam=ref_symbol[np.where(abs(ref_symbol)<7.07)]
else:
m=float(m)
ref_values=np.arange(1,np.sqrt(m))
ref_values=ref_values[0::2]
v1=ref_values
v2=ref_values*(-1)
ref=np.hstack((v1,v2))
ref_com=np.array(list(itertools.product(ref,repeat=2)))
ref_symbol=ref_com[:,0]+1j*ref_com[:,1]
qam=ref_symbol
return qam
def pam_ref_symbol(m,ini_phase):
ref_symbol=np.arange(-(m-1),m,2)*np.exp(1j*ini_phase)
return ref_symbol
def qam_mod(input_bits_array,m,type='binary'):
#m_array=[4.0,16.0,64.0]
m=float(m)
ref_symbol=qam_ref_symbol(m)
if type=='binary':
input_ints=bits_to_binary_to_int(input_bits_array,m)
elif type=='gray':
input_ints=bits_to_gray_to_int(input_bits_array,m)
else:
print 'error type: type must be "binary" or "gray"'
input_sym=ref_symbol[input_ints]
qam_symbol=input_sym
return qam_symbol
def mpsk_dem(received_symbols,m):
m=float(m)
mpsk_symbol=mpsk_ref_symbol(m)
mpsk_symbol=np.reshape(mpsk_symbol,(1,len(mpsk_symbol)))
repeat_mpsk=np.repeat(mpsk_symbol,len(received_symbols),axis=0)
reshape_received=np.reshape(received_symbols,(len(received_symbols),1))
repeat_received=np.repeat(reshape_received,mpsk_symbol.shape[1],axis=1)
distance=np.sqrt((np.real(repeat_received)-np.real(repeat_mpsk))**2+
(np.imag(repeat_received)-np.imag(repeat_mpsk))**2)
min_distance_index=np.argmin(distance,axis=1)
return min_distance_index
def qam_dem(received_symbols,m):
m=float(m)
qam_symbol=qam_ref_symbol(m)
qam_symbol=np.reshape(qam_symbol,(1,len(qam_symbol)))
repeat_qam=np.repeat(qam_symbol,len(received_symbols),axis=0)
reshape_received=np.reshape(received_symbols,(len(received_symbols),1))
repeat_received=np.repeat(reshape_received,qam_symbol.shape[1],axis=1)
distance=np.sqrt((np.real(repeat_received)-np.real(repeat_qam))**2+
(np.imag(repeat_received)-np.imag(repeat_qam))**2)
min_distance_index=np.argmin(distance,axis=1)
return min_distance_index
def pam_mod(input_bits,m,ini_phase,type='binary'):
m=float(m)
if type=='binary':
input_ints=bits_to_binary_to_int(input_bits,m)
elif type=='gray':
input_ints=bits_to_gray_to_int(input_bits,m)
else:
print 'error type: type must be "binary" or "gray"'
ref_symbol=np.arange(-(m-1),m,2)*np.exp(1j*ini_phase)
pam_symbol=ref_symbol[input_ints]
return pam_symbol
def pam_dem(received_symbols,m,ini_phase):
ref_symbol=np.arange(-(m-1),m,2)*np.exp(1j*ini_phase)
ref_symbol=np.reshape(ref_symbol,(1,len(ref_symbol)))
repeat_pam=np.repeat(ref_symbol,len(received_symbols),axis=0)
reshape_received=np.reshape(received_symbols,(len(received_symbols),1))
repeat_received=np.repeat(reshape_received,ref_symbol.shape[1],axis=1)
distance=np.sqrt((np.real(repeat_received)-np.real(repeat_pam))**2+
(np.imag(repeat_received)-np.imag(repeat_pam))**2)
min_distance_index=np.argmin(distance,axis=1)
received_ints=min_distance_index
return received_ints
def spatial_modulation_qam(input_bits,nt,m,type='None'):
k=np.log2(m)+np.log2(nt)
a=np.log2(m)
b=np.log2(nt)
nb=len(input_bits)
reshape_input_bits=np.transpose(np.reshape(input_bits,(nb/k,k)))
symbol_input_bits=reshape_input_bits[:a,:]
attenna_input_bits=reshape_input_bits[a:,:]
symbol_input_bits2=np.reshape(np.transpose(symbol_input_bits),(1,
symbol_input_bits.shape[0]*symbol_input_bits.shape[1])).ravel()
attenna_input_bits2=np.reshape(np.transpose(attenna_input_bits),(1,
attenna_input_bits.shape[0]*attenna_input_bits.shape[1])).ravel()
if type=='None' or type=='binary':
symbol_input_int=bits_to_binary_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_binary_to_int(attenna_input_bits2,nt)
elif type=='gray':
symbol_input_int=bits_to_gray_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_gray_to_int(attenna_input_bits2,nt)
else:
print 'error type: type must be "binary" or "gray"'
norm_ref_symbol=qam_ref_symbol(m)
norm_input_symbol=norm_ref_symbol[symbol_input_int]
symbol_and_attenna=np.vstack((norm_input_symbol,attenna_input_int))
X=np.zeros((nt,symbol_and_attenna.shape[1]))*(1j)
for i in np.arange(symbol_and_attenna.shape[1]):
attenna_number=int(symbol_and_attenna[1,i])
X[attenna_number,i]=symbol_and_attenna[0,i]
sm_modulated_symbol=X
return sm_modulated_symbol
def sm_constellation(ref_symbol,nt):
all_symbol_position=np.zeros((nt,nt*len(ref_symbol)))*1j
for j in np.arange(len(ref_symbol)):
for i in np.arange(j*nt,(j+1)*nt):
all_symbol_position[i-j*nt,i]=ref_symbol[j]
return all_symbol_position
def generalized_spatial_modulation_qam(input_bits,nt,n_act,m,type='None'):
nb_attenna_com=ncr(nt,n_act)
a=np.log2(m)
b=np.log2(2**np.floor(np.log2(nb_attenna_com)))
nb=len(input_bits)
k=float(a+b)
reshape_input_bits=np.transpose(np.reshape(input_bits,((nb/k),k)))
symbol_input_bits=reshape_input_bits[:a,:]
attenna_input_bits=reshape_input_bits[a:,:]
symbol_input_bits2=np.reshape(np.transpose(symbol_input_bits),(1,
symbol_input_bits.shape[0]*symbol_input_bits.shape[1])).ravel()
attenna_input_bits2=np.reshape(np.transpose(attenna_input_bits),(1,
attenna_input_bits.shape[0]*attenna_input_bits.shape[1])).ravel()
if type=='None' or type=='binary':
symbol_input_int=bits_to_binary_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_binary_to_int(attenna_input_bits2,2**b)
elif type=='gray':
symbol_input_int=bits_to_gray_to_int(symbol_input_bits2,m)
attenna_input_int=bits_to_gray_to_int(attenna_input_bits2,2**b)
else:
print 'error type: type must be "binary" or "gray"'
norm_ref_symbol=qam_ref_symbol(m)
norm_input_symbol=norm_ref_symbol[symbol_input_int]
symbol_and_attenna=np.vstack((norm_input_symbol,attenna_input_int))
attenna_com=np.array(list(itertools.combinations(np.arange(nt),n_act)))
nb_com=np.reshape(np.arange(len(attenna_com)),(len(attenna_com),1))
nb_and_com=np.hstack((nb_com,attenna_com))
X=np.zeros((nt,symbol_and_attenna.shape[1]))*(1j)
for i in np.arange(symbol_and_attenna.shape[1]):
attenna_number=(nb_and_com[symbol_and_attenna[1,i],1:]).astype(int)
X[attenna_number,i]=symbol_and_attenna[0,i]
return X
def gsm_ref_symbol_combination(nt,n_act,ref_symbol):
attenna_combination=np.array(list(itertools.combinations(np.arange(nt),n_act)))
b=2**np.floor(np.log2(len(attenna_combination)))
attenna_combination=attenna_combination[:b,:]
symbol_combination=np.reshape(ref_symbol,(len(ref_symbol),1))
symbol_attenna_combination=np.array(list(itertools.product(symbol_combination,attenna_combination)))
look_up_table1=np.transpose(symbol_attenna_combination)
ref_symbol_combination=np.zeros((nt,look_up_table1.shape[1]))*1j
for i in np.arange(look_up_table1.shape[1]):
ref_symbol_combination[look_up_table1[1,i][0],i]=look_up_table1[0,i][0]
ref_symbol_combination[look_up_table1[1,i][1],i]=look_up_table1[0,i][0]
return ref_symbol_combination
def gsm_look_up_table(nt,n_act,ref_symbol):
b=2**np.floor(np.log2(ncr(nt,n_act)))
symbol_int_combination=np.arange(len(ref_symbol))
symbol_attenna_int_combination=np.array(list(itertools.product(symbol_int_combination,np.arange(b))))
return symbol_attenna_int_combination.astype(int)
def Ber(input_bits,cap_bits):
ber=np.sum(cap_bits!=input_bits)/float(len(input_bits))
return ber
def ncr(n,r):
import math
f=math.factorial
return f(n)/f(r)/f(n-r)
def mimo_look_up_table(nt,ref_symbol):
symbol_order=np.reshape(np.arange(len(ref_symbol)),(1,len(ref_symbol)))
row_1=np.repeat(symbol_order,4,axis=1)
attenna_order=np.reshape(np.arange(nt),(1,nt))
row_2=np.reshape(np.repeat(attenna_order,len(ref_symbol),axis=0),(1,nt*len(ref_symbol)))
look_up_table=np.vstack((row_1,row_2))
look_up_table=np.transpose(look_up_table)
return look_up_table
# input_bits=np.random.randint(2,size=300)
# pam_modulation=pam_mod(input_bits,8,np.pi/4.0,'binary')
# constellation(pam_modulation)
# dem_pam=pam_dem(pam_modulation,8,np.pi/4.0)
# input_ints=bits_to_binary_to_int(input_bits,8)
# ber=np.sum(input_ints!=dem_pam)
# print ber
# input_bits=np.random.randint(2,size=300)
# pam_modulation=pam_mod(input_bits,8,np.pi/4.0,'gray')
# constellation(pam_modulation)
# dem_pam=pam_dem(pam_modulation,8,np.pi/4.0)
# input_ints=bits_to_gray_to_int(input_bits,8)
# ber=np.sum(input_ints!=dem_pam)
# print ber
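# A similar round-trip check for the QAM helpers above (hypothetical usage,
# mirroring the commented PAM examples; 16-QAM with binary mapping assumed):
# input_bits=np.random.randint(2,size=400)
# qam_symbols=qam_mod(input_bits,16,'binary')
# dem_ints=qam_dem(qam_symbols,16)
# input_ints=bits_to_binary_to_int(input_bits,16)
# print np.sum(input_ints!=dem_ints)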
|
mit
| 2,672,909,887,420,412,000
| 33.567568
| 140
| 0.698898
| false
| 2.385679
| false
| false
| false
|
ivanbgd/Genetic_Algorithm_01
|
GA01v2.py
|
1
|
7880
|
# Python 2.7
"""
An Application of Genetic Algorithms
Task:
Inscribe a triangle of the maximum area in a given ellipse.
Ellipse is defined as: (x/a)^2 + (y/b)^2 = 1
"""
import math
import matplotlib.pyplot as plt
import numpy as np
import random
from timeit import default_timer as timer
tstart = timer()
# Definition of parameters of the GA
N = 100 # number of units (chromosomes)
L = 36 # length of a chromosome (12 bits per vertex)
pc = 0.8 # crossover probability
pm = 0.001 # mutation probability
G = 0.8 # generation gap
# Parameters of the ellipse
a = 5
b = 3
#################
# The algorithm #
#################
# Maximum number with L bits
maxnum = 2**L - 1
# We'll use these a lot
a2 = float(a * a)
b2 = float(b * b)
a2b2 = a2 * b2
Lthird = L//3
twoLthirds = 2 * Lthird
maxl3 = 2**Lthird - 1
piHalf = np.pi / 2
threePiHalf = 3. * piHalf
a2rec = 1. / a2
mask = long((1 << Lthird) - 1)
# The first generation - Array of N chromosomes, each consisting of L bits
newgen = [random.randint(0, maxnum) for i in range(N)]
oldgen = np.empty(N, dtype = np.uint64)
# Vertices of the triangles; a vertex is defined by its angle to the positive x-axis in radians
V1 = np.empty(N)
V2 = np.empty(N)
V3 = np.empty(N)
# Coordinates of the vertices
x1 = np.empty(N)
y1 = np.empty(N)
x2 = np.empty(N)
y2 = np.empty(N)
x3 = np.empty(N)
y3 = np.empty(N)
# Fitness function
f = np.empty(N)
# Array that holds the maximum value of fitness function in every generation
Fmax = []
# generation number counter
gencnt = 0
# condition for staying in the loop
cond = True
#The main loop
while cond:
# Evaluation of the newly formed generation
for i in range(N):
V1[i] = (float((newgen[i] >> twoLthirds) & mask) / maxl3) * 2.0 * np.pi
V2[i] = (float((newgen[i] >> Lthird) & mask) / maxl3) * 2.0 * np.pi
V3[i] = (float(newgen[i] & mask) / maxl3) * 2.0 * np.pi
# Coordinates of vertex V1
if (V1[i] < piHalf) or (V1[i] > threePiHalf):
x = math.sqrt(a2b2 / (a2 * math.tan(V1[i])**2 + b2))
else:
x = -math.sqrt(a2b2 / (a2 * math.tan(V1[i])**2 + b2))
y = x * math.tan(V1[i])
x1[i] = x
y1[i] = y
# Coordinates of vertex V2
if (V2[i] < piHalf) or (V2[i] > threePiHalf):
x = math.sqrt(a2b2 / (a2 * math.tan(V2[i])**2 + b2))
else:
x = -math.sqrt(a2b2 / (a2 * math.tan(V2[i])**2 + b2))
y = x * math.tan(V2[i])
x2[i] = x
y2[i] = y
# Coordinates of vertex V3
if (V3[i] < piHalf) or (V3[i] > threePiHalf):
x = math.sqrt(a2b2 / (a2 * math.tan(V3[i])**2 + b2))
else:
x = -math.sqrt(a2b2 / (a2 * math.tan(V3[i])**2 + b2))
y = x * math.tan(V3[i])
x3[i] = x
y3[i] = y
# Lengths of the triangle's edges
la = math.sqrt((x2[i] - x1[i])**2 + (y2[i] - y1[i])**2)
lb = math.sqrt((x3[i] - x1[i])**2 + (y3[i] - y1[i])**2)
lc = math.sqrt((x3[i] - x2[i])**2 + (y3[i] - y2[i])**2)
# Semiperimeter of the triangle
s = (la + lb + lc) / 2.
# Fitness function (Heron's formula)
f[i] = math.sqrt(s * (s - la) * (s - lb) * (s - lc))
# The highest (best) value of f
maxf = np.amax(f)
# Index of the highest value of f
maxfindex = np.argmax(f)
Fmax.append(maxf)
# Plotting the result
plt.figure("An Application of Genetic Algorithms")
plt.hold(True)
plt.title("Generation number {}\nThe best result: {:.4f}".format(gencnt + 1, maxf))
plt.xlim(-a - 1, a + 1)
plt.ylim(-b - 1, b + 1)
# Drawing the ellipse
ellipse = np.array([[0.] * 361, [0.] * 361], dtype = float)
for i in range(361):
theta = 2.*np.pi*i/360.
if (theta <= piHalf) or (theta > threePiHalf):
x = math.fabs(math.sqrt(1./(a2rec + (math.tan(theta)**2)/b2)))
else:
x = -math.fabs(math.sqrt(1./(a2rec + (math.tan(theta)**2)/b2)))
y = x * math.tan(theta)
ellipse[0][i] = x
ellipse[1][i] = y
plt.plot(ellipse[0], ellipse[1], 'g', linewidth = 4.0) # thick green line
# Drawing the triangles that we got
for i in range(N):
if f[i] == maxf:
# The best chromosome - the triangle with the largest area
plt.plot([x1[i], x2[i], x3[i], x1[i]], [y1[i], y2[i], y3[i], y1[i]], 'r', linewidth = 4.0) # thick red line
else:
# The other chromosomes (triangles); they are all inscribed in the ellipse, but they don't have the largest area
plt.plot([x1[i], x2[i], x3[i], x1[i]], [y1[i], y2[i], y3[i], y1[i]], 'b', linewidth = 0.5) # thin blue line
# Hold the graph for a given amount of time in seconds
plt.pause(0.1)
plt.hold(False)
plt.plot()
### Natural selection by the roulette wheel method ###
oldgen = np.copy(newgen)
# Cumulative function
cumf = f.cumsum()
# We let the best chromosome pass to the next generation directly.
newgen[0] = oldgen[maxfindex]
# We also let another randomly chosen (1-G)*N-1 chromosomes pass. Probability of their selection depends on f(i).
for i in range(1, int(round((1-G)*N))):
tmp = np.random.random() * cumf[-1]
firstPositive, firstPositiveIndex = np.amax(np.sign(cumf - tmp)), np.argmax(np.sign(cumf - tmp))
newgen[i] = oldgen[firstPositiveIndex]
### The rest of the new generation is formed by crossover (crossbreeding) ###
# There are two parents, and two offsprings
for i in range((N - int(round((1-G)*N)))//2):
tmp = np.random.random() * cumf[-1]
firstPositive, firstPositiveIndex = np.amax(np.sign(cumf - tmp)), np.argmax(np.sign(cumf - tmp))
parent1 = oldgen[firstPositiveIndex]
tmp = np.random.random() * cumf[-1]
firstPositive, firstPositiveIndex = np.amax(np.sign(cumf - tmp)), np.argmax(np.sign(cumf - tmp))
parent2 = oldgen[firstPositiveIndex]
if np.random.random() < pc:
# crossover
crossPoint = np.random.randint(1, L) # the crossover point can be after MSB and before LSB
maskLo = long((1 << (L - crossPoint)) - 1)
maskHi = maxnum & (~maskLo)
newgen[int(round((1-G)*N)) + 2*i] = (parent1 & maskHi) | (parent2 & maskLo) # offspring 1
newgen[int(round((1-G)*N)) + 2*i + 1] = (parent2 & maskHi) | (parent1 & maskLo) # offspring 2
else:
# no crossover
newgen[int(round((1-G)*N)) + 2*i] = parent1 # offspring 1
newgen[int(round((1-G)*N)) + 2*i + 1] = parent2 # offspring 2
### Mutation ###
for i in range(int(L * N * pm)):
chromosomeIndex = np.random.randint(N)
bitPosition = np.random.randint(L)
maskM = 1 << (L - bitPosition - 1)
newgen[chromosomeIndex] ^= maskM
gencnt += 1
# Exit condition - We want fitness functions of the first numchrom chromosomes to be inside of a given difference.
numchrom = 10
difference = 0.001
f.sort()
if abs(f[-1] - f[-numchrom]) < difference:
cond = False
tend = timer()
print("The result is: {}".format(max(Fmax)))
print("The algorithm took {} generations, and {} seconds to complete.".format(gencnt, round(tend - tstart, 3)))
print("The maximum value of fitness function through generations:\n{}".format(Fmax))
plt.figure("The maximum value of fitness function through generations")
plt.title("The maximum value of fitness function through generations")
plt.xlim(0, gencnt - 1)
plt.plot(Fmax)
plt.show()
|
mit
| -7,571,025,908,308,426,000
| 30.970711
| 124
| 0.55736
| false
| 2.905605
| false
| false
| false
|
ayy1337/CryptoPriceWatcher
|
grabtrex.py
|
1
|
5552
|
#!/usr/bin/python3
'''
Version: 1.0.02
Author: ayy1337
Licence: GNU GPL v3.0
'''
import sys
import time
import os
import datetime
import urllib.request
import collections
from operator import attrgetter
from operator import itemgetter
import shelve
from trexapi import Bittrex
condperc = .01
mins = 5
period = mins * 60
timebetweenticks = 2 #in seconds
minutesofdatatokeep = 30
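# condperc is the fractional price move within `period` seconds that flags a
# market as a gainer/loser in checkcond(); minutesofdatatokeep bounds the
# per-coin deque of minute candles built by the coin class below.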
cwd = os.getcwd()
if os.name in ['posix','linux']:
databasepath = cwd + "/db"
else:
databasepath = cwd + "\\db"
class minute:
def __init__(self, ticker, o, change, timestamp, volume, prevday):
self.ticker = ticker
self.change = float(change)
self.average = self.open = self.high = self.low = self.close = float(o)
self.volume = float(volume)
self.timestamp = int(timestamp)
self.numprices = 1
self.prevday = prevday
class coin:
def __init__(self, ticker):
self.ticker = ticker
self.minutes = collections.deque(maxlen = (int(minutesofdatatokeep) + 1))
def addminute(self,ticker, timestamp):
i = ticker
t = i['MarketName']
try:
price = float(i['Last'])
prevday = float(i['PrevDay'])
volume = float(i['BaseVolume'])
except:
price = 0
prevday = 0
volume = 0
try:
change = (price/prevday) -1
except:
change = 0
self.minutes.append(minute(t,price,change,timestamp,volume, prevday)) #ticker, price, change, timestamp, volume, prevday
def updateminute(self,ticker,timestamp):
currmin = self.minutes[-1]
if (timestamp - currmin.timestamp) > 60:
self.addminute(ticker,timestamp)
else:
if ticker['Last'] == None:
print("New market added: {}".format(ticker["MarketName"]))
try:
last = float(ticker['Last'])
except:
last = 0
currmin.close = last
a = (currmin.average * currmin.numprices) + last
currmin.numprices += 1
currmin.average = a / currmin.numprices
if last > currmin.high:
currmin.high = last
if last < currmin.low:
currmin.low = last
try:
currmin.change = (currmin.close/currmin.prevday) -1
except:
currmin.change = 0
timestamp = int(time.time())
class updater:
def __init__(self):
self.coins = {}
try:
d = shelve.open(databasepath)
self.coins = d["trexcoins"]
d.close()
except:
pass
self.bittrex = Bittrex("","")
self.pcstatus = None
def update(self):
global timestamp
timestamp = int(time.time())
try:
self.coins = self.updatecoins(self.coins)
except:
return 1
gainers, losers = self.checkcond(self.coins)
try:
d = shelve.open(databasepath)
d['trexcoins'] = self.coins
d.close()
except:
pass
gainers = sorted(gainers, key=itemgetter(6,4))
losers = sorted(losers, key=itemgetter(6,4))
return gainers,losers
def updatecoins(self, coins):
data = self.bittrex.get_market_summaries()
if data['success'] == 1:
tickers = data['result']
else:
return
timestamp = int(time.time())
for item in tickers:
t = item['MarketName']
if item['MarketName'] not in coins:
coins[item['MarketName']] = coin(item['MarketName'])
if len(coins[t].minutes) > 0:
coins[t].updateminute(item,timestamp)
else:
coins[t].addminute(item, timestamp)
return coins
def checkcond(self, coins):
out = []
gainers = []
losers = []
for key in coins:
coin = coins[key]
mins = coin.minutes
tmp = []
endtime = mins[-1].timestamp
largestgain = 0
largestloss = 0
periodchange = 0
lowvol = ""
splt = key.split('-')
suffix = splt[0]
coinname = splt[1]
if suffix != "BTC":
continue
if 100 < mins[-1].volume < 500:
lowvol = 'l'
elif mins[-1].volume <= 100:
lowvol = 'v'
for i in range(1,len(mins)):
tick = mins[-i]
if (endtime - tick.timestamp) <= period:
tmp.append(tick) #tmp[0] is most recent minute, tmp[-1] is oldest/least-recent minute
else:
break
for i in range(1,len(tmp)+1):
for n in range(i+1, len(tmp)+1):
root = tmp[-i]
end = tmp[-n]
try:
changeup = (end.high - root.low) / root.low
except:
changeup = 0
if changeup > largestgain:
largestgain = changeup
try:
changedown = (end.low-root.high)/root.high
except:
changedown = 0
if changedown < largestloss:
largestloss = changedown
if(len(tmp) > 0):
try:
periodchange = ((mins[-1].close-mins[0].average) / mins[0].average) * 100
except:
periodchange = 0
else:
continue
if (largestgain >= condperc) or (periodchange > 2):
gainers.append([coinname,largestgain*100,mins[-1].close,suffix, periodchange, int(mins[-1].change * 100), lowvol])
if ((largestloss*-1) >= condperc) or (periodchange < -2):
losers.append([coinname, largestloss * 100, mins[-1].close, suffix, periodchange, int(mins[-1].change * 100), lowvol])
return gainers, losers
def getfav(self, ticker):
splt = ticker.split('-')
c = self.coins[ticker]
mins = c.minutes
oldprice = mins[-(min(len(mins),5))].open
currprice = mins[-1].close
fiveminchange = ((currprice/oldprice)-1) * 100
oldprice = mins[-(min(len(mins),30))].open
thirtyminchange = ((currprice/oldprice)-1)*100
price = currprice
volume = mins[-1].volume
if volume > 500:
v = ' '
elif volume >100:
v = 'l'
else:
v = 'v'
tfhourchange = mins[-1].change * 100
return [splt[1]+'(b)', fiveminchange, price, thirtyminchange, tfhourchange, v]
def getlast(self, ticker):
return self.coins[ticker].minutes[-1].close
if __name__ == "__main__":
a = updater()
while 1:
a.update()
time.sleep(2)
|
gpl-3.0
| -6,008,384,891,887,564,000
| 22.629787
| 122
| 0.634726
| false
| 2.835546
| false
| false
| false
|
noinil/euler_pysolution
|
python_solutions/p098_Anagramic_squares.py
|
1
|
1634
|
#!/usr/bin/env python3

from itertools import permutations as pm


def is_sqr(n):
    i = round(n**0.5)
    if i ** 2 == n:
        return True
    else:
        return False


def main():
    words = []
    with open("../data/p98_words.txt", "r") as fin:
        for lines in fin:
            for i in lines.split(','):
                if i != i[::-1]:
                    words.append(i[1:-1])

    vals = []
    anagramic_pairs = []
    c_vals = []
    for i in words:
        t = 0
        for c in i[:]:
            t += 10**(ord(c) - ord('A'))
        if t in vals:
            if t in c_vals:
                anagramic_pairs.append((words[vals.index(t, vals.index(t)+1)], i))
            c_vals.append(t)
            anagramic_pairs.append((words[vals.index(t)], i))
        vals.append(t)

    sqr_list = [i**2 for i in range(0, 4*10**4) if i**2 < 10**9]
    digi_list = [i for i in range(0, 10)]
    for i in anagramic_pairs:
        worda, wordb = i[0], i[1]
        chl = []
        for c in worda:
            if c not in chl:
                chl.append(c)
        n = len(chl)
        print(worda, wordb, n)
        pmiter = pm(digi_list, n)
        for j in pmiter:
            wa, wb = worda, wordb
            for k in range(0, n):
                wa = wa.replace(chl[k], str(j[k]))
                wb = wb.replace(chl[k], str(j[k]))
            if wa[0] == '0' or wb[0] == '0':
                continue
            va, vb = int(wa), int(wb)
            # if va in sqr_list and vb in sqr_list:
            if is_sqr(va) and is_sqr(vb):
                print(worda, wordb, va, vb)


if __name__ == '__main__':
    main()
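# Note on the signature built in main(): summing 10**(ord(c) - ord('A')) over a
# word's letters depends only on which letters occur and how often, so two words
# get the same sum exactly when they are anagrams (assuming, as holds for this
# word list, that no letter appears ten or more times in a word).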
|
gpl-2.0
| -1,285,471,491,725,333,200
| 26.694915
| 82
| 0.442472
| false
| 3.03154
| false
| false
| false
|
nofdev/fastforward
|
fastforward/horizon.py
|
1
|
1829
|
import sys

from fastforward.cliutil import priority
from playback.api import Horizon


def install(args):
    try:
        target = Horizon(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
    except AttributeError:
        sys.stderr.write('No hosts found. Please use the --hosts param.')
        sys.exit(1)
    target.install(
        args.openstack_host,
        args.memcached_servers,
        args.time_zone)


@priority(20)
def make(parser):
    """provision Horizon with HA"""
    s = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )

    def install_f(args):
        install(args)

    install_parser = s.add_parser('install', help='install horizon')
    install_parser.add_argument('--openstack-host',
        help='configure the dashboard to use OpenStack services on the controller node e.g. CONTROLLER_VIP',
        action='store',
        default=None,
        dest='openstack_host')
    install_parser.add_argument('--memcached-servers',
        help='django memcache e.g. CONTROLLER1:11211,CONTROLLER2:11211',
        action='store',
        default=None,
        dest='memcached_servers')
    install_parser.add_argument('--time-zone',
        help='the timezone of the server. This should correspond with the timezone of your entire OpenStack installation e.g. Asia/Shanghai',
        action='store',
        default=None,
        dest='time_zone')
    install_parser.set_defaults(func=install_f)
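# Hypothetical invocation through the enclosing fastforward CLI (the command
# name, host names and credentials are placeholders; --user/--hosts/--key-filename
# are assumed to be defined by the top-level parser):
#   fastforward horizon install --user ubuntu --hosts controller1,controller2 \
#       --openstack-host CONTROLLER_VIP \
#       --memcached-servers CONTROLLER1:11211,CONTROLLER2:11211 \
#       --time-zone Asia/Shanghai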
|
mit
| 66,383,570,392,045,620
| 40.568182
| 165
| 0.545653
| false
| 4.701799
| false
| false
| false
|
yausern/stlab
|
devices/Cryocon_44C.py
|
1
|
3712
|
import visa
import numpy as np
import time
from stlab.devices.instrument import instrument


class Cryocon_44C(instrument):
    def __init__(self,addr='TCPIP::192.168.1.5::5000::SOCKET',reset=True,verb=True,**kwargs):
        #RST reboots the instrument. Avoid... Also needs special read_termination = '\r\n'
        if 'read_termination' not in kwargs:
            kwargs['read_termination'] = '\r\n'
        super().__init__(addr,reset=False,verb=verb,**kwargs)
        self.id()
        self.channellist=['A','B','C','D']
        if reset:
            for channel in self.channellist: #set all units to K
                self.write('INP ' + channel + ':UNIT K')

    def write(self,mystr): #REQUIRES SPECIAL WRITE WITH OPC CHECK...
        self.query(mystr + ';*OPC?')

    def GetTemperature(self,channel='C'):
        mystr = 'INP? ' + channel
        curr = self.query(mystr)
        try:
            curr = float(curr)
        except ValueError:
            print('Channel ',channel,' out of range')
            curr = -20.
        return curr

    def GetTemperatureAll(self):
        result = []
        for chan in self.channellist:
            result.append(self.GetTemperature(chan))
        return result

    # NOTE: SetSetPoint/GetSetPoint are redefined below; the later definitions
    # (which strip the unit from the instrument reply) are the ones in effect.
    def SetSetPoint(self,setp,loop=2):
        mystr = 'LOOP ' + str(loop) + ':SETP ' + str(setp)
        self.write(mystr)

    def GetSetPoint(self,loop=2):
        mystr = 'LOOP ' + str(loop) + ':SETP?'
        setp = self.query(mystr)
        return float(setp)

    def SetSetPoint(self,setp,loop=2):
        mystr = 'LOOP ' + str(loop) + ':SETP ' + str(setp)
        self.write(mystr)

    def GetSetPoint(self,loop=2):
        mystr = 'LOOP ' + str(loop) + ':SETP?'
        setp = self.query(mystr)
        channel = self.query('LOOP '+ str(loop) +':SOUR?')
        unit = self.query('INP ' + str(channel) + ':UNIT?')
        print(setp)
        return float(setp.strip(unit))

    def SetPman(self,setp,loop=2):
        mystr = 'LOOP ' + str(loop) + ':PMAN ' + str(setp)
        self.write(mystr)

    def GetPman(self,loop=2):
        mystr = 'LOOP ' + str(loop) + ':PMAN?'
        setp = self.query(mystr)
        return float(setp)

    def ControlOn(self):
        self.write('CONT')
        return

    def ControlOff(self):
        self.write('STOP')
        return

    def SetLoopMode(self,loop,mode): #OFF, PID, MAN, TABLE, RAMPP
        self.write('LOOP ' + str(loop) + ':TYPE ' + str(mode))
        return

    def WaitForTStable(self,loop=2,tol=0.05,timeout=300.,tsettle=40.):
        channel = self.query('LOOP ' + str(loop) + ':SOUR?') #Get channel on chosen loop
        channel = channel.strip('CH')
        Tset = self.GetSetPoint(loop)
        t0 = time.time()
        tnow = time.time()
        tstablestart = None
        success = False
        while tnow-t0 < timeout:
            tnow = time.time()
            TT = self.GetTemperature(channel) #Get current temperature
            if abs(TT-Tset)<tol:
                if tstablestart == None:
                    tstablestart = tnow
                    print('T in tolerance. Settling...')
            elif abs(TT-Tset)>=tol:
                if tstablestart != None:
                    print('T left tolerance')
                    tstablestart = None
                continue
            if tnow-tstablestart > tsettle:
                success = True
                break
            time.sleep(0.2)
        if success:
            print("Channel " + channel + " STABLE at " + str(Tset) + ' K')
        else:
            print("Channel " + channel + " UNSTABLE for " + str(Tset) + ' K')
        return success

    def GetMetadataString(self): #Should return a string of metadata adequate to write to a file
        pass
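# Minimal usage sketch (the VISA address is the constructor default; the loop
# number and set point are assumptions for illustration):
#   tc = Cryocon_44C(reset=False)
#   print(tc.GetTemperatureAll())
#   tc.SetSetPoint(3.0, loop=2)
#   tc.ControlOn()
#   tc.WaitForTStable(loop=2)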
|
gpl-3.0
| 2,980,873,264,327,063,000
| 36.494949
| 96
| 0.545797
| false
| 3.555556
| false
| false
| false
|
luozhaoyu/leetcode
|
valid_number.py
|
1
|
2387
|
# -*- coding: utf-8 -*-
"""
    valid_number.py
    ~~~~~~~~~~~~~~

    Check whether a string represents a valid number (LeetCode "Valid Number").
"""
import re


class Solution:
    # @param s, a string
    # @return a boolean
    def isNumber(self, s):
        s = s.strip().lower()
        if len(s) == 0:
            return False
        if s.count('.') > 1 or s.count('e') > 1:
            return False
        if s.startswith("-") or s.startswith("+"):
            s = s[1:]

        if s.isdigit():
            return True
        elif s.find("e") >= 0:
            front, back = s.split('e')
            #print front, back
            if self.isDecimal(front) and self.isIntegerWithFrontZero(back):
                return True
            else:
                return False
        elif self.isZero(s):
            return True
        else:
            return self.isDecimal(s)

    def isZero(self, s):
        if re.search(r"\.[0]+", s) or re.search(r"[0]\.", s) or s == "0":
            return True
        else:
            return False

    def isIntegerWithFrontZero(self, s):
        if s.startswith('-') or s.startswith('+'):
            s = s[1:]
        if re.search(r"^\d+$", s):
            return True
        else:
            return False

    def isDecimal(self, s):
        if s.startswith('-') or s.startswith('+'):
            s = s[1:]
        if re.search(r"^[0]{0,1}\.\d*[1-9]+\d*$", s):
            return True
        elif re.search(r"^[1-9]\d*\.{0,1}\d*$", s):
            return True
        else:
            return False


def _main(argv):
    s = Solution()
    print s.isNumber("3")
    print s.isNumber("0.1")
    print s.isNumber(".1")
    print s.isNumber(" 0.1")
    print s.isNumber("2e10")
    print -1, s.isNumber("-1")
    print "+1.0", s.isNumber("+1.0")
    print s.isNumber("46.0e7")
    print s.isNumber("46.e7")
    print s.isNumber("3.")
    print s.isNumber(".2e7")
    print s.isNumber(".0")
    print s.isNumber(".00")
    print s.isNumber("01.")
    print s.isNumber("3")
    print s.isNumber("1 a")
    print s.isNumber("abc")
    print s.isNumber("..2")
    print s.isNumber("3..2")
    print s.isNumber("")
    print s.isNumber(".")
    print s.isNumber(". 0e7")
    print s.isNumber(".0e7")
    print s.isNumber(".e7")
    print s.isNumber("e7")
    print s.isNumber("ee")
    print s.isNumber("0..")


if __name__ == '__main__':
    import sys
    _main(sys.argv)
|
mit
| -6,678,119,990,164,091,000
| 22.87
| 75
| 0.485547
| false
| 3.315278
| false
| false
| false
|
Orav/kbengine
|
kbe/res/scripts/common/Lib/site-packages/pip/vcs/git.py
|
1
|
8092
|
import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
bundle_file = 'git-clone.txt'
guide = ('# This was a Git repo; to make it a repo again run:\n'
'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
super(Git, self).__init__(url, *args, **kwargs)
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
if url_match:
url = url_match.group(1).strip()
rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
if rev_match:
rev = rev_match.group(1).strip()
if url and rev:
return url, rev
return None, None
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
call_subprocess(
[self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
return rev_options
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '-q', url, dest])
#: repo may contain submodules
self.update_submodules(dest)
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.get_revision(dest).startswith(rev_options[0]):
call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = call_subprocess(
[self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
output = call_subprocess([self.cmd, 'show-ref'],
show_stdout=False, cwd=location)
rv = {}
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
ref = ref.strip()
ref_name = None
if ref.startswith('refs/remotes/'):
ref_name = ref[len('refs/remotes/'):]
elif ref.startswith('refs/heads/'):
ref_name = ref[len('refs/heads/'):]
elif ref.startswith('refs/tags/'):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit.strip()
return rv
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
refs = self.get_refs(location)
# refs maps names to commit hashes; we need the inverse
# if multiple names map to a single commit, this arbitrarily picks one
names_by_commit = dict((commit, ref) for ref, commit in refs.items())
if current_rev in names_by_commit:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH, they sometimes don't
work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if not '://' in self.url:
assert not 'file:' in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
cwd=location)
vcs.register(Git)
|
lgpl-3.0
| 4,057,708,579,213,142,000
| 39.71134
| 111
| 0.547578
| false
| 3.964723
| false
| false
| false
|
pataquets/namecoin-core
|
test/functional/wallet_importdescriptors.py
|
1
|
26430
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importdescriptors RPC.
Test importdescriptors by generating keys on node0, importing the corresponding
descriptors on node1 and then testing the address info for the different address
variants.
- `get_generate_key()` is called to generate keys and return the privkeys,
pubkeys and all variants of scriptPubKey and address.
- `test_importdesc()` is called to send an importdescriptors call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
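# A typical request passed to test_importdesc() below looks like (illustrative,
# drawn from the cases in run_test):
#   {"desc": descsum_create("pkh(" + key.pubkey + ")"),
#    "timestamp": "now",
#    "label": "Descriptor import test"}
# where descsum_create appends the descriptor checksum required by the RPC.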
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
from test_framework.wallet_util import (
get_generate_key,
test_address,
)
class ImportDescriptorsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"],
["-addresstype=bech32", "-keypool=5"]
]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def test_importdesc(self, req, success, error_code=None, error_message=None, warnings=None, wallet=None):
"""Run importdescriptors and assert success"""
if warnings is None:
warnings = []
wrpc = self.nodes[1].get_wallet_rpc('w1')
if wallet is not None:
wrpc = wallet
result = wrpc.importdescriptors([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info('Setting up wallets')
self.nodes[0].createwallet(wallet_name='w0', disable_private_keys=False)
w0 = self.nodes[0].get_wallet_rpc('w0')
self.nodes[1].createwallet(wallet_name='w1', disable_private_keys=True, blank=True, descriptors=True)
w1 = self.nodes[1].get_wallet_rpc('w1')
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.nodes[1].createwallet(wallet_name="wpriv", disable_private_keys=False, blank=True, descriptors=True)
wpriv = self.nodes[1].get_wallet_rpc("wpriv")
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
w0.generatetoaddress(101, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
# # Test import fails if no descriptor present
key = get_generate_key()
self.log.info("Import should fail if a descriptor is not provided")
self.test_importdesc({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor not found.')
# # Test importing of a P2PKH descriptor
key = get_generate_key()
self.log.info("Should import a p2pkh descriptor")
self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"},
success=True)
test_address(w1,
key.p2pkh_addr,
solvable=True,
ismine=True,
labels=["Descriptor import test"])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
self.log.info("Internal addresses cannot have labels")
self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"internal": True,
"label": "Descriptor import test"},
success=False,
error_code=-8,
error_message="Internal addresses should not have a label")
# # Test importing of a P2SH-P2WPKH descriptor
key = get_generate_key()
self.log.info("Should not import a p2sh-p2wpkh descriptor without checksum")
self.test_importdesc({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now"
},
success=False,
error_code=-5,
error_message="Missing checksum")
self.log.info("Should not import a p2sh-p2wpkh descriptor that has range specified")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"range": 1,
},
success=False,
error_code=-8,
error_message="Range should not be specified for an un-ranged descriptor")
self.log.info("Should not import a p2sh-p2wpkh descriptor and have it set to active")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": True,
},
success=False,
error_code=-8,
error_message="Active descriptors must be ranged")
self.log.info("Should import a (non-active) p2sh-p2wpkh descriptor")
self.test_importdesc({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"active": False,
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# Check persistence of data and that loading works correctly
w1.unloadwallet()
self.nodes[1].loadwallet('w1')
test_address(w1,
key.p2sh_p2wpkh_addr,
ismine=True,
solvable=True)
# # Test importing of a multisig descriptor
key1 = get_generate_key()
key2 = get_generate_key()
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importdesc({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True)
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(w1,
key1.p2pkh_addr,
ismine=False)
# # Test ranged descriptors
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
xpub = "tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["ncrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju76qa6px", "ncrt1qfqeppuvj0ww98r6qghmdkj70tv8qpcheap27pj"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpub + "/0/0/*" + "))"
self.log.info("Ranged descriptors cannot have labels")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100],
"label": "test"},
success=False,
error_code=-8,
error_message='Ranged descriptors should not have a label')
self.log.info("Private keys required for private keys enabled wallet")
self.test_importdesc({"desc":descsum_create(desc),
"timestamp": "now",
"range": [0, 100]},
success=False,
error_code=-4,
error_message='Cannot import descriptor without private keys to a wallet with private keys enabled',
wallet=wpriv)
self.log.info("Ranged descriptor import should warn without a specified range")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
warnings=['Range not given, using default keypool range'])
assert_equal(w1.getwalletinfo()['keypoolsize'], 0)
# # Test importing of a ranged descriptor with xpriv
self.log.info("Should not import a ranged descriptor that includes xpriv into a watch-only wallet")
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=False,
error_code=-4,
error_message='Cannot import private keys to a wallet with private keys disabled')
for address in addresses:
test_address(w1,
address,
ismine=False,
solvable=False)
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importdesc({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Make sure ranged imports import keys in order
w1 = self.nodes[1].get_wallet_rpc('w1')
self.log.info('Key ranges should be imported in order')
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'ncrt1qtmp74ayg7p24uslctssvjm06q5phz4yrvy646e', # m/0'/0'/0
'ncrt1q8vprchan07gzagd5e6v9wd7azyucksq2vqu8lj', # m/0'/0'/1
'ncrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjtaapkf', # m/0'/0'/2
'ncrt1qau64272ymawq26t90md6an0ps99qkrse7le8u6', # m/0'/0'/3
'ncrt1qsg97266hrh6cpmutqen8s4s962aryy77ced5p6', # m/0'/0'/4
]
self.test_importdesc({'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('sh(wpkh([abcdef12/0h/0h]' + xpub + '/*))'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
self.test_importdesc({'desc': descsum_create('pkh([12345678/0h/0h]' + xpub + '/*)'),
'active': True,
'range' : [0, 2],
'timestamp': 'now'
},
success=True)
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
for i, expected_addr in enumerate(addresses):
received_addr = w1.getnewaddress('', 'bech32')
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'bech32')
assert_equal(received_addr, expected_addr)
bech32_addr_info = w1.getaddressinfo(received_addr)
assert_equal(bech32_addr_info['desc'][:23], 'wpkh([80002067/0\'/0\'/{}]'.format(i))
shwpkh_addr = w1.getnewaddress('', 'p2sh-segwit')
shwpkh_addr_info = w1.getaddressinfo(shwpkh_addr)
assert_equal(shwpkh_addr_info['desc'][:26], 'sh(wpkh([abcdef12/0\'/0\'/{}]'.format(i))
pkh_addr = w1.getnewaddress('', 'legacy')
pkh_addr_info = w1.getaddressinfo(pkh_addr)
assert_equal(pkh_addr_info['desc'][:22], 'pkh([12345678/0\'/0\'/{}]'.format(i))
assert_equal(w1.getwalletinfo()['keypoolsize'], 4 * 3) # After retrieving a key, we don't refill the keypool again, so it's one less for each address type
w1.keypoolrefill()
assert_equal(w1.getwalletinfo()['keypoolsize'], 5 * 3)
# Check active=False default
self.log.info('Check imported descriptors are not active by default')
self.test_importdesc({'desc': descsum_create('pkh([12345678/0h/0h]' + xpub + '/*)'),
'range' : [0, 2],
'timestamp': 'now',
'internal': True
},
success=True)
assert_raises_rpc_error(-4, 'This wallet has no available keys', w1.getrawchangeaddress, 'legacy')
# # Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importdesc({"desc": descsum_create(desc),
"timestamp": "now"},
success=True,
wallet=wpriv)
test_address(wpriv,
address,
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
w0.generatetoaddress(6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
w1.sendrawtransaction(signed_tx['hex'])
# Make sure that we can use import and use multisig as addresses
self.log.info('Test that multisigs can be imported, signed for, and getnewaddress\'d')
self.nodes[1].createwallet(wallet_name="wmulti_priv", disable_private_keys=False, blank=True, descriptors=True)
wmulti_priv = self.nodes[1].get_wallet_rpc("wmulti_priv")
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 0)
self.test_importdesc({"desc":"wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/0h/0h/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/0h/0h/*,tprv8ZgxMBicQKsPeonDt8Ka2mrQmHa61hQ5FQCsvWBTpSNzBFgM58cV2EuXNAHF14VawVpznnme3SuTbA62sGriwWyKifJmXntfNeK7zeqMCj1/84h/0h/0h/*))#m2sr93jn",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
self.test_importdesc({"desc":"wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/1h/0h/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/1h/0h/*,tprv8ZgxMBicQKsPeonDt8Ka2mrQmHa61hQ5FQCsvWBTpSNzBFgM58cV2EuXNAHF14VawVpznnme3SuTbA62sGriwWyKifJmXntfNeK7zeqMCj1/84h/1h/0h/*))#q3sztvx5",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_priv)
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1001) # Range end (1000) is inclusive, so 1001 addresses generated
addr = wmulti_priv.getnewaddress('', 'bech32')
assert_equal(addr, 'ncrt1qdt0qy5p7dzhxzmegnn4ulzhard33s2809arjqgjndx87rv5vd0fqhrnwwh') # Derived at m/84'/0'/0'/0
change_addr = wmulti_priv.getrawchangeaddress('bech32')
assert_equal(change_addr, 'ncrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsewy2df')
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1000)
txid = w0.sendtoaddress(addr, 10)
self.nodes[0].generate(6)
self.sync_all()
send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8)
decoded = wmulti_priv.decoderawtransaction(wmulti_priv.gettransaction(send_txid)['hex'])
assert_equal(len(decoded['vin'][0]['txinwitness']), 4)
self.nodes[0].generate(6)
self.sync_all()
self.nodes[1].createwallet(wallet_name="wmulti_pub", disable_private_keys=True, blank=True, descriptors=True)
wmulti_pub = self.nodes[1].get_wallet_rpc("wmulti_pub")
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 0)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/0h/0h]tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*,[59b09cd6/84h/0h/0h]tpubDDBF2BTR6s8drwrfDei8WxtckGuSm1cyoKxYY1QaKSBFbHBYQArWhHPA6eJrzZej6nfHGLSURYSLHr7GuYch8aY5n61tGqgn8b4cXrMuoPH/*,[e81a0532/84h/0h/0h]tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E/*))#tsry0s5e",
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
self.test_importdesc({"desc":"wsh(multi(2,[7b2d0242/84h/1h/0h]tpubDCXqdwWZcszwqYJSnZp8eARkxGJfHAk23KDxbztV4BbschfaTfYLTcSkSJ3TN64dRqwa1rnFUScsYormKkGqNbbPwkorQimVevXjxzUV9Gf/*,[59b09cd6/84h/1h/0h]tpubDCYfZY2ceyHzYzMMVPt9MNeiqtQ2T7Uyp9QSFwYXh8Vi9iJFYXcuphJaGXfF3jUQJi5Y3GMNXvM11gaL4txzZgNGK22BFAwMXynnzv4z2Jh/*,[e81a0532/84h/1h/0h]tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb/*))#c08a2rzv",
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"},
success=True,
wallet=wmulti_pub)
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 1000) # The first one was already consumed by previous import and is detected as used
addr = wmulti_pub.getnewaddress('', 'bech32')
assert_equal(addr, 'ncrt1qp8s25ckjl7gr6x2q3dx3tn2pytwp05upkjztk6ey857tt50r5aeqwp24f4') # Derived at m/84'/0'/0'/1
change_addr = wmulti_pub.getrawchangeaddress('bech32')
assert_equal(change_addr, 'ncrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsewy2df')
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 999)
txid = w0.sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
self.nodes[0].generate(6)
self.sync_all()
assert_equal(wmulti_pub.getbalance(), wmulti_priv.getbalance())
# Make sure that descriptor wallets containing multiple xpubs in a single descriptor load correctly
wmulti_pub.unloadwallet()
self.nodes[1].loadwallet('wmulti_pub')
self.log.info("Multisig with distributed keys")
self.nodes[1].createwallet(wallet_name="wmulti_priv1", descriptors=True)
wmulti_priv1 = self.nodes[1].get_wallet_rpc("wmulti_priv1")
res = wmulti_priv1.importdescriptors([
{
"desc": descsum_create("wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/0h/0h/*,[59b09cd6/84h/0h/0h]tpubDDBF2BTR6s8drwrfDei8WxtckGuSm1cyoKxYY1QaKSBFbHBYQArWhHPA6eJrzZej6nfHGLSURYSLHr7GuYch8aY5n61tGqgn8b4cXrMuoPH/*,[e81a0532/84h/0h/0h]tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2,tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52/84h/1h/0h/*,[59b09cd6/84h/1h/0h]tpubDCYfZY2ceyHzYzMMVPt9MNeiqtQ2T7Uyp9QSFwYXh8Vi9iJFYXcuphJaGXfF3jUQJi5Y3GMNXvM11gaL4txzZgNGK22BFAwMXynnzv4z2Jh/*,[e81a0532/84h/1h/0h]tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
self.nodes[1].createwallet(wallet_name='wmulti_priv2', blank=True, descriptors=True)
wmulti_priv2 = self.nodes[1].get_wallet_rpc('wmulti_priv2')
res = wmulti_priv2.importdescriptors([
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/0h/0h]tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/0h/0h/*,[e81a0532/84h/0h/0h]tpubDCsWoW1kuQB9kG5MXewHqkbjPtqPueRnXju7uM2NK7y3JYb2ajAZ9EiuZXNNuE4661RAfriBWhL8UsnAPpk8zrKKnZw1Ug7X4oHgMdZiU4E/*))"),
"active": True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
},
{
"desc": descsum_create("wsh(multi(2,[7b2d0242/84h/1h/0h]tpubDCXqdwWZcszwqYJSnZp8eARkxGJfHAk23KDxbztV4BbschfaTfYLTcSkSJ3TN64dRqwa1rnFUScsYormKkGqNbbPwkorQimVevXjxzUV9Gf/*,tprv8ZgxMBicQKsPdSNWUhDiwTScDr6JfkZuLshTRwzvZGnMSnGikV6jxpmdDkC3YRc4T3GD6Nvg9uv6hQg73RVv1EiTXDZwxVbsLugVHU8B1aq/84h/1h/0h/*,[e81a0532/84h/1h/0h]tpubDC6UGqnsQStngYuGD4MKsMy7eD1Yg9NTJfPdvjdG2JE5oZ7EsSL3WHg4Gsw2pR5K39ZwJ46M1wZayhedVdQtMGaUhq5S23PH6fnENK3V1sb/*))"),
"active": True,
"internal" : True,
"range": 1000,
"next_index": 0,
"timestamp": "now"
}])
assert_equal(res[0]['success'], True)
assert_equal(res[0]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
assert_equal(res[1]['success'], True)
assert_equal(res[1]['warnings'][0], 'Not all private keys provided. Some wallet functionality may return unexpected errors')
rawtx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 9.999})
tx_signed_1 = wmulti_priv1.signrawtransactionwithwallet(rawtx)
assert_equal(tx_signed_1['complete'], False)
tx_signed_2 = wmulti_priv2.signrawtransactionwithwallet(tx_signed_1['hex'])
assert_equal(tx_signed_2['complete'], True)
self.nodes[1].sendrawtransaction(tx_signed_2['hex'])
self.log.info("Combo descriptors cannot be active")
self.test_importdesc({"desc": descsum_create("combo(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=False,
error_code=-4,
error_message="Combo descriptors cannot be set to active")
self.log.info("Descriptors with no type cannot be active")
self.test_importdesc({"desc": descsum_create("pk(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
"range": 1,
"timestamp": "now"},
success=True,
warnings=["Unknown output type, cannot set descriptor to active."])
if __name__ == '__main__':
ImportDescriptorsTest().main()
|
mit
| 4,546,330,020,616,663,000
| 56.707424
| 464
| 0.59319
| false
| 3.115276
| true
| false
| false
|
Shiva-Iyer/kepler
|
pykepler/riseset.py
|
1
|
3379
|
# riseset.py - Wrapper for celestial body rise/transit/set times
# Copyright (C) 2016 Shiva Iyer <shiva.iyer AT g m a i l DOT c o m>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if __name__ == "__main__":
exit()
from ctypes import *
from pykepler import _libkepler
def riseset(df, ra, dec, gast, lon, lat, delt, h0):
"""Calculate rise/transit/set times for an object given its
positions in equatorial coordinates.
df -- List of day fractions, each in the range [0,1]. Positions
must be given for one full day at intervals of 6 hours or
smaller for satisfactory results, especially for the Moon
and Mercury.
ra -- RA of the object in radians at the times in <df>.
dec -- Declination of the object in radians at the times in <df>.
gast -- Greenwich apparent sidereal time in radians at <df[0]>.
lon -- Observer's longitude in radians, positive east of Greenwich.
lat -- Observer's latitude in radians, positive north of the equator.
delt -- Delta-T in seconds at <df[0]>.
h0 -- Correction to use for atmospheric refraction in radians.
Return: rts[0] = rise, rts[1] = transit, rts[2] = setting times,
all in UTC day fractions in the range [0,1]. Values will be -1
for objects that don't rise/transit/set.
"""
N = len(df)
rts = (c_double*3)()
_libkepler.riseset(c_int(N),
cast((c_double*N)(*df), POINTER(c_double)),
cast((c_double*N)(*ra), POINTER(c_double)),
cast((c_double*N)(*dec), POINTER(c_double)),
c_double(gast),
c_double(lon),
c_double(lat),
c_double(delt),
c_double(h0),
pointer(rts))
return(rts[0], rts[1], rts[2])
def interpolate(X, Y, xint):
"""Interpolate using Lagrange's interpolation formula.
X -- x-values for interpolation.
Y -- y-values for interpolation.
xint -- Interpolant.
Return: Interpolated y-value corresponding to <xint>.
"""
N = len(X)
return(_libkepler.interpolate(c_int(N),
cast((c_double*N)(*X), POINTER(c_double)),
cast((c_double*N)(*Y), POINTER(c_double)),
c_double(xint)))
_libkepler.riseset.argtypes = [
c_int,
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
c_double,
c_double,
c_double,
c_double,
c_double,
POINTER(c_double*3)
]
_libkepler.interpolate.restype = c_double
_libkepler.interpolate.argtypes = [
c_int,
POINTER(c_double),
POINTER(c_double),
c_double
]
__all__ = [
"riseset",
"interpolate"
]
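# Illustrative usage sketch (not part of the original module; all numeric
# values below are made-up placeholders rather than real ephemeris data):
#
#   from math import radians
#   from pykepler.riseset import riseset
#
#   df  = [0.0, 0.25, 0.5, 0.75, 1.0]        # day fractions at 6-hour steps
#   ra  = [radians(45.0)] * 5                # RA at each step, radians
#   dec = [radians(20.0)] * 5                # declination at each step, radians
#   rise, transit, setting = riseset(
#       df, ra, dec,
#       gast=radians(100.0),                 # Greenwich apparent sidereal time at df[0]
#       lon=radians(-74.0), lat=radians(40.7),
#       delt=69.0,                           # Delta-T in seconds at df[0]
#       h0=radians(-34.0 / 60.0))            # refraction correction (placeholder value)
#
# A value of -1 in any of the returned slots means the object does not
# rise/transit/set during that day.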
|
gpl-3.0
| -8,199,793,369,504,019,000
| 32.79
| 76
| 0.606096
| false
| 3.52714
| false
| false
| false
|
tylertian/Openstack
|
openstack F/glance/glance/registry/api/v1/members.py
|
1
|
13468
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.openstack.common.log as logging
LOG = logging.getLogger(__name__)
class Controller(object):
def _check_can_access_image_members(self, context):
if context.owner is None and not context.is_admin:
raise webob.exc.HTTPUnauthorized(_("No authenticated user"))
def __init__(self):
self.db_api = glance.db.get_api()
self.db_api.configure_db()
def index(self, req, image_id):
"""
Get the members of an image.
"""
try:
self.db_api.image_get(req.context, image_id)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
members = self.db_api.image_member_find(req.context, image_id=image_id)
msg = _("Returning member list for image %(id)s")
LOG.info(msg % {'id': image_id})
return dict(members=make_member_list(members,
member_id='member',
can_share='can_share'))
@utils.mutating
def update_all(self, req, image_id, body):
"""
Replaces the members of the image with those specified in the
body. The body is a dict with the following format::
{"memberships": [
{"member_id": <MEMBER_ID>,
["can_share": [True|False]]}, ...
]}
"""
self._check_can_access_image_members(req.context)
# Make sure the image exists
session = self.db_api.get_session()
try:
image = self.db_api.image_get(req.context, image_id,
session=session)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
# Can they manipulate the membership?
if not self.db_api.is_image_sharable(req.context, image):
msg = _("User lacks permission to share image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("No permission to share that image")
raise webob.exc.HTTPForbidden(msg)
# Get the membership list
try:
memb_list = body['memberships']
except Exception, e:
# Malformed entity...
msg = _("Invalid membership association specified for "
"image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("Invalid membership association: %s") % e
raise webob.exc.HTTPBadRequest(explanation=msg)
add = []
existing = {}
# Walk through the incoming memberships
for memb in memb_list:
try:
datum = dict(image_id=image['id'],
member=memb['member_id'],
can_share=None)
except Exception, e:
# Malformed entity...
msg = _("Invalid membership association specified for "
"image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("Invalid membership association: %s") % e
raise webob.exc.HTTPBadRequest(explanation=msg)
# Figure out what can_share should be
if 'can_share' in memb:
datum['can_share'] = bool(memb['can_share'])
# Try to find the corresponding membership
members = self.db_api.image_member_find(req.context,
image_id=datum['image_id'],
member=datum['member'],
session=session)
try:
member = members[0]
except IndexError:
# Default can_share
datum['can_share'] = bool(datum['can_share'])
add.append(datum)
else:
# Are we overriding can_share?
if datum['can_share'] is None:
datum['can_share'] = members[0]['can_share']
existing[member['id']] = {
'values': datum,
'membership': member,
}
# We now have a filtered list of memberships to add and
# memberships to modify. Let's start by walking through all
# the existing image memberships...
existing_members = self.db_api.image_member_find(req.context,
image_id=image['id'])
for memb in existing_members:
if memb['id'] in existing:
# Just update the membership in place
update = existing[memb['id']]['values']
self.db_api.image_member_update(req.context, memb, update,
session=session)
else:
# Outdated one; needs to be deleted
self.db_api.image_member_delete(req.context, memb,
session=session)
# Now add the non-existent ones
for memb in add:
self.db_api.image_member_create(req.context, memb, session=session)
# Make an appropriate result
msg = _("Successfully updated memberships for image %(id)s")
LOG.info(msg % {'id': image_id})
return webob.exc.HTTPNoContent()
@utils.mutating
def update(self, req, image_id, id, body=None):
"""
Adds a membership to the image, or updates an existing one.
If a body is present, it is a dict with the following format::
{"member": {
"can_share": [True|False]
}}
If "can_share" is provided, the member's ability to share is
set accordingly. If it is not provided, existing memberships
remain unchanged and new memberships default to False.
"""
self._check_can_access_image_members(req.context)
# Make sure the image exists
try:
image = self.db_api.image_get(req.context, image_id)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
# Can they manipulate the membership?
if not self.db_api.is_image_sharable(req.context, image):
msg = _("User lacks permission to share image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("No permission to share that image")
raise webob.exc.HTTPForbidden(msg)
# Determine the applicable can_share value
can_share = None
if body:
try:
can_share = bool(body['member']['can_share'])
except Exception, e:
# Malformed entity...
msg = _("Invalid membership association specified for "
"image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("Invalid membership association: %s") % e
raise webob.exc.HTTPBadRequest(explanation=msg)
# Look up an existing membership...
session = self.db_api.get_session()
members = self.db_api.image_member_find(req.context,
image_id=image_id,
member=id,
session=session)
if members:
if can_share is not None:
values = dict(can_share=can_share)
self.db_api.image_member_update(req.context, members[0],
values, session=session)
else:
values = dict(image_id=image['id'], member=id,
can_share=bool(can_share))
self.db_api.image_member_create(req.context, values,
session=session)
msg = _("Successfully updated a membership for image %(id)s")
LOG.info(msg % {'id': image_id})
return webob.exc.HTTPNoContent()
@utils.mutating
def delete(self, req, image_id, id):
"""
Removes a membership from the image.
"""
self._check_can_access_image_members(req.context)
# Make sure the image exists
try:
image = self.db_api.image_get(req.context, image_id)
except exception.NotFound:
msg = _("Image %(id)s not found")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
except exception.Forbidden:
# If it's private and doesn't belong to them, don't let on
# that it exists
msg = _("Access denied to image %(id)s but returning 'not found'")
LOG.info(msg % {'id': image_id})
raise webob.exc.HTTPNotFound()
# Can they manipulate the membership?
if not self.db_api.is_image_sharable(req.context, image):
msg = _("User lacks permission to share image %(id)s")
LOG.info(msg % {'id': image_id})
msg = _("No permission to share that image")
raise webob.exc.HTTPForbidden(msg)
# Look up an existing membership
try:
session = self.db_api.get_session()
members = self.db_api.image_member_find(req.context,
image_id=image_id,
member=id,
session=session)
self.db_api.image_member_delete(req.context,
members[0],
session=session)
except exception.NotFound:
pass
# Make an appropriate result
msg = _("Successfully deleted a membership from image %(id)s")
LOG.info(msg % {'id': image_id})
return webob.exc.HTTPNoContent()
def index_shared_images(self, req, id):
"""
Retrieves images shared with the given member.
"""
try:
members = self.db_api.image_member_find(req.context, member=id)
except exception.NotFound, e:
msg = _("Member %(id)s not found")
LOG.info(msg % {'id': id})
msg = _("Membership could not be found.")
raise webob.exc.HTTPBadRequest(explanation=msg)
msg = _("Returning list of images shared with member %(id)s")
LOG.info(msg % {'id': id})
return dict(shared_images=make_member_list(members,
image_id='image_id',
can_share='can_share'))
def make_member_list(members, **attr_map):
"""
Create a dict representation of a list of members which we can use
to serialize the members list. Keyword arguments map the names of
optional attributes to include to the database attribute.
"""
def _fetch_memb(memb, attr_map):
return dict([(k, memb[v]) for k, v in attr_map.items()
if v in memb.keys()])
# Return the list of members with the given attribute mapping
return [_fetch_memb(memb, attr_map) for memb in members
if not memb.deleted]
def create_resource():
"""Image members resource factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
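# Illustrative sketch of make_member_list() (not part of the original module).
# The member row below is a simplified stand-in for the real database object,
# which additionally exposes a 'deleted' attribute checked by the helper:
#
#   members = [<row with member='tenant-a', can_share=True, deleted=False>]
#   make_member_list(members, member_id='member', can_share='can_share')
#   # -> [{'member_id': 'tenant-a', 'can_share': True}]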
|
apache-2.0
| -568,843,902,991,611,650
| 39.444444
| 79
| 0.529032
| false
| 4.492328
| false
| false
| false
|
jsenko/repour
|
repour/server/server.py
|
1
|
3266
|
import asyncio
import logging
from aiohttp import web
from .endpoint import cancel
from .endpoint import endpoint
from ..adjust import adjust
from .. import clone
from .. import pull
from .. import repo
from .endpoint import validation
from ..auth import auth
from ..config import config
logger = logging.getLogger(__name__)
#
# Setup
#
shutdown_callbacks = []
@asyncio.coroutine
def init(loop, bind, repo_provider, adjust_provider):
logger.debug("Running init")
c = yield from config.get_configuration()
auth_provider = c.get('auth', {}).get('provider', None)
logger.info("Using auth provider '" + str(auth_provider) + "'.")
app = web.Application(loop=loop, middlewares=[auth.providers[auth_provider]] if auth_provider else {})
logger.debug("Adding application resources")
app["repo_provider"] = repo.provider_types[repo_provider["type"]](**repo_provider["params"])
if repo_provider["type"] == "modeb":
logger.warn("Mode B selected, guarantees rescinded")
pull_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.pull_modeb, pull.pull)
adjust_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.adjust_modeb, adjust.adjust)
else:
pull_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.pull, pull.pull)
adjust_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.adjust, adjust.adjust)
logger.debug("Setting up handlers")
app.router.add_route("POST", "/pull", pull_source)
app.router.add_route("POST", "/adjust", adjust_source)
app.router.add_route("POST", "/clone", endpoint.validated_json_endpoint(shutdown_callbacks, validation.clone, clone.clone))
app.router.add_route("POST", "/cancel", cancel.handle_cancel)
logger.debug("Creating asyncio server")
srv = yield from loop.create_server(app.make_handler(), bind["address"], bind["port"])
for socket in srv.sockets:
logger.info("Server started on socket: {}".format(socket.getsockname()))
def start_server(bind, repo_provider, adjust_provider):
logger.debug("Starting server")
loop = asyncio.get_event_loop()
# Monkey patch for Python 3.4.1
if not hasattr(loop, "create_task"):
loop.create_task = lambda c: asyncio.async(c, loop=loop)
loop.run_until_complete(init(
loop=loop,
bind=bind,
repo_provider=repo_provider,
adjust_provider=adjust_provider,
))
try:
loop.run_forever()
except KeyboardInterrupt:
logger.debug("KeyboardInterrupt")
finally:
logger.info("Stopping tasks")
tasks = asyncio.Task.all_tasks()
for task in tasks:
task.cancel()
results = loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))
for shutdown_callback in shutdown_callbacks:
shutdown_callback()
exception_results = [r for r in results if
isinstance(r, Exception) and not isinstance(r, asyncio.CancelledError)]
if len(exception_results) > 1:
raise Exception(exception_results)
elif len(exception_results) == 1:
raise exception_results[0]
loop.close()
|
apache-2.0
| 3,060,298,820,635,262,000
| 34.89011
| 127
| 0.676975
| false
| 3.98779
| false
| false
| false
|
fracpete/python-weka-wrapper
|
python/weka/flow/container.py
|
1
|
7925
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# container.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import re
from weka.core.dataset import Instances
class Container(object):
"""
Container for storing multiple objects and passing them around together in the flow.
"""
def __init__(self):
"""
Initializes the container.
"""
self._data = {}
self._allowed = []
def get(self, name):
"""
Returns the stored data.
:param name: the name of the item to return
:type name: str
:return: the data
:rtype: object
"""
return self._data[name]
def set(self, name, value):
"""
Stores the given data (if not None).
:param name: the name of the item to store
:type name: str
:param value: the value to store
:type value: object
"""
if value is not None:
self._data[name] = value
@property
def allowed(self):
"""
Returns the all the allowed keys.
:return: the list of allowed keys.
:rtype: list
"""
return self._allowed
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return True
def __str__(self):
"""
Returns the content of the container as string.
:return: the content
:rtype: str
"""
return str(self._data)
def generate_help(self):
"""
Generates a help string for this container.
:return: the help string
:rtype: str
"""
result = []
result.append(self.__class__.__name__)
result.append(re.sub(r'.', '=', self.__class__.__name__))
result.append("")
result.append("Supported value names:")
for a in self.allowed:
result.append(a)
return '\n'.join(result)
def print_help(self):
"""
Prints a help string for this actor to stdout.
"""
print(self.generate_help())
class ModelContainer(Container):
"""
Container for models.
"""
def __init__(self, model=None, header=None):
"""
Initializes the container.
:param model: the model to store (eg Classifier or Clusterer)
:type model: object
:param header: the header instances
:type header: Instances
"""
super(ModelContainer, self).__init__()
self.set("Model", model)
if header is not None:
header = Instances.template_instances(header)
self.set("Header", header)
self._allowed = ["Model", "Header"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Model" in self._data) or ("Model" in self._data and "Header" in self._data)
class AttributeSelectionContainer(Container):
"""
    Container for attribute selection results.
"""
def __init__(self, original=None, reduced=None, num_atts=None, selected=None, results=None):
"""
Initializes the container.
:param original: the original dataset
:type original: Instances
:param reduced: the reduced dataset
:type reduced: Instances
:param num_atts: the number of attributes
:type num_atts: int
:param selected: the list of selected attribute indices (0-based)
:type selected: list
:param results: the generated results string
:type results: str
"""
super(AttributeSelectionContainer, self).__init__()
self.set("Original", original)
self.set("Reduced", reduced)
self.set("NumAttributes", num_atts)
self.set("Selected", selected)
self.set("Results", results)
self._allowed = ["Original", "Reduced", "NumAttributes", "Selected", "Results"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Reduced" in self._data) and ("NumAttributes" in self._data) and ("Selected" in self._data)
class ModelContainer(Container):
"""
Container for models.
"""
def __init__(self, model=None, header=None):
"""
Initializes the container.
:param model: the model to store (eg Classifier or Clusterer)
:type model: object
:param header: the header instances
:type header: Instances
"""
super(ModelContainer, self).__init__()
self.set("Model", model)
if header is not None:
header = Instances.template_instances(header)
self.set("Header", header)
self._allowed = ["Model", "Header"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Model" in self._data) or ("Model" in self._data and "Header" in self._data)
class ClassificationContainer(Container):
"""
Container for predictions (classifiers).
"""
def __init__(self, inst=None, classification=None, label=None, distribution=None):
"""
Initializes the container.
:param inst: the instance used for making the prediction
:type inst: Instance
:param classification: the classification (numeric value or 0-based label index)
:type classification: float
:param label: classification label (for nominal classes)
:type label: str
:param distribution: the class distribution
:type distribution: ndarray
"""
super(ClassificationContainer, self).__init__()
self.set("Instance", inst)
self.set("Classification", classification)
self.set("Label", label)
self.set("Distribution", distribution)
self._allowed = ["Instance", "Classification", "Label", "Distribution"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Instance" in self._data) and ("Classification" in self._data)
class ClusteringContainer(Container):
"""
Container for predictions (clusterers).
"""
def __init__(self, inst=None, cluster=None, distribution=None):
"""
Initializes the container.
:param inst: the instance used for making the prediction
:type inst: Instance
:param cluster: the cluster
:type cluster: int
:param distribution: the class distribution
:type distribution: ndarray
"""
super(ClusteringContainer, self).__init__()
self.set("Instance", inst)
self.set("Cluster", cluster)
self.set("Distribution", distribution)
self._allowed = ["Instance", "Cluster", "Distribution"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Instance" in self._data) and ("Cluster" in self._data)
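# Illustrative usage sketch (not part of the original module; the classifier
# construction below is an assumption for demonstration purposes):
#
#   from weka.classifiers import Classifier
#   cls = Classifier(classname="weka.classifiers.trees.J48")
#   cont = ModelContainer(model=cls, header=training_data)  # training_data: an Instances object
#   if cont.is_valid():
#       stored = cont.get("Model")
#       cont.print_help()  # lists the allowed value names, here "Model" and "Header"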
|
gpl-3.0
| -7,964,519,696,073,500,000
| 28.243542
| 107
| 0.592303
| false
| 4.474873
| false
| false
| false
|
clwainwright/CosmoTransitions
|
cosmoTransitions/finiteT.py
|
1
|
10960
|
"""
This module provides the functions for the one-loop finite
temperature corrections to a potential in QFT. The two basic
functions are:
Jb(x) = int[0->inf] dy +y^2 log( 1 - exp(-sqrt(x^2 + y^2)) )
Jf(x) = int[0->inf] dy -y^2 log( 1 + exp(-sqrt(x^2 + y^2)) )
Call them by:
Jb(x, approx='high', deriv=0, n = 8)
Here, approx can either be 'exact', 'spline', 'high', or 'low'.
Exact calculates the integral numerically, while high and low
calculate the high and low x expansions of J to order n.
Specify the derivative with the 'deriv' parameter.
"""
import os
import numpy
from scipy import integrate, interpolate
from scipy import special
try:
from scipy.misc import factorial as fac
except ImportError:
from scipy.special import factorial as fac
pi = numpy.pi
euler_gamma = 0.577215661901532
log, exp, sqrt = numpy.log, numpy.exp, numpy.sqrt
array = numpy.array
spline_data_path = os.path.dirname(__file__)
# The following are the exact integrals:
def _Jf_exact(x):
f = lambda y: -y*y*log(1+exp(-sqrt(y*y+x*x)))
if(x.imag == 0):
x = abs(x)
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: -y*y*log(2*abs(numpy.cos(sqrt(abs(x*x)-y*y)/2)))
return (
integrate.quad(f1,0,abs(x))[0] +
integrate.quad(f,abs(x),numpy.inf)[0]
)
def _Jf_exact2(theta):
# Note that this is a function of theta so that you can get negative values
f = lambda y: -y*y*log(1+exp(-sqrt(y*y+theta))).real
if theta >= 0:
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: -y*y*log(2*abs(numpy.cos(sqrt(-theta-y*y)/2)))
return (
integrate.quad(f, abs(theta)**.5, numpy.inf)[0] +
integrate.quad(f1, 0, abs(theta)**.5)[0]
)
def _Jb_exact(x):
f = lambda y: y*y*log(1-exp(-sqrt(y*y+x*x)))
if(x.imag == 0):
x = abs(x)
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: y*y*log(2*abs(numpy.sin(sqrt(abs(x*x)-y*y)/2)))
return (
integrate.quad(f1,0,abs(x))[0] +
integrate.quad(f,abs(x),numpy.inf)[0]
)
def _Jb_exact2(theta):
# Note that this is a function of theta so that you can get negative values
f = lambda y: y*y*log(1-exp(-sqrt(y*y+theta))).real
if theta >= 0:
return integrate.quad(f, 0, numpy.inf)[0]
else:
f1 = lambda y: y*y*log(2*abs(numpy.sin(sqrt(-theta-y*y)/2)))
return (
integrate.quad(f, abs(theta)**.5, numpy.inf)[0] +
integrate.quad(f1, 0, abs(theta)**.5)[0]
)
def _dJf_exact(x):
f = lambda y: y*y*(exp(sqrt(y*y+x*x))+1)**-1*x/sqrt(y*y+x*x)
return integrate.quad(f, 0, numpy.inf)[0]
def _dJb_exact(x):
f = lambda y: y*y*(exp(sqrt(y*y+x*x))-1)**-1*x/sqrt(y*y+x*x)
return integrate.quad(f, 0, numpy.inf)[0]
def arrayFunc(f, x, typ=float):
# This function allows a 1D array to be passed to something that
# normally can't handle it
i = 0
try:
n = len(x)
except:
return f(x) # x isn't an array
s = numpy.empty(n, typ)
while(i < n):
try:
s[i] = f(x[i])
except:
s[i] = numpy.NaN
i += 1
return s
def Jf_exact(x):
"""Jf calculated directly from the integral."""
return arrayFunc(_Jf_exact, x, complex)
def Jf_exact2(theta):
    """Jf calculated directly from the integral; input is theta = x^2."""
return arrayFunc(_Jf_exact2, theta)
def Jb_exact(x):
"""Jb calculated directly from the integral."""
return arrayFunc(_Jb_exact, x)
def Jb_exact2(theta):
    """Jb calculated directly from the integral; input is theta = x^2."""
return arrayFunc(_Jb_exact2, theta)
def dJf_exact(x):
"""dJf/dx calculated directly from the integral."""
return arrayFunc(_dJf_exact, x)
def dJb_exact(x):
"""dJb/dx calculated directly from the integral."""
return arrayFunc(_dJb_exact, x)
# Spline fitting, Jf
_xfmin = -6.82200203 # -11.2403168
_xfmax = 1.35e3
_Jf_dat_path = spline_data_path+"/finiteT_f.dat.txt"
if os.path.exists(_Jf_dat_path):
_xf, _yf = numpy.loadtxt(_Jf_dat_path).T
else:
    # x = |xmin|*sinh(y), where y is linear
    # (so that we're not overpopulating the uninteresting region)
_xf = numpy.linspace(numpy.arcsinh(-1.3*20),
numpy.arcsinh(-20*_xfmax/_xfmin), 1000)
_xf = abs(_xfmin)*numpy.sinh(_xf)/20
_yf = Jf_exact2(_xf)
numpy.savetxt(_Jf_dat_path, numpy.array([_xf, _yf]).T)
_tckf = interpolate.splrep(_xf, _yf)
def Jf_spline(X,n=0):
"""Jf interpolated from a saved spline. Input is (m/T)^2."""
X = numpy.array(X, copy=False)
x = X.ravel()
y = interpolate.splev(x,_tckf, der=n).ravel()
y[x < _xfmin] = interpolate.splev(_xfmin,_tckf, der=n)
y[x > _xfmax] = 0
return y.reshape(X.shape)
# Spline fitting, Jb
_xbmin = -3.72402637
# We're setting the lower acceptable bound as the point where it's a minimum
# This guarantees that it's a monotonically increasing function, and the first
# deriv is continuous.
_xbmax = 1.41e3
_Jb_dat_path = spline_data_path+"/finiteT_b.dat.txt"
if os.path.exists(_Jb_dat_path):
_xb, _yb = numpy.loadtxt(_Jb_dat_path).T
else:
    # x = |xmin|*sinh(y), where y is linear
    # (so that we're not overpopulating the uninteresting region)
_xb = numpy.linspace(numpy.arcsinh(-1.3*20),
numpy.arcsinh(-20*_xbmax/_xbmin), 1000)
_xb = abs(_xbmin)*numpy.sinh(_xb)/20
_yb = Jb_exact2(_xb)
numpy.savetxt(_Jb_dat_path, numpy.array([_xb, _yb]).T)
_tckb = interpolate.splrep(_xb, _yb)
def Jb_spline(X,n=0):
"""Jb interpolated from a saved spline. Input is (m/T)^2."""
X = numpy.array(X, copy=False)
x = X.ravel()
y = interpolate.splev(x,_tckb, der=n).ravel()
y[x < _xbmin] = interpolate.splev(_xbmin,_tckb, der=n)
y[x > _xbmax] = 0
return y.reshape(X.shape)
# Now for the low x expansion (require that n <= 50)
a,b,c,d = -pi**4/45, pi*pi/12, -pi/6, -1/32.
logab = 1.5 - 2*euler_gamma + 2*log(4*pi)
l = numpy.arange(50)+1
g = (-2*pi**3.5 * (-1)**l*(1+special.zetac(2*l+1)) *
special.gamma(l+.5)/(fac(l+2)*(2*pi)**(2*l+4)))
lowCoef_b = (a,b,c,d,logab,l,g)
del (a,b,c,d,logab,l,g) # clean up name space
a,b,d = -7*pi**4/360, pi*pi/24, 1/32.
logaf = 1.5 - 2*euler_gamma + 2*log(pi)
l = numpy.arange(50)+1
g = (.25*pi**3.5 * (-1)**l*(1+special.zetac(2*l+1)) *
special.gamma(l+.5)*(1-.5**(2*l+1))/(fac(l+2)*pi**(2*l+4)))
lowCoef_f = (a,b,d,logaf,l,g)
del (a,b,d,logaf,l,g) # clean up name space
def Jb_low(x,n=20):
"""Jb calculated using the low-x (high-T) expansion."""
(a,b,c,d,logab,l,g) = lowCoef_b
y = a + x*x*(b + x*(c + d*x*(numpy.nan_to_num(log(x*x)) - logab)))
i = 1
while i <= n:
y += g[i-1]*x**(2*i+4)
i += 1
return y
def Jf_low(x,n=20):
"""Jf calculated using the low-x (high-T) expansion."""
(a,b,d,logaf,l,g) = lowCoef_f
y = a + x*x*(b + d*x*x*(numpy.nan_to_num(log(x*x)) - logaf))
i = 1
while i <= n:
y += g[i-1]*x**(2*i+4)
i += 1
return y
# The next few functions are all for the high approximation
def x2K2(k,x):
y = -x*x*special.kn(2, k*x)/(k*k)
if(isinstance(x, numpy.ndarray)):
y[x == 0] = numpy.ones(len(y[x == 0]))*-2.0/k**4
elif(x == 0):
return -2.0/k**4
return y
def dx2K2(k,x):
y = abs(x)
return numpy.nan_to_num(x*y*special.kn(1,k*y)/k)
def d2x2K2(k,x):
x = abs(x)
y = numpy.nan_to_num(x*(special.kn(1,k*x)/k - x*special.kn(0,k*x)))
if(isinstance(x, numpy.ndarray)):
y[x == 0] = numpy.ones(len(y[x == 0]))*1.0/k**2
elif(x == 0):
return 1.0/k**2
return y
def d3x2K2(k,x):
y = abs(x)
return numpy.nan_to_num(x*(y*k*special.kn(1,k*y) - 3*special.kn(0,k*y)))
def Jb_high(x, deriv=0, n=8):
"""Jb calculated using the high-x (low-T) expansion."""
K = (x2K2, dx2K2, d2x2K2, d3x2K2)[deriv]
y, k = 0.0, 1
while k <= n:
y += K(k,x)
k += 1
return y
def Jf_high(x, deriv=0, n=8):
"""Jf calculated using the high-x (low-T) expansion."""
K = (x2K2, dx2K2, d2x2K2, d3x2K2)[deriv]
y, k, i = 0.0, 1, 1
while k <= n:
y += i*K(k,x)
i *= -1
k += 1
return y
# And here are the final functions:
# Note that if approx = 'spline', the function called is
# J(theta) (x^2 -> theta so you can get negative mass squared)
def Jb(x, approx='high', deriv=0, n=8):
"""
A shorthand for calling one of the Jb functions above.
Parameters
----------
approx : str, optional
One of 'exact', 'high', 'low', or 'spline'.
deriv : int, optional
The order of the derivative (0 for no derivative).
Must be <= (1, 3, 0, 3) for approx = (exact, high, low, spline).
n : int, optional
Number of terms to use in the low and high-T approximations.
"""
if(approx == 'exact'):
if(deriv == 0):
return Jb_exact(x)
elif(deriv == 1):
return dJb_exact(x)
else:
raise ValueError("For approx=='exact', deriv must be 0 or 1.")
elif(approx == 'spline'):
return Jb_spline(x, deriv)
elif(approx == 'low'):
if(n > 100):
raise ValueError("Must have n <= 100")
if(deriv == 0):
return Jb_low(x,n)
else:
raise ValueError("For approx=='low', deriv must be 0.")
elif(approx == 'high'):
if(deriv > 3):
raise ValueError("For approx=='high', deriv must be 3 or less.")
else:
return Jb_high(x, deriv, n)
raise ValueError("Unexpected value for 'approx'.")
def Jf(x, approx='high', deriv=0, n=8):
"""
A shorthand for calling one of the Jf functions above.
Parameters
----------
approx : str, optional
One of 'exact', 'high', 'low', or 'spline'.
deriv : int, optional
The order of the derivative (0 for no derivative).
Must be <= (1, 3, 0, 3) for approx = (exact, high, low, spline).
n : int, optional
Number of terms to use in the low and high-T approximations.
"""
if(approx == 'exact'):
if(deriv == 0):
return Jf_exact(x)
elif(deriv == 1):
return dJf_exact(x)
else:
raise ValueError("For approx=='exact', deriv must be 0 or 1.")
elif(approx == 'spline'):
return Jf_spline(x, deriv)
elif(approx == 'low'):
if(n > 100):
raise ValueError("Must have n <= 100")
if(deriv == 0):
return Jf_low(x,n)
else:
raise ValueError("For approx=='low', deriv must be 0.")
elif(approx == 'high'):
if(deriv > 3):
raise ValueError("For approx=='high', deriv must be 3 or less.")
else:
return Jf_high(x, deriv, n)
raise ValueError("Unexpected value for 'approx'.")
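# Illustrative sanity check (not part of the original module): compare the
# approximations against the exact integral at one sample value of x = m/T.
# The chosen x and term counts are arbitrary and only meant for eyeballing.
if __name__ == "__main__":
    x_sample = 1.5
    print("Jb exact  : %s" % Jb(x_sample, approx='exact'))
    print("Jb low-x  : %s" % Jb(x_sample, approx='low', n=20))
    print("Jb high-x : %s" % Jb(x_sample, approx='high', n=8))
    print("Jf exact  : %s" % Jf(x_sample, approx='exact'))
    print("Jf spline : %s" % Jf(x_sample ** 2, approx='spline'))  # spline takes theta = x**2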
|
mit
| 2,406,444,055,076,283,400
| 28.226667
| 79
| 0.565967
| false
| 2.76001
| false
| false
| false
|
isthatme/openHomeControl
|
src/A10-LIME/mqtt_PCA9685_first_copy.py
|
1
|
2431
|
#!/usr/bin/env python
import paho.mqtt.client as mqtt
from pyA10Lime import i2c
import time
'''
Overall design of the program:
Set up mqtt
set up the PCA9685
sub to relevant channels
how do channels work?
devID/controllerID/pinNum maybe?
so A10-1/0x42/4 would reference pyA10 number 1, with a controller addressed at 0x42, pin 4 (5th pin)
example message contents:
subject: A10-1/0x42/4
message: 75
this would set the above pin to a value of 75
don't bother with the LED_ALL stuff, it is kinda useless
'''
'''
TODO:
maybe get a config file set up
ERROR HANDLING
fade-in and fade-out
maybe randomising start times to minimize peak current draw?
'''
clientID = "A10-1" #maybe change this to the hostname?
mqttBroker = "something" #the URL/IP of the mqtt broker
driverNum = 1 #number of LED drivers needed to initialize
def on_connect(client, userdata, flags, rc):
print("Connected")
client.subscribe(clientID + "/#") #subscribe for things
def on_message(client, userdata, msg):
print("Topic is: " + msg.topic)
print("Message is: " + msg.payload)
    topic = msg.topic.split("/")  # topic format: devID/controllerID/pinNum (see docstring above)
    if topic[0] == clientID:
        devAddr = int(topic[1], 0)  # controller I2C address, e.g. "0x42"
        pin = int(topic[2])  # pin number on that controller
        value = int(msg.payload)
        endTime = (4096 * value) // 100 #value is percent and you set the end time based on % of 4096
        register = 6 + (pin * 4) #used to find the register number
        i2c.open(devAddr)
        i2c.write([register, 0x00]) #LEDn_ON_X starts at 0
        register += 1
        i2c.write([register, 0x00])
        register += 1
        i2c.write([register, endTime & 0xFF]) #LEDn_OFF_X is defined by endTime
        register += 1
        i2c.write([register, endTime >> 8])
        i2c.close() #a more efficient way would be to auto-increment
        #might have to auto increment when I implement fading
else:
        print("Wrong topic") #later I should add diagnostic topics and such
def init():
i2c.init("/dev/i2c-0"); #we'll be using i2c-0, I think
client = mqtt.Client(clientID) #create a client with an ID
client.on_connect = on_connect
client.on_message = on_message
client.connect(mqttBroker, 1883, 60)
client.loop_start() #when do we stop this loop?
def PCA9685_init():
addr = 0x40
j = driverNum
while j > 0:
i2c.open(addr)
i2c.write([0x00, 0x80]) #reset
i2c.write([0x00, 0x10]) #sleep
i2c.write([0xFE, 0x1E]) #PRE_SCALE to 200Hz
i2c.write([0x00, 0x00]) #wake up
i2c.close()
addr += 1
j -= 1
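# Illustrative example of the topic/payload convention described in the module
# docstring (not part of the original script; the broker host is a placeholder):
#
#   mosquitto_pub -h broker.example.org -t "A10-1/0x42/4" -m "75"
#
# i.e. on the board named "A10-1", the PCA9685 at I2C address 0x42 drives pin 4
# at 75% duty cycle; on_message() computes the LEDn_OFF value as
# (4096 * 75) // 100 = 3072 and writes its low and high bytes over I2C.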
|
bsd-2-clause
| 8,889,925,543,877,168,000
| 20.324561
| 100
| 0.679144
| false
| 3.012392
| false
| false
| false
|
EvilDako/PyTraining
|
test/test_edit_contact.py
|
1
|
1268
|
__author__ = 'dako'
# -*- coding: utf-8 -*-
from model.contact import Contact
import random
def test_edit_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="test"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
contact_new = Contact(firstname="Petr", middlename="Petrovich", lastname="Petrov", nickname="PETRO", title="Mr", company="SUPERCOMPANY_2", address="Moscow, Old Arbat, 10",
tel_home="595555555", tel_mobile="89009009091", tel_work="495123555", tel_fax="+799999999", email="petrov@mail.ru", email2="petr@mail.ru", email3="petrovich@mail.ru", homepage="www.petrusha.com",
address2="none_2", phone2="none_2", notes="too many funny comments")
contact_new.id = contact.id
app.contact.edit_contact_by_id(contact.id, contact_new)
new_contacts = db.get_contact_list()
assert len(old_contacts) == app.contact.count()
#old_contacts[index] = contact
#assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
gpl-2.0
| 2,127,943,103,430,762,500
| 59.428571
| 221
| 0.665615
| false
| 3.092683
| false
| false
| false
|
AdrianGaudebert/socorro-crashstats
|
crashstats/api/views.py
|
1
|
6278
|
import re
import datetime
from django import http
from django.shortcuts import render
from django.contrib.sites.models import RequestSite
from django.core.urlresolvers import reverse
from django.conf import settings
from django import forms
from ratelimit.decorators import ratelimit
from waffle.decorators import waffle_switch
from crashstats.crashstats import models
from crashstats.crashstats import utils
from .cleaner import Cleaner
class APIWhitelistError(Exception):
pass
class MultipleStringField(forms.TypedMultipleChoiceField):
    """Field that does not validate whether the field values are in self.choices"""
def to_python(self, value):
"""Override checking method"""
return map(self.coerce, value)
def validate(self, value):
"""Nothing to do here"""
if self.required and not value:
raise forms.ValidationError(self.error_messages['required'])
TYPE_MAP = {
basestring: forms.CharField,
list: MultipleStringField,
datetime.date: forms.DateField,
datetime.datetime: forms.DateTimeField,
int: forms.IntegerField,
}
def fancy_init(self, model, *args, **kwargs):
self.model = model
self.__old_init__(*args, **kwargs)
for parameter in model.get_annotated_params():
required = parameter['required']
name = parameter['name']
if parameter['type'] not in TYPE_MAP:
raise NotImplementedError(parameter['type'])
field_class = TYPE_MAP[parameter['type']]
self.fields[name] = field_class(required=required)
class FormWrapperMeta(forms.Form.__metaclass__):
def __new__(cls, name, bases, attrs):
attrs['__old_init__'] = bases[0].__init__
attrs['__init__'] = fancy_init
return super(FormWrapperMeta, cls).__new__(cls, name, bases, attrs)
class FormWrapper(forms.Form):
__metaclass__ = FormWrapperMeta
# Names of models we don't want to serve at all
BLACKLIST = (
# not because it's sensitive but because it's only used for writes
'ReleasesFeatured',
)
@waffle_switch('app_api_all')
@ratelimit(method=['GET', 'POST', 'PUT'], rate='10/m')
@utils.json_view
def model_wrapper(request, model_name):
if model_name in BLACKLIST:
raise http.Http404("Don't know what you're talking about!")
try:
model = getattr(models, model_name)
except AttributeError:
raise http.Http404('no model called `%s`' % model_name)
# XXX use RatelimitMiddleware instead of this in case
# we ratelimit multiple views
if getattr(request, 'limited', False):
# http://tools.ietf.org/html/rfc6585#page-3
return http.HttpResponse('Too Many Requests', status=429)
instance = model()
if request.method == 'POST':
function = instance.post
else:
function = instance.get
form = FormWrapper(model, request.REQUEST)
if form.is_valid():
try:
result = function(**form.cleaned_data)
except models.BadStatusCodeError as e:
try:
error_code = int(str(e).split(':')[0].strip())
if error_code >= 400 and error_code < 500:
return http.HttpResponse(e, status=error_code)
if error_code >= 500:
return http.HttpResponse(e, status=424)
except Exception:
# that means we can't assume that the BadStatusCodeError
# has a typically formatted error message
pass
raise
except ValueError as e:
            if 'No JSON object could be decoded' in str(e):
return http.HttpResponse(
'Not a valid JSON response',
status=400
)
raise
# it being set to None means it's been deliberately disabled
if getattr(model, 'API_WHITELIST', -1) == -1:
raise APIWhitelistError('No API_WHITELIST defined for %r' % model)
clean_scrub = getattr(model, 'API_CLEAN_SCRUB', None)
if model.API_WHITELIST:
cleaner = Cleaner(
model.API_WHITELIST,
clean_scrub=clean_scrub,
# if True, uses warnings.warn() to show fields not whitelisted
debug=settings.DEBUG,
)
cleaner.start(result)
else:
result = {'errors': dict(form.errors)}
return result
@waffle_switch('app_api_all')
def documentation(request):
endpoints = [
]
for name in dir(models):
model = getattr(models, name)
try:
if not issubclass(model, models.SocorroMiddleware):
continue
if model is models.SocorroMiddleware:
continue
if model.__name__ in BLACKLIST:
continue
except TypeError:
# most likely a builtin class or something
continue
endpoints.append(_describe_model(model))
base_url = (
'%s://%s' % (request.is_secure() and 'https' or 'http',
RequestSite(request).domain)
)
data = {
'endpoints': endpoints,
'base_url': base_url,
}
return render(request, 'api/documentation.html', data)
def _describe_model(model):
params = list(model.get_annotated_params())
params.sort(key=lambda x: (not x['required'], x['name']))
methods = []
if model.get:
methods.append('GET')
    elif model.post:
methods.append('POST')
docstring = model.__doc__
if docstring:
docstring = dedent_left(docstring.rstrip(), 4)
data = {
'name': model.__name__,
'url': reverse('api:model_wrapper', args=(model.__name__,)),
'parameters': params,
'defaults': getattr(model, 'defaults', {}),
'methods': methods,
'docstring': docstring,
}
return data
def dedent_left(text, spaces):
"""
If the string is:
' One\n'
' Two\n'
'Three\n'
And you set @spaces=2
Then return this:
' One\n'
' Two\n'
'Three\n'
"""
lines = []
regex = re.compile('^\s{%s}' % spaces)
for line in text.splitlines():
line = regex.sub('', line)
lines.append(line)
return '\n'.join(lines)
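# Illustrative sketch of the request flow (not part of the original module;
# the model name and query parameters are placeholders): a GET to
# /api/SomeModel/?product=Firefox is routed to
# model_wrapper(request, 'SomeModel'). FormWrapper then builds one form field
# per annotated parameter of that model (basestring -> CharField,
# list -> MultipleStringField, dates and ints likewise per TYPE_MAP),
# validates request.REQUEST against those fields, and passes the cleaned data
# straight to the model instance's get() or post().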
|
mpl-2.0
| 5,001,584,025,281,107,000
| 28.336449
| 78
| 0.597005
| false
| 4.116721
| false
| false
| false
|
timurbakibayev/trains
|
tutu/views.py
|
1
|
9410
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from tutu.models import Track
from django.views.decorators.csrf import csrf_exempt
from tutu.models import Switch
from tutu import draw
def index(request):
tracks = Track.objects.all()
# draw.something()
tracks_plus = []
for track in tracks:
t = {"id": track.id, "name": track.name, "start_name": track.start_name}
t["length"] = track.length()
tracks_plus.append(t)
context = {"tracks": tracks_plus}
return render(request, 'index.html', context)
def reset(request):
tracks = Track.objects.all()
for i in tracks:
i.simulation_in_progress = False
i.save()
# draw.something()
tracks_plus = []
for track in tracks:
t = {"id": track.id, "name": track.name, "start_name": track.start_name, "length": track.length()}
tracks_plus.append(t)
context = {"tracks": tracks_plus}
return render(request, 'index.html', context)
def new_track(request):
context = {}
if request.method == "POST":
track_name = request.POST["track_name"]
if (track_name is None) or (track_name == ""):
return render(request, "new_tarif.html", context)
t = Track()
t.name = track_name
t.start_name = request.POST["track_start_name"]
t.length = 0
try:
t.length = float(request.POST["track_length"])
except:
pass
t.save()
return HttpResponseRedirect("/")
return render(request, 'new_track.html', context={})
def new_switch(request, track_id):
try:
track = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
context = {"track": track}
if request.method == "POST":
switch_name = request.POST["switch_name"]
if (switch_name is None) or (switch_name == ""):
return render(request, "new_switch.html", context)
s = Switch()
s.track_id = track.id
s.name = switch_name
try:
s.position = float(request.POST["switch_position"])
except:
s.position = 0
try:
s.mins_acc = float(request.POST["switch_acc"])
except:
s.mins_acc = 0
try:
s.mins_main_fw = float(request.POST["switch_main_fw"])
except:
s.mins_main_fw = 0
try:
s.mins_main_bk = float(request.POST["switch_main_bk"])
except:
s.mins_main_bk = 0
try:
s.mins_station = float(request.POST["switch_station"])
except:
s.mins_station = 0
try:
s.mins_brk = float(request.POST["switch_brk"])
except:
s.mins_brk = 0
try:
s.number_of_tracks = float(request.POST["switch_number_of_tracks"])
except:
pass
try:
s.trains_fit = float(request.POST["switch_trains_fit"])
except:
pass
s.save()
return HttpResponseRedirect("/track/" + track_id)
return render(request, 'new_switch.html', context=context)
@csrf_exempt
def edit_track(request, track_id):
try:
t = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
context = {"track": t}
if request.method == "POST":
track_name = request.POST["track_name"]
if (track_name is None) or (track_name == ""):
pass
else:
t.name = track_name
t.start_name = request.POST["track_start_name"]
t.length = 0
try:
t.length = float(request.POST["track_length"])
except:
pass
try:
t.number_of_passenger_trains = float(request.POST["number_of_passenger_trains"])
except:
pass
try:
t.number_of_cargo_trains = float(request.POST["number_of_cargo_trains"])
except:
pass
try:
t.density_netto = float(request.POST["density_netto"])
except:
pass
t.save()
return HttpResponseRedirect("/")
return render(request, "edit_track.html", context)
@csrf_exempt
def delete_track(request, track_id):
try:
t = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
context = {"track": t}
if request.method == "POST":
t.delete()
return HttpResponseRedirect("/")
return render(request, "delete_track.html", context)
@csrf_exempt
def delete_switch(request, track_id, switch_id):
try:
t = Track.objects.get(pk=int(track_id))
s = Switch.objects.get(pk=int(switch_id))
except:
return render(request, "error.html")
context = {"switch": s, "track": t}
if request.method == "POST":
s.delete()
return HttpResponseRedirect("/track/" + track_id)
return render(request, "delete_switch.html", context)
@csrf_exempt
def edit_switch(request, track_id, switch_id):
try:
t = Track.objects.get(pk=int(track_id))
s = Switch.objects.get(pk=int(switch_id))
except:
return render(request, "error.html")
context = {"switch": s, "track": t}
if request.method == "POST":
switch_name = request.POST["switch_name"]
if (switch_name is None) or (switch_name == ""):
pass
else:
s.name = switch_name
try:
s.position = float(request.POST["switch_position"])
except:
pass
try:
s.mins_acc = float(request.POST["switch_acc"])
except:
s.mins_acc = 0
try:
s.mins_main_fw = float(request.POST["switch_main_fw"])
except:
s.mins_main_fw = 0
try:
s.mins_main_bk = float(request.POST["switch_main_bk"])
except:
s.mins_main_bk = 0
try:
s.mins_brk = float(request.POST["switch_brk"])
except:
s.mins_brk = 0
try:
s.mins_station = float(request.POST["switch_station"])
except:
s.mins_station = 0
try:
s.number_of_tracks = float(request.POST["switch_number_of_tracks"])
except:
pass
try:
s.trains_fit = float(request.POST["switch_trains_fit"])
except:
pass
s.save()
return HttpResponseRedirect("/track/" + track_id)
return render(request, "edit_switch.html", context)
def round(a):
return "%.3f" % a
@csrf_exempt
def show_track(request, track_id):
try:
track = Track.objects.get(pk=int(track_id))
switches_orig = Switch.objects.filter(track_id=track_id)
except:
return render(request, "error.html")
switches = []
prev_pos = 0
worst = 1000
for i, switch in enumerate(switches_orig):
new_sw = {"switch": switch, "sum":
switch.mins_acc + switch.mins_brk + switch.mins_main_fw +
switch.mins_main_bk + switch.mins_station}
single = int((60 * 23) * 0.96 / new_sw["sum"])
double = single * int(new_sw["sum"] / 8)
new_sw["capacity"] = (double, single)[switch.number_of_tracks < 2]
new_sw["number"] = i + 2
length = switch.position - prev_pos
new_sw["length"] = length
time = new_sw["sum"] / 60
new_sw["speed"] = int(float(length) / time * 10) / 10
new_sw["nalich"] = new_sw["capacity"] - \
(((track.number_of_cargo_trains + track.number_of_passenger_trains) / 0.85) -
(track.number_of_cargo_trains + track.number_of_passenger_trains)) - track.number_of_passenger_trains
new_sw["potreb"] = (track.number_of_cargo_trains + track.number_of_passenger_trains) / 0.85
new_sw["reserve_pairs"] = new_sw["nalich"] - new_sw["potreb"]
new_sw["train_weight"] = (track.density_netto * 1000000) / track.number_of_cargo_trains / 365
new_sw["reserve_cargo"] = new_sw["train_weight"] * new_sw["reserve_pairs"] * 365 / 1000000
new_sw["reserve_cargo_f"] = new_sw["reserve_cargo"]
if new_sw["reserve_cargo_f"] < worst:
worst = new_sw["reserve_cargo_f"]
new_sw["positive"] = new_sw["reserve_cargo"] > 0
new_sw["nalich"] = round(new_sw["nalich"])
new_sw["potreb"] = round(new_sw["potreb"])
new_sw["reserve_pairs"] = round(new_sw["reserve_pairs"])
new_sw["reserve_cargo"] = round(new_sw["reserve_cargo"])
new_sw["train_weight"] = round(new_sw["train_weight"])
switches.append(new_sw)
prev_pos = switch.position
switches_last_stage = []
for i in switches:
i["worst"] = (i["reserve_cargo_f"] == worst)
switches_last_stage.append(i)
context = {"track": track, "switches": switches_last_stage}
return render(request, "show_track.html", context)
def thumbnail_track(request, track_id):
try:
track = Track.objects.get(pk=int(track_id))
except:
return render(request, "error.html")
return draw.draw_track(track)
|
gpl-3.0
| 1,335,998,830,436,119,000
| 32.133803
| 129
| 0.547928
| false
| 3.616449
| false
| false
| false
|
cloudera/hue
|
desktop/core/src/desktop/lib/raz/raz_client.py
|
1
|
7363
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import socket
import sys
import uuid
import requests
import requests_kerberos
from datetime import datetime, timedelta
from desktop.conf import AUTH_USERNAME
from desktop.lib.exceptions_renderable import PopupException
import desktop.lib.raz.signer_protos_pb2 as raz_signer
if sys.version_info[0] > 2:
from urllib.parse import urlparse as lib_urlparse
else:
from urlparse import urlparse as lib_urlparse
LOG = logging.getLogger(__name__)
class RazToken:
def __init__(self, raz_url, auth_handler):
self.raz_url = raz_url
self.auth_handler = auth_handler
self.init_time = datetime.now()
self.raz_token = None
o = lib_urlparse(self.raz_url)
if not o.netloc:
raise PopupException('Could not parse the host of the Raz server %s' % self.raz_url)
self.raz_hostname, self.raz_port = o.netloc.split(':')
self.scheme = o.scheme
def get_delegation_token(self, user):
ip_address = socket.gethostbyname(self.raz_hostname)
GET_PARAMS = {"op": "GETDELEGATIONTOKEN", "service": "%s:%s" % (ip_address, self.raz_port), "renewer": AUTH_USERNAME.get(), "doAs": user}
r = requests.get(self.raz_url, GET_PARAMS, auth=self.auth_handler, verify=False)
self.raz_token = json.loads(r.text)['Token']['urlString']
return self.raz_token
def renew_delegation_token(self, user):
if self.raz_token is None:
self.raz_token = self.get_delegation_token(user=user)
if (self.init_time - timedelta(hours=8)) > datetime.now():
r = requests.put("%s?op=RENEWDELEGATIONTOKEN&token=%s"%(self.raz_url, self.raz_token), auth=self.auth_handler, verify=False)
return self.raz_token
class RazClient(object):
def __init__(self, raz_url, raz_token, username, service='s3', service_name='cm_s3', cluster_name='myCluster'):
self.raz_url = raz_url.strip('/')
self.raz_token = raz_token
self.username = username
self.service = service
if self.service == 'adls':
self.service_params = {
'endpoint_prefix': 'adls',
'service_name': 'adls',
'serviceType': 'adls'
}
else:
self.service_params = {
'endpoint_prefix': 's3',
'service_name': 's3',
'serviceType': 's3'
}
self.service_name = service_name
self.cluster_name = cluster_name
self.requestid = str(uuid.uuid4())
def check_access(self, method, url, params=None, headers=None):
LOG.debug("Check access: method {%s}, url {%s}, params {%s}, headers {%s}" % (method, url, params, headers))
path = lib_urlparse(url)
url_params = dict([p.split('=') if '=' in p else (p, '') for p in path.query.split('&') if path.query]) # ?delete, ?prefix=/hue
params = params if params is not None else {}
headers = headers if headers is not None else {}
allparams = [raz_signer.StringListStringMapProto(key=key, value=[val]) for key, val in url_params.items()]
allparams.extend([raz_signer.StringListStringMapProto(key=key, value=[val]) for key, val in params.items()])
headers = [raz_signer.StringStringMapProto(key=key, value=val) for key, val in headers.items()]
endpoint = "%s://%s" % (path.scheme, path.netloc)
resource_path = path.path.lstrip("/")
LOG.debug(
"Preparing sign request with http_method: {%s}, headers: {%s}, parameters: {%s}, endpoint: {%s}, resource_path: {%s}" %
(method, headers, allparams, endpoint, resource_path)
)
raz_req = raz_signer.SignRequestProto(
endpoint_prefix=self.service_params['endpoint_prefix'],
service_name=self.service_params['service_name'],
endpoint=endpoint,
http_method=method,
headers=headers,
parameters=allparams,
resource_path=resource_path,
time_offset=0
)
raz_req_serialized = raz_req.SerializeToString()
signed_request = base64.b64encode(raz_req_serialized)
request_data = {
"requestId": self.requestid,
"serviceType": self.service_params['serviceType'],
"serviceName": self.service_name,
"user": self.username,
"userGroups": [],
"accessTime": "",
"clientIpAddress": "",
"clientType": "",
"clusterName": self.cluster_name,
"clusterType": "",
"sessionId": "",
"context": {
"S3_SIGN_REQUEST": signed_request
}
}
headers = {"Content-Type":"application/json", "Accept-Encoding":"gzip,deflate"}
raz_url = "%s/api/authz/s3/access?delegation=%s" % (self.raz_url, self.raz_token)
LOG.debug('Raz url: %s' % raz_url)
LOG.debug("Sending access check headers: {%s} request_data: {%s}" % (headers, request_data))
raz_req = requests.post(raz_url, headers=headers, json=request_data, verify=False)
signed_response_result = None
signed_response = None
if raz_req.ok:
result = raz_req.json().get("operResult", False) and raz_req.json()["operResult"]["result"]
if result == "NOT_DETERMINED":
msg = "Failure %s" % raz_req.json()
LOG.error(msg)
raise PopupException(msg)
if result != "ALLOWED":
msg = "Permission missing %s" % raz_req.json()
raise PopupException(msg, error_code=401)
if result == "ALLOWED":
LOG.debug('Received allowed response %s' % raz_req.json())
signed_response_data = raz_req.json()["operResult"]["additionalInfo"]
if self.service == 'adls':
LOG.debug("Received SAS %s" % signed_response_data["ADLS_DSAS"])
return {'token': signed_response_data["ADLS_DSAS"]}
else:
signed_response_result = signed_response_data["S3_SIGN_RESPONSE"]
if signed_response_result:
raz_response_proto = raz_signer.SignResponseProto()
signed_response = raz_response_proto.FromString(base64.b64decode(signed_response_result))
LOG.debug("Received signed Response %s" % signed_response)
# Signed headers "only"
if signed_response:
return dict([(i.key, i.value) for i in signed_response.signer_generated_headers])
def get_raz_client(raz_url, username, auth='kerberos', service='s3', service_name='cm_s3', cluster_name='myCluster'):
if auth == 'kerberos' or True: # True until JWT option
auth_handler = requests_kerberos.HTTPKerberosAuth(mutual_authentication=requests_kerberos.OPTIONAL)
raz = RazToken(raz_url, auth_handler)
raz_token = raz.get_delegation_token(user=username)
return RazClient(raz_url, raz_token, username, service=service, service_name=service_name, cluster_name=cluster_name)
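# Hedged usage sketch (illustrative only): the RAZ endpoint, bucket and principal
# below are hypothetical placeholders, and a valid Kerberos ticket is assumed.
if __name__ == '__main__':
    example_client = get_raz_client(
        'https://raz.example.com:6082',  # assumed RAZ REST endpoint
        'hue',                           # assumed Kerberos principal
        service='s3')
    # For S3 this returns the signer-generated headers to attach to the request;
    # for ADLS it returns {'token': <SAS token>} instead.
    print(example_client.check_access(
        'GET', 'https://warehouse-bucket.s3.amazonaws.com/data/part-0.csv'))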
|
apache-2.0
| -8,960,432,691,178,515,000
| 37.752632
| 141
| 0.665082
| false
| 3.431034
| false
| false
| false
|
druce/safewithdrawal_tensorflow
|
run_safewithdrawal_linearalloc.py
|
1
|
5015
|
#!/home/ubuntu/anaconda2/bin/python
# MIT License
# Copyright (c) 2016 Druce Vertes drucev@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import pickle
from time import strftime
import sys
import os
import numpy as np
import pandas as pd
fileprefix = "best08"
bestfile = "%s.pickle" % (fileprefix)
max_unimproved_steps = 200
gamma = 8.0
#Objective: 8315.064674
# const_spend = 2.321413
# var_spend_pcts = pd.Series([0.021015604501457775, 0.021761051829444631, 0.022312098346990435, 0.022785170076322969, 0.023285983064484993, 0.023897465220170052, 0.024584673876801872, 0.02556106756991109, 0.026657864441448173, 0.028031748201320435, 0.029551066581589736, 0.031201618742953394, 0.032978432086452118, 0.034516254916809298, 0.036027857701909138, 0.037763940480250287, 0.03992129323858909, 0.042635694985269881, 0.045638329119485004, 0.049069352739346678, 0.052383268763417638, 0.056951126091794861, 0.063470193195596478, 0.070974811737827201, 0.082180160879307573, 0.098169174319082841, 0.1205906552280696, 0.15769373320000857, 0.23376809386762137, 0.51005368542831198])
# stock_allocations = pd.Series([0.82085705309182722, 0.8208564375532369, 0.80809230790394848, 0.80474242187125467, 0.80321803760810162, 0.80214299804721623, 0.80178790048600157, 0.7839705620587375, 0.77739050153152156, 0.77699016168709201, 0.77517208520407443, 0.76706047015389667, 0.76676220145412832, 0.76576837231963391, 0.76098570290996814, 0.74113354059879621, 0.73793102049167558, 0.73650905089885166, 0.72707794679494286, 0.72393066589418387, 0.7210099158662584, 0.71370848573117784, 0.7038219623712294, 0.68848317679023907, 0.61956979054659567, 0.61331107236876559, 0.59738860596743892, 0.59391944015033249, 0.59164222259062249, 0.53441829378265526])
# startval = 100
# years_retired = 30
# const_spend_pct = .02
# const_spend = startval * const_spend_pct
# var_spend_pcts = pd.Series(np.ones(years_retired) * 0.02)
# var_spend_pcts[-1]=1.0
# stock_allocations = pd.Series(np.ones(years_retired) * 0.65)
startval = 100
years_retired = 30
# 1.5% constant spending
const_spend_pct = 0.015
const_spend = startval * const_spend_pct
# var spending a function of years left
var_spend_pcts = pd.Series([ 1.0 / (years_retired - ix) - 0.01 for ix in range(years_retired)])
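# Worked example of the schedule above: with years_retired = 30 the first year draws
# 1.0/30 - 0.01 (about 0.0233, i.e. roughly 2.3% of the remaining portfolio) and the
# last year draws 1.0/1 - 0.01 = 0.99, nearly everything that is left.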
#Objective: 4.390120
const_spend = 1.494627
var_spend_pcts = pd.Series([0.026510001745962072, 0.027818217278890313, 0.028605532721252741, 0.028943515850045034, 0.029650425909075188, 0.030749598116744672, 0.031600262214435557, 0.032732508555050478, 0.034385383513833988, 0.036029103781616605, 0.03767831801390633, 0.039574695022857952, 0.04181956456859641, 0.043933810727326675, 0.046368133990928623, 0.049770890997431427, 0.053761145655487272, 0.058701327619542831, 0.064816641182696089, 0.072273502883599586, 0.081202909789127517, 0.0923868781223499, 0.10647268828242094, 0.1245451336773581, 0.14860396109790044, 0.18220604185509723, 0.23242068590691847, 0.31581923728426176, 0.48186646557743196, 0.98999999999999999])
start_alloc = 0.8
end_alloc = 0.5
# save starting scenario
pickle_list = [const_spend, var_spend_pcts, start_alloc, end_alloc]
pickle.dump( pickle_list, open( bestfile, "wb" ) )
# start with a learning rate that learns quickly, gradually reduce it
# run once with 50 or 100 steps to see which learning rates are effective
# then plug in that solution and run each til no improvement for a large number of steps
for learning_rate in [
#0.00001, # too coarse, may be NaN
0.00003, # too coarse, may be NaN
0.000001, # coarse
0.000003, # coarse
0.0000001, # workhorse
0.00000003,
0.00000001, # diminishing returns
#0.000000003,
#0.000000001, #superfine
#0.0000000003,
#0.0000000001,
#0.00000000001,
]:
cmdstr = './safewithdrawal_linearalloc.py %.12f %d %f %s' % (learning_rate, max_unimproved_steps, gamma, fileprefix)
print(cmdstr)
os.system(cmdstr)
|
mit
| -5,372,145,557,114,190,000
| 48.653465
| 683
| 0.771087
| false
| 2.694788
| false
| false
| false
|
cemarchi/biosphere
|
Src/BioDataManagement/DataAccess/Entities/MessengerRnaSample.py
|
1
|
1434
|
from typing import List, Dict
from Src.BioDataManagement.DataAccess.Entities.BiologicalSampleBase import BiologicalSampleBase
from Src.BioDataManagement.DataAccess.Entities.GeneExpressionLevel import GeneExpressionLevel
class MessengerRnaSample(BiologicalSampleBase):
"""description of class"""
def __init__(self, **kargs):
"""
:param kargs:
"""
super().__init__(**kargs)
self.__exp_levels = kargs.get('exp_levels')
if self.__exp_levels:
self.__exp_levels = list(set([GeneExpressionLevel(**exp) for exp in self.__exp_levels]))
def __hash__(self):
return hash(self.patient_id)
def __eq__(self, other):
return isinstance(other, MessengerRnaSample) and \
self.patient_id == other.patient_id
@property
def exp_levels(self)-> List[GeneExpressionLevel]:
"""description of property"""
return self.__exp_levels[:]
@exp_levels.setter
def exp_levels(self, value: List):
"""
:param value:
:return:
"""
self.__exp_levels = list(set(value))
def validate(self):
super().validate()
for g in self.__exp_levels:
g.validate()
def as_dict(self)-> Dict:
sample_dict = super().as_dict()
sample_dict.update({'exp_levels': list(map(lambda exp: exp.as_dict(), self.__exp_levels))})
return sample_dict
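# Hedged usage sketch (illustrative only): the keyword arguments accepted by
# BiologicalSampleBase and the keys expected by GeneExpressionLevel are defined
# elsewhere, so the field names below are assumptions used purely for illustration.
#
# sample = MessengerRnaSample(patient_id=42,
#                             exp_levels=[{'gene_symbol': 'TP53', 'expression_level': 7.1}])
# sample.validate()
# print(sample.as_dict())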
|
bsd-3-clause
| 579,169,642,502,552,600
| 26.576923
| 100
| 0.603208
| false
| 3.865229
| false
| false
| false
|
atumanov/ray
|
python/ray/rllib/optimizers/replay_buffer.py
|
1
|
8536
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import sys
from ray.rllib.optimizers.segment_tree import SumSegmentTree, MinSegmentTree
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.compression import unpack_if_needed
from ray.rllib.utils.window_stat import WindowStat
@DeveloperAPI
class ReplayBuffer(object):
@DeveloperAPI
def __init__(self, size):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
self._hit_count = np.zeros(size)
self._eviction_started = False
self._num_added = 0
self._num_sampled = 0
self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
self._est_size_bytes = 0
def __len__(self):
return len(self._storage)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
data = (obs_t, action, reward, obs_tp1, done)
self._num_added += 1
if self._next_idx >= len(self._storage):
self._storage.append(data)
self._est_size_bytes += sum(sys.getsizeof(d) for d in data)
else:
self._storage[self._next_idx] = data
if self._next_idx + 1 >= self._maxsize:
self._eviction_started = True
self._next_idx = (self._next_idx + 1) % self._maxsize
if self._eviction_started:
self._evicted_hit_stats.push(self._hit_count[self._next_idx])
self._hit_count[self._next_idx] = 0
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(unpack_if_needed(obs_tp1), copy=False))
dones.append(done)
self._hit_count[i] += 1
return (np.array(obses_t), np.array(actions), np.array(rewards),
np.array(obses_tp1), np.array(dones))
@DeveloperAPI
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [
random.randint(0,
len(self._storage) - 1) for _ in range(batch_size)
]
self._num_sampled += batch_size
return self._encode_sample(idxes)
@DeveloperAPI
def stats(self, debug=False):
data = {
"added_count": self._num_added,
"sampled_count": self._num_sampled,
"est_size_bytes": self._est_size_bytes,
"num_entries": len(self._storage),
}
if debug:
data.update(self._evicted_hit_stats.stats())
return data
@DeveloperAPI
class PrioritizedReplayBuffer(ReplayBuffer):
@DeveloperAPI
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
self._prio_change_stats = WindowStat("reprio", 1000)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer, self).add(obs_t, action, reward,
obs_tp1, done, weight)
if weight is None:
weight = self._max_priority
self._it_sum[idx] = weight**self._alpha
self._it_min[idx] = weight**self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage))
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
@DeveloperAPI
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
self._num_sampled += batch_size
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage))**(-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage))**(-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
@DeveloperAPI
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
delta = priority**self._alpha - self._it_sum[idx]
self._prio_change_stats.push(delta)
self._it_sum[idx] = priority**self._alpha
self._it_min[idx] = priority**self._alpha
self._max_priority = max(self._max_priority, priority)
@DeveloperAPI
def stats(self, debug=False):
parent = ReplayBuffer.stats(self, debug)
if debug:
parent.update(self._prio_change_stats.stats())
return parent
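# Hedged usage sketch (illustrative only) of the prioritized buffer defined above;
# the observation/action/reward values are arbitrary toy placeholders.
if __name__ == "__main__":
    buf = PrioritizedReplayBuffer(size=8, alpha=0.6)
    for step in range(4):
        # obs_t, action, reward, obs_tp1, done, weight (None -> use current max priority)
        buf.add(np.array([step]), 0, 1.0, np.array([step + 1]), False, None)
    obs, act, rew, obs_tp1, done, weights, idxes = buf.sample(2, beta=0.4)
    # After computing new TD errors, feed the priorities back for the sampled indexes.
    buf.update_priorities(idxes, [0.5] * len(idxes))
    print(buf.stats())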
|
apache-2.0
| -6,601,539,590,877,492,000
| 32.73913
| 77
| 0.575679
| false
| 3.950023
| false
| false
| false
|
shreyasp/erpnext
|
erpnext/controllers/accounts_controller.py
|
1
|
28222
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.utils import today, flt, cint, fmt_money, formatdate, getdate
from erpnext.setup.utils import get_company_currency, get_exchange_rate
from erpnext.accounts.utils import get_fiscal_years, validate_fiscal_year, get_account_currency
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.controllers.recurring_document import convert_to_recurring, validate_recurring_document
from erpnext.controllers.sales_and_purchase_return import validate_return
from erpnext.accounts.party import get_party_account_currency, validate_party_frozen_disabled
from erpnext.exceptions import InvalidCurrency
force_item_fields = ("item_group", "barcode", "brand", "stock_uom")
class AccountsController(TransactionBase):
def __init__(self, arg1, arg2=None):
super(AccountsController, self).__init__(arg1, arg2)
@property
def company_currency(self):
if not hasattr(self, "__company_currency"):
self.__company_currency = get_company_currency(self.company)
return self.__company_currency
def onload(self):
self.get("__onload").make_payment_via_journal_entry = frappe.db.get_single_value('Accounts Settings', 'make_payment_via_journal_entry')
def validate(self):
if self.get("_action") and self._action != "update_after_submit":
self.set_missing_values(for_validate=True)
self.validate_date_with_fiscal_year()
if self.meta.get_field("currency"):
self.calculate_taxes_and_totals()
if not self.meta.get_field("is_return") or not self.is_return:
self.validate_value("base_grand_total", ">=", 0)
validate_return(self)
self.set_total_in_words()
if self.doctype in ("Sales Invoice", "Purchase Invoice") and not self.is_return:
self.validate_due_date()
self.validate_advance_entries()
if self.meta.get_field("taxes_and_charges"):
self.validate_enabled_taxes_and_charges()
self.validate_party()
self.validate_currency()
if self.meta.get_field("is_recurring"):
if self.amended_from and self.recurring_id:
self.recurring_id = None
if not self.get("__islocal"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
if self.doctype == 'Purchase Invoice':
self.validate_paid_amount()
def validate_paid_amount(self):
if hasattr(self, "is_pos") or hasattr(self, "is_paid"):
is_paid = self.get("is_pos") or self.get("is_paid")
if cint(is_paid) == 1:
if flt(self.paid_amount) == 0 and flt(self.outstanding_amount) > 0:
if self.cash_bank_account:
self.paid_amount = flt(flt(self.grand_total) - flt(self.write_off_amount),
self.precision("paid_amount"))
self.base_paid_amount = flt(self.paid_amount * self.conversion_rate, self.precision("base_paid_amount"))
else:
# show message that the amount is not paid
self.paid_amount = 0
frappe.throw(_("Note: Payment Entry will not be created since 'Cash or Bank Account' was not specified"))
else:
frappe.db.set(self,'paid_amount',0)
def on_update_after_submit(self):
if self.meta.get_field("is_recurring"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
def set_missing_values(self, for_validate=False):
if frappe.flags.in_test:
for fieldname in ["posting_date","transaction_date"]:
if self.meta.get_field(fieldname) and not self.get(fieldname):
self.set(fieldname, today())
break
def calculate_taxes_and_totals(self):
from erpnext.controllers.taxes_and_totals import calculate_taxes_and_totals
calculate_taxes_and_totals(self)
if self.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
self.calculate_commission()
self.calculate_contribution()
def validate_date_with_fiscal_year(self):
if self.meta.get_field("fiscal_year") :
date_field = ""
if self.meta.get_field("posting_date"):
date_field = "posting_date"
elif self.meta.get_field("transaction_date"):
date_field = "transaction_date"
if date_field and self.get(date_field):
validate_fiscal_year(self.get(date_field), self.fiscal_year,
self.meta.get_label(date_field), self)
def validate_due_date(self):
from erpnext.accounts.party import validate_due_date
if self.doctype == "Sales Invoice":
if not self.due_date:
frappe.throw(_("Due Date is mandatory"))
validate_due_date(self.posting_date, self.due_date, "Customer", self.customer, self.company)
elif self.doctype == "Purchase Invoice":
validate_due_date(self.posting_date, self.due_date, "Supplier", self.supplier, self.company)
def set_price_list_currency(self, buying_or_selling):
if self.meta.get_field("currency"):
# price list part
fieldname = "selling_price_list" if buying_or_selling.lower() == "selling" \
else "buying_price_list"
if self.meta.get_field(fieldname) and self.get(fieldname):
self.price_list_currency = frappe.db.get_value("Price List",
self.get(fieldname), "currency")
if self.price_list_currency == self.company_currency:
self.plc_conversion_rate = 1.0
elif not self.plc_conversion_rate:
self.plc_conversion_rate = get_exchange_rate(
self.price_list_currency, self.company_currency)
# currency
if not self.currency:
self.currency = self.price_list_currency
self.conversion_rate = self.plc_conversion_rate
elif self.currency == self.company_currency:
self.conversion_rate = 1.0
elif not self.conversion_rate:
self.conversion_rate = get_exchange_rate(self.currency,
self.company_currency)
def set_missing_item_details(self, for_validate=False):
"""set missing item values"""
from erpnext.stock.get_item_details import get_item_details
if hasattr(self, "items"):
parent_dict = {}
for fieldname in self.meta.get_valid_columns():
parent_dict[fieldname] = self.get(fieldname)
if self.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
document_type = "{} Item".format(self.doctype)
parent_dict.update({"document_type": document_type})
for item in self.get("items"):
if item.get("item_code"):
args = parent_dict.copy()
args.update(item.as_dict())
args["doctype"] = self.doctype
args["name"] = self.name
if not args.get("transaction_date"):
args["transaction_date"] = args.get("posting_date")
if self.get("is_subcontracted"):
args["is_subcontracted"] = self.is_subcontracted
ret = get_item_details(args)
for fieldname, value in ret.items():
if item.meta.get_field(fieldname) and value is not None:
if (item.get(fieldname) is None or fieldname in force_item_fields):
item.set(fieldname, value)
elif fieldname == "cost_center" and not item.get("cost_center"):
item.set(fieldname, value)
elif fieldname == "conversion_factor" and not item.get("conversion_factor"):
item.set(fieldname, value)
if ret.get("pricing_rule"):
# if user changed the discount percentage then set user's discount percentage ?
item.set("discount_percentage", ret.get("discount_percentage"))
if ret.get("pricing_rule_for") == "Price":
item.set("pricing_list_rate", ret.get("pricing_list_rate"))
if item.price_list_rate:
item.rate = flt(item.price_list_rate *
(1.0 - (flt(item.discount_percentage) / 100.0)), item.precision("rate"))
if self.doctype == "Purchase Invoice":
self.set_expense_account(for_validate)
def set_taxes(self):
if not self.meta.get_field("taxes"):
return
tax_master_doctype = self.meta.get_field("taxes_and_charges").options
if not self.get("taxes"):
if not self.get("taxes_and_charges"):
# get the default tax master
self.set("taxes_and_charges", frappe.db.get_value(tax_master_doctype, {"is_default": 1}))
self.append_taxes_from_master(tax_master_doctype)
def append_taxes_from_master(self, tax_master_doctype=None):
if self.get("taxes_and_charges"):
if not tax_master_doctype:
tax_master_doctype = self.meta.get_field("taxes_and_charges").options
self.extend("taxes", get_taxes_and_charges(tax_master_doctype, self.get("taxes_and_charges")))
def set_other_charges(self):
self.set("taxes", [])
self.set_taxes()
def validate_enabled_taxes_and_charges(self):
taxes_and_charges_doctype = self.meta.get_options("taxes_and_charges")
if frappe.db.get_value(taxes_and_charges_doctype, self.taxes_and_charges, "disabled"):
frappe.throw(_("{0} '{1}' is disabled").format(taxes_and_charges_doctype, self.taxes_and_charges))
def get_gl_dict(self, args, account_currency=None):
"""this method populates the common properties of a gl entry record"""
fiscal_years = get_fiscal_years(self.posting_date, company=self.company)
if len(fiscal_years) > 1:
frappe.throw(_("Multiple fiscal years exist for the date {0}. Please set company in Fiscal Year").format(formatdate(self.posting_date)))
else:
fiscal_year = fiscal_years[0][0]
gl_dict = frappe._dict({
'company': self.company,
'posting_date': self.posting_date,
'fiscal_year': fiscal_year,
'voucher_type': self.doctype,
'voucher_no': self.name,
'remarks': self.get("remarks"),
'debit': 0,
'credit': 0,
'debit_in_account_currency': 0,
'credit_in_account_currency': 0,
'is_opening': self.get("is_opening") or "No",
'party_type': None,
'party': None,
'project': self.get("project")
})
gl_dict.update(args)
if not account_currency:
account_currency = get_account_currency(gl_dict.account)
if self.doctype not in ["Journal Entry", "Period Closing Voucher", "Payment Entry"]:
self.validate_account_currency(gl_dict.account, account_currency)
set_balance_in_account_currency(gl_dict, account_currency, self.get("conversion_rate"), self.company_currency)
return gl_dict
def validate_account_currency(self, account, account_currency=None):
valid_currency = [self.company_currency]
if self.get("currency") and self.currency != self.company_currency:
valid_currency.append(self.currency)
if account_currency not in valid_currency:
frappe.throw(_("Account {0} is invalid. Account Currency must be {1}")
.format(account, _(" or ").join(valid_currency)))
def clear_unallocated_advances(self, childtype, parentfield):
self.set(parentfield, self.get(parentfield, {"allocated_amount": ["not in", [0, None, ""]]}))
frappe.db.sql("""delete from `tab%s` where parentfield=%s and parent = %s
and allocated_amount = 0""" % (childtype, '%s', '%s'), (parentfield, self.name))
def set_advances(self):
"""Returns list of advances against Account, Party, Reference"""
res = self.get_advance_entries()
self.set("advances", [])
for d in res:
self.append("advances", {
"doctype": self.doctype + " Advance",
"reference_type": d.reference_type,
"reference_name": d.reference_name,
"reference_row": d.reference_row,
"remarks": d.remarks,
"advance_amount": flt(d.amount),
"allocated_amount": flt(d.amount) if d.against_order else 0
})
def get_advance_entries(self, include_unallocated=True):
if self.doctype == "Sales Invoice":
party_account = self.debit_to
party_type = "Customer"
party = self.customer
amount_field = "credit_in_account_currency"
order_field = "sales_order"
order_doctype = "Sales Order"
else:
party_account = self.credit_to
party_type = "Supplier"
party = self.supplier
amount_field = "debit_in_account_currency"
order_field = "purchase_order"
order_doctype = "Purchase Order"
order_list = list(set([d.get(order_field)
for d in self.get("items") if d.get(order_field)]))
journal_entries = get_advance_journal_entries(party_type, party, party_account,
amount_field, order_doctype, order_list, include_unallocated)
payment_entries = get_advance_payment_entries(party_type, party, party_account,
order_doctype, order_list, include_unallocated)
res = journal_entries + payment_entries
return res
def validate_advance_entries(self):
order_field = "sales_order" if self.doctype == "Sales Invoice" else "purchase_order"
order_list = list(set([d.get(order_field)
for d in self.get("items") if d.get(order_field)]))
if not order_list: return
advance_entries = self.get_advance_entries(include_unallocated=False)
if advance_entries:
advance_entries_against_si = [d.reference_name for d in self.get("advances")]
for d in advance_entries:
if not advance_entries_against_si or d.reference_name not in advance_entries_against_si:
frappe.msgprint(_("Payment Entry {0} is linked against Order {1}, check if it should be pulled as advance in this invoice.")
.format(d.reference_name, d.against_order))
def update_against_document_in_jv(self):
"""
Links invoice and advance voucher:
1. cancel advance voucher
2. split into multiple rows if partially adjusted, assign against voucher
3. submit advance voucher
"""
if self.doctype == "Sales Invoice":
party_type = "Customer"
party = self.customer
party_account = self.debit_to
dr_or_cr = "credit_in_account_currency"
else:
party_type = "Supplier"
party = self.supplier
party_account = self.credit_to
dr_or_cr = "debit_in_account_currency"
lst = []
for d in self.get('advances'):
if flt(d.allocated_amount) > 0:
args = frappe._dict({
'voucher_type': d.reference_type,
'voucher_no' : d.reference_name,
'voucher_detail_no' : d.reference_row,
'against_voucher_type' : self.doctype,
'against_voucher' : self.name,
'account' : party_account,
'party_type': party_type,
'party': party,
'is_advance' : 'Yes',
'dr_or_cr' : dr_or_cr,
'unadjusted_amount' : flt(d.advance_amount),
'allocated_amount' : flt(d.allocated_amount),
'exchange_rate': (self.conversion_rate
if self.party_account_currency != self.company_currency else 1),
'grand_total': (self.base_grand_total
if self.party_account_currency==self.company_currency else self.grand_total),
'outstanding_amount': self.outstanding_amount
})
lst.append(args)
if lst:
from erpnext.accounts.utils import reconcile_against_document
reconcile_against_document(lst)
def validate_multiple_billing(self, ref_dt, item_ref_dn, based_on, parentfield):
from erpnext.controllers.status_updater import get_tolerance_for
item_tolerance = {}
global_tolerance = None
for item in self.get("items"):
if item.get(item_ref_dn):
ref_amt = flt(frappe.db.get_value(ref_dt + " Item",
item.get(item_ref_dn), based_on), self.precision(based_on, item))
if not ref_amt:
frappe.msgprint(_("Warning: System will not check overbilling since amount for Item {0} in {1} is zero").format(item.item_code, ref_dt))
else:
already_billed = frappe.db.sql("""select sum(%s) from `tab%s`
where %s=%s and docstatus=1 and parent != %s""" %
(based_on, self.doctype + " Item", item_ref_dn, '%s', '%s'),
(item.get(item_ref_dn), self.name))[0][0]
total_billed_amt = flt(flt(already_billed) + flt(item.get(based_on)),
self.precision(based_on, item))
tolerance, item_tolerance, global_tolerance = get_tolerance_for(item.item_code,
item_tolerance, global_tolerance)
max_allowed_amt = flt(ref_amt * (100 + tolerance) / 100)
if total_billed_amt - max_allowed_amt > 0.01:
frappe.throw(_("Cannot overbill for Item {0} in row {1} more than {2}. To allow overbilling, please set in Stock Settings").format(item.item_code, item.idx, max_allowed_amt))
def get_company_default(self, fieldname):
from erpnext.accounts.utils import get_company_default
return get_company_default(self.company, fieldname)
def get_stock_items(self):
stock_items = []
item_codes = list(set(item.item_code for item in self.get("items")))
if item_codes:
stock_items = [r[0] for r in frappe.db.sql("""select name
from `tabItem` where name in (%s) and is_stock_item=1""" % \
(", ".join((["%s"]*len(item_codes))),), item_codes)]
return stock_items
def set_total_advance_paid(self):
if self.doctype == "Sales Order":
dr_or_cr = "credit_in_account_currency"
party = self.customer
else:
dr_or_cr = "debit_in_account_currency"
party = self.supplier
advance = frappe.db.sql("""
select
account_currency, sum({dr_or_cr}) as amount
from
`tabGL Entry`
where
against_voucher_type = %s and against_voucher = %s and party=%s
and docstatus = 1
""".format(dr_or_cr=dr_or_cr), (self.doctype, self.name, party), as_dict=1)
if advance:
advance = advance[0]
advance_paid = flt(advance.amount, self.precision("advance_paid"))
formatted_advance_paid = fmt_money(advance_paid, precision=self.precision("advance_paid"),
currency=advance.account_currency)
frappe.db.set_value(self.doctype, self.name, "party_account_currency",
advance.account_currency)
if advance.account_currency == self.currency:
order_total = self.grand_total
formatted_order_total = fmt_money(order_total, precision=self.precision("grand_total"),
currency=advance.account_currency)
else:
order_total = self.base_grand_total
formatted_order_total = fmt_money(order_total, precision=self.precision("base_grand_total"),
currency=advance.account_currency)
if self.currency == self.company_currency and advance_paid > order_total:
frappe.throw(_("Total advance ({0}) against Order {1} cannot be greater than the Grand Total ({2})")
.format(formatted_advance_paid, self.name, formatted_order_total))
frappe.db.set_value(self.doctype, self.name, "advance_paid", advance_paid)
@property
def company_abbr(self):
if not hasattr(self, "_abbr"):
self._abbr = frappe.db.get_value("Company", self.company, "abbr")
return self._abbr
def validate_party(self):
party_type, party = self.get_party()
validate_party_frozen_disabled(party_type, party)
def get_party(self):
party_type = None
if self.doctype in ("Opportunity", "Quotation", "Sales Order", "Delivery Note", "Sales Invoice"):
party_type = 'Customer'
elif self.doctype in ("Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"):
party_type = 'Supplier'
elif self.meta.get_field("customer"):
party_type = "Customer"
elif self.meta.get_field("supplier"):
party_type = "Supplier"
party = self.get(party_type.lower()) if party_type else None
return party_type, party
def validate_currency(self):
if self.get("currency"):
party_type, party = self.get_party()
if party_type and party:
party_account_currency = get_party_account_currency(party_type, party, self.company)
if (party_account_currency
and party_account_currency != self.company_currency
and self.currency != party_account_currency):
frappe.throw(_("Accounting Entry for {0}: {1} can only be made in currency: {2}")
.format(party_type, party, party_account_currency), InvalidCurrency)
# Note: not validating with gle account because we don't have the account
# at quotation / sales order level and we shouldn't stop someone
# from creating a sales invoice if sales order is already created
def validate_fixed_asset(self):
for d in self.get("items"):
if d.is_fixed_asset:
if d.qty > 1:
frappe.throw(_("Row #{0}: Qty must be 1, as item is a fixed asset. Please use separate row for multiple qty.").format(d.idx))
if d.meta.get_field("asset"):
if not d.asset:
frappe.throw(_("Row #{0}: Asset is mandatory for fixed asset purchase/sale")
.format(d.idx))
else:
asset = frappe.get_doc("Asset", d.asset)
if asset.company != self.company:
frappe.throw(_("Row #{0}: Asset {1} does not belong to company {2}")
.format(d.idx, d.asset, self.company))
elif asset.item_code != d.item_code:
frappe.throw(_("Row #{0}: Asset {1} does not linked to Item {2}")
.format(d.idx, d.asset, d.item_code))
elif asset.docstatus != 1:
frappe.throw(_("Row #{0}: Asset {1} must be submitted").format(d.idx, d.asset))
elif self.doctype == "Purchase Invoice":
if asset.status != "Submitted":
frappe.throw(_("Row #{0}: Asset {1} is already {2}")
.format(d.idx, d.asset, asset.status))
elif getdate(asset.purchase_date) != getdate(self.posting_date):
frappe.throw(_("Row #{0}: Posting Date must be same as purchase date {1} of asset {2}").format(d.idx, asset.purchase_date, d.asset))
elif asset.is_existing_asset:
frappe.throw(_("Row #{0}: Purchase Invoice cannot be made against an existing asset {1}").format(d.idx, d.asset))
						elif self.doctype == "Sales Invoice" and self.docstatus == 1:
if self.update_stock:
frappe.throw(_("'Update Stock' cannot be checked for fixed asset sale"))
elif asset.status in ("Scrapped", "Cancelled", "Sold"):
frappe.throw(_("Row #{0}: Asset {1} cannot be submitted, it is already {2}")
.format(d.idx, d.asset, asset.status))
@frappe.whitelist()
def get_tax_rate(account_head):
return frappe.db.get_value("Account", account_head, ["tax_rate", "account_name"], as_dict=True)
@frappe.whitelist()
def get_default_taxes_and_charges(master_doctype):
default_tax = frappe.db.get_value(master_doctype, {"is_default": 1})
return get_taxes_and_charges(master_doctype, default_tax)
@frappe.whitelist()
def get_taxes_and_charges(master_doctype, master_name):
if not master_name:
return
from frappe.model import default_fields
tax_master = frappe.get_doc(master_doctype, master_name)
taxes_and_charges = []
for i, tax in enumerate(tax_master.get("taxes")):
tax = tax.as_dict()
for fieldname in default_fields:
if fieldname in tax:
del tax[fieldname]
taxes_and_charges.append(tax)
return taxes_and_charges
def validate_conversion_rate(currency, conversion_rate, conversion_rate_label, company):
"""common validation for currency and price list currency"""
company_currency = frappe.db.get_value("Company", company, "default_currency", cache=True)
if not conversion_rate:
throw(_("{0} is mandatory. Maybe Currency Exchange record is not created for {1} to {2}.").format(
conversion_rate_label, currency, company_currency))
def validate_taxes_and_charges(tax):
if tax.charge_type in ['Actual', 'On Net Total'] and tax.row_id:
frappe.throw(_("Can refer row only if the charge type is 'On Previous Row Amount' or 'Previous Row Total'"))
elif tax.charge_type in ['On Previous Row Amount', 'On Previous Row Total']:
if cint(tax.idx) == 1:
frappe.throw(_("Cannot select charge type as 'On Previous Row Amount' or 'On Previous Row Total' for first row"))
elif not tax.row_id:
frappe.throw(_("Please specify a valid Row ID for row {0} in table {1}".format(tax.idx, _(tax.doctype))))
elif tax.row_id and cint(tax.row_id) >= cint(tax.idx):
frappe.throw(_("Cannot refer row number greater than or equal to current row number for this Charge type"))
if tax.charge_type == "Actual":
tax.rate = None
def validate_inclusive_tax(tax, doc):
def _on_previous_row_error(row_range):
throw(_("To include tax in row {0} in Item rate, taxes in rows {1} must also be included").format(tax.idx,
row_range))
if cint(getattr(tax, "included_in_print_rate", None)):
if tax.charge_type == "Actual":
# inclusive tax cannot be of type Actual
throw(_("Charge of type 'Actual' in row {0} cannot be included in Item Rate").format(tax.idx))
elif tax.charge_type == "On Previous Row Amount" and \
not cint(doc.get("taxes")[cint(tax.row_id) - 1].included_in_print_rate):
# referred row should also be inclusive
_on_previous_row_error(tax.row_id)
elif tax.charge_type == "On Previous Row Total" and \
not all([cint(t.included_in_print_rate) for t in doc.get("taxes")[:cint(tax.row_id) - 1]]):
			# all rows above the referred tax should be inclusive
_on_previous_row_error("1 - %d" % (tax.row_id,))
elif tax.get("category") == "Valuation":
frappe.throw(_("Valuation type charges can not marked as Inclusive"))
def set_balance_in_account_currency(gl_dict, account_currency=None, conversion_rate=None, company_currency=None):
if (not conversion_rate) and (account_currency!=company_currency):
frappe.throw(_("Account: {0} with currency: {1} can not be selected")
.format(gl_dict.account, account_currency))
gl_dict["account_currency"] = company_currency if account_currency==company_currency \
else account_currency
# set debit/credit in account currency if not provided
if flt(gl_dict.debit) and not flt(gl_dict.debit_in_account_currency):
gl_dict.debit_in_account_currency = gl_dict.debit if account_currency==company_currency \
else flt(gl_dict.debit / conversion_rate, 2)
if flt(gl_dict.credit) and not flt(gl_dict.credit_in_account_currency):
gl_dict.credit_in_account_currency = gl_dict.credit if account_currency==company_currency \
else flt(gl_dict.credit / conversion_rate, 2)
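# Worked example for the block above: with gl_dict.debit = 7500 in company currency
# INR, an account currency of USD and conversion_rate = 75, it stores
# debit_in_account_currency = flt(7500 / 75, 2) = 100.0; when the account currency
# equals the company currency the original debit/credit figures are reused as-is.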
def get_advance_journal_entries(party_type, party, party_account, amount_field,
order_doctype, order_list, include_unallocated=True):
dr_or_cr = "credit_in_account_currency" if party_type=="Customer" else "debit_in_account_currency"
conditions = []
if include_unallocated:
conditions.append("ifnull(t2.reference_name, '')=''")
if order_list:
order_condition = ', '.join(['%s'] * len(order_list))
conditions.append(" (t2.reference_type = '{0}' and ifnull(t2.reference_name, '') in ({1}))"\
.format(order_doctype, order_condition))
reference_condition = " and (" + " or ".join(conditions) + ")" if conditions else ""
journal_entries = frappe.db.sql("""
select
"Journal Entry" as reference_type, t1.name as reference_name,
t1.remark as remarks, t2.{0} as amount, t2.name as reference_row,
t2.reference_name as against_order
from
`tabJournal Entry` t1, `tabJournal Entry Account` t2
where
t1.name = t2.parent and t2.account = %s
and t2.party_type = %s and t2.party = %s
and t2.is_advance = 'Yes' and t1.docstatus = 1
and {1} > 0 {2}
order by t1.posting_date""".format(amount_field, dr_or_cr, reference_condition),
[party_account, party_type, party] + order_list, as_dict=1)
return list(journal_entries)
def get_advance_payment_entries(party_type, party, party_account,
order_doctype, order_list=None, include_unallocated=True, against_all_orders=False):
party_account_field = "paid_from" if party_type == "Customer" else "paid_to"
payment_type = "Receive" if party_type == "Customer" else "Pay"
payment_entries_against_order, unallocated_payment_entries = [], []
if order_list or against_all_orders:
if order_list:
reference_condition = " and t2.reference_name in ({0})"\
.format(', '.join(['%s'] * len(order_list)))
else:
reference_condition = ""
order_list = []
payment_entries_against_order = frappe.db.sql("""
select
"Payment Entry" as reference_type, t1.name as reference_name,
t1.remarks, t2.allocated_amount as amount, t2.name as reference_row,
t2.reference_name as against_order, t1.posting_date
from `tabPayment Entry` t1, `tabPayment Entry Reference` t2
where
t1.name = t2.parent and t1.{0} = %s and t1.payment_type = %s
and t1.party_type = %s and t1.party = %s and t1.docstatus = 1
and t2.reference_doctype = %s {1}
""".format(party_account_field, reference_condition),
[party_account, payment_type, party_type, party, order_doctype] + order_list, as_dict=1)
if include_unallocated:
unallocated_payment_entries = frappe.db.sql("""
select "Payment Entry" as reference_type, name as reference_name,
remarks, unallocated_amount as amount
from `tabPayment Entry`
where
{0} = %s and party_type = %s and party = %s and payment_type = %s
and docstatus = 1 and unallocated_amount > 0
""".format(party_account_field), (party_account, party_type, party, payment_type), as_dict=1)
return list(payment_entries_against_order) + list(unallocated_payment_entries)
|
gpl-3.0
| -5,101,145,368,860,695,000
| 38.035961
| 180
| 0.690383
| false
| 3.166386
| false
| false
| false
|
cortext/crawtextV2
|
page.py
|
1
|
4427
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from __future__ import print_function
import datetime
from os.path import exists
import sys
import requests
import json
import re
#from goose import Goose
from pymongo import errors as mongo_err
#from bs4 import BeautifulSoup as bs
#import beautifulsoup4 as bs
from urlparse import urlparse
from random import choice
from tld import get_tld
from scrapper import *
from utils.url import *
from scrapper.article import Article
class Page(object):
'''Page factory'''
def __init__(self, url, step = 0, output_format="defaut"):
self.url = url
#~ if query is not None:
#~ self.match_query = regexify(query)
self.step = step
self.crawl_date = datetime.datetime.now()
self.status = {"msg":None, "status": None, "code": None, "step": "page creation", "url": self.url}
#~ self.error_type = "Ok"
#~ self.status = "Ok"
#~ self.status_code = 0
self.output_format = output_format
def check(self):
self.status["step"] = "check page"
self.status["status"], self.status["code"], self.status["msg"], self.status["url"] = check_url(self.url)
self.url = self.status["url"]
return self.status["status"]
def request(self):
'''Bool request a webpage: return boolean and update raw_html'''
self.status["step"] = "request page"
try:
requests.adapters.DEFAULT_RETRIES = 2
user_agents = [u'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', u'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2', u'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0', u'Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00']
headers = {'User-Agent': choice(user_agents),}
proxies = {"https":"77.120.126.35:3128", "https":'88.165.134.24:3128', }
try:
self.req = requests.get((self.url), headers = headers,allow_redirects=True, proxies=None, timeout=5)
try:
self.raw_html = self.req.text
self.status["status"] = True
self.status["code"] = 200
except Exception, e:
self.status["msg"] = "Request answer was not understood %s" %e
self.status["code"] = 400
self.status["status"] = False
except Exception, e:
self.status["msg"] = "Request answer was not understood %s" %e
self.status["code"] = 400
self.status["status"] = False
except requests.exceptions.MissingSchema:
self.status["msg"] = "Incorrect url - Missing sheme for : %s" %self.url
self.status["code"] = 406
self.status["status"] = False
except Exception as e:
self.status["msg"] = "Another wired exception: %s %s" %(e, e.args)
self.status["code"] = 204
self.status["status"] = False
return self.status["status"]
def control(self):
'''Bool control the result if text/html or if content available'''
self.status["step"] = "control"
#Content-type is not html
try:
self.req.headers['content-type']
if 'text/html' not in self.req.headers['content-type']:
self.status["msg"]="Content type is not TEXT/HTML"
self.status["code"] = 404
self.status["status"] = False
            #Error on resource or on server
elif self.req.status_code in range(400,520):
self.status["code"] = self.req.status_code
self.status["msg"]="Request error on connexion no ressources or not able to reach server"
self.status["status"] = False
#Redirect
#~ elif len(self.req.history) > 0 | self.req.status_code in range(300,320):
#~ self.error_type="Redirection"
#~ self.bad_status()
#~ return False
else:
self.status["status"] = True
#Headers problems
except Exception:
self.status["msg"]="Request headers were not found"
self.status["code"] = 403
self.status["status"] = False
return self.status["status"]
def extract(self, type="article"):
        '''Extract content and info of the webpage via Article; returns the result of Article.get()'''
#self.status["step"] = "extract %s" %type
a = Article(self.url, self.raw_html)
return a.get()
'''
def is_relevant(self, query, content):
if query.match(self,unicode(content)) is False:
self.status = {"url":self.url, "code": -1, "msg": "Not Relevant","status": False, "title": self.title, "content": self.content}
return False
else:
self.status =
return True
'''
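# Hedged usage sketch (illustrative only): the URL is a placeholder, and live network
# access plus the scrapper/tld/requests dependencies imported above are required.
#
# page = Page("http://example.com/some-article.html", step=0)
# if page.check() and page.request() and page.control():
#     article_data = page.extract()
# else:
#     print(page.status)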
|
mit
| 8,397,858,714,817,989,000
| 33.585938
| 445
| 0.658911
| false
| 2.991216
| false
| false
| false
|
galuszkak/djangodash
|
game/migrations/0007_auto__chg_field_game_host.py
|
1
|
5665
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Game.host'
db.alter_column(u'game_game', 'host_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['game.MemoUser']))
def backwards(self, orm):
# Changing field 'Game.host'
db.alter_column(u'game_game', 'host_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['game.MemoUser']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'game.configuration': {
'Meta': {'object_name': 'Configuration'},
'charValue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['game.Game']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intValue': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
u'game.game': {
'Meta': {'object_name': 'Game'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosted_games_set'", 'null': 'True', 'to': u"orm['game.MemoUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'players': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['game.MemoUser']", 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'WA'", 'max_length': '2'})
},
u'game.memouser': {
'Meta': {'object_name': 'MemoUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'game.statistic': {
'Meta': {'object_name': 'Statistic'},
'charValue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['game.Game']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intValue': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['game.MemoUser']", 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
}
}
complete_apps = ['game']
|
gpl-2.0
| -7,510,298,253,660,137,000
| 66.452381
| 187
| 0.55481
| false
| 3.60369
| false
| false
| false
|
wraiden/spacewalk
|
backend/satellite_tools/repo_plugins/__init__.py
|
1
|
12733
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
import re
import time
from Queue import Queue, Empty
from threading import Thread, Lock
try:
# python 2
import urlparse
except ImportError:
# python3
import urllib.parse as urlparse # pylint: disable=F0401,E0611
from urllib import quote
import pycurl
import rpm
from urlgrabber.grabber import URLGrabberOptions, PyCurlFileObject, URLGrabError
from spacewalk.common import rhn_pkg
from spacewalk.common.checksum import getFileChecksum
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server import rhnPackageUpload
from spacewalk.satellite_tools.syncLib import log, log2
class ProgressBarLogger:
def __init__(self, msg, total):
self.msg = msg
self.total = total
self.status = 0
self.lock = Lock()
def log(self, *_):
self.lock.acquire()
self.status += 1
self._print_progress_bar(self.status, self.total, prefix=self.msg, bar_length=50)
self.lock.release()
# from here http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# Print iterations progress
@staticmethod
def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=2, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
filled_length = int(round(bar_length * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar_char = '#' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar_char, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
class TextLogger:
def __init__(self, _, total):
self.total = total
self.status = 0
self.lock = Lock()
def log(self, success, param):
self.lock.acquire()
self.status += 1
if success:
log(0, "%d/%d : %s" % (self.status, self.total, str(param)))
else:
log2(0, 0, "%d/%d : %s (failed)" % (self.status, self.total, str(param)), stream=sys.stderr)
self.lock.release()
# Older versions of urlgrabber don't allow to set proxy parameters separately
# Simplified version from yumRepository class
def get_proxies(proxy, user, password):
if not proxy:
return {}
proxy_string = proxy
if user:
auth = quote(user)
if password:
auth += ':' + quote(password)
proto, rest = re.match(r'(\w+://)(.+)', proxy_string).groups()
proxy_string = "%s%s@%s" % (proto, auth, rest)
proxies = {'http': proxy_string, 'https': proxy_string, 'ftp': proxy_string}
return proxies
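# Worked example for get_proxies() above:
# get_proxies('http://proxy.example.org:3128', 'alice', 's3cret') returns
# {'http': 'http://alice:s3cret@proxy.example.org:3128', 'https': <same>, 'ftp': <same>}
# because the URL-quoted credentials are spliced in between the scheme and the host.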
class PyCurlFileObjectThread(PyCurlFileObject):
def __init__(self, url, filename, opts, curl_cache):
self.curl_cache = curl_cache
PyCurlFileObject.__init__(self, url, filename, opts)
def _do_open(self):
self.curl_obj = self.curl_cache
self.curl_obj.reset()
self._set_opts()
self._do_grab()
return self.fo
class FailedDownloadError(Exception):
pass
class DownloadThread(Thread):
def __init__(self, parent):
Thread.__init__(self)
self.parent = parent
# pylint: disable=E1101
self.curl = pycurl.Curl()
self.mirror = 0
@staticmethod
def __is_file_done(local_path=None, file_obj=None, checksum_type=None, checksum=None):
if checksum_type and checksum:
if local_path and os.path.isfile(local_path):
return getFileChecksum(checksum_type, filename=local_path) == checksum
elif file_obj:
return getFileChecksum(checksum_type, file_obj=file_obj) == checksum
if local_path and os.path.isfile(local_path):
return True
elif file_obj:
return True
return False
def __can_retry(self, retry, mirrors, opts, url, e):
retrycode = getattr(e, 'errno', None)
code = getattr(e, 'code', None)
if retry < (self.parent.retries - 1):
# No codes at all or some specified codes
# 58, 77 - Couple of curl error codes observed in multithreading on RHEL 7 - probably a bug
if (retrycode is None and code is None) or (retrycode in opts.retrycodes or code in [58, 77]):
log2(0, 2, "ERROR: Download failed: %s - %s. Retrying..." % (url, sys.exc_info()[1]),
stream=sys.stderr)
return True
# 14 - HTTP Error
if retry < (mirrors - 1) and retrycode == 14:
log2(0, 2, "ERROR: Download failed: %s - %s. Trying next mirror..." % (url, sys.exc_info()[1]),
stream=sys.stderr)
return True
log2(0, 1, "ERROR: Download failed: %s - %s." % (url, sys.exc_info()[1]),
stream=sys.stderr)
return False
def __next_mirror(self, total):
if self.mirror < (total - 1):
self.mirror += 1
else:
self.mirror = 0
def __fetch_url(self, params):
# Skip existing file if exists and matches checksum
if not self.parent.force:
if self.__is_file_done(local_path=params['target_file'], checksum_type=params['checksum_type'],
checksum=params['checksum']):
return True
opts = URLGrabberOptions(ssl_ca_cert=params['ssl_ca_cert'], ssl_cert=params['ssl_client_cert'],
ssl_key=params['ssl_client_key'], range=params['bytes_range'],
proxy=params['proxy'], username=params['proxy_username'],
password=params['proxy_password'], proxies=params['proxies'])
mirrors = len(params['urls'])
        for retry in range(max(self.parent.retries, mirrors)):
fo = None
url = urlparse.urljoin(params['urls'][self.mirror], params['relative_path'])
try:
try:
fo = PyCurlFileObjectThread(url, params['target_file'], opts, self.curl)
# Check target file
if not self.__is_file_done(file_obj=fo, checksum_type=params['checksum_type'],
checksum=params['checksum']):
raise FailedDownloadError("Target file isn't valid. Checksum should be %s (%s)."
% (params['checksum'], params['checksum_type']))
break
except (FailedDownloadError, URLGrabError):
e = sys.exc_info()[1]
if not self.__can_retry(retry, mirrors, opts, url, e):
return False
self.__next_mirror(mirrors)
finally:
if fo:
fo.close()
# Delete failed download file
elif os.path.isfile(params['target_file']):
os.unlink(params['target_file'])
return True
def run(self):
while not self.parent.queue.empty():
try:
params = self.parent.queue.get(block=False)
except Empty:
break
self.mirror = 0
success = self.__fetch_url(params)
if self.parent.log_obj:
# log_obj must be thread-safe
self.parent.log_obj.log(success, os.path.basename(params['relative_path']))
self.parent.queue.task_done()
class ThreadedDownloader:
def __init__(self, retries=3, log_obj=None, force=False):
self.queue = Queue()
initCFG('server.satellite')
self.threads = CFG.REPOSYNC_DOWNLOAD_THREADS
self.retries = retries
self.log_obj = log_obj
self.force = force
def set_log_obj(self, log_obj):
self.log_obj = log_obj
def set_force(self, force):
self.force = force
@staticmethod
def _validate(ssl_ca_cert, ssl_cert, ssl_key):
for certificate_file in (ssl_ca_cert, ssl_cert, ssl_key):
if certificate_file and not os.path.isfile(certificate_file):
log2(0, 0, "ERROR: Certificate file not found: %s" % certificate_file, stream=sys.stderr)
return False
return True
def add(self, params):
if self._validate(params['ssl_ca_cert'], params['ssl_client_cert'], params['ssl_client_key']):
self.queue.put(params)
def run(self):
size = self.queue.qsize()
if size <= 0:
return
log(1, "Downloading %s files." % str(size))
started_threads = []
for _ in range(self.threads):
thread = DownloadThread(self)
thread.setDaemon(True)
thread.start()
started_threads.append(thread)
# wait to finish
while any(t.isAlive() for t in started_threads):
time.sleep(1)
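# Illustrative usage sketch (not part of the original module). The parameter
# names mirror the keys read in DownloadThread.__fetch_url; the URLs, paths and
# checksum below are made-up placeholders.
#
#     downloader = ThreadedDownloader(retries=3, force=False)
#     downloader.add({
#         'urls': ['http://mirror1.example.com/repo/', 'http://mirror2.example.com/repo/'],
#         'relative_path': 'Packages/foo-1.0-1.noarch.rpm',
#         'target_file': '/var/cache/rhn/foo-1.0-1.noarch.rpm',
#         'checksum_type': 'sha256', 'checksum': '<expected digest>',
#         'ssl_ca_cert': None, 'ssl_client_cert': None, 'ssl_client_key': None,
#         'bytes_range': None, 'proxy': None, 'proxy_username': None,
#         'proxy_password': None, 'proxies': None,
#     })
#     downloader.run()  # spawns CFG.REPOSYNC_DOWNLOAD_THREADS worker threads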
class ContentPackage:
def __init__(self):
# map of checksums
self.checksum_type = None
self.checksum = None
# unique ID that can be used by plugin
self.unique_id = None
self.name = None
self.version = None
self.release = None
self.epoch = None
self.arch = None
self.path = None
self.a_pkg = None
def __cmp__(self, other):
ret = cmp(self.name, other.name)
if ret == 0:
rel_self = str(self.release).split('.')[0]
rel_other = str(other.release).split('.')[0]
# pylint: disable=E1101
ret = rpm.labelCompare((str(self.epoch), str(self.version), rel_self),
(str(other.epoch), str(other.version), rel_other))
if ret == 0:
ret = cmp(self.arch, other.arch)
return ret
def getNRA(self):
rel = re.match(".*?\\.(.*)",self.release)
rel = rel.group(1)
nra = str(self.name) + str(rel) + str(self.arch)
return nra
def setNVREA(self, name, version, release, epoch, arch):
self.name = name
self.version = version
self.release = release
self.arch = arch
self.epoch = epoch
def getNVREA(self):
if self.epoch:
return self.name + '-' + self.version + '-' + self.release + '-' + self.epoch + '.' + self.arch
else:
return self.name + '-' + self.version + '-' + self.release + '.' + self.arch
def getNEVRA(self):
if self.epoch is None:
self.epoch = '0'
return self.name + '-' + self.epoch + ':' + self.version + '-' + self.release + '.' + self.arch
def load_checksum_from_header(self):
if self.path is None:
raise rhnFault(50, "Unable to load package", explain=0)
self.a_pkg = rhn_pkg.package_from_filename(self.path)
self.a_pkg.read_header()
self.a_pkg.payload_checksum()
self.a_pkg.input_stream.close()
def upload_package(self, channel, metadata_only=False):
if not metadata_only:
rel_package_path = rhnPackageUpload.relative_path_from_header(
self.a_pkg.header, channel['org_id'], self.a_pkg.checksum_type, self.a_pkg.checksum)
else:
rel_package_path = None
_unused = rhnPackageUpload.push_package(self.a_pkg,
force=False,
relative_path=rel_package_path,
org_id=channel['org_id'])
|
gpl-2.0
| -7,994,968,211,476,632,000
| 36.014535
| 107
| 0.56758
| false
| 3.95312
| false
| false
| false
|
umitproject/tease-o-matic
|
django_mongodb_engine/widgets.py
|
1
|
2621
|
from django.conf import settings
from django.forms import widgets
from django.db import models
from django.utils.safestring import mark_safe
import warnings
warnings.warn("django_mongodb_engine.widgets is deprecated and will be removed "
"in version 0.5", DeprecationWarning)
class DictWidget(widgets.Widget):
def value_from_datadict(self, data, files, name):
if data.has_key("%s_rows" % name):
returnlist = {}
rows = int(data["%s_rows" % name])
while rows > 0:
rows -= 1
rowname = "%s_%d" % (name, rows )
if data.has_key("%s_key" % rowname ) :
k = data["%s_key" % rowname]
if k != "":
v = None
if data.has_key("%s_value" % rowname ) :
v = data["%s_value"%rowname]
returnlist[k]=v
rowname = "%s_new" % name
if data.has_key("%s_key" % rowname ) :
k = data["%s_key" % rowname]
if k != "":
v = None
if data.has_key("%s_value" % rowname ) :
v = data["%s_value"%rowname]
returnlist[k]=v
return returnlist
else:
return None
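# Illustrative sketch (not part of the original widget): for a field named
# "attrs", render() emits inputs named attrs_<row>_key / attrs_<row>_value plus
# an attrs_rows counter, so a POST like the one below round-trips to a dict.
#
#     data = {'attrs_rows': '2',
#             'attrs_0_key': 'color', 'attrs_0_value': 'red',
#             'attrs_1_key': 'size', 'attrs_1_value': 'XL',
#             'attrs_new_key': '', 'attrs_new_value': ''}
#     DictWidget().value_from_datadict(data, {}, 'attrs')
#     # -> {'color': 'red', 'size': 'XL'}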
def render(self, name, value, attrs=None):
htmlval="<table><tr><td>#</td><td>Key</td><td>Value</td></tr>"
linenum=0
idname = attrs['id']
if (value is not None) and (type(value).__name__=='dict') :
for key, val in value.items():
idname_row = "%s_%d" % ( idname, linenum )
htmlval += '<tr><td><label for="%s_key">%d</label></td><td><input type="txt" id="%s_key" name="%s_%d_key" value="%s" /></td>' % (
idname_row, linenum ,idname_row, name,linenum, key )
htmlval += '<td><input type="txt" id="%s_value" name="%s_%d_value" value="%s" /></td></tr>' % (
idname_row, name,linenum, val)
linenum += 1
idname_row = "%s_new" % ( idname )
htmlval += '<tr><td><label for="%s_key">new</label></td><td><input type="txt" id="%s_key" name="%s_new_key" value="" /></td>' % (
idname_row, idname_row, name)
htmlval += '<td><input type="txt" id="%s_value" name="%s_new_value" value="" /></td></tr>' % (
idname_row, name )
htmlval += "</table>"
htmlval += "<input type='hidden' name='%s_rows' value='%d'>" % ( name, linenum )
return mark_safe(htmlval)
|
bsd-3-clause
| 4,250,996,487,729,507,000
| 40.603175
| 145
| 0.47272
| false
| 3.640278
| false
| false
| false
|
ib-lundgren/django-oauthlib
|
django_oauthlib/views.py
|
1
|
4525
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
# TODO: don't import errors like this
from oauthlib.oauth2.draft25 import errors
from oauthlib.oauth2 import Server
from .validator import DjangoValidator
from .utils import extract_params, log
def get_credentials(request):
return {}
def get_authorization(request):
return request.POST.getlist('scopes'), {'user': request.user}
def get_actual_authorization_view(request):
# TODO: use template?
def basic_view(request, client_id=None, scopes=None, **kwargs):
response = HttpResponse()
response.write('<h1> Authorize access to %s </h1>' % client_id)
response.write('<form method="POST" action="/post_authorization">')
for scope in scopes or []:
response.write('<input type="checkbox" name="scopes" value="%s"/> %s' % (scope, scope))
response.write('<input type="submit" value="Authorize"/>')
return response
return basic_view
class AuthorizationView(View):
def __init__(self):
validator = DjangoValidator()
# TODO: this should probably be tunable through settings
self._authorization_endpoint = Server(validator)
self._error_uri = reverse('oauth2_error')
def get(self, request, *args, **kwargs):
uri, http_method, body, headers = extract_params(request)
redirect_uri = request.GET.get('redirect_uri', None)
log.debug('Found redirect uri %s.', redirect_uri)
try:
scopes, credentials = self._authorization_endpoint.validate_authorization_request(
uri, http_method, body, headers)
log.debug('Saving credentials to session, %r.', credentials)
request.session['oauth2_credentials'] = credentials
kwargs['scopes'] = scopes
kwargs.update(credentials)
actual_view = get_actual_authorization_view(request)
log.debug('Invoking actual view method, %r.', actual_view)
return actual_view(request, *args, **kwargs)
except errors.FatalClientError as e:
log.debug('Fatal client error, redirecting to error page.')
return HttpResponseRedirect(e.in_uri(self._error_uri))
except errors.OAuth2Error as e:
log.debug('Client error, redirecting back to client.')
# TODO: remove after federico PR
e.redirect_uri = redirect_uri or 'https://localhost'
return HttpResponseRedirect(e.in_uri(e.redirect_uri))
@csrf_exempt
def post(self, request, *args, **kwargs):
uri, http_method, body, headers = extract_params(request)
scopes, credentials = get_authorization(request)
log.debug('Fetched credentials view, %r.', credentials)
credentials.update(request.session.get('oauth2_credentials', {}))
log.debug('Fetched credentials from session, %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect uri %s.', redirect_uri)
try:
url, headers, body, status = self._authorization_endpoint.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful, redirecting to client.')
return HttpResponseRedirect(url)
except errors.FatalClientError as e:
log.debug('Fatal client error, redirecting to error page.')
return HttpResponseRedirect(e.in_uri(self._error_uri))
except errors.OAuth2Error as e:
log.debug('Client error, redirecting back to client.')
return HttpResponseRedirect(e.in_uri(redirect_uri))
class TokenView(View):
def __init__(self, token_endpoint):
self._token_endpoint = token_endpoint
def post(self, request, *args, **kwargs):
uri, http_method, body, headers = extract_params(request)
credentials = get_credentials(request)
log.debug('Fetched credentials view, %r.', credentials)
url, headers, body, status = self._token_endpoint.create_token_response(
uri, http_method, body, headers, credentials)
response = HttpResponse(content=body, status=status)
for k, v in headers.items():
response[k] = v
return response
class ErrorView(View):
pass
|
bsd-3-clause
| -8,697,564,746,311,574,000
| 40.136364
| 100
| 0.655249
| false
| 4.309524
| false
| false
| false
|
sorpaas/reread
|
reader/views/queries/read_records.py
|
1
|
1465
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from reader.documents import *
import datetime
import json
import urllib
@login_required
def like_article(request, record_id):
record = ReadRecord.objects.get(id=record_id)
record.is_liked = True
record.save()
return HttpResponse(json.dumps({"status": "ok"}),
content_type="application/json")
@login_required
def unlike_article(request, record_id):
record = ReadRecord.objects.get(id=record_id)
record.is_liked = False
record.save()
return HttpResponse(json.dumps({"status": "ok"}),
content_type="application/json")
@login_required
def create(request, article_id):
reader = Reader.reader_for(request.user)
article = Article.objects.get(id=article_id)
reader.reading_list = [x for x in reader.reading_list if str(x['article_id']) != str(article_id)]
reader.save()
try:
record = ReadRecord.objects.get(reader=reader, article=article)
except ReadRecord.DoesNotExist:
record = ReadRecord(reader=reader, article=article)
record.save()
return HttpResponse(json.dumps({"status": "ok",
"record_id": str(record.id),
"is_liked": record.is_liked}),
content_type="application/json")
|
mit
| 6,258,733,803,302,709,000
| 36.564103
| 101
| 0.651877
| false
| 4.024725
| false
| false
| false
|
wujuguang/scrapyd
|
scrapyd/webservice.py
|
3
|
6619
|
from copy import copy
import traceback
import uuid
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from twisted.python import log
from .utils import get_spider_list, JsonResource, UtilsCache, native_stringify_dict
class WsResource(JsonResource):
def __init__(self, root):
JsonResource.__init__(self)
self.root = root
def render(self, txrequest):
try:
return JsonResource.render(self, txrequest).encode('utf-8')
except Exception as e:
if self.root.debug:
return traceback.format_exc().encode('utf-8')
log.err()
r = {"node_name": self.root.nodename, "status": "error", "message": str(e)}
return self.render_object(r, txrequest).encode('utf-8')
class DaemonStatus(WsResource):
def render_GET(self, txrequest):
pending = sum(q.count() for q in self.root.poller.queues.values())
running = len(self.root.launcher.processes)
finished = len(self.root.launcher.finished)
return {"node_name": self.root.nodename, "status":"ok", "pending": pending, "running": running, "finished": finished}
class Schedule(WsResource):
def render_POST(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
settings = args.pop('setting', [])
settings = dict(x.split('=', 1) for x in settings)
args = dict((k, v[0]) for k, v in args.items())
project = args.pop('project')
spider = args.pop('spider')
version = args.get('_version', '')
priority = float(args.pop('priority', 0))
spiders = get_spider_list(project, version=version)
if not spider in spiders:
return {"status": "error", "message": "spider '%s' not found" % spider}
args['settings'] = settings
jobid = args.pop('jobid', uuid.uuid1().hex)
args['_job'] = jobid
self.root.scheduler.schedule(project, spider, priority=priority, **args)
return {"node_name": self.root.nodename, "status": "ok", "jobid": jobid}
class Cancel(WsResource):
def render_POST(self, txrequest):
args = dict((k, v[0])
for k, v in native_stringify_dict(copy(txrequest.args),
keys_only=False).items())
project = args['project']
jobid = args['job']
signal = args.get('signal', 'TERM')
prevstate = None
queue = self.root.poller.queues[project]
c = queue.remove(lambda x: x["_job"] == jobid)
if c:
prevstate = "pending"
spiders = self.root.launcher.processes.values()
for s in spiders:
if s.project == project and s.job == jobid:
s.transport.signalProcess(signal)
prevstate = "running"
return {"node_name": self.root.nodename, "status": "ok", "prevstate": prevstate}
class AddVersion(WsResource):
def render_POST(self, txrequest):
eggf = BytesIO(txrequest.args.pop(b'egg')[0])
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
version = args['version'][0]
self.root.eggstorage.put(eggf, project, version)
spiders = get_spider_list(project, version=version)
self.root.update_projects()
UtilsCache.invalid_cache(project)
return {"node_name": self.root.nodename, "status": "ok", "project": project, "version": version, \
"spiders": len(spiders)}
class ListProjects(WsResource):
def render_GET(self, txrequest):
projects = list(self.root.scheduler.list_projects())
return {"node_name": self.root.nodename, "status": "ok", "projects": projects}
class ListVersions(WsResource):
def render_GET(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
versions = self.root.eggstorage.list(project)
return {"node_name": self.root.nodename, "status": "ok", "versions": versions}
class ListSpiders(WsResource):
def render_GET(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
version = args.get('_version', [''])[0]
spiders = get_spider_list(project, runner=self.root.runner, version=version)
return {"node_name": self.root.nodename, "status": "ok", "spiders": spiders}
class ListJobs(WsResource):
def render_GET(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args.get('project', [None])[0]
spiders = self.root.launcher.processes.values()
queues = self.root.poller.queues
pending = [
{"project": qname, "spider": x["name"], "id": x["_job"]}
for qname in (queues if project is None else [project])
for x in queues[qname].list()
]
running = [
{
"project": s.project,
"spider": s.spider,
"id": s.job, "pid": s.pid,
"start_time": str(s.start_time),
} for s in spiders if project is None or s.project == project
]
finished = [
{
"project": s.project,
"spider": s.spider, "id": s.job,
"start_time": str(s.start_time),
"end_time": str(s.end_time)
} for s in self.root.launcher.finished
if project is None or s.project == project
]
return {"node_name": self.root.nodename, "status": "ok",
"pending": pending, "running": running, "finished": finished}
class DeleteProject(WsResource):
def render_POST(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
self._delete_version(project)
UtilsCache.invalid_cache(project)
return {"node_name": self.root.nodename, "status": "ok"}
def _delete_version(self, project, version=None):
self.root.eggstorage.delete(project, version)
self.root.update_projects()
class DeleteVersion(DeleteProject):
def render_POST(self, txrequest):
args = native_stringify_dict(copy(txrequest.args), keys_only=False)
project = args['project'][0]
version = args['version'][0]
self._delete_version(project, version)
UtilsCache.invalid_cache(project)
return {"node_name": self.root.nodename, "status": "ok"}
|
bsd-3-clause
| -5,762,503,081,443,580,000
| 37.935294
| 125
| 0.599033
| false
| 3.724817
| false
| false
| false
|
ohrstrom/django-nunjucks
|
nunjucks/management/commands/nunjucks_tools.py
|
1
|
4480
|
import os
import sys
from collections import OrderedDict
from optparse import make_option
from django.core.files.storage import FileSystemStorage
from django.core.management.base import CommandError, NoArgsCommand
from django.contrib.staticfiles import finders, storage
from django.template.loader import render_to_string
from nunjucks.compiler import NunjucksCompiler
class Command(NoArgsCommand):
"""
Command that allows copying or symlinking static files from different
locations to the settings.STATIC_ROOT.
"""
option_list = NoArgsCommand.option_list + (
make_option('--compile',
action='store_false', dest='do_compile', default=False,
help="Compile nunjucks templates"),
make_option('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except modify the filesystem."),
)
help = "Collect static files in a single location."
requires_model_validation = False
def __init__(self, *args, **kwargs):
super(NoArgsCommand, self).__init__(*args, **kwargs)
self.storage = storage.staticfiles_storage
try:
self.storage.path('')
except NotImplementedError:
self.local = False
else:
self.local = True
self.compiler = NunjucksCompiler()
def set_options(self, **options):
self.do_compile = options['do_compile']
self.dry_run = options['dry_run']
def collect(self):
target = 'apps/nunjucks/static/nunjucks/js/templates.js'
templates = []
for finder in finders.get_finders():
for path, storage in finder.list(['*zinnia*']):
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
# TODO: find a correct way to get nj-paths
if '/nj/' in path:
templates.append( {
'path': path,
'inner': self.compiler.compile(storage.path(path))
}
)
tpl = render_to_string('nunjucks/compile/templates.js', {'templates': templates})
open(target, "w").write(tpl)
return
def handle_noargs(self, **options):
self.set_options(**options)
# Warn before doing anything more.
if (isinstance(self.storage, FileSystemStorage) and
self.storage.location):
destination_path = self.storage.location
destination_display = ':\n\n %s' % destination_path
else:
destination_path = None
destination_display = '.'
collected = self.collect()
def compile_file(self, path, prefixed_path, source_storage):
# dummy, to test compiler
source_path = source_storage.path(path)
if 'on_air_item.html' in path:
print
print 'path: %s' % path
print 'prefixed_path: %s' % prefixed_path
print 'source_path: %s' % source_path
print 'source_storage: %s' % source_storage
print self.compiler.compile(source_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
if self.local:
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
if not prefixed_path in self.copied_files:
self.copied_files.append(prefixed_path)
|
bsd-3-clause
| 7,821,409,506,878,498,000
| 30.556338
| 89
| 0.577902
| false
| 4.303554
| false
| false
| false
|
CaptainHayashi/lass
|
urysite/url_regexes.py
|
1
|
1298
|
"""
Common regular expression stubs for URLconfs.
These are collected in a common module to ensure consistency across
the LASS platform.
"""
# Helper functions #
def relative(partials):
"""
Given a sequence of partial regexes, constructs a full regex that
treats the partial regexes as stages in a directory hierarchy
relative to the current root.
"""
return r'^{0}/$'.format('/'.join(partials))
def relatives(partial_sequences):
"""
A wrapper around `relative` that processes a sequence of partial
regex sequences.
"""
return (relative(x) for x in partial_sequences)
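# Illustrative sketch (not part of the original module): relative() joins the
# partial patterns with slashes and anchors them, e.g.
#
#     >>> relative(['shows', r'(?P<pk>\d+)'])
#     '^shows/(?P<pk>\\d+)/$'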
# Dates #
## Partial regexes
# NB: The year regex is limited to years 1-9999.
# This is intentional and mirrors Python's MAX_YEAR at the time of
# writing (thus preventing year overflows).
YEAR_PARTIAL = r'(?P<year>[1-9]\d?\d?\d?)'
WEEK_PARTIAL = r'[wW](eek)?(?P<week>([0-4]?\d|5[0-3]))'
WEEKDAY_PARTIAL = r'[dD]?(ay)?(?P<weekday>[1-7])'
MONTH_PARTIAL = r'(?P<month>(0?\d|1[12]))'
DAY_PARTIAL = r'(?P<day>([0-2]?\d|3[01]))'
## Full relative regexes
WEEK_REGEX, WEEKDAY_REGEX, DAY_REGEX = (
relative(x) for x in (
(YEAR_PARTIAL, WEEK_PARTIAL),
(YEAR_PARTIAL, WEEK_PARTIAL, WEEKDAY_PARTIAL),
(YEAR_PARTIAL, MONTH_PARTIAL, DAY_PARTIAL),
)
)
|
gpl-2.0
| 6,314,977,351,025,411,000
| 24.96
| 69
| 0.657935
| false
| 3.054118
| false
| false
| false
|
tbenthompson/LMS_public
|
lms_code/plots/plot_hazard.py
|
1
|
3557
|
import matplotlib.pyplot as plt
import numpy as np
from lms_code.analysis.run_bem import get_slip_magnitude
import lms_code.lib.rep2 as rep2
import lms_code.plots.plot_all as lms_plot
def main():
lms_plot.setup()
fig = plt.figure()
which_model = 'all_details'
bem_soln = rep2.load('bem_' + which_model)
shortening = rep2.load('shortening_estimate_' + which_model)
est = shortening['lsqr_shortening']
est_low = est - shortening['lsqr_shortening_error']
est_high = est + shortening['lsqr_shortening_error']
total_length = 0.0
slip = 0.0
slip_low = 0.0
slip_high = 0.0
joint = [4.20012e5 + 1.6, -2.006e4 - 5]
for e in bem_soln['fault_mesh']:
if e.vertex1.loc[0] < joint[0] - 10:
continue
total_length += e.length
slip_mag = np.linalg.norm(get_slip_magnitude(e))
slip += e.length * est * slip_mag
slip_low += e.length * est_low * slip_mag
slip_high += e.length * est_high * slip_mag
s = (slip / total_length) / 1000
s_low = (slip_low / total_length) / 1000
s_high = (slip_high / total_length) / 1000
slip_err = s_high - s
# s = 6.1 / 1000
# s_low = 4.6 / 1000
# s_high = 7.6 / 1000
T = np.linspace(0, 3000, 100)
d = T * s
T_high = d / s_low
T_low = d / s_high
wenchuan_d = 4.0
wenchuan_T_low = wenchuan_d / s_low
wenchuan_T = wenchuan_d / s
wenchuan_T_high = wenchuan_d / s_high
print("Wenchuan recurrence: " + str(wenchuan_T) + " (low: " + str(wenchuan_T_low) + ", high: " + str(wenchuan_T_high) + ")")
a_wells = 6.93
b_wells = 0.82
mag7_ad = np.exp((7.0 - a_wells) / b_wells)
mag7_T = mag7_ad / s
paleo_T = 2300
paleo_ad = paleo_T * s
paleo_mag = (np.log(paleo_ad) * b_wells) + a_wells
plt.plot(d, T, 'k-')
plt.fill_between(d, T_low, T_high, facecolor = '#AAAAAA')
plt.plot([0, paleo_ad + 100], [paleo_T, paleo_T], 'k--')
plt.plot([wenchuan_d, mag7_ad, paleo_ad], [wenchuan_T, mag7_T, paleo_T],
linestyle = 'None',
marker = 'o',
markeredgewidth = 4.0,
markeredgecolor = (0, 0, 0, 1.0),
markerfacecolor = (1, 1, 1, 1.0),
markersize = 15)
# Plot Wenchuan
text = 'Wenchuan-like $\\textrm{M}_{\\textrm{w}}$ 7.9 (' + '%.0f'%wenchuan_d + ' m, ' +\
'%.0f'%wenchuan_T + ' years)'
plt.annotate(text, (wenchuan_d, wenchuan_T),
xytext = (wenchuan_d + 0.5, wenchuan_T - 50))
# Plot the Mw 7 pt
text = 'Typical $\\textrm{M}_{\\textrm{w}}$ 7.0 (' + '%.0f'%mag7_ad + ' m, ' +\
'%.0f'%mag7_T + ' years)'
plt.annotate(text, (mag7_ad, mag7_T),
xytext = (mag7_ad + 0.9, mag7_T - 30))
# Plot the paleoseismic pt
text = 'Low paleoseismic estimate'
plt.text(1.7, 2350, text)
text = '($Ran$ $et$ $al.$ 2010)'
plt.text(1.7, 2200, text)
text = '$\\textrm{M}_{\\textrm{w}}$ ' + '%0.f'%paleo_mag + ', ' + '%0.f'%paleo_ad + ' m'
plt.annotate(text, (paleo_ad, paleo_T),
xytext = (paleo_ad - 3.2, paleo_T + 30))
plt.text(2.0, 40, '($Wells$ $and$ $Coppersmith$ 1994)')
plt.text(0.5, 1800, 'average slip rate = ' + '%.1f'%(s * 1000) + ' $\pm$ %.1f'%(slip_err * 1000) + ' mm/yr')
plt.ylabel('$T$ (years)')
plt.xlabel('$d$ (meters)')
plt.ylim([0, 2500])
plt.xlim([0, 2500 * s])
width = 7.0
fig.set_size_inches([width, (6.0 / 8.0) * width])
plt.savefig('hazard_' + which_model)
if __name__ == '__main__':
main()
|
mit
| 8,852,281,276,699,813,000
| 33.201923
| 128
| 0.536688
| false
| 2.531673
| false
| false
| false
|
jbest/digitization_tools
|
sort/sort.py
|
1
|
5387
|
import argparse
#import glob
#import os
import shutil
from pathlib import Path
DEFAULT_HERBARIUM_PREFIX = 'BRIT'
DEFAULT_FOLDER_INCREMENT = 1000
DEFAULT_NUMBER_PAD = 7
files_analyzed = 0
files_sorted = 0
verbose = False
# set up argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--directory", required=True, \
help="Path to the directory that contains the images to be sorted.")
ap.add_argument("-o", "--output_directory", required=True, default=None,\
help="Path to an existing directory where sorted files and directories will be written.")
ap.add_argument("-p", "--pattern", required=True, \
help="Pattern of filenames to be sorted - eg '*.jpg'")
ap.add_argument("-r", "--recursive", action="store_true", \
help="Recurse sub-directories")
ap.add_argument("-c", "--catalog_prefix", default=DEFAULT_HERBARIUM_PREFIX, \
help="Prefix string for catalog numbers. Default is BRIT.")
ap.add_argument("-i", "--increment", default=DEFAULT_FOLDER_INCREMENT, \
help="Increment for folder numbers.")
ap.add_argument("-l", "--length", default=DEFAULT_NUMBER_PAD, \
help="Length for folder numbers, pre-padded with 0.")
ap.add_argument("-v", "--verbose", action="store_true", \
help="Detailed output.")
ap.add_argument("-n", "--dry_run", action="store_true", \
help="No files moved, no directories created.")
args = vars(ap.parse_args())
HERBARIUM_PREFIX = args["catalog_prefix"]
FOLDER_INCREMENT = int(args["increment"])
PAD = int(args["length"])
recurse_subdirectories = args["recursive"]
dry_run = args["dry_run"]
def sort_file(source=None, destination=None):
global files_sorted
if destination.exists():
if dry_run:
print('DRY-RUN: Filename exists, cannot move:', destination)
if verbose:
print('Filename exists, cannot move:', destination)
return False
else:
if dry_run:
print('DRY-RUN: Moved:', destination)
else:
shutil.move(source, destination)
if verbose:
print('Moved:', destination)
files_sorted += 1
return True
#iterate files matching pattern in directory passed from args
#source_directory_path = os.path.realpath(args["directory"])
source_directory_path = Path(args["directory"])
pattern = args["pattern"]
output_directory = args["output_directory"]
output_directory_path = Path(output_directory)
if output_directory_path:
# test to ensure output_directory exists
#if os.path.isdir(output_directory):
if output_directory_path.is_dir():
print('output_directory_path:', output_directory_path)
else:
print(f'ERROR: directory {output_directory_path} does not exist.')
print('Terminating script.')
quit()
if args['verbose']:
verbose = True
print("Verbose report...")
if dry_run:
print('DRY-RUN: starting dry run:')
print('Scanning directory:', source_directory_path, 'for files matching', pattern)
if recurse_subdirectories:
path_matches = source_directory_path.rglob(pattern)
else:
path_matches = source_directory_path.glob(pattern)
#for matching_path in source_directory_path.rglob('*.jpg'):
for matching_path in path_matches:
files_analyzed += 1
#basename = os.path.basename(source_path)
basename = matching_path.name
if basename.startswith(HERBARIUM_PREFIX):
file_name = matching_path.stem
file_extension = matching_path.suffix
#print('file_name:', file_name)
#print('file_extension:', file_extension)
accession_id = file_name[len(HERBARIUM_PREFIX):]
try:
accession_number = int(accession_id)
folder_number = int(accession_number//FOLDER_INCREMENT*FOLDER_INCREMENT)
padded_folder_number = str(folder_number).zfill(PAD)
# zfill may be deprecated in future? Look into string formatting with fill
# https://stackoverflow.com/a/339013
#destination_folder_name = HERBARIUM_PREFIX + str(int(accession_number//FOLDER_INCREMENT*FOLDER_INCREMENT))
destination_folder_name = HERBARIUM_PREFIX + padded_folder_number
if output_directory:
output_directory_path = Path(output_directory)
destination_directory_path = output_directory_path.joinpath(destination_folder_name)
else:
# no output_directory specified, using source directory
destination_directory_path = source_directory_path.joinpath(destination_folder_name)
destination_file_path = destination_directory_path.joinpath(basename)
# Check if destination directory exists
if destination_directory_path.is_dir():
sort_file(source=matching_path, destination=destination_file_path)
else:
if verbose:
print(f'Creating folder: {destination_directory_path}')
destination_directory_path.mkdir()
sort_file(source=matching_path, destination=destination_file_path)
except ValueError:
print('Cannot parse', file_name)
else:
if verbose:
print(f'Ignoring {basename} - does not start with {HERBARIUM_PREFIX}.')
print('Sort complete.')
print('Encountered files:', files_analyzed)
print('Sorted files:', files_sorted)
if dry_run:
print('DRY-RUN: ending dry run.')
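# Illustrative example of the folder naming above (a sketch, not part of the
# original script): with the defaults (prefix 'BRIT', increment 1000, pad 7),
# a file named BRIT0123456.jpg gives
#     accession_number = 123456
#     folder_number    = 123456 // 1000 * 1000 = 123000
#     destination dir  = BRIT0123000
# so images are grouped into folders of 1000 consecutive accession numbers.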
|
mit
| -1,070,387,984,230,873,100
| 39.810606
| 119
| 0.664377
| false
| 3.94652
| false
| false
| false
|
tsileo/incremental-backups-tools
|
incremental_backups_tools/__init__.py
|
1
|
11140
|
# -*- coding: utf-8 -*-
import os
import tarfile
import logging
import tempfile
import shutil
from datetime import datetime
import json
import itertools
import librsync
from dirtools import Dir, DirState, compute_diff
import sigvault
logging.basicConfig(level=logging.INFO)
log = logging
CACHE_PATH = os.path.expanduser('~/.cache/bakthat')
if not os.path.exists(CACHE_PATH):
os.makedirs(CACHE_PATH)
class FileFinder(object):
base_paths = [CACHE_PATH, tempfile.gettempdir()]
@classmethod
def make_key(cls, key_type, key, dt):
ext = 'tgz'
if key_type == 'state':
ext = 'json'
return '{0}.{1}.{2}.{3}'.format(key,
key_type,
dt.isoformat(),
ext)
@classmethod
def check(cls, path):
for bp in cls.base_paths:
abs_path = os.path.join(bp, path)
if os.path.exists(abs_path):
return abs_path
@classmethod
def check_key(cls, key_type, key, dt):
k = cls.make_key(key_type, key, dt)
return cls.check(k)
def full_backup(path, cache_path=None):
if cache_path is None:
cache_path = tempfile.gettempdir()
backup_date = datetime.utcnow()
backup_dir = Dir(path)
backup_key = backup_dir.path.strip('/').split('/')[-1]
backup_dir_state = DirState(backup_dir)
state_file = backup_dir_state.to_json(cache_path, dt=backup_date, fmt='{0}.state.{1}.json')
created_file = FileFinder.make_key('full',
backup_key,
backup_date)
created_file = os.path.join(cache_path, created_file)
backup_dir.compress_to(created_file)
# Create a new SigVault
sigvault_file = FileFinder.make_key('sigvault',
backup_key,
backup_date)
sigvault_file = os.path.join(CACHE_PATH, sigvault_file)
sv = sigvault.open_vault(sigvault_file, 'w', base_path=backup_dir.path)
for f in backup_dir.iterfiles():
sv.add(f)
sv.close()
files = [state_file, created_file, sigvault_file]
files = [{'path': f, 'size': os.path.getsize(f)} for f in files]
total = sum([f['size'] for f in files])
return {'backup_key': backup_key, 'backup_date': backup_date, 'files': files, 'total': total}
def incremental_backup(path, cache_path=None):
if cache_path is None:
cache_path = tempfile.gettempdir()
files = []
backup_date = datetime.utcnow()
backup_dir = Dir(path)
backup_key = backup_dir.path.strip('/').split('/')[-1]
# TODO check if it's really the last state on the remote storage
last_state = Dir(cache_path).get('{0}.state.*'.format(backup_key), sort_reverse=True, abspath=True)
last_state = DirState.from_json(last_state)
current_state = DirState(backup_dir)
last_sv = sigvault.SigVaultReader(CACHE_PATH, backup_key)
diff = current_state - last_state
state_file = current_state.to_json(cache_path, dt=backup_date, fmt='{0}.state.{1}.json')
files.append(state_file)
created_file = FileFinder.make_key('created',
backup_key,
backup_date)
created_file = os.path.join(cache_path, created_file)
# Store files from diff['created'] into a new archive
created_file = process_created(created_file,
diff['created'],
backup_dir.path)
if created_file:
files.append(created_file)
updated_file = FileFinder.make_key('updated',
backup_key,
backup_date)
updated_file = os.path.join(cache_path, updated_file)
# Compute and store delta from the list of updated files
updated_file = process_updated(updated_file,
diff['updated'],
backup_dir.path,
last_sv)
if updated_file:
files.append(updated_file)
if diff['created'] or diff['updated']:
sigvault_file = FileFinder.make_key('sigvault',
backup_key,
backup_date)
sigvault_file = os.path.join(CACHE_PATH, sigvault_file)
new_sv = sigvault.open_vault(sigvault_file, 'w', base_path=backup_dir.path)
for f in itertools.chain(diff['created'], diff['updated']):
new_sv.add(f)
new_sv.close()
files.append(sigvault_file)
files = [{'path': f, 'size': os.path.getsize(f)} for f in files]
total = sum([f['size'] for f in files])
return {'backup_key': backup_key, 'backup_date': backup_date, 'files': files, 'total': total}
def process_created(path, created, base_path):
""" Put new files in a new archive. """
if created:
created_archive = tarfile.open(path, 'w:gz')
for f in created:
f_abs = os.path.join(base_path, f)
created_archive.add(f_abs, arcname=f)
created_archive.close()
return path
def process_updated(path, updated, base_path, sigvault):
""" Process upated files, create a new SigVault if needed,
and create a new archives with delta (from the previous SigVault signatures).
"""
if updated:
updated_archive = tarfile.open(path, 'w:gz')
for f in updated:
f_abs = os.path.join(base_path, f)
delta = librsync.delta(open(f_abs, 'rb'),
sigvault.extract(f))
delta_size = os.fstat(delta.fileno()).st_size
delta_info = tarfile.TarInfo(f)
delta_info.size = delta_size
updated_archive.addfile(delta_info, delta)
updated_archive.close()
return path
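# Illustrative round trip (a sketch, not part of the original module), assuming
# the python-librsync helpers signature()/delta()/patch(); file names are
# placeholders.
#
#     sig = librsync.signature(open('old.bin', 'rb'))          # stored in a SigVault
#     delta = librsync.delta(open('new.bin', 'rb'), sig)       # what process_updated archives
#     restored = librsync.patch(open('old.bin', 'rb'), delta)  # what patch_diff applies
#     # 'restored' now holds the contents of new.bin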
def patch_diff(base_path, diff, created_archive=None, updated_archive=None):
# First, we iterate the created files
if diff['created']:
for crtd in diff['created']:
created_tar = tarfile.open(created_archive, 'r:gz')
try:
src_file = created_tar.extractfile(crtd)
abspath = os.path.join(base_path, crtd)
dirname = os.path.dirname(abspath)
# Create directories if they don't exist yet
if not os.path.exists(dirname):
os.makedirs(dirname)
# We copy the file from the archive directly to its destination
with open(abspath, 'wb') as f:
shutil.copyfileobj(src_file, f)
except KeyError as exc:
# It means that a file is missing in the archive.
log.exception(exc)
raise Exception("Diff seems corrupted.")
finally:
created_tar.close()
# Next, we iterate updated files in order to patch them
if diff['updated']:
for updtd in diff['updated']:
try:
updated_tar = tarfile.open(updated_archive, 'r:gz')
abspath = os.path.join(base_path, updtd)
# Load the librsync delta
delta_file = updated_tar.extractfile(updtd)
# A tempfile file to store the patched file/result
# before replacing the original
patched = tempfile.NamedTemporaryFile()
# Patch the current version of the file with the delta
# and store the result in the previously created tempfile
with open(abspath, 'rb') as f:
librsync.patch(f, delta_file, patched)
patched.seek(0)
# Now we replace the original file with the patched version
with open(abspath, 'wb') as f:
shutil.copyfileobj(patched, f)
patched.close()
except KeyError as exc:
# It means that a file is missing in the archive.
log.exception(exc)
raise Exception("Diff seems corrupted.")
finally:
updated_tar.close()
# Then, we iterate the deleted files
for dltd in diff['deleted']:
abspath = os.path.join(base_path, dltd)
if os.path.isfile(abspath):
os.remove(abspath)
# Finally, we iterate the deleted directories
for dltd_drs in diff['deleted_dirs']:
abspath = os.path.join(base_path, dltd_drs)
if os.path.isdir(abspath):
os.rmdir(abspath)
def _extract_dt_from_key(key):
key_date = '.'.join(key.split('.')[-3:-1])
key_dt = datetime.strptime(key_date, '%Y-%m-%dT%H:%M:%S.%f')
return key_date, key_dt
def get_full_and_incremental(key, cache_path=None):
""" From a directory as source, iterate over states files from a full backup,
till the end/or another full backup. The first item is actually the full backup. """
if cache_path is None:
cache_path = tempfile.gettempdir()
_dir = Dir(cache_path)
last_full = _dir.get('{0}.full.*'.format(key), sort_reverse=True, abspath=True)
last_full_date, last_full_dt = _extract_dt_from_key(last_full)
previous_state = FileFinder.check_key('state', key, last_full_dt)
yield last_full, None, last_full_dt
for s_file in _dir.files('{0}.state.*'.format(key)):
s_str = '.'.join(s_file.split('.')[-3:-1])
s_dt = datetime.strptime(s_str, '%Y-%m-%dT%H:%M:%S.%f')
if s_dt > last_full_dt and not FileFinder.check_key('full', key, s_dt):
yield s_file, previous_state, s_dt
previous_state = s_file
def restore_backup(key, dest, cache_path=None):
""" Restore backups given the key to dest using cache_path as source
for state and deltas. """
if cache_path is None:
cache_path = tempfile.gettempdir()
for index, (state_file, previous_state_file, state_dt) in enumerate(get_full_and_incremental(key)):
if index == 0:
# At index == 0, state is the full archive
log.info('Restored full backup ({})'.format(state_dt))
tarfile.open(state_file, 'r:gz').extractall(dest)
else:
with open(state_file, 'rb') as f:
state = json.loads(f.read())
with open(previous_state_file, 'rb') as f:
previous_state = json.loads(f.read())
diff = compute_diff(state, previous_state)
patch_diff(dest, diff,
FileFinder.check_key('created', key, state_dt),
FileFinder.check_key('updated', key, state_dt))
log.info('Patched incremental backup ({})'.format(state_dt))
return dest
def get_full_backups(key, cache_path=None):
if cache_path is None:
cache_path = tempfile.gettempdir()
_dir = Dir(cache_path)
fulls = _dir.files('{0}.full.*'.format(key), sort_reverse=True, abspath=True)
fulls = [_extract_dt_from_key(k)[1] for k in fulls]
return fulls
|
mit
| 4,781,952,771,890,564,000
| 33.918495
| 103
| 0.566299
| false
| 3.879833
| false
| false
| false
|
gxxjjj/QuantEcon.py
|
quantecon/markov/core.py
|
1
|
19386
|
r"""
Authors: Chase Coleman, Spencer Lyon, Daisuke Oyama, Tom Sargent,
John Stachurski
Filename: core.py
This file contains some useful objects for handling a finite-state
discrete-time Markov chain.
Definitions and Some Basic Facts about Markov Chains
----------------------------------------------------
Let :math:`\{X_t\}` be a Markov chain represented by an :math:`n \times
n` stochastic matrix :math:`P`. State :math:`i` *has access* to state
:math:`j`, denoted :math:`i \to j`, if :math:`i = j` or :math:`P^k[i, j]
> 0` for some :math:`k = 1, 2, \ldots`; :math:`i` and `j` *communicate*,
denoted :math:`i \leftrightarrow j`, if :math:`i \to j` and :math:`j \to
i`. The binary relation :math:`\leftrightarrow` is an equivalent
relation. A *communication class* of the Markov chain :math:`\{X_t\}`,
or of the stochastic matrix :math:`P`, is an equivalent class of
:math:`\leftrightarrow`. Equivalently, a communication class is a
*strongly connected component* (SCC) in the associated *directed graph*
:math:`\Gamma(P)`, a directed graph with :math:`n` nodes where there is
an edge from :math:`i` to :math:`j` if and only if :math:`P[i, j] > 0`.
The Markov chain, or the stochastic matrix, is *irreducible* if it
admits only one communication class, or equivalently, if
:math:`\Gamma(P)` is *strongly connected*.
A state :math:`i` is *recurrent* if :math:`i \to j` implies :math:`j \to
i`; it is *transient* if it is not recurrent. For any :math:`i, j`
contained in a communication class, :math:`i` is recurrent if and only
if :math:`j` is recurrent. Therefore, recurrence is a property of a
communication class. Thus, a communication class is a *recurrent class*
if it contains a recurrent state. Equivalently, a recurrent class is a
SCC that corresponds to a sink node in the *condensation* of the
directed graph :math:`\Gamma(P)`, where the condensation of
:math:`\Gamma(P)` is a directed graph in which each SCC is replaced with
a single node and there is an edge from one SCC :math:`C` to another SCC
:math:`C'` if :math:`C \neq C'` and some node in :math:`C` has access to
some node in :math:`C'`. A recurrent class is also called a *closed
communication class*. The condensation is acyclic, so that there exists
at least one recurrent class.
For example, if the entries of :math:`P` are all strictly positive, then
the whole state space is a communication class as well as a recurrent
class. (More generally, if there is only one communication class, then
it is a recurrent class.) As another example, consider the stochastic
matrix :math:`P = [[1, 0], [0.5, 0.5]]`. This has two communication
classes, :math:`\{0\}` and :math:`\{1\}`, and :math:`\{0\}` is the only
recurrent class.
A *stationary distribution* of the Markov chain :math:`\{X_t\}`, or of
the stochastic matrix :math:`P`, is a nonnegative vector :math:`x` such
that :math:`x' P = x'` and :math:`x' \mathbf{1} = 1`, where
:math:`\mathbf{1}` is the vector of ones. The Markov chain has a unique
stationary distribution if and only if it has a unique recurrent class.
More generally, each recurrent class has a unique stationary
distribution whose support equals that recurrent class. The set of all
stationary distributions is given by the convex hull of these unique
stationary distributions for the recurrent classes.
A natural number :math:`d` is the *period* of state :math:`i` if it is
the greatest common divisor of all :math:`k`'s such that :math:`P^k[i,
i] > 0`; equivalently, it is the GCD of the lengths of the cycles in
:math:`\Gamma(P)` passing through :math:`i`. For any :math:`i, j`
contained in a communication class, :math:`i` has period :math:`d` if
and only if :math:`j` has period :math:`d`. The *period* of an
irreducible Markov chain (or of an irreducible stochastic matrix) is the
period of any state. We define the period of a general (not necessarily
irreducible) Markov chain to be the least common multiple of the periods
of its recurrent classes, where the period of a recurrent class is the
period of any state in that class. A Markov chain is *aperiodic* if its
period is one. A Markov chain is irreducible and aperiodic if and only
if it is *uniformly ergodic*, i.e., there exists some :math:`m` such
that :math:`P^m[i, j] > 0` for all :math:`i, j` (in this case, :math:`P`
is also called *primitive*).
Suppose that an irreducible Markov chain has period :math:`d`. Fix any
state, say state :math:`0`. For each :math:`m = 0, \ldots, d-1`, let
:math:`S_m` be the set of states :math:`i` such that :math:`P^{kd+m}[0,
i] > 0` for some :math:`k`. These sets :math:`S_0, \ldots, S_{d-1}`
constitute a partition of the state space and are called the *cyclic
classes*. For each :math:`S_m` and each :math:`i \in S_m`, we have
:math:`\sum_{j \in S_{m+1}} P[i, j] = 1`, where :math:`S_d = S_0`.
"""
from __future__ import division
import numpy as np
from scipy import sparse
from fractions import gcd
from .gth_solve import gth_solve
from ..graph_tools import DiGraph
# -Check if Numba is Available- #
from ..util import searchsorted, check_random_state, numba_installed, jit
class MarkovChain(object):
"""
Class for a finite-state discrete-time Markov chain. It stores
useful information such as the stationary distributions, and
communication, recurrent, and cyclic classes, and allows simulation
of state transitions.
Parameters
----------
P : array_like or scipy sparse matrix (float, ndim=2)
The transition matrix. Must be of shape n x n.
Attributes
----------
P : ndarray or scipy.sparse.csr_matrix (float, ndim=2)
See Parameters
stationary_distributions : array_like(float, ndim=2)
Array containing stationary distributions, one for each
recurrent class, as rows.
is_irreducible : bool
Indicate whether the Markov chain is irreducible.
num_communication_classes : int
The number of the communication classes.
communication_classes : list(ndarray(int))
List of numpy arrays containing the communication classes.
num_recurrent_classes : int
The number of the recurrent classes.
recurrent_classes : list(ndarray(int))
List of numpy arrays containing the recurrent classes.
is_aperiodic : bool
Indicate whether the Markov chain is aperiodic.
period : int
The period of the Markov chain.
cyclic_classes : list(ndarray(int))
List of numpy arrays containing the cyclic classes. Defined only
when the Markov chain is irreducible.
Notes
-----
In computing stationary distributions, if the input matrix is a
sparse matrix, internally it is converted to a dense matrix.
"""
def __init__(self, P):
if sparse.issparse(P): # Sparse matrix
self.P = sparse.csr_matrix(P)
self.is_sparse = True
else: # Dense matrix
self.P = np.asarray(P)
self.is_sparse = False
# Check Properties
# Double check that P is a square matrix
if len(self.P.shape) != 2 or self.P.shape[0] != self.P.shape[1]:
raise ValueError('P must be a square matrix')
# The number of states
self.n = self.P.shape[0]
# Double check that P is a nonnegative matrix
if not self.is_sparse:
data_nonnegative = (self.P >= 0) # ndarray
else:
data_nonnegative = (self.P.data >= 0) # csr_matrx
if not np.all(data_nonnegative):
raise ValueError('P must be nonnegative')
# Double check that the rows of P sum to one
row_sums = self.P.sum(axis=1)
if self.is_sparse: # row_sums is np.matrix (ndim=2)
row_sums = row_sums.getA1()
if not np.allclose(row_sums, np.ones(self.n)):
raise ValueError('The rows of P must sum to 1')
# To analyze the structure of P as a directed graph
self._digraph = None
self._stationary_dists = None
self._cdfs = None # For dense matrix
self._cdfs1d = None # For sparse matrix
def __repr__(self):
msg = "Markov chain with transition matrix \nP = \n{0}"
if self._stationary_dists is None:
return msg.format(self.P)
else:
msg = msg + "\nand stationary distributions \n{1}"
return msg.format(self.P, self._stationary_dists)
def __str__(self):
return self.__repr__()
@property
def digraph(self):
if self._digraph is None:
self._digraph = DiGraph(self.P)
return self._digraph
@property
def is_irreducible(self):
return self.digraph.is_strongly_connected
@property
def num_communication_classes(self):
return self.digraph.num_strongly_connected_components
@property
def communication_classes(self):
return self.digraph.strongly_connected_components
@property
def num_recurrent_classes(self):
return self.digraph.num_sink_strongly_connected_components
@property
def recurrent_classes(self):
return self.digraph.sink_strongly_connected_components
@property
def is_aperiodic(self):
if self.is_irreducible:
return self.digraph.is_aperiodic
else:
return self.period == 1
@property
def period(self):
if self.is_irreducible:
return self.digraph.period
else:
rec_classes = self.recurrent_classes
# Determine the period, the LCM of the periods of rec_classes
d = 1
for rec_class in rec_classes:
period = self.digraph.subgraph(rec_class).period
d = (d * period) // gcd(d, period)
return d
@property
def cyclic_classes(self):
if not self.is_irreducible:
raise NotImplementedError(
'Not defined for a reducible Markov chain'
)
else:
return self.digraph.cyclic_components
def _compute_stationary(self):
"""
Store the stationary distributions in self._stationary_dists.
"""
if self.is_irreducible:
if not self.is_sparse: # Dense
stationary_dists = gth_solve(self.P).reshape(1, self.n)
else: # Sparse
stationary_dists = \
gth_solve(self.P.toarray(),
overwrite=True).reshape(1, self.n)
else:
rec_classes = self.recurrent_classes
stationary_dists = np.zeros((len(rec_classes), self.n))
for i, rec_class in enumerate(rec_classes):
if not self.is_sparse: # Dense
stationary_dists[i, rec_class] = \
gth_solve(self.P[rec_class, :][:, rec_class])
else: # Sparse
stationary_dists[i, rec_class] = \
gth_solve(self.P[rec_class, :][:, rec_class].toarray(),
overwrite=True)
self._stationary_dists = stationary_dists
@property
def stationary_distributions(self):
if self._stationary_dists is None:
self._compute_stationary()
return self._stationary_dists
@property
def cdfs(self):
if (self._cdfs is None) and not self.is_sparse:
# See issue #137#issuecomment-96128186
cdfs = np.empty((self.n, self.n), order='C')
np.cumsum(self.P, axis=-1, out=cdfs)
self._cdfs = cdfs
return self._cdfs
@property
def cdfs1d(self):
if (self._cdfs1d is None) and self.is_sparse:
data = self.P.data
indices = self.P.indices
indptr = self.P.indptr
cdfs1d = np.empty(self.P.nnz, order='C')
for i in range(self.n):
cdfs1d[indptr[i]:indptr[i+1]] = \
data[indptr[i]:indptr[i+1]].cumsum()
self._cdfs1d = cdfs1d
return self._cdfs1d
def simulate(self, ts_length, init=None, num_reps=None, random_state=None):
"""
Simulate time series of state transitions.
Parameters
----------
ts_length : scalar(int)
Length of each simulation.
init : scalar(int) or array_like(int, ndim=1),
optional(default=None)
Initial state(s). If None, the initial state is randomly
drawn.
num_reps : scalar(int), optional(default=None)
Number of repetitions of simulation.
random_state : scalar(int) or np.random.RandomState,
optional(default=None)
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used.
Returns
-------
X : ndarray(int, ndim=1 or 2)
Array containing the sample path(s), of shape (ts_length,)
if init is a scalar (integer) or None and num_reps is None;
of shape (k, ts_length) otherwise, where k = len(init) if
(init, num_reps) = (array, None), k = num_reps if (init,
num_reps) = (int or None, int), and k = len(init)*num_reps
if (init, num_reps) = (array, int).
"""
random_state = check_random_state(random_state)
dim = 1 # Dimension of the returned array: 1 or 2
try:
k = len(init) # init is an array
dim = 2
init_states = np.asarray(init, dtype=int)
if num_reps is not None:
k *= num_reps
init_states = np.tile(init_states, num_reps)
except TypeError: # init is a scalar(int) or None
k = 1
if num_reps is not None:
dim = 2
k = num_reps
if init is None:
init_states = random_state.randint(self.n, size=k)
elif isinstance(init, int):
init_states = np.ones(k, dtype=int) * init
else:
raise ValueError(
'init must be int, array_like of ints, or None'
)
# === set up array to store output === #
X = np.empty((k, ts_length), dtype=int)
# Random values, uniformly sampled from [0, 1)
random_values = random_state.random_sample(size=(k, ts_length-1))
# Generate sample paths and store in X
if not self.is_sparse: # Dense
_generate_sample_paths(
self.cdfs, init_states, random_values, out=X
)
else: # Sparse
_generate_sample_paths_sparse(
self.cdfs1d, self.P.indices, self.P.indptr, init_states,
random_values, out=X
)
if dim == 1:
return X[0]
else:
return X
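# Illustrative sketch (not part of the original module): simulate() broadcasts
# over initial states and repetitions, matching the shapes described in its
# docstring. The transition matrix below is a made-up example.
#
#     mc = MarkovChain([[0.9, 0.1], [0.4, 0.6]])
#     mc.simulate(ts_length=5)                            # shape (5,)
#     mc.simulate(ts_length=5, init=[0, 1])               # shape (2, 5)
#     mc.simulate(ts_length=5, init=0, num_reps=3)        # shape (3, 5)
#     mc.simulate(ts_length=5, init=[0, 1], num_reps=3)   # shape (6, 5)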
def _generate_sample_paths(P_cdfs, init_states, random_values, out):
"""
Generate num_reps sample paths of length ts_length, where num_reps =
out.shape[0] and ts_length = out.shape[1].
Parameters
----------
P_cdfs : ndarray(float, ndim=2)
Array containing as rows the CDFs of the state transition.
init_states : array_like(int, ndim=1)
Array containing the initial states. Its length must be equal to
num_reps.
random_values : ndarray(float, ndim=2)
Array containing random values from [0, 1). Its shape must be
equal to (num_reps, ts_length-1)
out : ndarray(int, ndim=2)
Array to store the sample paths.
Notes
-----
This routine is jit-compiled if the module Numba is available.
"""
num_reps, ts_length = out.shape
for i in range(num_reps):
out[i, 0] = init_states[i]
for t in range(ts_length-1):
out[i, t+1] = searchsorted(P_cdfs[out[i, t]], random_values[i, t])
if numba_installed:
_generate_sample_paths = jit(nopython=True)(_generate_sample_paths)
def _generate_sample_paths_sparse(P_cdfs1d, indices, indptr, init_states,
random_values, out):
"""
For sparse matrix.
Generate num_reps sample paths of length ts_length, where num_reps =
out.shape[0] and ts_length = out.shape[1].
Parameters
----------
P_cdfs1d : ndarray(float, ndim=1)
1D array containing the CDFs of the state transition.
indices : ndarray(int, ndim=1)
CSR format index array.
indptr : ndarray(int, ndim=1)
CSR format index pointer array.
init_states : array_like(int, ndim=1)
Array containing the initial states. Its length must be equal to
num_reps.
random_values : ndarray(float, ndim=2)
Array containing random values from [0, 1). Its shape must be
equal to (num_reps, ts_length-1)
out : ndarray(int, ndim=2)
Array to store the sample paths.
Notes
-----
This routine is jit-compiled if the module Numba is available.
"""
num_reps, ts_length = out.shape
for i in range(num_reps):
out[i, 0] = init_states[i]
for t in range(ts_length-1):
k = searchsorted(P_cdfs1d[indptr[out[i, t]]:indptr[out[i, t]+1]],
random_values[i, t])
out[i, t+1] = indices[indptr[out[i, t]]+k]
if numba_installed:
_generate_sample_paths_sparse = \
jit(nopython=True)(_generate_sample_paths_sparse)
def mc_compute_stationary(P):
"""
Computes stationary distributions of P, one for each recurrent
class. Any stationary distribution is written as a convex
combination of these distributions.
Returns
-------
stationary_dists : array_like(float, ndim=2)
Array containing the stationary distributions as its rows.
"""
return MarkovChain(P).stationary_distributions
def mc_sample_path(P, init=0, sample_size=1000, random_state=None):
"""
Generates one sample path from the Markov chain represented by
(n x n) transition matrix P on state space S = {0,...,n-1}.
Parameters
----------
P : array_like(float, ndim=2)
A Markov transition matrix.
init : array_like(float ndim=1) or scalar(int), optional(default=0)
If init is an array_like, then it is treated as the initial
distribution across states. If init is a scalar, then it is
treated as the deterministic initial state.
sample_size : scalar(int), optional(default=1000)
The length of the sample path.
random_state : scalar(int) or np.random.RandomState,
optional(default=None)
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
X : array_like(int, ndim=1)
The simulation of states.
"""
random_state = check_random_state(random_state)
if isinstance(init, int):
X_0 = init
else:
cdf0 = np.cumsum(init)
u_0 = random_state.random_sample()
X_0 = searchsorted(cdf0, u_0)
mc = MarkovChain(P)
return mc.simulate(ts_length=sample_size, init=X_0,
random_state=random_state)
|
bsd-3-clause
| -8,939,832,694,485,968,000
| 35.033457
| 79
| 0.615444
| false
| 3.682051
| false
| false
| false
|
garbersc/keras-galaxies
|
solutionToClass.py
|
1
|
1410
|
import numpy as np
import os
import csv
import pandas as pd
# TRAIN_LABELS_PATH is used below before its later definition; define it up front.
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
with open(TRAIN_LABELS_PATH, 'r') as f:
reader = csv.reader(f, delimiter=",")
train_ids = []
for k, line in enumerate(reader):
if k == 0: continue # skip header
train_ids.append(int(line[0]))
isEndClass = np.asarray([0,0,1,
0,0,
0,0,
0,0,
0,0,0,0,
0,1,
0,0,0,
1,1,1,1,1,1,1,
0,0,0,
0,0,0,
1,1,1,1,1,1])
d = pd.read_csv(TRAIN_LABELS_PATH)
targets = d.as_matrix()[:, 1:].astype('float32')
classes = np.argmax(np.multiply(targets, isEndClass))
TRAIN_IDS_PATH = "data/train_ids.npy"
# TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
import numpy as np
import os
import csv
with open(TRAIN_LABELS_PATH, 'r') as f:
reader = csv.reader(f, delimiter=",")
train_ids = []
for k, line in enumerate(reader):
if k == 0: continue # skip header
train_ids.append(int(line[0]))
train_ids = np.array(train_ids)
print "Saving %s" % TRAIN_IDS_PATH
np.save(TRAIN_IDS_PATH, train_ids)
# TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"
TARGET_PATH = "data/solutions_train.npy"
import pandas as pd
import numpy as np
d = pd.read_csv(TRAIN_LABELS_PATH)
targets = d.as_matrix()[:, 1:].astype('float32')
print "Saving %s" % TARGET_PATH
np.save(TARGET_PATH, targets)
|
bsd-3-clause
| -187,993,293,303,596,700
| 20.692308
| 58
| 0.648227
| false
| 2.558984
| false
| false
| false
|
unreal666/outwiker
|
src/outwiker/core/iconmaker.py
|
3
|
1151
|
# -*- coding: utf-8 -*-
from PIL import Image
from outwiker.core.defines import ICON_WIDTH, ICON_HEIGHT
class IconMaker(object):
""" Class for creation icons by images. """
def create(self, fname_in, fname_out):
""" Create icon by file fname_in. Result will have saved as fname_out.
"""
img_new = Image.new('RGBA', (ICON_WIDTH, ICON_HEIGHT))
img_src = Image.open(fname_in)
# Resize the source image, if required
width_src, height_src = img_src.size
scale = max(float(width_src) / float(ICON_WIDTH), float(height_src) /
float(ICON_HEIGHT))
if scale > 1:
img_src = img_src.resize((int(width_src / scale),
int(height_src / scale)),
Image.ANTIALIAS)
# Paste source image to result image
dx = int((ICON_WIDTH - img_src.size[0]) / 2.0)
dy = int((ICON_HEIGHT - img_src.size[1]) / 2.0)
assert dx >= 0 and dx < ICON_WIDTH
assert dy >= 0 and dy < ICON_HEIGHT
img_new.paste(img_src, (dx, dy))
img_new.save(fname_out)
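# Usage sketch (the file names below are placeholders, not project paths):
#
#     IconMaker().create('some_image.png', 'icon.png')
#
# The source image is scaled down only when it exceeds ICON_WIDTH x ICON_HEIGHT,
# then centred on a transparent RGBA canvas before saving.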
|
gpl-3.0
| 5,693,076,158,499,590,000
| 33.878788
| 78
| 0.546481
| false
| 3.519878
| false
| false
| false
|
hofschroeer/shinysdr
|
shinysdr/test/test_telemetry.py
|
1
|
4109
|
# Copyright 2015 Kevin Reid <kpreid@switchb.org>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division
from twisted.internet.task import Clock
from twisted.trial import unittest
from zope.interface import implements
from shinysdr.telemetry import ITelemetryMessage, ITelemetryObject, TelemetryItem, TelemetryStore, Track, empty_track
class TestTrack(unittest.TestCase):
def test_init_from_partial_json(self):
self.assertEquals(
empty_track._replace(
latitude=TelemetryItem(1, 1000),
longitude=TelemetryItem(2, 1000)),
Track({
u'latitude': {u'value': 1, u'timestamp': 1000},
u'longitude': {u'value': 2, u'timestamp': 1000},
}))
class TestTelemetryStore(unittest.TestCase):
def setUp(self):
self.clock = Clock()
self.clock.advance(1000)
self.store = TelemetryStore(time_source=self.clock)
def test_new_object(self):
self.assertEqual([], self.store.state().keys())
self.store.receive(Msg('foo', 1000))
self.assertEqual(['foo'], self.store.state().keys())
obj = self.store.state()['foo'].get()
self.assertIsInstance(obj, Obj)
def test_receive_called(self):
self.store.receive(Msg('foo', 1000, 1))
obj = self.store.state()['foo'].get()
self.assertEquals(obj.last_msg, 1)
self.store.receive(Msg('foo', 1000, 2))
self.assertEquals(obj.last_msg, 2)
def test_drop_old(self):
self.store.receive(Msg('foo', 1000))
self.assertEqual(['foo'], self.store.state().keys())
self.clock.advance(1799.5)
self.store.receive(Msg('bar', 2799.5))
self.assertEqual({'bar', 'foo'}, set(self.store.state().keys()))
self.clock.advance(0.5)
self.store.receive(Msg('bar', 2800))
self.assertEqual(['bar'], self.store.state().keys())
def test_become_interesting(self):
self.store.receive(Msg('foo', 1000, 'boring'))
self.assertEqual([], self.store.state().keys())
self.store.receive(Msg('foo', 1001, 'interesting'))
self.assertEqual(['foo'], self.store.state().keys())
# 'become boring' is not implemented, so also not tested yet
def test_drop_old_boring(self):
'''
Make sure that dropping a boring object doesn't fail.
'''
self.store.receive(Msg('foo', 1000, 'boring'))
self.assertEqual([], self.store.state().keys())
self.clock.advance(1800)
self.store.receive(Msg('bar', 2800, 'boring'))
self.assertEqual([], self.store.state().keys())
class Msg(object):
implements(ITelemetryMessage)
def __init__(self, object_id, timestamp, value='no value'):
self.__id = object_id
self.timestamp = timestamp
self.value = value
def get_object_id(self):
return self.__id
def get_object_constructor(self):
return Obj
class Obj(object):
implements(ITelemetryObject)
def __init__(self, object_id):
self.__id = object_id
self.last_msg = 'no message'
self.last_time = None
def receive(self, message):
self.last_msg = message.value
self.last_time = message.timestamp
def is_interesting(self):
return self.last_msg != 'boring'
def get_object_expiry(self):
return self.last_time + 1800
|
gpl-3.0
| 1,781,579,853,708,462,800
| 33.822034
| 117
| 0.627403
| false
| 3.78361
| true
| false
| false
|
Ecogenomics/GtdbTk
|
gtdbtk/config/config.py
|
1
|
12048
|
import json
import os
import sys
"""
Load the reference package. This will fail if the directory doesn't exist.
"""
try:
GENERIC_PATH = os.environ['GTDBTK_DATA_PATH']
except KeyError:
print('\n' + '=' * 80)
print(' ERROR '.center(80))
print('_' * 80 + '\n')
print("The 'GTDBTK_DATA_PATH' environment variable is not defined.".center(80) + '\n')
print('Please set this variable to your reference data package.'.center(80))
print('https://github.com/Ecogenomics/GTDBTk#installation'.center(80))
print('=' * 80)
sys.exit(1)
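# Typical setup (the path below is purely illustrative): the environment
# variable must be exported in the shell before this module is imported,
# otherwise the sys.exit(1) branch above is taken.
#
#     export GTDBTK_DATA_PATH=/srv/refdata/gtdbtk/release202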
"""
If the reference package sub-folders still exist in GTDBTK_DATA_PATH, then there
is no need to edit the variables below.
"""
MIN_REF_DATA_VERSION = 'r202'
MSA_FOLDER = os.path.join(GENERIC_PATH, "msa/")
MASK_DIR = os.path.join(GENERIC_PATH, "masks/")
PPLACER_DIR = os.path.join(GENERIC_PATH, "pplacer/")
FASTANI_DIR = os.path.join(GENERIC_PATH, "fastani/")
TAX_FOLDER = os.path.join(GENERIC_PATH, "taxonomy/")
RADII_DIR = os.path.join(GENERIC_PATH, "radii/")
METADATA_DIR = os.path.join(GENERIC_PATH, "metadata/")
RED_DIR = os.path.join(GENERIC_PATH, "mrca_red/")
MARKER_DIR = os.path.join(GENERIC_PATH, 'markers/')
TIGRFAM_HMMS = os.path.join(MARKER_DIR, 'tigrfam/tigrfam.hmm')
PFAM_HMM_DIR = os.path.join(MARKER_DIR, 'pfam/')
SPLIT_DIR = os.path.join(GENERIC_PATH, 'split')
HIGH_SPLIT_DIR = os.path.join(SPLIT_DIR, 'high')
LOW_SPLIT_DIR = os.path.join(SPLIT_DIR, 'low')
HIGH_PPLACER_DIR = os.path.join(HIGH_SPLIT_DIR, 'pplacer')
LOW_PPLACER_DIR = os.path.join(LOW_SPLIT_DIR, 'pplacer')
HIGH_RED_DIR = os.path.join(HIGH_SPLIT_DIR, 'red')
LOW_RED_DIR = os.path.join(LOW_SPLIT_DIR, 'red')
LOW_TREE_MAPPING_FILE = os.path.join(LOW_SPLIT_DIR, 'tree_mapping.tsv')
HIGH_PPLACER_REF_PKG = 'gtdbtk_package_high_level'
HIGH_RED_FILE = os.path.join(HIGH_RED_DIR, 'high_red_value.tsv')
LOW_PPLACER_REF_PKG = os.path.join(LOW_PPLACER_DIR, 'gtdbtk.package.{iter}.refpkg')
LOW_RED_FILE = os.path.join(LOW_RED_DIR, 'red_value_{iter}.tsv')
RED_DIST_BAC_DICT = ''
RED_DIST_ARC_DICT = ''
VERSION_DATA = ''
try:
with open(os.path.join(METADATA_DIR, "metadata.txt")) as metadataData:
for line in metadataData:
try:
line_infos = line.strip().split('=')
if line_infos[0] == 'RED_DIST_BAC_DICT':
RED_DIST_BAC_DICT = json.loads(line_infos[1])
elif line_infos[0] == 'RED_DIST_ARC_DICT':
RED_DIST_ARC_DICT = json.loads(line_infos[1])
elif line_infos[0] == 'VERSION_DATA':
VERSION_DATA = line_infos[1]
except ValueError:
print(f"Skipping invalid line {repr(line)}")
except IOError:
print('\n' + '=' * 80)
print(' ERROR '.center(80))
print('_' * 80 + '\n')
print('The GTDB-Tk reference data does not exist or is corrupted.'.center(80))
print(('GTDBTK_DATA_PATH=%s' % GENERIC_PATH).center(80) + '\n')
print('Please compare the checksum to those provided in the download repository.'.center(80))
print('https://github.com/Ecogenomics/GTDBTk#gtdb-tk-reference-data'.center(80))
print('=' * 80)
sys.exit(1)
# Relative Evolution Distance
RED_INTERVAL = 0.1
RED_MIN_SUPPORT = 0.0
RED_MIN_CHILDREN = 2
# Marker information
BAC120_MARKERS = {"PFAM": ["PF00380.20.hmm", "PF00410.20.hmm", "PF00466.21.hmm",
"PF01025.20.hmm", "PF02576.18.hmm", "PF03726.15.hmm"],
"TIGRFAM": ["TIGR00006.HMM", "TIGR00019.HMM", "TIGR00020.HMM",
"TIGR00029.HMM", "TIGR00043.HMM", "TIGR00054.HMM",
"TIGR00059.HMM", "TIGR00061.HMM", "TIGR00064.HMM",
"TIGR00065.HMM", "TIGR00082.HMM", "TIGR00083.HMM",
"TIGR00084.HMM", "TIGR00086.HMM", "TIGR00088.HMM",
"TIGR00090.HMM", "TIGR00092.HMM", "TIGR00095.HMM",
"TIGR00115.HMM", "TIGR00116.HMM", "TIGR00138.HMM",
"TIGR00158.HMM", "TIGR00166.HMM", "TIGR00168.HMM",
"TIGR00186.HMM", "TIGR00194.HMM", "TIGR00250.HMM",
"TIGR00337.HMM", "TIGR00344.HMM", "TIGR00362.HMM",
"TIGR00382.HMM", "TIGR00392.HMM", "TIGR00396.HMM",
"TIGR00398.HMM", "TIGR00414.HMM", "TIGR00416.HMM",
"TIGR00420.HMM", "TIGR00431.HMM", "TIGR00435.HMM",
"TIGR00436.HMM", "TIGR00442.HMM", "TIGR00445.HMM",
"TIGR00456.HMM", "TIGR00459.HMM", "TIGR00460.HMM",
"TIGR00468.HMM", "TIGR00472.HMM", "TIGR00487.HMM",
"TIGR00496.HMM", "TIGR00539.HMM", "TIGR00580.HMM",
"TIGR00593.HMM", "TIGR00615.HMM", "TIGR00631.HMM",
"TIGR00634.HMM", "TIGR00635.HMM", "TIGR00643.HMM",
"TIGR00663.HMM", "TIGR00717.HMM", "TIGR00755.HMM",
"TIGR00810.HMM", "TIGR00922.HMM", "TIGR00928.HMM",
"TIGR00959.HMM", "TIGR00963.HMM", "TIGR00964.HMM",
"TIGR00967.HMM", "TIGR01009.HMM", "TIGR01011.HMM",
"TIGR01017.HMM", "TIGR01021.HMM", "TIGR01029.HMM",
"TIGR01032.HMM", "TIGR01039.HMM", "TIGR01044.HMM",
"TIGR01059.HMM", "TIGR01063.HMM", "TIGR01066.HMM",
"TIGR01071.HMM", "TIGR01079.HMM", "TIGR01082.HMM",
"TIGR01087.HMM", "TIGR01128.HMM", "TIGR01146.HMM",
"TIGR01164.HMM", "TIGR01169.HMM", "TIGR01171.HMM",
"TIGR01302.HMM", "TIGR01391.HMM", "TIGR01393.HMM",
"TIGR01394.HMM", "TIGR01510.HMM", "TIGR01632.HMM",
"TIGR01951.HMM", "TIGR01953.HMM", "TIGR02012.HMM",
"TIGR02013.HMM", "TIGR02027.HMM", "TIGR02075.HMM",
"TIGR02191.HMM", "TIGR02273.HMM", "TIGR02350.HMM",
"TIGR02386.HMM", "TIGR02397.HMM", "TIGR02432.HMM",
"TIGR02729.HMM", "TIGR03263.HMM", "TIGR03594.HMM",
"TIGR03625.HMM", "TIGR03632.HMM", "TIGR03654.HMM",
"TIGR03723.HMM", "TIGR03725.HMM", "TIGR03953.HMM"]}
AR122_MARKERS = {"PFAM": ["PF01868.17.hmm", "PF01282.20.hmm", "PF01655.19.hmm",
"PF01092.20.hmm", "PF01000.27.hmm", "PF00368.19.hmm",
"PF00827.18.hmm", "PF01269.18.hmm", "PF00466.21.hmm",
"PF01015.19.hmm", "PF13685.7.hmm", "PF02978.20.hmm",
"PF04919.13.hmm", "PF01984.21.hmm", "PF04104.15.hmm",
"PF00410.20.hmm", "PF01798.19.hmm", "PF01864.18.hmm",
"PF01990.18.hmm", "PF07541.13.hmm", "PF04019.13.hmm",
"PF00900.21.hmm", "PF01090.20.hmm", "PF02006.17.hmm",
"PF01157.19.hmm", "PF01191.20.hmm", "PF01866.18.hmm",
"PF01198.20.hmm", "PF01496.20.hmm", "PF00687.22.hmm",
"PF03874.17.hmm", "PF01194.18.hmm", "PF01200.19.hmm",
"PF13656.7.hmm", "PF01280.21.hmm"],
"TIGRFAM": ["TIGR00468.HMM", "TIGR01060.HMM", "TIGR03627.HMM",
"TIGR01020.HMM", "TIGR02258.HMM", "TIGR00293.HMM",
"TIGR00389.HMM", "TIGR01012.HMM", "TIGR00490.HMM",
"TIGR03677.HMM", "TIGR03636.HMM", "TIGR03722.HMM",
"TIGR00458.HMM", "TIGR00291.HMM", "TIGR00670.HMM",
"TIGR00064.HMM", "TIGR03629.HMM", "TIGR00021.HMM",
"TIGR03672.HMM", "TIGR00111.HMM", "TIGR03684.HMM",
"TIGR01077.HMM", "TIGR01213.HMM", "TIGR01080.HMM",
"TIGR00501.HMM", "TIGR00729.HMM", "TIGR01038.HMM",
"TIGR00270.HMM", "TIGR03628.HMM", "TIGR01028.HMM",
"TIGR00521.HMM", "TIGR03671.HMM", "TIGR00240.HMM",
"TIGR02390.HMM", "TIGR02338.HMM", "TIGR00037.HMM",
"TIGR02076.HMM", "TIGR00335.HMM", "TIGR01025.HMM",
"TIGR00471.HMM", "TIGR00336.HMM", "TIGR00522.HMM",
"TIGR02153.HMM", "TIGR02651.HMM", "TIGR03674.HMM",
"TIGR00323.HMM", "TIGR00134.HMM", "TIGR02236.HMM",
"TIGR03683.HMM", "TIGR00491.HMM", "TIGR00658.HMM",
"TIGR03680.HMM", "TIGR00392.HMM", "TIGR00422.HMM",
"TIGR00279.HMM", "TIGR01052.HMM", "TIGR00442.HMM",
"TIGR00308.HMM", "TIGR00398.HMM", "TIGR00456.HMM",
"TIGR00549.HMM", "TIGR00408.HMM", "TIGR00432.HMM",
"TIGR00264.HMM", "TIGR00982.HMM", "TIGR00324.HMM",
"TIGR01952.HMM", "TIGR03626.HMM", "TIGR03670.HMM",
"TIGR00337.HMM", "TIGR01046.HMM", "TIGR01018.HMM",
"TIGR00936.HMM", "TIGR00463.HMM", "TIGR01309.HMM",
"TIGR03653.HMM", "TIGR00042.HMM", "TIGR02389.HMM",
"TIGR00307.HMM", "TIGR03673.HMM", "TIGR00373.HMM",
"TIGR01008.HMM", "TIGR00283.HMM", "TIGR00425.HMM",
"TIGR00405.HMM", "TIGR03665.HMM", "TIGR00448.HMM"]}
# Information for Multiple hits markers:
DEFAULT_MULTIHIT_THRESHOLD = 10.0
# Information for aligning genomes
DEFAULT_DOMAIN_THRESHOLD = 10.0
AR_MARKER_COUNT = 122
BAC_MARKER_COUNT = 120
# Information about alignment Fraction to resolve fastANI results
AF_THRESHOLD = 0.65
# MSA file names
CONCAT_BAC120 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_bac120.faa")
CONCAT_AR122 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_ar122.faa")
# Taxonomy file name
TAXONOMY_FILE = os.path.join(TAX_FOLDER, "gtdb_taxonomy.tsv")
# Type Strain radii file
RADII_FILE = os.path.join(RADII_DIR, "gtdb_radii.tsv")
# Mask file names
MASK_BAC120 = f"gtdb_{VERSION_DATA}_bac120.mask"
MASK_AR122 = f"gtdb_{VERSION_DATA}_ar122.mask"
MASK_RPS23 = f"gtdb_{VERSION_DATA}_rps23.mask"
# Pplacer configuration
PPLACER_BAC120_REF_PKG = f"gtdb_{VERSION_DATA}_bac120.refpkg"
PPLACER_AR122_REF_PKG = f"gtdb_{VERSION_DATA}_ar122.refpkg"
PPLACER_RPS23_REF_PKG = f"gtdb_{VERSION_DATA}_rps23.refpkg"
PPLACER_MIN_RAM_BAC = 204
PPLACER_MIN_RAM_ARC = 13
# Fastani configuration
FASTANI_SPECIES_THRESHOLD = 95.0
FASTANI_GENOMES = os.path.join(FASTANI_DIR, "database/")
FASTANI_GENOME_LIST = os.path.join(FASTANI_DIR, "genome_paths.tsv")
FASTANI_GENOMES_EXT = "_genomic.fna.gz"
# MRCA RED VALUE
MRCA_RED_BAC120 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_bac120.tsv")
MRCA_RED_AR122 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_ar122.tsv")
# Hashing information for validating the reference package.
REF_HASHES = {PPLACER_DIR: '4d931b5109a240602f55228029b87ee768da8141',
MASK_DIR: '36d6ac371d247b2b952523b9798e78908ea323fa',
MARKER_DIR: '2ba5ae35fb272462663651d18fd9e523317e48cd',
RADII_DIR: '9f9a2e21e27b9049044d04d731795499414a365c',
MSA_FOLDER: 'b426865245c39ee9f01b0392fb8f7867a9f76f0a',
METADATA_DIR: '7640aed96fdb13707a2b79b746a94335faabd6df',
TAX_FOLDER: '4a7a1e4047c088e92dee9740206499cdb7e5beca',
FASTANI_DIR: '70439cf088d0fa0fdbb4f47b4a6b47e199912139',
RED_DIR: 'ad6a184150e7b6e58547912660a17999fadcfbff'}
# Config values for checking GTDB-Tk on startup.
GTDBTK_VER_CHECK = True
GTDBTK_VER_TIMEOUT = 3 # seconds
# Internal settings used for logging.
LOG_TASK = 21
|
gpl-3.0
| 698,190,801,838,647,900
| 51.842105
| 97
| 0.560508
| false
| 2.614016
| false
| false
| false
|
m3talstorm/foe-bot
|
foe/models/resources.py
|
1
|
2337
|
"""
"""
# Native
import time
import pprint
import json
from collections import OrderedDict
# 3rd-Party
from sqlalchemy import Table, Column, ForeignKey, Integer, String, Boolean, Float
from sqlalchemy.orm import relationship, backref
import pydash
#
from request import Request
from models.model import Model
class Resources(Model):
"""
"""
REQUEST_CLASS = "ResourceService"
__tablename__ = 'resources'
# Attributes
# ---------------------------------------------------------
money = Column(Integer, default=0)
supplies = Column(Integer, default=0)
granite = Column(Integer, default=0)
carnival_roses = Column(Integer, default=0)
stars = Column(Integer, default=0)
cloth = Column(Integer, default=0)
honey = Column(Integer, default=0)
lead = Column(Integer, default=0)
population = Column(Integer, default=0)
gems = Column(Integer, default=0)
sandstone = Column(Integer, default=0)
wine = Column(Integer, default=0)
guild_expedition_attempt = Column(Integer, default=0)
medals = Column(Integer, default=0)
alabaster = Column(Integer, default=0)
dye = Column(Integer, default=0)
cypress = Column(Integer, default=0)
ebony = Column(Integer, default=0)
limestone = Column(Integer, default=0)
negotiation_game_turn = Column(Integer, default=0)
expansions = Column(Integer, default=0)
summer_tickets = Column(Integer, default=0)
spring_lanterns = Column(Integer, default=0)
tavern_silver = Column(Integer, default=0)
premium = Column(Integer, default=0)
raw_cypress = Column(Integer, default=0)
raw_dye = Column(Integer, default=0)
raw_cloth = Column(Integer, default=0)
raw_ebony = Column(Integer, default=0)
raw_granite = Column(Integer, default=0)
# Back-refs
# ---------------------------------------------------------
account_id = Column(Integer, ForeignKey('account.player_id'), primary_key=True)
def __init__(self, *args, **kwargs):
"""
"""
return super(Resources, self).__init__(*args, **kwargs)
def __repr__(self):
"""
"""
return "Resources"
def populate(self, *args, **kwargs):
"""
"""
return super(Resources, self).populate(*args, **kwargs)
|
mit
| -8,472,919,973,206,751,000
| 19.146552
| 83
| 0.611896
| false
| 3.775444
| false
| false
| false
|
SectorLabs/django-postgres-extra
|
psqlextra/models/partitioned.py
|
1
|
1374
|
from django.db.models.base import ModelBase
from psqlextra.types import PostgresPartitioningMethod
from .base import PostgresModel
from .options import PostgresPartitionedModelOptions
class PostgresPartitionedModelMeta(ModelBase):
"""Custom meta class for :see:PostgresPartitionedModel.
This meta class extracts attributes from the inner
`PartitioningMeta` class and copies them onto a `_partitioning_meta`
attribute. This is similar to how Django's `_meta` works.
"""
default_method = PostgresPartitioningMethod.RANGE
default_key = []
def __new__(cls, name, bases, attrs, **kwargs):
new_class = super().__new__(cls, name, bases, attrs, **kwargs)
meta_class = attrs.pop("PartitioningMeta", None)
method = getattr(meta_class, "method", None)
key = getattr(meta_class, "key", None)
partitioning_meta = PostgresPartitionedModelOptions(
method=method or cls.default_method, key=key or cls.default_key
)
new_class.add_to_class("_partitioning_meta", partitioning_meta)
return new_class
class PostgresPartitionedModel(
PostgresModel, metaclass=PostgresPartitionedModelMeta
):
"""Base class for taking advantage of PostgreSQL's 11.x native support for
table partitioning."""
class Meta:
abstract = True
base_manager_name = "objects"
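# Usage sketch (model and field names are hypothetical): a subclass declares an
# inner PartitioningMeta, which the metaclass above turns into _partitioning_meta.
#
#     class Invoice(PostgresPartitionedModel):
#         class PartitioningMeta:
#             method = PostgresPartitioningMethod.RANGE
#             key = ["created_at"]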
|
mit
| -2,785,277,113,692,309,000
| 30.953488
| 78
| 0.700873
| false
| 3.994186
| false
| false
| false
|
vially/googlemusic-xbmc
|
resources/Lib/navigation.py
|
1
|
31743
|
import time
from urllib import quote_plus, urlencode
import api
import utils
import xbmc
import xbmcplugin
from xbmcgui import ListItem
fanart = utils.addon.getAddonInfo('fanart')
class Navigation:
def __init__(self):
self.lang = utils.addon.getLocalizedString
self.api = api.Api()
self.contextmenu_action = "XBMC.RunPlugin("+utils.addon_url+"?action=%s&%s)"
self.main_menu = (
{'title': self.lang(30224), 'params': {'path': "home_menu"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30219), 'params': {'path': "listennow"}, 'user': ['subscriber', 'free']},
{'title': self.lang(30220), 'params': {'path': "topcharts"}, 'user': ['subscriber']},
{'title': self.lang(30221), 'params': {'path': "newreleases"}, 'user': ['subscriber']},
{'title': self.lang(30209), 'params': {'path': "library"}, 'user': ['library']},
{'title': self.lang(30202), 'params': {'path': "playlists_menu"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30222), 'params': {'path': "browse_stations"}, 'user': ['subscriber', 'free']},
{'title': self.lang(30208), 'params': {'path': "search"}, 'user': ['library', 'subscriber']}
)
self.lib_menu = (
{'title': self.lang(30203), 'params': {'path': "playlists", 'type': "radio"}},
{'title': self.lang(30210), 'params': {'path': "playlist", 'playlist_id': "feellucky"}},
{'title': self.lang(30214), 'params': {'path': "playlist", 'playlist_id': "shuffled_albums"}},
{'title': self.lang(30201), 'params': {'path': "playlist", 'playlist_id': "all_songs"}},
{'title': self.lang(30205), 'params': {'path': "filter", 'criteria': "artist"}},
{'title': self.lang(30206), 'params': {'path': "filter", 'criteria': "album"}},
{'title': self.lang(30207), 'params': {'path': "filter", 'criteria': "genre"}},
{'title': self.lang(30212), 'params': {'path': "filter", 'criteria': "composer"}},
)
self.playlists_menu = (
{'title': self.lang(30225), 'params': {'path': "playlists", 'type': "recent"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30204), 'params': {'path': "playlists", 'type': "auto"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30202), 'params': {'path': "playlists", 'type': "user"}, 'user': ['library', 'subscriber']},
)
self.home_menu = (
{'title': self.lang(30211), 'params': {'path': "ifl"}, 'user': ['library', 'subscriber']},
{'title': self.lang(30225), 'params': {'path': "home_recents"}, 'user': ['library', 'subscriber']},
)
def listMenu(self, params):
get = params.get
path = get("path", "root")
utils.log("PATH: " + path)
listItems = []
content = ''
sortMethods = [xbmcplugin.SORT_METHOD_UNSORTED]
if path == "root":
# assemble menu depending on user info
subscriber = utils.addon.getSettingBool('subscriber')
library = utils.addon.getSettingInt('fetched_count') > 0
utils.log("Assembling menu for subscriber=%r and library=%r" % (subscriber, library))
for item in self.main_menu:
user = item.pop('user')
if (subscriber and 'subscriber' in user) or \
(library and 'library' in user) or \
(not subscriber and 'free' in user):
listItems.append(item)
listItems = self.getMenuItems(listItems)
elif path == "ifl":
listItems = self.addSongsFromLibrary(self.api.getStationTracks("IFL"), 'library')
content = "songs"
elif path == "home_menu":
listItems = self.getMenuItems(self.home_menu)
listItems.extend(self.get_situations())
content = "albums"
elif path == "situation_items":
listItems = self.get_situations_items(get('situation_id'))
content = "albums"
elif path == "library":
listItems = self.getMenuItems(self.lib_menu)
elif path == "playlists_menu":
listItems = self.getMenuItems(self.playlists_menu)
elif path == "playlist":
listItems = self.listPlaylistSongs(get("playlist_id"))
if get("playlist_id") == 'all_songs':
sortMethods = [xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE]
content = "songs"
elif path == "station":
listItems = self.addSongsFromLibrary(self.api.getStationTracks(get('id')), 'library')
content = "songs"
elif path == "playlists":
listItems = self.getPlaylists(get('type'))
elif path == "filter" and 'album' == get('criteria'):
listItems = self.listAlbums(get('criteria'))
sortMethods = [xbmcplugin.SORT_METHOD_ALBUM_IGNORE_THE, xbmcplugin.SORT_METHOD_VIDEO_YEAR,
xbmcplugin.SORT_METHOD_ARTIST, xbmcplugin.SORT_METHOD_ALBUM, xbmcplugin.SORT_METHOD_DATE]
content = "albums"
elif path in ["artist", "genre"] and get('name'):
album_name = get('name')
paramsAllSongs = {'path': "allcriteriasongs", 'criteria': path, 'name': album_name}
listItems.insert(0, self.createFolder('* ' + self.lang(30201), paramsAllSongs))
listItems.extend(self.listAlbums(path, album_name))
sortMethods = [xbmcplugin.SORT_METHOD_ALBUM_IGNORE_THE, xbmcplugin.SORT_METHOD_VIDEO_YEAR,
xbmcplugin.SORT_METHOD_ARTIST, xbmcplugin.SORT_METHOD_ALBUM, xbmcplugin.SORT_METHOD_DATE]
content = "albums"
elif path == "filter":
listItems = self.getCriteria(get('criteria'))
sortMethods = [xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE]
elif path == "allcriteriasongs":
listItems = self.listAllCriteriaSongs(get('criteria'), get('name'))
sortMethods = [xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE]
content = "songs"
elif path in ["genre", "artist", "album", "composer"]:
songs = self.api.getFilterSongs(path, get('album'), get('artist', ''))
listItems = self.addSongsFromLibrary(songs, 'library')
sortMethods = [xbmcplugin.SORT_METHOD_TRACKNUM, xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE,
xbmcplugin.SORT_METHOD_PLAYCOUNT, xbmcplugin.SORT_METHOD_SONG_RATING]
content = "songs"
elif path == "search":
listItems.append(self.createFolder(self.lang(30223), {'path': 'search_new'}))
history = utils.addon.getSetting('search-history').split('|')
for item in history:
if item:
listItems.append(self.createFolder(item, {'path': 'search_query', 'query': item}))
elif path == "search_new":
keyboard = xbmc.Keyboard('', self.lang(30208))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
listItems = self.getSearch(keyboard.getText())
history = utils.addon.getSetting('search-history')
history = keyboard.getText() + ('|' + history if history else '')
if len(history.split('|')) > 10:
history = '|'.join(history.split('|')[0:-1])
utils.addon.setSetting('search-history', history)
content = "songs"
else:
return
elif path == "search_query":
listItems = self.getSearch(get("query"))
content = "songs"
elif path == "search_result":
utils.log("SEARCH_RESULT: " + get('query'))
listItems = self.getSearch(params)
content = "songs"
elif path == "listennow":
listItems = self.getListennow(self.api.getApi().get_listen_now_items())
content = "albums"
elif path == "topcharts":
listItems.append(self.createFolder(self.lang(30206), {'path': 'topcharts_albums'}))
listItems.append(self.createFolder(self.lang(30213), {'path': 'topcharts_songs'}))
elif path == "topcharts_songs":
listItems = self.addSongsFromLibrary(self.api.getTopcharts(), 'library')
content = "songs"
elif path == "topcharts_albums":
listItems = self.createAlbumFolder(self.api.getTopcharts(content_type='albums'))
content = "albums"
elif path == "newreleases":
listItems = self.createAlbumFolder(self.api.getNewreleases())
content = "albums"
elif path == "browse_stations":
listItems = self.browseStations(get('category'))
elif path == "get_stations":
listItems = self.getCategoryStations(self.api.getApi().get_stations(get('subcategory')))
elif path == "create_station":
if not utils.addon.getSettingBool('subscriber'):
xbmc.executebuiltin("XBMC.Notification(%s,%s,5000,%s)" % (
utils.plugin, utils.tryEncode("Song skipping is limited!"), utils.addon.getAddonInfo('icon')))
tracks = self.api.startRadio(get('name'), artist_id=get('artistid'), genre_id=get('genreid'),
curated_station_id=get('curatedid'), track_id=get('trackid'))
listItems = self.addSongsFromLibrary(tracks, 'library')
content = "songs"
# utils.playAll(tracks)
# utils.setResolvedUrl(listItems[0][1])
elif path == "genres":
listItems = self.getGenres(self.api.getApi().get_top_chart_genres())
elif path == "store_album":
utils.log("ALBUM: " + get('album_id'))
listItems = self.addSongsFromLibrary(self.api.getAlbum(get('album_id')), 'library')
content = "songs"
elif path == "artist_topsongs":
listItems = self.addSongsFromLibrary(self.api.getArtistInfo(get('artistid'))['tracks'], 'library')
content = "songs"
elif path == "related_artists":
listItems = []
items = self.api.getArtistInfo(get('artistid'), False, 0, relartists=10)['relartists']
for item in items:
params = {'path': 'artist_topsongs', 'artistid': item['artistId']}
listItems.append(self.createFolder(item['name'], params, arturl=item['artistArtRef']))
elif path == "home_recents":
listItems = self.get_recents()
content = "album"
else:
utils.log("Invalid path: " + get("path"))
return
utils.setDirectory(listItems, content, sortMethods)
def getMenuItems(self, items):
menuItems = []
for menu_item in items:
params = menu_item['params']
cm = []
if 'playlist_id' in params:
cm = self.getPlayAllContextMenu(menu_item['title'], params['playlist_id'])
elif 'type' in params:
cm.append(self.create_menu(30304, "update_playlists", {'playlist_type': params['type']}))
cm.append(self.create_menu(30306, "add_favourite", {'path': 'playlists', 'playlist_type': params['type'], 'title': menu_item['title']}))
cm.append(self.create_menu(30316, "create_playlist"))
elif params['path'] == 'library':
cm.append(self.create_menu(30305, "update_library"))
cm.append(self.create_menu(30306, "add_favourite", {'path': 'library', 'title': menu_item['title']}))
elif 'criteria' in params:
cm.append(self.create_menu(30306, "add_favourite", {'path': 'filter', 'criteria': params['criteria'], 'title': menu_item['title']}))
menuItems.append(self.createFolder(menu_item['title'], params, cm))
return menuItems
def listPlaylistSongs(self, playlist_id):
utils.log("Loading playlist: " + playlist_id)
songs = self.api.getPlaylistSongs(playlist_id)
if playlist_id == 'videos':
return self.addVideosFromLibrary(songs)
if playlist_id in ('thumbsup', 'lastadded', 'mostplayed', 'freepurchased', 'feellucky', 'all_songs', 'shuffled_albums'):
return self.addSongsFromLibrary(songs, 'library')
return self.addSongsFromLibrary(songs, 'playlist' + playlist_id)
def addVideosFromLibrary(self, library):
listItems = []
for song in library:
li = ListItem(song['display_name'], offscreen=True)
li.setArt({'thumb': song['albumart'], 'fanart': song['artistart']})
li.setProperties({'IsPlayable': 'true', 'Video': 'true'})
listItems.append(["plugin://plugin.video.youtube/play/?video_id=%s" % song['videoid'], li])
return listItems
def addSongsFromLibrary(self, library, song_type):
return [[utils.getUrl(song), self.createItem(song, song_type)] for song in library]
def listAllCriteriaSongs(self, filter_type, filter_criteria):
songs = self.api.getFilterSongs(filter_type, filter_criteria, '')
listItems = []
# add album name when showing all artist songs
for song in songs:
songItem = self.createItem(song, 'library')
songItem.setLabel("".join(['[', song['album'], '] ', song['title']]))
songItem.setLabel2(song['album'])
listItems.append([utils.getUrl(song), songItem])
return listItems
def createItem(self, song, song_type):
infoLabels = {
'tracknumber': song['tracknumber'], 'duration': song['duration'], 'year': song['year'],
'genre': song['genre'], 'album': song['album'], 'artist': song['artist'], 'title': song['title'],
'playcount': song['playcount'], 'rating': song['rating'], 'discnumber': song['discnumber'],
'mediatype': 'song'
}
li = utils.createItem(song['display_name'], song['albumart'], song['artistart'])
li.setInfo(type='Music', infoLabels=infoLabels)
li.addContextMenuItems(self.getSongContextMenu(song['song_id'], song['display_name'], song_type))
return li
def getPlaylists(self, playlist_type):
utils.log("Getting playlists of type: " + playlist_type)
listItems = []
append = listItems.append
addFolder = self.createFolder
if playlist_type == 'radio':
for rs in self.api.getStations():
# utils.log(repr(rs))
image = rs['compositeArtRefs'][0]['url'] if 'compositeArtRefs' in rs else rs['imageUrls'][0]['url'] if 'imageUrls' in rs else None
cm = self.getRadioContextMenu(rs['name'], rs['id'])
append(addFolder(rs['name'], {'path': "station", 'id': rs['id']}, cm, image))
elif playlist_type == 'auto':
auto = [['thumbsup', self.lang(30215)], ['lastadded', self.lang(30216)],
['freepurchased', self.lang(30217)], ['mostplayed', self.lang(30218)],
['videos', 'Videos']]
for pl_id, pl_name in auto:
cm = self.getPlayAllContextMenu(pl_name, pl_id)
append(addFolder(pl_name, {'path': "playlist", 'playlist_id': pl_id}, cm))
else:
for pl_id, pl_name, pl_arturl, pl_token, pl_recent in self.api.getPlaylistsByType(playlist_type):
cm = self.getPlayAllContextMenu(pl_name, pl_id, pl_token)
append(addFolder(pl_name, {'path': "playlist", 'playlist_id': pl_id}, cm, pl_arturl))
return listItems
def listAlbums(self, criteria, name=''):
utils.log("LIST ALBUMS: " + repr(criteria) + " " + repr(name))
listItems = []
getCm = self.getFilterContextMenu
items = self.api.getCriteria(criteria, name)
for item in items:
# utils.log(repr(item))
album = item['album']
artist = item['album_artist']
params = {'path': criteria, 'album': album, 'artist': artist}
folder = self.createFolder(album, params, getCm(criteria, album, artist), item['arturl'], artist, item['artistart'])
folder[1].setInfo(type='Music', infoLabels={'year': item['year'], 'artist': artist, 'album': album,
'date': time.strftime('%d.%m.%Y', time.gmtime(item['date'] / 1000000)), 'mediatype': 'album'})
listItems.append(folder)
return listItems
def getCriteria(self, criteria):
utils.log("CRITERIA: " + repr(criteria))
folder = self.createFolder
getCm = self.getFilterContextMenu
items = self.api.getCriteria(criteria)
if criteria in ('artist', 'genre'):
return [folder(item['criteria'], {'path': criteria, 'name': item['criteria']},
getCm(criteria, item['criteria']), item['arturl'], fanarturl=item['arturl']) for item in items]
else:
return [folder(item['criteria'], {'path': criteria, 'album': item['criteria']},
getCm(criteria, item['criteria'])) for item in items]
def get_recents(self):
listItems = []
dictItems = {}
addFolder = self.createFolder
for pl_id, pl_name, pl_arturl, pl_token, pl_recent in self.api.getPlaylistsByType('user'):
cm = self.getPlayAllContextMenu(pl_name, pl_id, pl_token)
dictItems[int(pl_recent)] = addFolder(pl_name+" (Playlist)", {'path': 'playlist', 'playlist_id': pl_id}, cm, pl_arturl)
from datetime import datetime, timedelta
filtertime = ((datetime.today() - timedelta(40)) - datetime(1970,1,1)).total_seconds() * 1000000
for rs in self.api.getStations():
if int(rs['lastModifiedTimestamp']) < filtertime:
continue
image = rs['compositeArtRefs'][0]['url'] if 'compositeArtRefs' in rs else rs['imageUrls'][0]['url'] if 'imageUrls' in rs else None
cm = self.getRadioContextMenu(rs['name'], rs['id'])
if rs['seed']['seedType'] == '3':
rs['name'] = rs['name'] + " Radio"
dictItems[int(rs['recentTimestamp'])] = addFolder(rs['name'], {'path': 'station', 'id': rs['id']}, cm, image)
#for song in self.api.getRecent():
# cm = self.getFilterContextMenu("album", song['album'], song['artist'])
# dictItems[song['recent']] = addFolder(song['album'], {'path': 'album', 'album': song['album'], 'artist': song['artist']}, cm, song['albumart'])
for key in sorted(dictItems.keys(), reverse=True):
#utils.log("RECENTS: "+str(key)+" "+repr(dictItems[key][1].getLabel()))
listItems.append(dictItems[key])
return listItems
def getListennow(self, items):
listItems = []
for item in items:
suggestion = item.get('suggestion_text')
image = item.get('images', [{'url': ''}])[0]['url']
# default to a radio station when no type is given
item_type = item.get('type', '3')
if item_type == '1':
album = item['album']
listItems.extend(self.createAlbumFolder([{
'name': album['title'] + ' (' + suggestion + ')',
'artist': album['artist_name'],
'albumArtRef': image,
'albumId': album['id']['metajamCompactKey']}]))
elif item_type == '3':
radio = item['radio_station']
params = {'path': 'create_station',
'name': utils.tryEncode('Radio %s (%s)' % (radio['title'], suggestion))}
params.update(self.getStationSeed(radio['id']['seeds'][0]))
listItems.append(self.createFolder(params['name'], params, arturl=image))
else:
utils.log("ERROR item type unknown " + repr(item['type']))
return listItems
def get_situations(self):
listItems = []
items = self.api.get_situations()
for item in items:
params = {'path': 'situation_items', 'situation_id': item['id']}
listItems.append(self.createFolder(item['title'], params, arturl=item.get('imageUrl'), fanarturl=item.get('wideImageUrl')))
return listItems
def get_situations_items(self, situation_id):
listItems = []
items = self.api.get_situations()
for item in items:
if item['id'] == situation_id:
##return self.getListennow(item['stations'])
return self.getCategoryStations(item['stations'])
utils.log("ERROR Situation not found: "+situation_id)
return None
def browseStations(self, index=None):
listItems = []
items = self.api.getStationsCategories()
utils.log("INDEX:"+repr(index)+"\n"+repr(items))
if index:
# list subcategories from category index
items = items[int(index)].get('subcategories')
for item in items:
# populate with categories or subcategories
if 'subcategories' in item:
params = {'path': 'browse_stations'}
else:
params = {'path': 'get_stations'}
params['category'] = items.index(item)
params['subcategory'] = item['id']
listItems.append(self.createFolder(item['display_name'], params))
return listItems
def getCategoryStations(self, items):
listItems = []
utils.log("STATIONS: "+repr(items))
for item in items:
#utils.log("STATION: "+repr(item))
params = {'path': 'create_station', 'name': utils.tryEncode(item['name'])}
params.update(self.getStationSeed(item['seed']))
url1 = item['compositeArtRefs'][0]['url'] if 'compositeArtRefs' in item else ''
url2 = item['imageUrls'][0]['url']
folder = self.createFolder(item['name'], params, arturl=url1, name2=item.get('description'), fanarturl=url2)
folder[1].setInfo(type='Music', infoLabels={'comment': item.get('description', 'No description'),
'date': time.strftime('%d.%m.%Y', time.gmtime(item.get('recentTimestamp', 0) / 1000000))})
listItems.append(folder)
return listItems
def getStationSeed(self, seed):
seed_id = {}
if seed['seedType'] == '3':
seed_id['artistid'] = seed['artistId']
elif seed['seedType'] == '5':
seed_id['genreid'] = seed['genreId']
elif seed['seedType'] == '2':
seed_id['trackid'] = seed['trackId']
elif seed['seedType'] == '9':
seed_id['curatedid'] = seed['curatedStationId']
else:
utils.log("ERROR seedtype unknown " + repr(seed['seedType']))
return seed_id
def createAlbumFolder(self, items):
listItems = []
for item in items:
params = {'path': 'store_album', 'album_id': item['albumId']}
cm = [self.create_menu(30301, "play_all", params),
self.create_menu(30309, "add_album_library", params),
self.create_menu(30315, "add_to_queue", params)]
folder = self.createFolder("[%s] %s" % (item['artist'], item['name']), params, cm, item.get('albumArtRef', ''),
item.get('description'), fanarturl=item.get('artistArtRef', ''))
folder[1].setInfo(type='Music', infoLabels={'comment': item.get('description', 'No description'),
'artist': item['artist'], 'album': item['name'], 'mediatype': 'album'})
listItems.append(folder)
# print repr(items)
return listItems
def createFolder(self, name, params, contextMenu=[], arturl='', name2='*', fanarturl=fanart):
li = ListItem(label=name, label2=name2, offscreen=True)
li.setArt({'thumb': arturl, 'fanart': fanarturl})
li.addContextMenuItems(contextMenu)
return "?".join([utils.addon_url, urlencode(params, doseq=True)]), li, "true"
def getSongContextMenu(self, song_id, display_name, song_type):
params = {'song_id': song_id, 'display_name': display_name}
cm = []
if song_id.startswith('T'):
cm.append(self.create_menu(30309, "add_library", params))
cm.append(self.create_menu(30319, "artist_topsongs", params))
cm.append(self.create_menu(30320, "related_artists", params))
if song_type == 'library':
cm.append(self.create_menu(30307, "add_playlist", params))
elif song_type.startswith('playlist'):
playlist = {'song_id': song_id, 'display_name': display_name, 'playlist_id': song_type[8:]}
cm.append(self.create_menu(30322, "play_all", playlist))
cm.append(self.create_menu(30308, "del_from_playlist", playlist))
cm.append(self.create_menu(30409, "set_thumbs", params))
cm.append(self.create_menu(30313, "play_yt", params))
cm.append(self.create_menu(30311, "search_yt", params))
cm.append(self.create_menu(30310, "start_radio", params))
return cm
def getRadioContextMenu(self, name, radio_id):
params = {'radio_id': radio_id, 'title': name}
shuffle = params.copy()
shuffle.update({'shuffle': 'true'})
return [
self.create_menu(30301, "play_all", params),
self.create_menu(30302, "play_all", shuffle),
self.create_menu(30312, "play_all_yt", params),
self.create_menu(30321, "play_all_yt", shuffle),
self.create_menu(30306, "add_favourite", {'radio_id': radio_id, 'title': name, 'path': 'playlist'}),
self.create_menu(30315, "add_to_queue", params),
self.create_menu(30318, "delete_station", params)
]
def getPlayAllContextMenu(self, name, playlist, token=None):
params = {'playlist_id': playlist, 'title': name}
shuffle = params.copy()
shuffle.update({'shuffle': 'true'})
cm = [
self.create_menu(30301, "play_all", params),
self.create_menu(30302, "play_all", shuffle),
self.create_menu(30312, "play_all_yt",params),
self.create_menu(30321, "play_all_yt", shuffle),
self.create_menu(30306, "add_favourite", {'playlist_id': playlist, 'title': name, 'path': 'playlist'}),
self.create_menu(30315, "add_to_queue", params),
self.create_menu(30317, "delete_playlist", params)
]
if token:
cm.append(self.create_menu(30310, "start_radio", {'playlist_id': playlist, 'title': name, 'token': token}))
return cm
def getFilterContextMenu(self, filter_type, filter_criteria, artist=''):
params = {'filter_type': filter_type, 'filter_criteria': filter_criteria, 'artist': artist}
shuffle = params.copy()
shuffle.update({'shuffle': 'true'})
return [
self.create_menu(30301, "play_all", params),
self.create_menu(30302, "play_all", shuffle),
self.create_menu(30312, "play_all_yt", params),
self.create_menu(30321, "play_all_yt", shuffle),
self.create_menu(30306, "add_favourite", {'path': filter_type, 'name': filter_criteria, 'title': filter_criteria}),
self.create_menu(30315, "add_to_queue", params),
self.create_menu(30208, "search", params),
]
def create_menu(self, text_code, action, params={'1':1}):
return self.lang(text_code), self.contextmenu_action % (action, urlencode(params, doseq=True))
def getSearch(self, query):
listItems = []
def listAlbumsResults():
for album in result['albums']:
if 'albumId' in album:
listItems.extend(self.createAlbumFolder([album]))
else:
params = {'path': 'album', 'album': utils.tryEncode(album['name']), 'artist': utils.tryEncode(album['artist'])}
cm = self.getFilterContextMenu('album', album['name'])
folder_name = "[%s] %s" % (album['artist'], album['name'])
listItems.append(self.createFolder(folder_name, params, cm, album['albumart'], album['artistart']))
def listArtistsResults():
cm = []
for artist in result['artists']:
params = {'path': 'artist', 'name': utils.tryEncode(artist['name'])}
if 'artistId' in artist:
params = {'path': 'search_result', 'artistid': artist['artistId'], 'query': utils.tryEncode(artist['name'])}
cm = [self.create_menu(30301, "play_all", {'artist_id': artist['artistId']})]
art = artist['artistArtRef']
listItems.append(self.createFolder(artist['name'], params, cm, arturl=art, fanarturl=art))
if isinstance(query, str):
result = self.api.getSearch(query)
if result['artists']:
listItems.append(self.createFolder('[COLOR orange]*** ' + self.lang(30205) + ' ***[/COLOR] +>',
{'path': 'search_result', 'type': 'artist', 'query': query}))
listArtistsResults()
if result['albums']:
listItems.append(self.createFolder('[COLOR orange]*** ' + self.lang(30206) + ' ***[/COLOR] +>',
{'path': 'search_result', 'type': 'album', 'query': query}))
listAlbumsResults()
if result['tracks']:
listItems.append(self.createFolder('[COLOR orange]*** ' + self.lang(30213) + ' ***[/COLOR] +>',
{'path': 'search_result', 'type': 'track', 'query': query}))
listItems.extend(self.addSongsFromLibrary(result['tracks'], 'library'))
if result['stations']:
listItems.append(
self.createFolder('[COLOR orange]*** ' + self.lang(30203) + ' ***[/COLOR]', {'path': 'none'}))
listItems.extend(self.getCategoryStations(result['stations']))
if result['videos']:
listItems.append(self.createFolder('[COLOR orange]*** Youtube ***[/COLOR]', {'path': 'none'}))
for video in result['videos']:
listItems.append(
self.createFolder(video['title'], {'action': 'play_yt', 'display_name': video['title']}))
elif 'artistid' in query:
result = self.api.getArtistInfo(query['artistid'], True, 20, 0)
if result['albums']:
listItems.append(
self.createFolder('[COLOR orange]*** ' + self.lang(30206) + ' ***[/COLOR]', {'path': 'none'}))
listAlbumsResults()
listItems.append(
self.createFolder('[COLOR orange]*** ' + self.lang(30213) + ' ***[/COLOR]', {'path': 'none'}))
listItems.extend(self.addSongsFromLibrary(result['tracks'], 'library'))
elif 'type' in query:
result = self.api.getSearch(query['query'], max_results=50)
if query['type'] == 'artist':
listArtistsResults()
elif query['type'] == 'album':
listAlbumsResults()
elif query['type'] == 'track':
listItems.extend(self.addSongsFromLibrary(result['tracks'], 'library'))
else:
listItems.extend(self.getSearch(query['query']))
return listItems
|
gpl-3.0
| 4,076,895,093,247,734,000
| 48.290373
| 156
| 0.560502
| false
| 3.83277
| false
| false
| false
|
siavooshpayandehazad/high_level_test_pattern_gen
|
src/DetPatGen/ALU.py
|
1
|
1113
|
import numpy
def alu( op1, op2, alu_op, c_in):
if alu_op == "0000": # mov
result = op2
elif alu_op == "0001": # add
result = op1 + op2
elif alu_op == "0010": # sub
result = op1 - op2
elif alu_op == "0011": # cmp
result = op1
elif alu_op == "0100": # and
result = numpy.bitwise_and(op1, op2)
elif alu_op == "0101": # or
result = numpy.bitwise_or(op1, op2)
elif alu_op == "0110": # xor
result = numpy.bitwise_xor(op1, op2)
elif alu_op == "0111": # not
result = numpy.invert(op2)
elif alu_op == "1000": # shl
result = numpy.left_shift(op1,1)
elif alu_op == "1001": # shr
result = numpy.right_shift(op1,1)
elif alu_op == "1010": # asr
result = numpy.bitwise_or(numpy.bitwise_and(op1, 128), numpy.right_shift(op1,1))
elif alu_op == "1011": # inc
result = op1 + 1
elif alu_op == "1100": # dec
result = op1 - 1
elif alu_op == "1101": # rlc
result = numpy.bitwise_or(128*c_in, numpy.right_shift(op1,1))
elif alu_op == "1110": # rrc
result = numpy.bitwise_or(c_in, numpy.left_shift(op1,1))
elif alu_op == "1111": # nop
result = 0
return result
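# Worked example (illustrative): alu_op "0001" selects addition, so
# alu(3, 5, "0001", 0) evaluates to 8, while alu_op "0110" selects xor and
# alu(3, 5, "0110", 0) gives numpy.bitwise_xor(3, 5) == 6.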
|
gpl-3.0
| 402,474,652,033,670,900
| 29.944444
| 82
| 0.592992
| false
| 2.212724
| false
| false
| false
|
shoopio/shoop
|
shuup/gdpr/models.py
|
1
|
8040
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import activate, get_language
from parler.models import TranslatableModel, TranslatedFields
from reversion.models import Version
from shuup.gdpr.utils import get_active_consent_pages
from shuup.simple_cms.models import Page
GDPR_ANONYMIZE_TASK_TYPE_IDENTIFIER = "gdpr_anonymize"
@python_2_unicode_compatible
class GDPRSettings(TranslatableModel):
shop = models.OneToOneField("shuup.Shop", related_name="gdpr_settings")
enabled = models.BooleanField(
default=False,
verbose_name=_('enabled'),
help_text=_("Define if the GDPR is active.")
)
skip_consent_on_auth = models.BooleanField(
default=False,
verbose_name=_("skip consent on login"),
help_text=_("Do not require consent on login when GDPR is activated.")
)
privacy_policy_page = models.ForeignKey(
"shuup_simple_cms.Page",
null=True,
verbose_name=_("privacy policy page"),
help_text=_("Choose your privacy policy page here. If this page changes, customers will be "
"prompted for new consent."))
consent_pages = models.ManyToManyField(
"shuup_simple_cms.Page",
verbose_name=_("consent pages"),
related_name="consent_settings",
help_text=_("Choose pages here which are being monitored for customer consent. If any of these pages change"
", the customer is being prompted for a new consent."))
translations = TranslatedFields(
cookie_banner_content=models.TextField(
blank=True,
verbose_name=_("cookie banner content"),
help_text=_("The text to be presented to users in a pop-up warning.")
),
cookie_privacy_excerpt=models.TextField(
blank=True,
verbose_name=_("cookie privacy excerpt"),
help_text=_("The summary text to be presented about cookie privacy.")
),
auth_consent_text=models.TextField(
blank=True,
verbose_name=_("login consent text"),
help_text=_("Shown in login page between the form and the button. "
"Optional but should be considered when the consent on login is disabled.")
)
)
class Meta:
verbose_name = _('GDPR settings')
verbose_name_plural = _('GDPR settings')
def __str__(self):
return _("GDPR for {}").format(self.shop)
def set_default_content(self):
language = get_language()
for code, name in settings.LANGUAGES:
activate(code)
self.set_current_language(code)
self.cookie_banner_content = settings.SHUUP_GDPR_DEFAULT_BANNER_STRING
self.cookie_privacy_excerpt = settings.SHUUP_GDPR_DEFAULT_EXCERPT_STRING
self.save()
self.set_current_language(language)
activate(language)
@classmethod
def get_for_shop(cls, shop):
instance, created = cls.objects.get_or_create(shop=shop)
if created or not instance.safe_translation_getter("cookie_banner_content"):
instance.set_default_content()
return instance
@python_2_unicode_compatible
class GDPRCookieCategory(TranslatableModel):
shop = models.ForeignKey("shuup.Shop", related_name="gdpr_cookie_categories")
always_active = models.BooleanField(default=False, verbose_name=_('always active'))
default_active = models.BooleanField(
verbose_name=_('active by default'),
default=False,
help_text=_('whether this cookie category is active by default')
)
cookies = models.TextField(
verbose_name=_("cookies used"),
help_text=_(
"Comma separated list of cookies names, prefix or suffix "
"that will be included in this category, "
"e.g. _ga, mysession, user_c_"
),
)
translations = TranslatedFields(
name=models.CharField(max_length=64, verbose_name=_("name")),
how_is_used=models.TextField(
verbose_name=_("how we use"),
help_text=_("Describe the purpose of this category of cookies and how it is used."),
blank=True
)
)
class Meta:
verbose_name = _('GDPR cookie category')
verbose_name_plural = _('GDPR cookie categories')
def __str__(self):
return _("GDPR cookie category for {}").format(self.shop)
@python_2_unicode_compatible
class GDPRUserConsent(models.Model):
created_on = models.DateTimeField(
auto_now_add=True,
editable=False,
db_index=True,
verbose_name=_("created on")
)
shop = models.ForeignKey(
"shuup.Shop",
related_name="gdpr_consents",
editable=False
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='gdpr_consents',
on_delete=models.PROTECT,
editable=False
)
documents = models.ManyToManyField(
"GDPRUserConsentDocument",
verbose_name=_("consent documents"),
blank=True,
editable=False
)
class Meta:
verbose_name = _('GDPR user consent')
verbose_name_plural = _('GDPR user consents')
@classmethod
def ensure_for_user(cls, user, shop, consent_documents):
documents = []
for page in consent_documents:
Page.create_initial_revision(page)
version = Version.objects.get_for_object(page).first()
consent_document = GDPRUserConsentDocument.objects.create(
page=page,
version=version
)
documents.append(consent_document)
# ensure only one consent exists for this user in this shop
consent = cls.objects.filter(shop=shop, user=user).first()
if consent:
consents = cls.objects.filter(shop=shop, user=user).order_by("-created_on")
if consents.count() > 1:
# There are multiple consents, remove excess
ids = [c.id for c in consents.all() if c.id != consent.id]
cls.objects.filter(pk__in=ids).delete()
else:
consent = cls.objects.create(shop=shop, user=user)
consent.documents = documents
return consent
@classmethod
def get_for_user(cls, user, shop):
return cls.objects.filter(user=user, shop=shop).order_by("-created_on").first()
def should_reconsent(self, shop, user):
consent_pages_ids = set([page.id for page in get_active_consent_pages(shop)])
page_ids = set([doc.page.id for doc in self.documents.all()])
if consent_pages_ids != page_ids:
return True
# all matches, check versions
for consent_document in self.documents.all():
version = Version.objects.get_for_object(consent_document.page).first()
if consent_document.version != version:
return True
return False
def should_reconsent_to_page(self, page):
version = Version.objects.get_for_object(page).first()
return not self.documents.filter(page=page, version=version).exists()
def __str__(self):
return _("GDPR user consent in {} for user {} in shop {}").format(self.created_on, self.user, self.shop)
@python_2_unicode_compatible
class GDPRUserConsentDocument(models.Model):
page = models.ForeignKey("shuup_simple_cms.Page")
version = models.ForeignKey(Version)
def __str__(self):
return _("GDPR user consent document for {} (Version: {})").format(self.page, self.version)
|
agpl-3.0
| -4,302,238,486,277,676,000
| 36.050691
| 116
| 0.631219
| false
| 3.990074
| false
| false
| false
|
jowolf/thelibregroup
|
fabfile.py
|
1
|
16139
|
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", # POSIX
"fab-script.py"): # Windows
# Ensure we import settings from the current dir
try:
conf = __import__("settings", globals(), locals(), [], 0).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print "Aborting, no hosts defined."
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % (env.venv_path,
env.venv_path)
env.live_host = conf.get("LIVE_HOSTNAME", env.hosts[0] if env.hosts else None)
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl reload",
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/live_settings.py",
"remote_path": "%(proj_path)s/local_settings.py",
},
}
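# Example (illustrative invocation): syncing just the nginx template with
#
#     fab nginx_config
#
# calls upload_template_and_reload("nginx") below, which pushes the rendered
# deploy/nginx.conf only when it differs from the remote copy and then runs
# the entry's reload_command ("service nginx restart").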
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
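# Usage sketch (command is illustrative): both helpers are ordinary context
# managers, e.g.
#
#     with project():
#         run("python manage.py collectstatic --noinput")
#
# runs the command from the project directory with the virtualenv activated.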
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print
print output
print
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
def logged(*args, **kwargs):
header = "-" * len(func.__name__)
_print(green("\n".join([header, func.__name__, header]), bold=True))
return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
Uploads a template only if it has changed, and if so, reload a
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
@task
def nginx_config():
"""
Installs nginx config from template
"""
    return upload_template_and_reload('nginx')
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print settings.STATIC_ROOT", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
locale = "LC_ALL=%s" % env.locale
with hide("stdout"):
if locale not in sudo("cat /etc/default/locale"):
sudo("update-locale %s" % locale)
run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor")
sudo("easy_install pip")
sudo("pip install virtualenv mercurial")
@task
@log_call
def create():
"""
Create a new virtual environment for a project.
Pulls the project's repo from version control, adds system-level
configs for the project, and initialises the database with the
live host.
"""
# Create virtualenv
with cd(env.venv_home):
if exists(env.proj_name):
prompt = raw_input("\nVirtualenv exists: %s\nWould you like "
"to replace it? (yes/no) " % env.proj_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
remove()
run("virtualenv %s --distribute" % env.proj_name)
vcs = "git" if env.git else "hg"
run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))
# Create DB and DB user.
pw = db_pass()
user_sql_args = (env.proj_name, pw.replace("'", "\'"))
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate.
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.live_host)
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Set up project.
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle south psycopg2 "
"django-compressor python-memcached")
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"site, _ = Site.objects.get_or_create(id=settings.SITE_ID);"
"site.domain = '" + env.live_host + "';"
"site.save();")
if env.admin_pass:
pw = env.admin_pass
user_py = ("from mezzanine.utils.models import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
psql("DROP DATABASE %s;" % env.proj_name)
psql("DROP USER %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
sudo("kill -HUP `cat %s`" % pid_path)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
    collect any new static assets, and restart gunicorn's worker
processes for the project.
"""
if not exists(env.venv_path):
prompt = raw_input("\nVirtualenv doesn't exist: %s\nWould you like "
"to create it? (yes/no) " % env.proj_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
create()
for name in get_templates():
upload_template_and_reload(name)
with project():
backup("last.db")
static_dir = static()
if exists(static_dir):
run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD" if git else "hg id -i"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f" if git else "hg pull && hg up -C")
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the last commit checked out, the database,
and all static files. Calling rollback will revert all of these to
their state prior to the last deploy.
"""
with project():
with update_changed_requirements():
update = "git checkout" if env.git else "hg up -C"
run("%s `cat last.commit`" % update)
with cd(join(static(), "..")):
run("tar -xf %s" % join(env.proj_path, "last.tar"))
restore("last.db")
restart()
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
if create():
deploy()
|
agpl-3.0
| 8,274,986,944,124,153,000
| 28.831793
| 78
| 0.564719
| false
| 3.659637
| false
| false
| false
|
Cadasta/cadasta-qgis-plugin
|
cadasta/gui/tools/utilities/edit_text_dialog.py
|
1
|
4651
|
# coding=utf-8
"""
Cadasta Widget -**Edit Text Dialog**
This module provides: Login : Login for cadasta and save authentication
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import logging
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtGui import (
QDialog
)
from PyQt4.QtCore import QUrl, QRegExp, Qt
from PyQt4.QtGui import (
QDesktopServices,
QColor,
QTextCharFormat,
QFont,
QSyntaxHighlighter
)
from cadasta.utilities.resources import get_ui_class
__copyright__ = "Copyright 2016, Cadasta"
__license__ = "GPL version 3"
__email__ = "info@kartoza.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('CadastaQGISPlugin')
FORM_CLASS = get_ui_class(os.path.join(
'utilities', 'edit_text_dialog.ui'))
class EditTextDialog(QDialog, FORM_CLASS):
"""Dialog for just contains edit text
"""
edit_text_done = pyqtSignal()
def __init__(self, parent=None, iface=None, text=""):
"""Constructor for the dialog.
.. note:: In QtDesigner the advanced editor's predefined keywords
list should be shown in english always, so when adding entries to
cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
the :safe_qgis:`translatable` property.
:param parent: Parent widget of this dialog.
:type parent: QWidget
:param iface: QGIS QGisAppInterface instance.
:type iface: QGisAppInterface
:param text: Default text to be shown
:type text: str
:param ok_method: Method that will be called if finished
:type ok_method: function
"""
QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle('Cadasta Questionnaire')
self.highlighter = Highlighter(self.edit_text.document())
self.show()
self.edit_text.setPlainText(text)
self.ok_button.clicked.connect(
self.close_edit_text_dialog
)
self.data_schema_help.mousePressEvent = self.show_advanced_help
def show_advanced_help(self, event):
"""Show advanced help
"""
QDesktopServices().openUrl(
QUrl("https://cadasta.github.io/api-docs/#questionnaires"))
def close_edit_text_dialog(self):
"""Function that call when ok button is clicked.
"""
self.edit_text_done.emit()
self.close()
def get_text(self):
"""Getting current text in edit text.
:return: edited text
:rtype: str
"""
return self.edit_text.toPlainText()
class Highlighter(QSyntaxHighlighter):
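    """Minimal JSON-oriented syntax highlighter for the questionnaire editor.
    Numeric values and the literals true/false/null are drawn in dark red,
    quoted strings in dark green, and /* ... */ comments are tracked across
    text blocks (a summary of the behaviour implemented below).
    """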
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
self.highlighting_rules = []
value_format = QTextCharFormat()
value_format.setForeground(Qt.darkRed)
self.highlighting_rules.append((
QRegExp("\\btrue\\b|\\bnull\\b|\\bfalse\\b|\\b[0-9]+\\b"),
value_format
))
quotation_format = QTextCharFormat()
quotation_format.setForeground(Qt.darkGreen)
self.highlighting_rules.append((QRegExp("\".*\""),
quotation_format))
self.comment_start_expression = QRegExp("/\\*")
self.comment_end_expression = QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, highlight_format in self.highlighting_rules:
expression = QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, highlight_format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
start_index = 0
if self.previousBlockState() != 1:
start_index = self.comment_start_expression.indexIn(text)
while start_index >= 0:
end_index = self.comment_end_expression.indexIn(text, start_index)
if end_index == -1:
self.setCurrentBlockState(1)
comment_length = len(text) - start_index
else:
comment_length = end_index - start_index + \
self.comment_end_expression.matchedLength()
start_index = self.comment_start_expression.indexIn(
text,
start_index + comment_length)
|
gpl-3.0
| -5,289,462,695,124,991,000
| 31.298611
| 78
| 0.614061
| false
| 3.988851
| false
| false
| false
|
poppogbr/genropy
|
tutorial/projects/warhammer/packages/warh/webpages/carriere.py
|
1
|
2502
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Softwell on 2010-05-15.
Copyright (c) 2008 Softwell. All rights reserved.
"""
class GnrCustomWebPage(object):
maintable = 'warh.carriera'
py_requires = 'public:Public,standard_tables:TableHandlerLight,public:IncludedView'
######################## STANDARD TABLE OVERRIDDEN METHODS ################
def windowTitle(self):
return '!!Carriere personaggi'
def barTitle(self):
return '!!Carriere personaggi'
def lstBase(self, struct):
r = struct.view().rows()
r.fieldcell('nome', width='11em')
r.fieldcell('ac', width='5em')
r.fieldcell('ab', width='5em')
r.fieldcell('forza', width='3em')
r.fieldcell('resistenza', width='5em')
r.fieldcell('agilita', width='3em')
r.fieldcell('intelligenza', width='5em')
r.fieldcell('volonta', width='4em')
r.fieldcell('simpatia', width='4em')
r.fieldcell('attacchi', width='4em')
r.fieldcell('ferite', width='3em')
r.fieldcell('bonus_forza', width='3em')
r.fieldcell('bonus_res', width='5em')
r.fieldcell('mov', width='5em')
r.fieldcell('magia', width='3em')
r.fieldcell('follia', width='5em')
r.fieldcell('fato', width='5em')
return struct
def printActionBase(self):
return True
def exportActionBase(self):
return True
def orderBase(self):
return 'nome'
def queryBase(self):
return dict(column='nome', op='contains', val='')
def userCanWrite(self):
return True
def userCanDelete(self):
return True
############################## FORM METHODS ##################################
def formBaseDimension(self):
return dict(height='220px', width='800px')
def formBase(self, parentBC, disabled=False, **kwargs):
pane = parentBC.contentPane(**kwargs)
fb = pane.formbuilder(cols=8, border_spacing='4px', fld_width='2em')
fb.field('nome', width='12em', colspan=8)
fb.field('ac')
fb.field('ab')
fb.field('forza')
fb.field('resistenza')
fb.field('agilita')
fb.field('intelligenza')
fb.field('volonta')
fb.field('simpatia')
fb.field('attacchi')
fb.field('ferite')
fb.field('bonus_forza')
fb.field('bonus_res')
fb.field('mov')
fb.field('magia')
fb.field('follia')
fb.field('fato')
|
lgpl-2.1
| -5,609,397,692,937,491,000
| 29.52439
| 87
| 0.564349
| false
| 3.465374
| false
| false
| false
|
davidh-ssec/polar2grid
|
polar2grid/readers/amsr2_l1b.py
|
1
|
6911
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright (C) 2016 Space Science and Engineering Center (SSEC),
# University of Wisconsin-Madison.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file is part of the polar2grid software package. Polar2grid takes
# satellite observation data, remaps it, and writes it to a file format for
# input into another program.
# Documentation: http://www.ssec.wisc.edu/software/polar2grid/
#
# Written by David Hoese August 2016
# University of Wisconsin-Madison
# Space Science and Engineering Center
# 1225 West Dayton Street
# Madison, WI 53706
# david.hoese@ssec.wisc.edu
"""AMSR2 L1B files contain various parameters from the GCOM-W1 AMSR2
instrument. This reader can be used by specifying the reader name
``amsr2_l1b`` to the ``polar2grid.sh`` script.
Supported files usually have the following naming scheme::
GW1AM2_201607201808_128A_L1DLBTBR_1110110.h5
This reader's default remapping algorithm is ``nearest`` for nearest
neighbor resampling due to the instrument's scan pattern and swath shape.
The ``--distance_upper_bound`` flag defaults to 12.
Currently this reader provides only the following datasets:
+---------------------------+-----------------------------------------------------------+
| Product Name | Description |
+===========================+===========================================================+
| btemp_36.5v | Brightness Temperature 36.5GHz Polarization Vertical |
+---------------------------+-----------------------------------------------------------+
| btemp_36.5h | Brightness Temperature 36.5GHz Polarization Horizontal |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0av | Brightness Temperature 89.0GHz A Polarization Vertical |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0ah | Brightness Temperature 89.0GHz A Polarization Horizontal |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0bv | Brightness Temperature 89.0GHz B Polarization Vertical |
+---------------------------+-----------------------------------------------------------+
| btemp_89.0bh | Brightness Temperature 89.0GHz B Polarization Horizontal |
+---------------------------+-----------------------------------------------------------+
Special AMSR2 Naval Research Lab (NRL) PNG Scaling
--------------------------------------------------
A common use case for the AMSR2 L1B reader is to generate PNG images similar
to those generated by the U.S. Naval Research Lab (NRL) with a colormap and
coastlines. This requires using an alternate non-default scaling configuration
provided in the tarball. It can be used by providing the
``--rescale-configs $POLAR2GRID_HOME/rescale_configs/amsr2_png.ini``
flag when generating AMSR2 L1B GeoTIFFs.
Once this rescaling has been done, colormap files can be found in
``$POLAR2GRID_HOME/colormaps`` which can then be applied using the
`add_colormap.sh` script.
See the :ref:`util_add_coastlines` and :ref:`util_add_colormap` documentation
for more information on generating these NRL-like PNGs.
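For example, a minimal invocation might look like the following (the
``gtiff`` writer name and the ``-f`` flag are assumptions about the local
polar2grid installation, not taken from this module)::
    polar2grid.sh amsr2_l1b gtiff \
        --rescale-configs $POLAR2GRID_HOME/rescale_configs/amsr2_png.ini \
        -f GW1AM2_201607201808_128A_L1DLBTBR_1110110.h5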
"""
__docformat__ = "restructuredtext en"
import sys
import logging
from polar2grid.readers import ReaderWrapper, main
LOG = logging.getLogger(__name__)
DEFAULT_CHANNELS = [
# "btemp_10.7v",
# "btemp_10.7h",
"btemp_36.5v",
"btemp_36.5h",
"btemp_89.0av",
"btemp_89.0ah",
"btemp_89.0bv",
"btemp_89.0bh",
]
class Frontend(ReaderWrapper):
FILE_EXTENSIONS = [".h5"]
DEFAULT_READER_NAME = "amsr2_l1b"
DEFAULT_DATASETS = DEFAULT_CHANNELS
PRIMARY_FILE_TYPE = "amsr2_l1b"
def add_frontend_argument_groups(parser):
"""Add command line arguments to an existing parser.
:returns: list of group titles added
"""
from polar2grid.core.script_utils import ExtendAction
# Set defaults for other components that may be used in polar2grid processing
parser.set_defaults(fornav_D=10, fornav_d=1, remap_method="nearest", distance_upper_bound=12)
# Use the append_const action to handle adding products to the list
group_title = "Frontend Initialization"
group = parser.add_argument_group(title=group_title, description="swath extraction initialization options")
group.add_argument("--list-products", dest="list_products", action="store_true",
help="List available frontend products and exit")
# group.add_argument("--no-tc", dest="use_terrain_corrected", action="store_false",
# help="Don't use terrain-corrected navigation")
# group.add_argument("--day-fraction", dest="day_fraction", type=float, default=float(os.environ.get("P2G_DAY_FRACTION", 0.10)),
# help="Fraction of day required to produce reflectance products (default 0.10)")
# group.add_argument("--night-fraction", dest="night_fraction", type=float, default=float(os.environ.get("P2G_NIGHT_FRACTION", 0.10)),
# help="Fraction of night required to product products like fog (default 0.10)")
# group.add_argument("--sza-threshold", dest="sza_threshold", type=float, default=float(os.environ.get("P2G_SZA_THRESHOLD", 100)),
# help="Angle threshold of solar zenith angle used when deciding day or night (default 100)")
# group.add_argument("--dnb-saturation-correction", action="store_true",
# help="Enable dynamic DNB saturation correction (normally used for aurora scenes)")
group_title = "Frontend Swath Extraction"
group = parser.add_argument_group(title=group_title, description="swath extraction options")
group.add_argument("-p", "--products", dest="products", nargs="+", default=None, action=ExtendAction,
help="Specify frontend products to process")
return ["Frontend Initialization", "Frontend Swath Extraction"]
if __name__ == "__main__":
sys.exit(main(description="Extract VIIRS L1B swath data into binary files",
add_argument_groups=add_frontend_argument_groups))
|
gpl-3.0
| 9,032,680,333,966,982,000
| 49.816176
| 138
| 0.620605
| false
| 3.911149
| false
| false
| false
|
senuido/stash-scanner
|
lib/ItemHelper.py
|
1
|
21784
|
import itertools
import re
from enum import IntEnum, Enum
from array import array
from lib.CurrencyManager import cm
from lib.ItemCollection import ItemCollection
from lib.Utility import logger, dround
from lib.ItemClass import ItemClass, dir_to_id
float_expr = '[0-9]+|[0-9]+\s*\.\s*[0-9]+'
_BO_PRICE_REGEX = re.compile('.*~(?:b/o|price)({num})(?:[/\\\\]({num}))?([a-z\-]+)'.format(num=float_expr))
# _BO_PRICE_REGEX = re.compile('.*~(b/o|price)\s+([0-9]+|[0-9]+\.[0-9]+)\s+([a-z\-]+)')
_LOCALIZATION_REGEX = re.compile("<<.*>>")
superior_expr = re.compile('^Superior ')
dir_expr = re.compile(r'.*2DItems[/\\](.*)')
expr_level = re.compile('([0-9]+).*')
phys_expr = re.compile('([0-9]+)% increased Physical Damage$')
es_expr = re.compile('([0-9]+)% increased (?!maximum).*Energy Shield$')
armour_expr = re.compile('([0-9]+)% increased Armour(?! during).*$')
evasion_expr = re.compile('([0-9]+)% increased .*Evasion(?: Rating)?(?!.*during).*$')
life_expr = re.compile('([\-+][0-9]+) to maximum Life$')
strength_expr = re.compile('([\-+][0-9]+) to Strength')
dex_expr = re.compile('([\-+][0-9]+) to .*Dexterity$')
int_expr = re.compile('([\-+][0-9]+) to .*Intelligence$')
attributes_expr = re.compile('([\-+][0-9]+) to all Attributes$')
str_mods = [strength_expr, attributes_expr]
cold_res_expr = re.compile('([\-+][0-9]+)% to(?: Fire and)? Cold(?: and Lightning)? Resistances?$')
fire_res_expr = re.compile('([\-+][0-9]+)% to Fire(?: and (?:Cold|Lightning))? Resistances?$')
lightning_res_expr = re.compile('([\-+][0-9]+)% to(?: (?:Cold|Fire) and)? Lightning Resistances?$')
chaos_res_expr = re.compile('([\-+][0-9]+)% to Chaos Resistance$')
ele_res_expr = re.compile('([\-+][0-9]+)% to all Elemental Resistances$')
def get_price(price):
match = _BO_PRICE_REGEX.match(price.lower().replace(' ', ''))
if match:
num, denom, curr = match.groups()
denom = 1 if denom is None or float(denom) == 0 else float(denom)
amount = float(num) / denom
if amount == 0:
return None
return amount, curr
return None
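# Illustrative behaviour of get_price() on hypothetical shop notes (derived
# from the regex above; spaces are stripped and text lower-cased first):
#   get_price('~b/o 5 chaos')   -> (5.0, 'chaos')
#   get_price('~price 1/2 exa') -> (0.5, 'exa')
#   get_price('~b/o 0 chaos')   -> None   (zero amounts are rejected)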
class Item:
__slots__ = ('_item', 'c_name', 'c_base', 'ilvl', 'links_count', 'corrupted', 'mirrored', 'identified', 'stacksize',
'implicit', 'explicit', 'enchant', 'craft', '_mods', 'sockets_count', 'buyout', 'type',
'crafted', 'enchanted', 'modcount',
'_quality', '_level', '_exp', '_tier',
'price', # price before conversion
'c_price',
'_iclass', 'rarity',
'_armour', '_evasion', '_es', '_life',
'_fres', '_cres', '_lres', '_chres', '_ele_res',
'_aps', '_crit', '_block',
'_dps', '_pdps', '_edps',
'_formatted_properties',
'_strength_bonus', '_dex_bonus', '_int_bonus', '_attributes_bonus')
def __init__(self, item, stash_price):
self._item = item
self.links_count = self._get_item_links_count()
self.sockets_count = len(self.sockets)
self.ilvl = item['ilvl']
self.corrupted = item['corrupted']
self.mirrored = item.get('duplicated', False)
self.identified = item['identified']
self.c_base = self.base.lower()
self.c_name = '{} {}'.format(self._get_name().lower(), self.c_base).strip()
# self.type = _ITEM_TYPE[item['frameType']]
self.type = item['frameType']
self.rarity = self.get_rarity()
self.stacksize = item.get("stackSize", 1)
self.price = self.get_price(stash_price)
self.c_price = cm.convert(*self.price) if self.price is not None else None
self.buyout = self.c_price is not None and self.c_price > 0
self.implicit = self._item.get('implicitMods', [])
self.explicit = self._item.get('explicitMods', [])
self.enchant = self._item.get('enchantMods', [])
self.craft = self._item.get('craftedMods', [])
self._mods = None
self.crafted = len(self.craft) > 0
self.enchanted = len(self.enchant) > 0
self.modcount = len(self.implicit) + len(self.explicit) + len(self.enchant) + len(self.craft)
# Properties and on-demand computed fields
self._iclass = None
self._quality = None
self._level = None
self._exp = None
self._tier = None
self._es = None
self._armour = None
self._evasion = None
self._aps = None
self._crit = None
self._block = None
self._edps = None
self._pdps = None
self._dps = None
self._attributes_bonus = None
self._strength_bonus = None
self._dex_bonus = None
self._int_bonus = None
self._life = None
self._fres = None
self._cres = None
self._lres = None
self._chres = None
self._ele_res = None
self._formatted_properties = None
@property
def mods(self):
if self._mods is None:
self._mods = list(itertools.chain(self.explicit, self.implicit, self.enchant, self.craft))
return self._mods
@property
def modifiable(self):
return not (self.corrupted or self.mirrored)
@property
def iclass(self):
if self._iclass is None:
self._iclass = self.get_item_class()
return self._iclass
@property
def quality(self):
if self._quality is None:
quality = self.get_prop_value('Quality')
self._quality = int(quality[0][0].strip('+%')) if quality else 0
return self._quality
@property
def level(self):
if self._level is None:
level = self.get_prop_value('Level')
self._level = float(level[0][0].split()[0]) if level else 0
return self._level
@property
def tier(self):
if self._tier is None:
tier = self.get_prop_value('Map Tier')
self._tier = int(tier[0][0]) if tier else 0
return self._tier
@property
def exp(self):
if self._exp is None:
exp = self.get_item_prop('Experience')
self._exp = float(exp['progress']) * 100 if exp else 0
return self._exp
@property
def es(self):
if self._es is None:
val = self.get_prop_value('Energy Shield')
self._es = self.get_item_es(self.quality, self.modifiable,
self.mods, float(val[0][0])) if val else 0
return self._es
@property
def armour(self):
if self._armour is None:
armour = self.get_prop_value('Armour')
self._armour = self.get_item_armour(self.quality, self.modifiable,
self.mods, float(armour[0][0])) if armour else 0
return self._armour
@property
def evasion(self):
if self._evasion is None:
val = self.get_prop_value('Evasion Rating')
self._evasion = self.get_item_evasion(self.quality, self.modifiable,
self.mods, float(val[0][0])) if val else 0
return self._evasion
@property
def edps(self):
if self._edps is None:
self._fill_dps()
return self._edps
@property
def pdps(self):
if self._pdps is None:
self._fill_dps()
return self._pdps
@property
def dps(self):
if self._dps is None:
self._fill_dps()
return self._dps
@property
def aps(self):
if self._aps is None:
aps = self.get_prop_value('Attacks per Second')
self._aps = float(aps[0][0]) if aps else 0
return self._aps
@property
def crit(self):
if self._crit is None:
crit = self.get_prop_value('Critical Strike Chance')
self._crit = float(crit[0][0].strip('%')) if crit else 0
return self._crit
@property
def block(self):
if self._block is None:
block = self.get_prop_value('Chance to Block')
self._block = float(block[0][0].strip('%')) if block else 0
return self._block
def _fill_dps(self):
if self.aps:
pavg, eavg, cavg = self.get_prop_value('Physical Damage'), \
self.get_prop_value('Elemental Damage'), self.get_prop_value('Chaos Damage')
if pavg:
pavg = sum((float(i) for i in pavg[0][0].split('-'))) / 2
self._pdps = self.get_item_pdps(self.quality, self.modifiable, self.mods, pavg, self.aps)
else:
self._pdps = 0
self._edps = sum((float(i) for i in eavg[0][0].split('-'))) / 2 * self.aps if eavg else 0
cavg = sum((float(i) for i in cavg[0][0].split('-')))/2 if cavg else 0
self._dps = self._pdps + self._edps + cavg * self.aps
else:
self._dps = 0
self._pdps = 0
self._edps = 0
@property
def fres(self):
if self._fres is None:
self._fres = Item.get_mod_total(fire_res_expr, self.mods)
return self._fres
@property
def cres(self):
if self._cres is None:
self._cres = Item.get_mod_total(cold_res_expr, self.mods)
return self._cres
@property
def lres(self):
if self._lres is None:
self._lres = Item.get_mod_total(lightning_res_expr, self.mods)
return self._lres
@property
def chres(self):
if self._chres is None:
self._chres = Item.get_mod_total(chaos_res_expr, self.mods)
return self._chres
@property
def ele_res(self):
if self._ele_res is None:
self._ele_res = Item.get_mod_total(ele_res_expr, self.mods)
return self._ele_res
@property
def strength_bonus(self):
if self._strength_bonus is None:
self._strength_bonus = Item.get_mod_total(strength_expr, self.mods)
return self._strength_bonus
@property
def dex_bonus(self):
if self._dex_bonus is None:
self._dex_bonus = Item.get_mod_total(dex_expr, self.mods)
return self._dex_bonus
@property
def int_bonus(self):
if self._int_bonus is None:
self._int_bonus = Item.get_mod_total(int_expr, self.mods)
return self._int_bonus
@property
def attributes_bonus(self):
if self._attributes_bonus is None:
self._attributes_bonus = Item.get_mod_total(attributes_expr, self.mods)
return self._attributes_bonus
@property
def life(self):
if self._life is None:
self._life = Item.get_mod_total(life_expr, self.mods)
return self._life
@property
def formatted_properties(self):
if self._formatted_properties is None:
self._formatted_properties = \
[ItemProperty.format_property(prop['name'], prop['values'])
for prop in self.properties
if prop['displayMode'] == PropDisplayMode.Format]
return self._formatted_properties
@property
def prophecy(self):
return self._item.get('prophecyText', '')
@property
def w(self):
return self._item['w']
@property
def h(self):
return self._item['h']
@property
def x(self):
return self._item['x']
@property
def y(self):
return self._item['y']
@property
def league(self):
return self._item['league']
@property
def utility(self):
return self._item.get('utilityMods', [])
@property
def icon(self):
return self._item['icon']
@property
def requirements(self):
return self._item.get('requirements', [])
@property
def properties(self):
return self._item.get('properties', [])
@property
def additional_properties(self):
return self._item.get('additionalProperties', [])
@property
def note(self):
return self._item.get('note', '')
@property
def base(self):
return _LOCALIZATION_REGEX.sub('', self._item['typeLine'])
@property
def name(self):
return '{} {}'.format(self._get_name(), self.base).strip()
@property
def sockets(self):
return self._item['sockets']
@property
def id(self):
return self._item['id']
def _get_name(self):
return _LOCALIZATION_REGEX.sub('', self._item['name'])
def _get_item_links_count(self):
groups = array('I', [0]) * 6
for socket in self.sockets:
groups[socket['group']] += 1
return max(groups)
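        # e.g. for a hypothetical sockets list [{'group': 0}, {'group': 0},
        # {'group': 1}] the per-group counts are [2, 1, 0, 0, 0, 0], so the
        # largest link group size returned is 2.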
def get_item_prop(self, name):
for prop in itertools.chain(self.properties, self.additional_properties):
if prop['name'] == name:
return prop
return None
def get_prop_value(self, name):
prop = self.get_item_prop(name)
if prop:
return prop['values']
return None
# def get_property_value(self, name):
# vals = get_prop_value(self._item, name)
# if vals:
# vals = [val[0] for val in vals]
# return vals
def get_rarity(self):
try:
return ItemRarity(self.type)
except ValueError:
return ItemRarity.Normal
@staticmethod
def get_mod_total(expr, mods, skip_vals=False):
total = 0
matched = False
if not expr.groups:
skip_vals = True
for mod in mods:
match = expr.match(mod)
if match:
if skip_vals:
return 1
matched = True
for val in match.groups():
total += float(val)
# return total / expr.groups
if matched:
return total / expr.groups
return 0
# return None maybe do this to allow differentiation between unmatched and a total of 0
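        # Illustrative use with the module-level expressions above
        # (hypothetical mod strings):
        #   Item.get_mod_total(fire_res_expr, ['+30% to Fire Resistance']) -> 30.0
        #   Item.get_mod_total(fire_res_expr, ['+12% to all Elemental Resistances']) -> 0
        #     (that line is matched by ele_res_expr instead)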
def get_item_links_string(self):
links = ''
link_group = None
for socket in self.sockets:
if link_group is not None:
links += '-' if link_group == socket['group'] else ' '
links += socket['attr']
link_group = socket['group']
return links
@staticmethod
def get_item_pdps(quality, modifiable, mods, pavg, aps):
if not modifiable or quality == 20:
return pavg * aps
total = 0
for mod in mods:
match = phys_expr.match(mod)
if match:
total += float(match.group(1))
return pavg * (120 + total) / (quality + 100 + total) * aps
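        # The (120 + total) / (quality + 100 + total) factor projects the
        # listed physical damage to its 20%-quality value, e.g. a hypothetical
        # 0-quality weapon with no physical mods, pavg=100 and aps=1.5 gives
        # 100 * 120/100 * 1.5 = 180.0.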
@staticmethod
def get_item_es(quality, modifiable, mods, es):
if not modifiable or quality == 20:
return es
total = 0
for mod in mods:
match = es_expr.match(mod)
if match:
total += float(match.group(1))
return es * (120 + total) / (quality + 100 + total)
@staticmethod
def get_item_armour(quality, modifiable, mods, armour):
if not modifiable or quality == 20:
return armour
total = 0
for mod in mods:
match = armour_expr.match(mod)
if match:
total += float(match.group(1))
return armour * (120 + total) / (quality + 100 + total)
@staticmethod
def get_item_evasion(quality, modifiable, mods, evasion):
if not modifiable or quality == 20:
return evasion
total = 0
for mod in mods:
match = evasion_expr.match(mod)
if match:
total += float(match.group(1))
return evasion * (120 + total) / (quality + 100 + total)
def get_item_class(self):
global superior_expr
base_line = superior_expr.sub('', self.base, 1)
item_class = ItemClass(0)
try:
# this will fail for magic items with affixes since we dont strip those
item_class = ItemClass[ItemCollection.base_type_to_id[base_line]]
except KeyError:
match = dir_expr.match(self.icon)
# seems to be accurate for the remaining cases
if match:
item_dirs = re.split(r'[/\\]', match.group(1))[:-1]
for item_dir in item_dirs:
class_id = dir_to_id.get(item_dir)
if class_id:
item_class = ItemClass[class_id]
break
# not all flasks have a traditional link
elif 'Flask' in base_line:
item_class = ItemClass.Flask
if not item_class:
logger.warn('Failed determining item class. item: {}, base_line: {}, link {}'.format(self.name, base_line, self.icon))
return item_class
def get_item_base(self):
if self.iclass:
bases = ItemCollection.get_base_types_by_class(self.iclass)
typeLine = self._item['typeLine']
for base in bases:
if re.search(r'\b{}\b'.format(base), typeLine):
return base
return None
def get_max_sockets(self):
""" ignores item type, only considers ilvl """
if self.ilvl >= 50:
return 6
if self.ilvl >= 35:
return 5
if self.ilvl >= 25:
return 4
if self.ilvl >= 2:
return 3
return 2
def get_type_max_sockets(self):
iclass = self.iclass
# if self.name in ItemCollection.SIX_LINK_EXCEPTIONS:
# return 6
if (ItemClass.OneHandWeapon | ItemClass.Shield) & iclass == iclass:
return 3
if (ItemClass.BodyArmour | ItemClass.TwoHandWeapon) & iclass == iclass:
return 6
if (ItemClass.Helmet | ItemClass.Boots | ItemClass.Gloves) & iclass == iclass:
return 4
# if iclass & (ItemClass.Ring | ItemClass.Amulet) != 0:
if (ItemClass.Ring | ItemClass.Amulet) & iclass == iclass:
return 1 # Unset Ring, and Black Maw Talisman
return 0
def get_item_price_raw(self):
if get_price(self.note):
return self.note
return None
def get_price(self, stash_price):
price = get_price(self.note)
return price if price is not None else stash_price
def get_price_raw(self, stash_raw_price):
raw_price = self.get_item_price_raw()
if raw_price is not None:
return raw_price
return stash_raw_price
# TODO MOVE?
def get_item_price_whisper(self):
# Returns format of {amount} {currency}
price = self.price
if price is not None:
amount, currency = price
return dround(amount), cm.toWhisper(currency)
return None
# TODO MOVE?
def get_item_price_display(self):
# Returns format of {amount} {currency}
price = self.price
if price is not None:
amount, currency = price
return dround(amount), cm.toFull(currency)
return None
# TODO MOVE?
def get_whisper_msg(self, stash):
template = '@{} Hi, I would like to buy your {}{} listed{} in {} (stash tab \"{}\"; position: left {}, top {})'
price = self.get_item_price_whisper()
price_str = ' for {} {}'.format(*price) if price is not None else ''
# stack_size_str = ''
# stack_size_str = '' if self.stacksize == 1 else str(self.stacksize) + ' '
if self.iclass and ItemClass.Gem & self.iclass == self.iclass:
gem_text = 'level {} {}% '.format(int(self.level), self.quality)
else:
gem_text = ''
return template.format(stash['lastCharacterName'], gem_text, self.name,
price_str, self.league, stash['stash'],
self.x + 1, self.y + 1)
class PropValueType(IntEnum):
WhiteOrPhysical = 0
BlueOrModified = 1
Fire = 4
Cold = 5
Lightning = 6
Chaos = 7
class PropDisplayMode(IntEnum):
Normal = 0
StatReq = 1
Progress = 2
Format = 3
class ItemType(IntEnum):
Normal = 0
Magic = 1
Rare = 2
Unique = 3
Gem = 4
Currency = 5
DivinationCard = 6
QuestItem = 7
Prophecy = 8
Relic = 9
class ItemRarity(IntEnum):
Normal = ItemType.Normal
Magic = ItemType.Magic
Rare = ItemType.Rare
Unique = ItemType.Unique
Relic = ItemType.Relic
class ItemProperty:
class PropertyValue:
def __init__(self, val):
self.val = val[0]
# try:
self.type = PropValueType(val[1])
# except ValueError:
# self.type = PropValueType.WhiteOrPhysical
def __init__(self, prop):
self.values = [ItemProperty.PropertyValue(val) for val in prop.get('values', [])]
# try:
self.display_mode = PropDisplayMode(prop['displayMode'])
# except ValueError:
# self.display_mode = PropDisplayMode.Normal
self.name = prop['name']
self.progress = prop.get('progress')
def format(self):
format_string = re.sub('%[0-9]+', '{}', self.name)
return format_string.format(*[pv.val for pv in self.values])
@staticmethod
def format_property(name, values):
format_string = re.sub('%[0-9]+', '{}', name)
return format_string.format(*[val[0] for val in values])
class ItemSocketType(Enum):
Strength = 'S'
Dexterity = 'D'
Intelligence = 'I'
Generic = 'G'
class ItemSocket:
def __init__(self, socket):
self.type = ItemSocketType(socket['attr'])
self.group = socket['group']
|
gpl-3.0
| -404,214,246,043,495,740
| 29.98862
| 130
| 0.54843
| false
| 3.637335
| false
| false
| false
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/examples/python/arrays/CreateArrays2.py
|
1
|
2436
|
from libsbml import *
arraysNs = ArraysPkgNamespaces();
doc = SBMLDocument(arraysNs);
doc.setPackageRequired("arrays", True);
model = doc.createModel();
# create parameters
param = model.createParameter();
param.setId("n");
param.setValue(10);
param.setConstant(True);
param = model.createParameter();
param.setId("m");
param.setValue(10);
param.setConstant(True);
param = model.createParameter();
param.setId("x");
param.setValue(5.7);
param.setConstant(True);
paramPlugin = param.getPlugin("arrays");
dim = paramPlugin.createDimension();
dim.setId("i");
dim.setSize("n");
param = model.createParameter();
param.setId("y");
param.setConstant(False);
paramPlugin = param.getPlugin("arrays");
dim = paramPlugin.createDimension();
dim.setId("i");
dim.setSize("n");
param = model.createParameter();
param.setId("z");
param.setConstant(False);
paramPlugin = param.getPlugin("arrays");
dim = paramPlugin.createDimension();
dim.setId("i");
dim.setSize("n");
# create initial assignments
assignment = model.createInitialAssignment();
assignment.setSymbol("y");
ast = ASTNode(AST_REAL);
ast.setValue(3.2);
assignment.setMath(ast);
assignment = model.createInitialAssignment();
assignment.setSymbol("z");
ast = ASTNode(AST_REAL);
ast.setValue(5.7);
assignment.setMath(ast);
assignmentPlugin = assignment.getPlugin("arrays");
dim = assignmentPlugin.createDimension();
dim.setId("i");
dim.setSize("m");
index = assignmentPlugin.createIndex();
newAst = NewASTNode(AST_ARRAYS_FUNCTION_SELECTOR);
ci1 = NewASTNode(AST_NAME);
ci1.setName("z");
newAst.addChild(ci1);
ci2 = NewASTNode(AST_NAME);
ci2.setName("i");
newAst.addChild(ci2);
index.setMath(newAst);
assignment = model.createInitialAssignment();
assignment.setSymbol("z");
ast = ASTNode(AST_REAL);
ast.setValue(3.2);
assignment.setMath(ast);
assignmentPlugin = assignment.getPlugin("arrays");
dim = assignmentPlugin.createDimension();
dim.setId("i");
dim.setSize("m");
index = assignmentPlugin.createIndex();
newAst = NewASTNode(AST_ARRAYS_FUNCTION_SELECTOR);
ci = NewASTNode(AST_NAME);
ci.setName("z");
newAst.addChild(ci);
plus = NewASTNode(AST_PLUS);
ci1 = NewASTNode(AST_NAME);
ci1.setName("i");
plus.addChild(ci1);
ci2 = NewASTNode(AST_NAME);
ci2.setName("m");
plus.addChild(ci2);
newAst.addChild(plus);
index.setMath(newAst);
writeSBMLToFile(doc, "arrays2.xml");
|
bsd-3-clause
| 8,932,212,683,409,533,000
| 21.631068
| 50
| 0.706656
| false
| 2.890736
| false
| true
| false
|
fako/datascope
|
src/online_discourse/management/commands/analyse_wizenoze.py
|
1
|
2367
|
from operator import itemgetter
from pprint import pprint
import logging
import numpy as np
from django.core.management.base import BaseCommand
from core.models import Collective
log = logging.getLogger("datascope")
class Command(BaseCommand):
"""
Base command for Community centered commands
"""
def add_arguments(self, parser):
return
def handle(self, *args, **options):
collective = Collective.objects.get(id=11577)
reading_levels = {}
level_probability_diffs = {}
missing_audience = 0
missing_probabilities = 0
for individual in collective.individual_set.all():
if "audience" not in individual.properties:
missing_audience += 1
continue
if individual["audience_probabilities"] is None:
missing_probabilities += 1
continue
audience = individual["audience"]
if audience["level"] < 4 and "argument_score" in individual.properties:
print(audience["level"], individual["argument_score"], individual["url"])
audience_propabilities = {
individual["audience"]["level"]: individual["probability"]
for individual in individual["audience_probabilities"]
}
level_probabilities = dict(sorted(audience_propabilities.items(), key=itemgetter(0), reverse=True))
if audience["label"] not in reading_levels:
reading_levels[audience["label"]] = 1
else:
reading_levels[audience["label"]] += 1
for level, probability in level_probabilities.items():
if level == 1:
continue
if level not in level_probability_diffs:
level_probability_diffs[level] = [level_probabilities[level-1]]
else:
level_probability_diffs[level].append(level_probabilities[level-1])
for level, diff in level_probability_diffs.items():
level_probability_diffs[level] = np.mean(diff)
print("Missing audience is {} and missing probabilities is {}, while total is {}".format(
missing_audience, missing_probabilities, collective.individual_set.count()))
pprint(reading_levels)
pprint(level_probability_diffs)
|
gpl-3.0
| -7,041,660,738,028,607,000
| 38.466667
| 111
| 0.60921
| false
| 4.743487
| false
| false
| false
|
SoCdesign/EHA
|
Tools/Minimization_Tool/find_problematic_checkers.py
|
1
|
1772
|
# copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand
import package_file
from area_coverage_calc import calculate_area, calculate_coverage
from file_generator import generate_specific_file
import copy
from for_testing import gen_dummy_dict
def find_problematic_checkers():
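    """Report checkers that are dominated by another checker.
    A checker j is treated as dominated by checker i when adding j to the
    set {i} leaves the calculated coverage unchanged; every such (i, j)
    pair is printed and True is returned if at least one was found.
    """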
if package_file.test_mode:
package_file.area_coverage_results = copy.deepcopy(gen_dummy_dict())
print "----------------------------------"
print "Starting checks for problematic checkers..."
problematic_checker_detected = False
checked_checkers = []
counter = 0
for item_1 in range(1, package_file.number_of_checkers+1):
print "starting calculation for item", item_1
initial_list = [str(item_1)]
generate_specific_file(initial_list)
calculate_area(initial_list)
initial_coverage = calculate_coverage(initial_list)
for item_2 in range(1, package_file.number_of_checkers+1):
if item_1 != item_2:
if (item_1, item_2) not in checked_checkers:
counter += 1
new_list = [str(item_1), str(item_2)]
print "checking round:", counter
checked_checkers.append((item_1, item_2))
generate_specific_file(new_list)
calculate_area(new_list)
new_coverage = calculate_coverage(new_list)
if new_coverage == initial_coverage:
print "\033[91m* NOTE::\033[0m PROBLEMATIC SET OF CHECKERS DETECTED:", \
item_2, " IS DOMINATED BY", item_1
problematic_checker_detected = True
return problematic_checker_detected
|
gpl-3.0
| -6,955,556,709,692,845,000
| 44.435897
| 100
| 0.580135
| false
| 4.209026
| false
| false
| false
|
ktmud/david
|
david/core/accounts/__init__.py
|
1
|
1743
|
# coding: utf-8
from flask.ext.security import UserMixin, RoleMixin
from config import BABEL_DEFAULT_LOCALE, BABEL_DEFAULT_TIMEZONE
from david.core.mapper import add_kind
from david.core.db import db, UidMixin
from david.lib.mixins.props import PropsMixin, PropsItem
from flask.ext.security import SQLAlchemyUserDatastore
# Define models
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
desc = db.Column(db.String(255))
K_USER = 100
class User(db.Model, UserMixin, UidMixin, PropsMixin):
kind = K_USER
kins_name = 'user'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
last_login_at = PropsItem('ll')
current_login_at = PropsItem('cl')
last_login_ip = PropsItem('llip')
current_login_ip = PropsItem('clip')
login_count = PropsItem('lc')
name = PropsItem('name')
locale = PropsItem('locale', BABEL_DEFAULT_LOCALE)
timezone = PropsItem('timezone', BABEL_DEFAULT_TIMEZONE)
@property
def display_name(self):
return self.name or (self.uid if self.uid else self.email.split('@')[0])
Account = User
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
add_kind(K_USER, User)
|
mit
| 3,447,524,261,894,055,000
| 30.690909
| 80
| 0.667814
| false
| 3.239777
| false
| false
| false
|
JaneliaSciComp/osgpyplusplus
|
examples/rough_translated1/osgscalarbar.py
|
1
|
5130
|
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgscalarbar"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgSim
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgscalarbar.cpp'
# OpenSceneGraph example, osgscalarbar.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Material>
#include <osg/Texture2D>
#include <osg/MatrixTransform>
#include <osg/PositionAttitudeTransform>
#include <osg/BlendFunc>
#include <osg/ClearNode>
#include <osg/Projection>
#include <osgUtil/CullVisitor>
#include <osgGA/TrackballManipulator>
#include <osgViewer/Viewer>
#include <osgDB/ReadFile>
#include <osgSim/ScalarsToColors>
#include <osgSim/ColorRange>
#include <osgSim/ScalarBar>
#include <sstream>
#include <iostream>
#include <math.h>
using namespace osgSim
using osgSim.ScalarBar
#if defined(_MSC_VER)
# have to have this pathway for just VS6.0 as it's unable to handle the full
# ScalarBar.ScalarPrinter.printScalar scoping.
# Create a custom scalar printer
class MyScalarPrinter (ScalarBar.ScalarPrinter) :
def printScalar(scalar):
print "In MyScalarPrinter.printScalar"
if scalar==0.0 : return ScalarPrinter.printScalar(scalar)+" Bottom"
elif scalar==0.5 : return ScalarPrinter.printScalar(scalar)+" Middle"
elif scalar==1.0 : return ScalarPrinter.printScalar(scalar)+" Top"
else return ScalarPrinter.printScalar(scalar)
#else:
# Create a custom scalar printer
class MyScalarPrinter (ScalarBar.ScalarPrinter) :
def printScalar(scalar):
print "In MyScalarPrinter.printScalar"
if scalar==0.0 : return ScalarBar.ScalarPrinter.printScalar(scalar)+" Bottom"
elif scalar==0.5 : return ScalarBar.ScalarPrinter.printScalar(scalar)+" Middle"
elif scalar==1.0 : return ScalarBar.ScalarPrinter.printScalar(scalar)+" Top"
else return ScalarBar.ScalarPrinter.printScalar(scalar)
#endif
def createScalarBar():
#if 1
#ScalarsToColors* stc = ScalarsToColors(0.0,1.0)
#ScalarBar* sb = ScalarBar(2,3,stc,"STC_ScalarBar")
# Create a custom color set
cs = std.vector<osg.Vec4>()
cs.push_back(osg.Vec4(1.0,0.0,0.0,1.0)) # R
cs.push_back(osg.Vec4(0.0,1.0,0.0,1.0)) # G
cs.push_back(osg.Vec4(1.0,1.0,0.0,1.0)) # G
cs.push_back(osg.Vec4(0.0,0.0,1.0,1.0)) # B
cs.push_back(osg.Vec4(0.0,1.0,1.0,1.0)) # R
cr = ColorRange(0.0,1.0,cs)
sb = ScalarBar(20, 11, cr, "ScalarBar", ScalarBar.VERTICAL, 0.1, MyScalarPrinter)()
sb.setScalarPrinter(MyScalarPrinter)()
return sb
#else:
sb = ScalarBar()
tp = ScalarBar.TextProperties()
tp._fontFile = "fonts/times.ttf"
sb.setTextProperties(tp)
return sb
#endif
def createScalarBar_HUD():
geode = osgSim.ScalarBar()
tp = osgSim.ScalarBar.TextProperties()
tp._fontFile = "fonts/times.ttf"
geode.setTextProperties(tp)
stateset = geode.getOrCreateStateSet()
stateset.setMode(GL_LIGHTING, osg.StateAttribute.OFF)
stateset.setMode(GL_DEPTH_TEST,osg.StateAttribute.OFF)
stateset.setRenderBinDetails(11, "RenderBin")
modelview = osg.MatrixTransform()
modelview.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
matrix = osg.Matrixd(osg.Matrixd.scale(1000,1000,1000) * osg.Matrixd.translate(120,10,0)) # I've played with these values a lot and it seems to work, but I have no idea why
modelview.setMatrix(matrix)
modelview.addChild(geode)
projection = osg.Projection()
projection.setMatrix(osg.Matrix.ortho2D(0,1280,0,1024)) # or whatever the OSG window res is
projection.addChild(modelview)
return projection #make sure you delete the return sb line
int main(int , char **)
# construct the viewer.
viewer = osgViewer.Viewer()
group = osg.Group()
group.addChild(createScalarBar())
group.addChild(createScalarBar_HUD())
# add model to viewer.
viewer.setSceneData( group )
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
| -8,399,547,675,412,330,000
| 30.472393
| 176
| 0.719688
| false
| 3.335501
| false
| false
| false
|
fusionbox/buggy
|
buggy/views.py
|
1
|
10158
|
import json
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, FormView, View
from django.http import Http404, HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect
from django.db.models import Prefetch
from django.db import transaction
from django.utils.functional import cached_property
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.conf import settings
from django.template.defaultfilters import capfirst, pluralize
from .models import Bug, Action, Comment
from .forms import FilterForm, PresetFilterForm
from .mutation import BuggyBugMutator
from .enums import State, Priority
from . import webhook
User = get_user_model()
class BugListView(LoginRequiredMixin, ListView):
ORDER_FIELDS = {
'number': 'id',
'project': 'project__name',
'bug': 'title',
'modified': 'modified_at',
'creator': 'created_by__name',
'assigned_to': 'assigned_to__name',
'state': 'state',
'priority': 'priority',
}
mutator_class = BuggyBugMutator
queryset = Bug.objects.select_related(
'project', 'created_by', 'assigned_to'
).order_by(
'-modified_at'
).defer('fulltext') # We don't use the column, so there's no need to detoast a long string.
context_object_name = 'bugs'
form_class = FilterForm
def get_form_kwargs(self):
return {
'data': self.request.GET,
'label_suffix': '',
}
def get_form(self):
return self.form_class(**self.get_form_kwargs())
def get_bulk_action_form_kwargs(self):
kwargs = {
'queryset': self.object_list,
'bug_actions': self.get_bug_actions(),
}
if self.request.POST:
kwargs['data'] = self.request.POST
return kwargs
def get_bulk_action_form(self):
form_class = self.mutator_class.get_bulk_action_form_class()
return form_class(**self.get_bulk_action_form_kwargs())
def get(self, *args, **kwargs):
self.form = self.get_form()
return super().get(*args, **kwargs)
def post(self, *args, **kwargs):
self.form = self.get_form()
self.object_list = self.get_queryset()
bulk_action_form = self.get_bulk_action_form()
errors = None
if bulk_action_form.is_valid():
try:
with transaction.atomic():
for bug in bulk_action_form.cleaned_data['bugs']:
state_machine = self.mutator_class(self.request.user, bug)
state_machine.process_action(bulk_action_form.cleaned_data)
except ValidationError as e:
errors = e
else:
errors = sum(bulk_action_form.errors.values(), [])
if errors:
for error in errors:
messages.error(self.request, 'Bulk Action Failed: {}'.format(error))
else:
bug_count = len(bulk_action_form.cleaned_data['bugs'])
messages.success(
self.request,
'Success: {} bug{} updated'.format(bug_count, pluralize(bug_count)),
)
return HttpResponseRedirect(self.request.get_full_path())
def get_bug_actions(self):
bug_actions = {}
for bug in self.object_list:
mutator = self.mutator_class(self.request.user, bug)
action_choices = mutator.action_choices(mutator.get_actions())
bug_actions[bug.number] = [x[0] for x in action_choices]
return bug_actions
def get_sort_links(self):
sort_links = {}
querydict = self.request.GET.copy()
if '_pjax' in querydict:
del querydict['_pjax'] # pjax adds this param for cache purposes.
current_sort, desc = self.sort_type()
for order_field in self.ORDER_FIELDS.keys():
if 'desc' in querydict:
del querydict['desc']
if current_sort == order_field and not desc:
querydict['desc'] = True
querydict['sort'] = order_field
sort_links[order_field] = '?{}'.format(querydict.urlencode())
return sort_links
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if 'bulk_action_form' not in kwargs:
context['bulk_action_form'] = self.get_bulk_action_form()
context['form'] = self.form
context['bulk_actions'] = self.mutator_class.get_bulk_actions()
context['preset_form'] = PresetFilterForm(label_suffix='')
context['sort_links'] = self.get_sort_links()
context['sort_by'], context['sort_desc'] = self.sort_type()
return context
def get_queryset(self):
qs = super().get_queryset()
if self.form.is_valid():
qs = self.form.filter(qs)
order_field, desc = self.sort_type()
return qs.order_by(('-' if desc else '') + self.ORDER_FIELDS[order_field])
else:
return qs.none()
def get_template_names(self):
if self.request.META.get('HTTP_X_PJAX'):
return ['buggy/_bug_list.html']
else:
return super().get_template_names()
def sort_type(self):
order_field = self.request.GET.get('sort')
if order_field not in self.ORDER_FIELDS:
return ('modified', True)
else:
return (order_field, bool(self.request.GET.get('desc')))
class BugMutationMixin(LoginRequiredMixin):
mutator_class = BuggyBugMutator
@cached_property
def state_machine(self):
return self.mutator_class(self.request.user, self.object)
def get_form_class(self):
return self.state_machine.get_form_class()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['actions'] = self.state_machine.get_actions()
context['buggy_user_names'] = [
user.get_short_name().lower() for user in User.objects.filter(is_active=True)
]
context['buggy_open_bugs'] = [
{
'title': bug.title,
'number': bug.number,
} for bug in Bug.objects.exclude(state=State.CLOSED).defer('fulltext')
]
return context
def form_valid(self, form):
try:
action = self.state_machine.process_action(form.cleaned_data)
except ValidationError as e:
for error in e.error_list:
                form.add_error(None, error)
return self.form_invalid(form)
else:
messages.success(self.request, capfirst(action.description))
return HttpResponseRedirect(action.bug.get_absolute_url())
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['label_suffix'] = ''
return kwargs
class BugDetailView(BugMutationMixin, FormView):
template_name = 'buggy/bug_detail.html'
queryset = Bug.objects.select_related(
'created_by', 'assigned_to', 'project'
)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
def get_object(self):
try:
if self.request.method == 'POST':
# We'd like to just use select_for_update on the main queryset,
# but the select_related does a left join. Postgres does not
# support locking the outer side of an outer join. The SQL we
# want is `SELECT ... FOR UPDATE OF buggy_bug`, which would
# only lock the one table, but Django can't yet generate that
# SQL: <https://code.djangoproject.com/ticket/28010>.
# BBB: This extra query can be replaced with
# select_for_update(of=('self',)) as soon as it's supported in
# Django.
Bug.objects.all().select_for_update().get_by_number(self.kwargs['bug_number'])
return self.queryset.get_by_number(self.kwargs['bug_number'])
except Bug.DoesNotExist as e:
raise Http404(*e.args)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['bug'] = self.object
return context
def get_initial(self):
return {
'title': self.object.title,
'priority': self.object.priority.value,
}
class BugCreateView(BugMutationMixin, FormView):
template_name = 'buggy/bug_create.html'
object = None
class AddPresetView(LoginRequiredMixin, View):
def post(self, request):
data = request.POST.copy()
data['user'] = request.user.id
form = PresetFilterForm(data)
if form.is_valid():
form.save()
else:
messages.error(request, 'Preset names must be unique.')
return redirect('buggy:bug_list')
class RemovePresetView(LoginRequiredMixin, View):
def post(self, request, pk):
request.user.presetfilter_set.filter(pk=pk).delete()
return redirect('buggy:bug_list')
class MarkdownPreviewView(LoginRequiredMixin, View):
def post(self, request):
return HttpResponse(Comment(comment=request.POST.get('preview', '')).html)
class GitCommitWebhookView(View):
def post(self, request):
if settings.GIT_COMMIT_WEBHOOK_SECRET is None or webhook.validate_signature(
settings.GIT_COMMIT_WEBHOOK_SECRET,
request.body,
request.META['HTTP_X_HUB_SIGNATURE'],
):
data = json.loads(request.body.decode('utf-8'))
for commit in data['commits']:
webhook.process_commit(commit)
return HttpResponse('', status=201)
else:
return HttpResponseForbidden('Signature does not match.')
|
bsd-3-clause
| 3,617,712,142,736,727,600
| 34.642105
| 96
| 0.60189
| false
| 3.947921
| false
| false
| false
|
hughperkins/pub-prototyping
|
py/change_indent.py
|
1
|
5169
|
#!/usr/bin/python
"""
Copyright Hugh Perkins 2016
You can use this under the BSDv2 license
This script re-indents files without changing git blame. It will create
a new commit for each author present in the original blame, with commit message
'automated re-indentation'
"""
import sys
import os
from os import path
import subprocess
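# Typical invocation (the path below is only an illustration, not part of the script):
#   python change_indent.py src/some_module.lua
# The script must be run from inside the git repository that tracks the file.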
filename = sys.argv[1]
print(subprocess.check_output([
'git', 'checkout', filename
]))
out = subprocess.check_output([
'git', 'blame', '--line-porcelain', filename
]).decode('utf-8')
print('out', out)
author_info_by_email = {}
lines_by_author = {}
def process_line_info(line_info):
print(line_info)
author_email = line_info['author-mail']
if author_email not in author_info_by_email:
author_info = {}
author_info['email'] = author_email
author_info['name'] = line_info['author']
author_info_by_email[author_email] = author_info
line_num = line_info['line_num']
if author_email not in lines_by_author:
lines_by_author[author_email] = []
lines_by_author[author_email].append(line_num)
line_num = 0 # 1-based, otherwise inconsistent with all of: lua, text editors, and git blame output
line_info = {}
in_boundary = False
boundary_line = -1
for line in out.split('\n'):
key = line.split(' ')[0]
if len(key) > 39:
if len(line_info.keys()) > 0:
process_line_info(line_info)
in_boundary = False
line_num = line_num + 1
line_info = {}
line_info['line_num'] = line_num
continue
if in_boundary:
if boundary_line == 2:
line_info['contents'] = line.rstrip()[1:]
boundary_line = boundary_line + 1
else:
if key == 'boundary':
in_boundary = True
boundary_line = 1
else:
if key is not None and key != '' and len(key) < 40:
value = line.strip().replace(key + ' ', '')
if value.strip() != '':
if key in ['author', 'author-mail', 'summary']:
line_info[key] = value
if len(line_info.keys()) > 0:
process_line_info(line_info)
print(lines_by_author)
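# At this point lines_by_author maps each author email to the 1-based line numbers
# that author last touched according to git blame; reindent() below rewrites only
# those lines, so each later commit can be attributed to that author.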
def reindent(filepath, lines, indentsize=2):
f = open(filepath, 'r')
contents = f.read()
f.close()
f = open(filepath, 'w')
indent = 0
nextindent = 0
line_num = 1
last_line = None
in_code_block = False
block_indent = 0
next_block_indent = 0
for line in contents.split('\n'):
original_line = line
line = line.strip()
prefix = ''
if not in_code_block:
comment_pos = line.find('--')
if comment_pos >= 0:
pc = line[:comment_pos]
comments = line[comment_pos:]
else:
pc = line
comments = ''
if '[[' in pc:
codeblock_pos = pc.find('[[')
comments = pc[codeblock_pos:]
pc = pc[:codeblock_pos]
in_code_block = True
block_indent = 0
next_block_indent = 1
if in_code_block:
if ']]' in line:
codeblock_end = line.find(']]') + 2
prefix = line[:codeblock_end]
pc = line[codeblock_end:]
in_code_block = False
comments = ''
else:
pc = ''
comments = line
if(comments.startswith('if') or comments.startswith('for ') or comments.startswith('while') or comments.startswith('function')
or comments.startswith('local function') or comments.find(' = function(') >= 0):
next_block_indent += 1
elif comments.startswith('elseif') or comments.startswith('else'):
block_indent -= 1
if comments.startswith('end') or comments.endswith('end'):
block_indent -= 1
indent += block_indent
block_indent = next_block_indent
pcs = pc.strip()
if(pcs.startswith('if') or pcs.endswith(' do') or pcs == 'do' or pcs.startswith('function')
or pcs.startswith('local function') or pcs.find(' function(') >= 0 or pcs.find('=function(') >= 0):
nextindent += 1
elif pcs.startswith('elseif') or pcs.startswith('else'):
indent -= 1
if pcs.startswith('end') or pcs.endswith('end'):
indent -= 1
nextindent -= 1
# handle brackets...
excess_brackets = pc.count('(') + pc.count('{') - pc.count(')') - pc.count('}')
nextindent += excess_brackets
if excess_brackets < 0 and (pc[0] == ')' or pc[0] == '}'):
indent = nextindent
if line_num in lines:
f.write(' ' * (indentsize * indent) + prefix + pc + comments + '\n')
else:
f.write(original_line + '\n')
indent = nextindent
last_line = line
line_num = line_num + 1
if last_line != '':
f.write('\n')
f.close()
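# Re-indent and commit once per author: the local git identity is switched to the
# author's name and email before each commit so blame attribution is preserved.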
for author_email in lines_by_author:
author_info = author_info_by_email[author_email]
print(author_info)
print(subprocess.check_output([
'git', 'config', '--local', '--add', 'user.name', author_info['name']
]))
print(subprocess.check_output([
'git', 'config', '--local', '--add', 'user.email', author_email
]))
print(subprocess.check_output([
'git', 'config', '--local', '-l'
]))
reindent(filename, lines_by_author[author_email])
print(subprocess.check_output([
'git', 'add', filename
]))
print(subprocess.check_output([
'git', 'commit', '-m', 'automated re-indentation of ' + filename
]))
|
apache-2.0
| 4,955,187,367,232,464,000
| 28.706897
| 134
| 0.60089
| false
| 3.374021
| false
| false
| false
|
tcp813/mouTools
|
qt/table_editor.py
|
1
|
7125
|
"""
PyQt5.6 python3.4
Table with an in-place cell editor
"""
import random
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
txt = '''The ASCII character set is suitable for encoding English-language documents, but it does not have much in the way of special characters, such as the French ‘ç.’
It is wholly unsuited for encoding documents in languages such as Greek, Russian, and
Chinese. Over the years, a variety of methods have been developed to encode text for
different languages. The Unicode Consortium has devised the most comprehensive and widely
accepted standard for encoding text. The current Unicode standard (version 5.0) has a repertoire
of nearly 100,000 characters supporting languages ranging from Albanian to Xamtanga
(a language spoken by the Xamir people of Ethiopia).
# '''
# txt = '''
# The ASCII character set is suitable for encoding English-language documents, but it does not have much in the way of special characters, such as the French ‘ç.’ It is wholly unsuited for encoding documents in languages such as Greek, Russian, and Chinese. Over the years, a variety of methods have been developed to encode text for different languages. The Unicode Consortium has devised the most comprehensive and widely accepted standard for encoding text. The current Unicode standard (version 5.0) has a repertoire of nearly 100,000 characters supporting languages ranging from Albanian to Xamtanga (a language spoken by the Xamir people of Ethiopia).
# '''
class Note:
HIGHLIGHT = 0
NOTE = 1
def __init__(self):
self.content = ''
self.date = '2017.7.21 11:11:11'
self.kind = random.choice([Note.HIGHLIGHT, Note.NOTE])
def genDatas():
notes = []
for i in range(50):
note = Note()
note.content = txt[:random.randint(100, len(txt)-1)]
notes.append(note)
return notes
"""
Delegate
"""
class Delegate(QStyledItemDelegate):
def __init__(self, notes, parent=None):
QStyledItemDelegate.__init__(self, parent)
self.parent = parent
self.notes = notes
def paint(self, painter, option, index):
QStyledItemDelegate.paint(self, painter, option, index)
if index.column() == 1:
txt = self.notes[index.row()].date
txtRect = QRect(option.rect.x(), option.rect.y()+option.rect.height()-50, option.rect.width(), 50)
painter.setPen(QColor('#666666'))
painter.drawText(txtRect, Qt.AlignLeft | Qt.AlignTop | Qt.TextSingleLine, txt)
elif index.column() in [0, 2]:
painter.fillRect(option.rect, QColor('#FFFFFF'))
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect.x(), option.rect.y(), option.rect.width(), option.rect.height()-50)
# editor.setGeometry(option.rect)
def createEditor(self, parent, option, index):
# editor = QTextEdit(parent)
editor = QPlainTextEdit(parent)
return editor
def setModelData(self, editor, model, index):
model.setData(index, editor.toPlainText(), Qt.DisplayRole)
"""
Model
"""
class Model(QAbstractTableModel):
def __init__(self, notes, parent=None):
QAbstractTableModel.__init__(self, parent)
self.parent = parent
self.notes = notes
def rowCount(self, index):
return len(self.notes)
def columnCount(self, index):
return 3
def setData(self, index, value, role):
if index.column() == 1:
# if role == Qt.EditRole:
self.notes[index.row()].content = value
self.dataChanged.emit(index, index)
return True
return False
def data(self, index, role):
if role == Qt.DisplayRole:
if index.column() == 0:
return index.row()+1
elif index.column() == 1:
return self.notes[index.row()].content
elif role == Qt.EditRole:
if index.column() == 1:
return self.notes[index.row()].content
elif role == Qt.TextAlignmentRole:
if index.column() == 1:
return Qt.AlignTop | Qt.AlignLeft
# if role == Qt.BackgroundRole:
# if index.column() == 1:
# if self.notes[index.row()].kind is Note.HIGHLIGHT:
# return QColor('#0000FF')
# elif self.notes[index.row()].kind is Note.NOTE:
# return QColor('#00FF00')
# elif role == Qt.SizeHintRole:
# return QSize(50, 100)
def flags(self, index):
if index.isValid():
if index.column() == 1:
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
return Qt.ItemIsEnabled
return Qt.NoItemFlags
"""
View
"""
class View(QTableView):
def __init__(self, parent=None):
QTableView.__init__(self, parent)
self.verticalHeader().setVisible(False)
self.horizontalHeader().setVisible(False)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setFocusPolicy(Qt.NoFocus)
# self.setWordWrap(False)
# self.setTextElideMode(Qt.ElideMiddle)
self.setShowGrid(False)
# def enterEvent(self, event):
# self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
#
# def leaveEvent(self, event):
# self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
class Widget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.resize(800, 600)
self.notes = genDatas()
self.view = View(self)
self.model = Model(self.notes, self)
self.delegate = Delegate(self.notes, self)
self.view.setModel(self.model)
self.view.setItemDelegate(self.delegate)
self.view.setColumnWidth(0, 70)
self.view.setColumnWidth(2, 100)
# self.view.setColumnWidth(1, 200)
# self.view.setColumnHidden(2, True)
self.view.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)
self.view.resizeRowsToContents()
self.view.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.view.setStyleSheet('''
QTableView::item {
margin-bottom: 50px;
border: 1px solid #D9EAFA;
}
QTableView {
selection-color: #FFFFFF;
}
QTableView::item {
border-radius: 10px;
background-color: #D9EAFA;
}
QTableView::item:selected:!active {
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ABAFE5, stop: 1 #8588B2);
color: #FFFFFF;
border-radius: 10px;
}
QTableView::item:hover {
/*background: #FF0000;*/
border: 1px solid #FF0000;
}
''')
self.layout = QHBoxLayout(self)
self.layout.addWidget(self.view)
if __name__ == '__main__':
app = QApplication([])
w = Widget()
w.show()
app.exec_()
|
mit
| 3,111,446,913,765,119,000
| 33.712195
| 657
| 0.626001
| false
| 3.740799
| false
| false
| false
|