Dataset schema (one row per source file; the content column holds the full file text):
- repo_name: string, length 5 to 92
- path: string, length 4 to 221
- copies: string, 19 classes
- size: string, length 4 to 6
- content: string, length 766 to 896k
- license: string, 15 classes
- hash: int64, -9,223,277,421,539,062,000 to 9,223,102,107B
- line_mean: float64, 6.51 to 99.9
- line_max: int64, 32 to 997
- alpha_frac: float64, 0.25 to 0.96
- autogenerated: bool, 1 class
- ratio: float64, 1.5 to 13.6
- config_test: bool, 2 classes
- has_no_keywords: bool, 2 classes
- few_assignments: bool, 1 class
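The rows below are easiest to work with programmatically. Here is a minimal sketch of loading and filtering a dump with this schema using the Hugging Face datasets library; the data file name and the filter thresholds are placeholders for illustration, not the actual source of this dump.

# Hypothetical example: load a code dump with this schema and keep
# permissively licensed, human-written files. "code_dump.jsonl" is a
# placeholder path, not the real source of this table.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_dump.jsonl", split="train")

filtered = ds.filter(
    lambda row: row["license"] in {"mit", "apache-2.0", "bsd-3-clause"}
    and not row["autogenerated"]           # skip generated code
    and 0.25 <= row["alpha_frac"] <= 0.96  # stay inside the dump's alpha_frac range
)
print(len(filtered), "rows kept")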
repo_name: eladkarakuli/anyway | path: process.py | copies: 1 | size: 10121
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import argparse
import json
from flask.ext.sqlalchemy import SQLAlchemy
import field_names
from models import Marker
import models
from utilities import ProgressSpinner, ItmToWGS84, init_flask, CsvReader
import itertools
import localization
import re
from datetime import datetime
directories_not_processes = {}
progress_wheel = ProgressSpinner()
content_encoding = 'cp1255'
accident_type_regex = re.compile("Accidents Type (?P<type>\d)")
ACCIDENTS = 'accidents'
CITIES = 'cities'
STREETS = 'streets'
ROADS = "roads"
URBAN_INTERSECTION = 'urban_intersection'
NON_URBAN_INTERSECTION = 'non_urban_intersection'
DICTIONARY = "dictionary"
lms_files = {ACCIDENTS: "AccData.csv",
URBAN_INTERSECTION: "IntersectUrban.csv",
NON_URBAN_INTERSECTION: "IntersectNonUrban.csv",
STREETS: "DicStreets.csv",
DICTIONARY: "Dictionary.csv",
}
coordinates_converter = ItmToWGS84()
app = init_flask(__name__)
db = SQLAlchemy(app)
def get_street(settlement_sign, street_sign, streets):
"""
extracts the street name using the settlement id and street id
"""
if settlement_sign not in streets:
return None
street_name = [x[field_names.street_name].decode(content_encoding) for x in streets[settlement_sign] if
x[field_names.street_sign] == street_sign]
# there should be only one street name, or none if it wasn't found.
return street_name[0] if len(street_name) == 1 else None
def get_address(accident, streets):
"""
extracts the address of the main street.
tries to build the full address: <street_name> <street_number>, <settlement>,
but might return a partial one if unsuccessful.
"""
street = get_street(accident[field_names.settlement_sign], accident[field_names.street1], streets)
if not street:
return u""
# the home field is invalid if it's empty or if it contains 9999
home = accident[field_names.home] if accident[field_names.home] != 9999 else None
settlement = localization.get_city_name(accident[field_names.settlement_sign])
if not home and not settlement:
return street
if not home and settlement:
return u"{}, {}".format(street, settlement)
if home and not settlement:
return u"{} {}".format(street, home)
return u"{} {}, {}".format(street, home, settlement)
def get_streets(accident, streets):
"""
extracts the streets the accident occurred in.
every accident has a main street and a secondary street.
:return: a tuple containing both streets.
"""
main_street = get_address(accident, streets)
secondary_street = get_street(accident[field_names.settlement_sign], accident[field_names.street2], streets)
return main_street, secondary_street
def get_junction(accident, roads):
"""
extracts the junction from an accident
:return: returns the junction or None if it wasn't found
"""
key = accident[field_names.road1], accident[field_names.road2]
junction = roads.get(key, None)
return junction.decode(content_encoding) if junction else None
def parse_date(accident):
"""
parses an accident's date
"""
year = accident[field_names.accident_year]
month = accident[field_names.accident_month]
day = accident[field_names.accident_day]
hour = accident[field_names.accident_hour] % 24
accident_date = datetime(year, month, day, hour, 0, 0)
return accident_date
def load_extra_data(accident, streets, roads):
"""
loads more data about the accident
:return: a dictionary containing all the extra fields and their values
:rtype: dict
"""
extra_fields = {}
# if the accident occurred in an urban setting
if bool(accident[field_names.urban_intersection]):
main_street, secondary_street = get_streets(accident, streets)
if main_street:
extra_fields[field_names.street1] = main_street
if secondary_street:
extra_fields[field_names.street2] = secondary_street
# if the accident occurred in a non-urban setting (highway, etc.)
if bool(accident[field_names.non_urban_intersection]):
junction = get_junction(accident, roads)
if junction:
extra_fields[field_names.junction_name] = junction
# localize static accident values
for field in localization.get_supported_tables():
if accident[field]:
# if we have a localized field for that particular field, save the field value
# it will be fetched when deserialized
if localization.get_field(field, accident[field]):
extra_fields[field] = accident[field]
return extra_fields
def import_accidents(provider_code, accidents, streets, roads):
print("reading accidents from file %s" % (accidents.name(),))
for accident in accidents:
if field_names.x_coordinate not in accident or field_names.y_coordinate not in accident:
raise ValueError("x and y coordinates are missing from the accidents file!")
if not accident[field_names.x_coordinate] or not accident[field_names.y_coordinate]:
continue
lng, lat = coordinates_converter.convert(accident[field_names.x_coordinate], accident[field_names.y_coordinate])
marker = {
"id":int("{0}{1}".format(provider_code, accident[field_names.id])),
"title":"Accident",
"description":json.dumps(load_extra_data(accident, streets, roads), encoding=models.db_encoding),
"address":get_address(accident, streets),
"latitude":lat,
"longitude":lng,
"type":Marker.MARKER_TYPE_ACCIDENT,
"subtype":int(accident[field_names.accident_type]),
"severity":int(accident[field_names.accident_severity]),
"created":parse_date(accident),
"locationAccuracy":int(accident[field_names.igun])
}
yield marker
accidents.close()
def get_files(directory):
for name, filename in lms_files.iteritems():
if name not in [STREETS, NON_URBAN_INTERSECTION, ACCIDENTS]:
continue
files = filter(lambda path: filename.lower() in path.lower(), os.listdir(directory))
amount = len(files)
if amount == 0:
raise ValueError(
"file doesn't exist directory, cannot parse it; directory: {0};filename: {1}".format(directory,
filename))
if amount > 1:
raise ValueError("there are too many files in the directory, cannot parse!;directory: {0};filename: {1}"
.format(directory, filename))
csv = CsvReader(os.path.join(directory, files[0]))
if name == STREETS:
streets_map = {}
for settlement in itertools.groupby(csv, lambda street: street.get(field_names.settlement, "OTHER")):
key, val = tuple(settlement)
streets_map[key] = [{field_names.street_sign: x[field_names.street_sign],
field_names.street_name: x[field_names.street_name]} for x in val if
field_names.street_name in x and field_names.street_sign in x]
csv.close()
yield name, streets_map
elif name == NON_URBAN_INTERSECTION:
roads = {(x[field_names.road1], x[field_names.road2]): x[field_names.junction_name] for x in csv if
field_names.road1 in x and field_names.road2 in x}
csv.close()
yield ROADS, roads
elif name == ACCIDENTS:
yield name, csv
def import_to_datastore(directory, provider_code, batch_size):
"""
goes through all the files in a given directory, parses and commits them
"""
try:
files_from_lms = dict(get_files(directory))
if len(files_from_lms) == 0:
return
print("importing data from directory: {}".format(directory))
now = datetime.now()
accidents = list(import_accidents(provider_code=provider_code, **files_from_lms))
db.session.execute(Marker.__table__.insert(), accidents)
db.session.commit()
took = int((datetime.now() - now).total_seconds())
print("imported {0} items from directory: {1} in {2} seconds".format(len(accidents), directory, took))
except Exception as e:
directories_not_processes[directory] = e.message
def get_provider_code(directory_name=None):
if directory_name:
match = accident_type_regex.match(directory_name)
if match:
return int(match.groupdict()['type'])
ans = ""
while not ans.isdigit():
ans = raw_input("directory provider code is invalid, please enter a valid code: ")
if ans.isdigit():
return int(ans)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default="static/data/lms")
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--delete_all', dest='delete_all', action='store_true', default=True)
parser.add_argument('--provider_code', type=int)
args = parser.parse_args()
# wipe all the Markers first
if args.delete_all:
print("deleting the entire db!")
db.session.query(Marker).delete()
db.session.commit()
for directory in glob.glob("{0}/*/*".format(args.path)):
parent_directory = os.path.basename(os.path.dirname(os.path.join(os.pardir, directory)))
provider_code = args.provider_code if args.provider_code else get_provider_code(parent_directory)
import_to_datastore(directory, provider_code, args.batch_size)
failed = ["{0}: {1}".format(directory, fail_reason) for directory, fail_reason in
directories_not_processes.iteritems()]
print("finished processing all directories, except: %s" % "\n".join(failed))
if __name__ == "__main__":
main()
license: bsd-3-clause | hash: -1,322,978,184,679,433,700 | line_mean: 37.196226 | line_max: 120 | alpha_frac: 0.644106 | autogenerated: false | ratio: 3.775084 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: kylbarnes/blox | path: deploy/demo-cli/blox-create-environment.py | copies: 1 | size: 2812
#!/usr/bin/env python
import json, os, sys
import common
def main(argv):
# Command Line Arguments
args = [{'arg':'--apigateway', 'dest':'apigateway', 'default':None, 'type':'boolean', 'help':'Call API Gateway endpoint'}]
if '--apigateway' in argv:
args.extend([{'arg':'--stack', 'dest':'stack', 'default':None, 'help':'CloudFormation stack name'}])
else:
args.extend([{'arg':'--host', 'dest':'host', 'default':'localhost:2000', 'help':'Blox Scheduler <Host>:<Port>'}])
args.extend([{'arg':'--environment', 'dest':'environment', 'default':None, 'help':'Blox environment name'}])
args.extend([{'arg':'--cluster', 'dest':'cluster', 'default':None, 'help':'ECS cluster name'}])
args.extend([{'arg':'--task-definition', 'dest':'taskDef', 'default':None, 'help':'ECS task definition arn'}])
# Parse Command Line Arguments
params = common.parse_cli_args('Create Blox Environment', args)
if params.apigateway:
run_apigateway(params)
else:
run_local(params)
# Call Blox Scheduler API Gateway Endpoint
def run_apigateway(params):
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "RestApi"]
restApi = common.run_shell_command(params.region, command)
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "ApiResource"]
restResource = common.run_shell_command(params.region, command)
body = {'name': params.environment, 'instanceGroup': {'cluster': params.cluster}, 'taskDefinition': params.taskDef}
command = ["apigateway", "test-invoke-method", "--rest-api-id", restApi['StackResourceDetail']['PhysicalResourceId'], "--resource-id", restResource['StackResourceDetail']['PhysicalResourceId'], "--http-method", "POST", "--headers", "{}", "--path-with-query-string", "/v1/environments", "--body", json.dumps(body)]
response = common.run_shell_command(params.region, command)
print "HTTP Response Code: %d" % response['status']
try:
obj = json.loads(response['body'])
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print json.dumps(response, indent=2)
sys.exit(1)
# Call Blox Scheduler Local Endpoint
def run_local(params):
api = common.Object()
api.method = 'POST'
api.headers = {}
api.host = params.host
api.uri = '/v1/environments'
api.queryParams = {}
api.data = {'name': params.environment, 'instanceGroup': {'cluster': params.cluster}, 'taskDefinition': params.taskDef}
response = common.call_api(api)
print "HTTP Response Code: %d" % response.status
try:
obj = json.loads(response.body)
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print response.body
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
license: apache-2.0 | hash: 5,035,756,915,522,746,000 | line_mean: 39.753623 | line_max: 314 | alpha_frac: 0.687411 | autogenerated: false | ratio: 3.180995 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: lhfei/spark-in-action | path: spark-2.x/src/main/python/mllib/correlations.py | copies: 1 | size: 2149
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Correlations using MLlib.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat import Statistics
from pyspark.mllib.util import MLUtils
if __name__ == "__main__":
if len(sys.argv) not in [1, 2]:
print("Usage: correlations (<file>)", file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonCorrelations")
if len(sys.argv) == 2:
filepath = sys.argv[1]
else:
filepath = 'data/mllib/sample_linear_regression_data.txt'
corrType = 'pearson'
points = MLUtils.loadLibSVMFile(sc, filepath)\
.map(lambda lp: LabeledPoint(lp.label, lp.features.toArray()))
print()
print('Summary of data file: ' + filepath)
print('%d data points' % points.count())
# Statistics (correlations)
print()
print('Correlation (%s) between label and each feature' % corrType)
print('Feature\tCorrelation')
numFeatures = points.take(1)[0].features.size
labelRDD = points.map(lambda lp: lp.label)
for i in range(numFeatures):
featureRDD = points.map(lambda lp: lp.features[i])
corr = Statistics.corr(labelRDD, featureRDD, corrType)
print('%d\t%g' % (i, corr))
print()
sc.stop()
license: apache-2.0 | hash: 2,444,104,559,011,618,300 | line_mean: 33.229508 | line_max: 74 | alpha_frac: 0.679851 | autogenerated: false | ratio: 3.790123 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: 130s/bloom | path: bloom/commands/update.py | copies: 1 | size: 4643
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import atexit
import bloom
import json
import os
import sys
import xmlrpclib
from bloom.logging import warning
from bloom.util import add_global_arguments
from bloom.util import handle_global_arguments
from pkg_resources import parse_version
from threading import Lock
_updater_running = False
_updater_lock = Lock()
UPDATE_MSG = """\
This version of bloom is '{current}', but the newest available version is '{newest}'. Please update.\
"""
def start_updater():
global _updater_running, _updater_lock
with _updater_lock:
if _updater_running:
return
_updater_running = True
import subprocess
subprocess.Popen('bloom-update --quiet', shell=True)
@atexit.register
def check_for_updates():
if sys.argv[0].endswith('bloom-update'):
return
user_bloom = os.path.join(os.path.expanduser('~'), '.bloom')
if os.path.exists(user_bloom):
with open(user_bloom, 'r') as f:
raw = f.read()
if not raw:
return
version_dict = json.loads(raw)
os.remove(user_bloom) # Remove only on successful parse
if type(version_dict) == dict and len(version_dict) == 2 and version_dict['current'] == bloom.__version__:
warning(UPDATE_MSG.format(**version_dict))
def get_argument_parser():
parser = argparse.ArgumentParser(description="Checks for updates")
add_global_arguments(parser)
return parser
_quiet = False
def info(msg):
global _quiet
if not _quiet:
print(msg)
def fetch_update(user_bloom):
if os.path.exists(user_bloom):
return
open(user_bloom, 'w').close() # Touch the file
pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
newest_version = pypi.package_releases('bloom')
newest_version = newest_version[0] if newest_version else None
current_version = bloom.__version__
if newest_version and bloom.__version__ != 'unset':
if parse_version(bloom.__version__) < parse_version(newest_version):
version_dict = {
'current': str(current_version),
'newest': str(newest_version)
}
with open(user_bloom, 'w') as f:
f.write(json.dumps(version_dict))
info(UPDATE_MSG.format(**version_dict))
if _quiet:
return
else:
info("Bloom is up-to-date!")
else:
info("Cannot determine newest version of bloom.")
os.remove(user_bloom)
def main(sysargs=None):
global _quiet
parser = get_argument_parser()
args = parser.parse_args(sysargs)
handle_global_arguments(args)
_quiet = args.quiet
user_bloom = os.path.join(os.path.expanduser('~'), '.bloom')
try:
fetch_update(user_bloom)
except Exception as e:
if not _quiet:
print('Error fetching latest version: ' + str(e), file=sys.stderr)
if os.path.exists(user_bloom):
os.remove(user_bloom)
license: bsd-3-clause | hash: -7,645,146,209,222,274,000 | line_mean: 32.402878 | line_max: 114 | alpha_frac: 0.677795 | autogenerated: false | ratio: 3.934746 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: jakesyl/BitTornado | path: BitTornado/Network/selectpoll.py | copies: 2 | size: 1284
import select
import time
import bisect
POLLIN = 1
POLLOUT = 2
POLLERR = 8
POLLHUP = 16
class poll(object):
def __init__(self):
self.rlist = []
self.wlist = []
def register(self, f, t):
if not isinstance(f, int):
f = f.fileno()
if t & POLLIN:
insert(self.rlist, f)
else:
remove(self.rlist, f)
if t & POLLOUT:
insert(self.wlist, f)
else:
remove(self.wlist, f)
def unregister(self, f):
if not isinstance(f, int):
f = f.fileno()
remove(self.rlist, f)
remove(self.wlist, f)
def poll(self, timeout=None):
if self.rlist or self.wlist:
try:
r, w, _ = select.select(self.rlist, self.wlist, [], timeout)
except ValueError:
return None
else:
if timeout:
time.sleep(timeout / 1000)
return []
return [(s, POLLIN) for s in r] + [(s, POLLOUT) for s in w]
def remove(list, item):
i = bisect.bisect(list, item)
if i > 0 and list[i - 1] == item:
del list[i - 1]
def insert(list, item):
i = bisect.bisect(list, item)
if i == 0 or list[i - 1] != item:
list.insert(i, item)
license: mit | hash: -7,630,042,393,807,928,000 | line_mean: 21.928571 | line_max: 76 | alpha_frac: 0.499221 | autogenerated: false | ratio: 3.387863 | config_test: false | has_no_keywords: false | few_assignments: false
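A minimal usage sketch for the select-based poll shim in BitTornado/Network/selectpoll.py above. This is an editor's illustration, not part of the dataset row; it assumes a Unix system, where select() accepts pipe file descriptors.

# Illustration only: exercise the poll shim with a pair of pipe fds.
import os
from BitTornado.Network.selectpoll import poll, POLLIN, POLLOUT  # import path as in the repo

r_fd, w_fd = os.pipe()
p = poll()
p.register(r_fd, POLLIN)   # watch the read end for readability
p.register(w_fd, POLLOUT)  # watch the write end for writability
os.write(w_fd, b"x")       # make the read end readable
print(p.poll(1))           # e.g. [(r_fd, POLLIN), (w_fd, POLLOUT)]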
repo_name: OpenMined/PySyft | path: packages/syft/examples/duet/word_language_model/original/generate.py | copies: 1 | size: 3193
###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
# stdlib
import argparse
# third party
import torch
import data # isort:skip
parser = argparse.ArgumentParser(description="PyTorch Wikitext-2 Language Model")
# Model parameters.
parser.add_argument(
"--data",
type=str,
default="./data/wikitext-2",
help='location of the data corpus; default: "./data/wikitext-2"',
)
parser.add_argument(
"--checkpoint",
type=str,
default="./model.pt",
help='model checkpoint to use; default: "./model.pt"',
)
parser.add_argument(
"--outf",
type=str,
default="generated.txt",
help='output file for generated text; default: "generated.txt"',
)
parser.add_argument(
"--words",
type=int,
default="1000",
help="number of words to generate; default: 1000",
)
parser.add_argument("--seed", type=int, default=1111, help="random seed; default: 1111")
parser.add_argument("--cuda", action="store_true", help="use CUDA")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature - higher will increase diversity; default: 1.0",
)
parser.add_argument(
"--log-interval", type=int, default=100, help="reporting interval; default: 100"
)
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, "rb") as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
is_transformer_model = (
hasattr(model, "model_type") and model.model_type == "Transformer"
)
if not is_transformer_model:
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, "w") as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
if is_transformer_model:
output = model(input, False)
word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
word_tensor = torch.Tensor([[word_idx]]).long().to(device)
input = torch.cat([input, word_tensor], 0)
else:
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ("\n" if i % 20 == 19 else " "))
if i % args.log_interval == 0:
print(f"| Generated {i}/{args.words} words")
license: apache-2.0 | hash: 7,456,513,447,870,492,000 | line_mean: 30.613861 | line_max: 88 | alpha_frac: 0.603821 | autogenerated: false | ratio: 3.640821 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: robsonfs/HackerRankChallenge_30DaysOfCode | path: day_12/tests.py | copies: 1 | size: 1507
from unittest import TestCase, mock
from day12 import Person, Student
class TestDay12(TestCase):
def test_student_a_person_subclass(self):
self.assertTrue(issubclass(Student, Person))
def test_student_has_att_scores(self):
student = Student("Sophia", "Fernandes", 201302, [90, 100, 100, 80])
self.assertTrue(hasattr(student, 'scores'))
def test_student_calculate_testcase0(self):
student = Student("Sophia", "Fernandes", 201302, [90, 100, 100, 80])
grade = student.calculate()
self.assertEqual('O', grade)
def test_student_calculate_testcase1(self):
student = Student("Sophia", "Fernandes", 201302, [90, 80, 99, 80])
grade = student.calculate()
self.assertEqual('E', grade)
def test_student_calculate_testcase2(self):
student = Student("Sophia", "Fernandes", 201302, [76])
grade = student.calculate()
self.assertEqual('A', grade)
def test_student_calculate_testcase3(self):
student = Student("Sophia", "Fernandes", 201302, [66])
grade = student.calculate()
self.assertEqual('P', grade)
def test_student_calculate_testcase4(self):
student = Student("Sophia", "Fernandes", 201302, [54])
grade = student.calculate()
self.assertEqual('D', grade)
def test_student_calculate_testcase5(self):
student = Student("Sophia", "Fernandes", 201302, [39])
grade = student.calculate()
self.assertEqual('T', grade)
license: gpl-3.0 | hash: -552,155,690,027,600,500 | line_mean: 35.756098 | line_max: 76 | alpha_frac: 0.639018 | autogenerated: false | ratio: 3.640097 | config_test: true | has_no_keywords: false | few_assignments: false
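The tests above pin down the day12 API: a Student subclass of Person with a scores attribute and a calculate() method returning a letter grade. A hypothetical implementation consistent with those tests follows, using the grade boundaries from the HackerRank "Day 12" statement; it is not the actual day12 module from the dataset row.

# Hypothetical day12.py consistent with the tests above (not the original file).
class Person:
    def __init__(self, first_name, last_name, id_number):
        self.first_name = first_name
        self.last_name = last_name
        self.id_number = id_number

class Student(Person):
    def __init__(self, first_name, last_name, id_number, scores):
        super().__init__(first_name, last_name, id_number)
        self.scores = scores

    def calculate(self):
        # Grade boundaries as in the HackerRank Day 12 statement.
        avg = sum(self.scores) / len(self.scores)
        if avg >= 90:
            return 'O'
        if avg >= 80:
            return 'E'
        if avg >= 70:
            return 'A'
        if avg >= 55:
            return 'P'
        if avg >= 40:
            return 'D'
        return 'T'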
repo_name: dts-ait/qgis-edge-bundling | path: qgis3_plugin/processing_edgebundling/edgebundling.py | copies: 1 | size: 8883
# -*- coding: utf-8 -*-
"""
***************************************************************************
edgebundlingProviderPlugin.py
---------------------
Date : January 2018
Copyright : (C) 2018 by Anita Graser
Email : anitagraser@gmx.at
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Anita Graser'
__date__ = 'January 2018'
__copyright__ = '(C) 2018, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsField,
QgsFeature,
QgsFeatureSink,
QgsFeatureRequest,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterFeatureSink
)
from processing_edgebundling.edgebundlingUtils import EdgeCluster
pluginPath = os.path.dirname(__file__)
class Edgebundling(QgsProcessingAlgorithm):
INPUT = 'INPUT'
CLUSTER_FIELD = 'CLUSTER_FIELD'
USE_CLUSTERING = 'USE_CLUSTERING'
INITIAL_STEP_SIZE = 'INITIAL_STEP_SIZE'
COMPATIBILITY = 'COMPATIBILITY'
CYCLES = 'CYCLES'
ITERATIONS = 'ITERATIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def createInstance(self):
return type(self)()
def icon(self):
return QIcon(os.path.join(pluginPath, "icons", "icon.png"))
def tr(self, text):
return QCoreApplication.translate("edgebundling", text)
def name(self):
return "edgebundling"
def displayName(self):
return self.tr("Force-directed edge bundling")
def group(self):
return self.tr("Edge Bundling")
def groupId(self):
return "edgebundling"
def tags(self):
return self.tr("edgebundling,flows").split(",")
def shortHelpString(self):
return self.tr("""
Implementation of force-directed edge bundling for the QGIS Processing toolbox as described in
https://anitagraser.com/2017/10/08/movement-data-in-gis-8-edge-bundling-for-flow-maps/
Usage:
Pre-process your data first!
- Use only Linestrings (no Multilinestrings)
- Your data should only contain lines with exactly 2 nodes: an origin node and a destination node.
- Your data should also only contain lines with a length greater than 0 ("lines" with equal origin and destination node coordinates will cause an error).
Once your data is sufficiently pre-processed and fulfils all of the above requirements, you can either run one of the clustering algorithms first and then bundle the lines, or bundle the lines directly (which, on the downside, will take significantly longer). Please double-check that the input parameters fit your data (e.g. the "initial step size" of the edge bundling algorithm depends on the coordinate reference system of your data).
""")
def helpUrl(self):
return "https://github.com/dts-ait/qgis-edge-bundling"
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(
self.INPUT,
self.tr("Input layer"),
[QgsProcessing.TypeVectorLine]))
self.addParameter(QgsProcessingParameterField(
self.CLUSTER_FIELD,
self.tr("Cluster field"),
None,
self.INPUT))
self.addParameter(QgsProcessingParameterBoolean(
self.USE_CLUSTERING,
self.tr("Use cluster field"),
defaultValue=False))
self.addParameter(QgsProcessingParameterNumber(
self.INITIAL_STEP_SIZE,
self.tr("Initial step size"),
QgsProcessingParameterNumber.Double,
100))
self.addParameter(QgsProcessingParameterNumber(
self.COMPATIBILITY,
self.tr("Compatibility"),
QgsProcessingParameterNumber.Double,
0.6))
self.addParameter(QgsProcessingParameterNumber(
self.CYCLES,
self.tr("Cycles"),
QgsProcessingParameterNumber.Integer,
6))
self.addParameter(QgsProcessingParameterNumber(
self.ITERATIONS,
self.tr("Iterations"),
QgsProcessingParameterNumber.Integer,
90))
self.addParameter(QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr("Bundled edges"),
QgsProcessing.TypeVectorLine))
def processAlgorithm(self, parameters, context, feedback):
cluster_field = self.parameterAsFields(parameters, self.CLUSTER_FIELD, context)[0]
use_clustering = self.parameterAsBool(parameters, self.USE_CLUSTERING, context)
initial_step_size = self.parameterAsDouble(parameters, self.INITIAL_STEP_SIZE, context)
compatibility = self.parameterAsDouble(parameters, self.COMPATIBILITY, context)
cycles = self.parameterAsInt(parameters, self.CYCLES, context)
iterations = self.parameterAsInt(parameters, self.ITERATIONS, context)
source = self.parameterAsSource(parameters, self.INPUT, context)
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
source.fields(), source.wkbType(), source.sourceCrs())
features = source.getFeatures(QgsFeatureRequest())
total = 100.0 / source.featureCount() if source.featureCount() else 0
# Parameter
vlayer = source
fields = vlayer.fields()
# Create edge list
edges = []
for current, feat in enumerate(features):
if feedback.isCanceled():
break
edges.append(feat)
# Create clusters
clusters = []
if use_clustering == True:
# Arrange edges in clusters according to cluster-id
labels = []
for edge in edges:
labels.append(edge[cluster_field])
feedback.pushDebugInfo(cluster_field)
for l in range(0, max(labels) + 1):
clusters.append(list())
for i, label in enumerate(labels):
if label >= 0:
clusters[label].append(edges[i])
else:
clusters.append([edges[i]])
for i, cluster in enumerate(clusters):
clusters[i] = EdgeCluster(cluster, initial_step_size, iterations,
cycles, compatibility)
else:
# If clustering should not be used, create only one big cluster containing all edges
cluster_field = QgsField('CLUSTER', QVariant.Int)
cluster_n_field = QgsField('CLUSTER_N', QVariant.Int)
fields.append(cluster_field)
fields.append(cluster_n_field)
clusters = [EdgeCluster(edges, initial_step_size, iterations,
cycles, compatibility)]
# Do edge-bundling (separately for all clusters)
for c, cl in enumerate(clusters):
feedback.setProgress(80 * ( 1.0 * c / len(clusters)))
if feedback.isCanceled(): break
if cl.E > 1:
cl.force_directed_eb(feedback)
feedback.setProgress(90)
for cl in clusters:
if feedback.isCanceled(): break
for e, edge in enumerate(cl.edges):
feat = QgsFeature()
feat.setGeometry(edge.geometry())
if not use_clustering:
attr = edge.attributes()
attr.append(1)
attr.append(len(edges))
feat.setAttributes(attr)
else:
feat.setAttributes(edge.attributes())
sink.addFeature(feat, QgsFeatureSink.FastInsert)
return {self.OUTPUT: dest_id}
license: gpl-2.0 | hash: -8,249,758,353,570,889,000 | line_mean: 38.834081 | line_max: 459 | alpha_frac: 0.573793 | autogenerated: false | ratio: 4.804218 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: locked/4stability | path: motor_test.py | copies: 1 | size: 1460
#!/usr/bin/python
from optparse import OptionParser
import os
import sys
import time
import termios
import fcntl
import motor
parser = OptionParser()
parser.add_option("-a", "--action", dest="action", help="reset/manual")
(options, args) = parser.parse_args()
m = motor.Motor(0)
if options.action == "reset":
m.reset()
elif options.action == "cycle":
m.init()
speed_percent = 0
while speed_percent < 30:
speed_percent += 1
while speed_percent > 0:
speed_percent -= 1
m.reset()
elif options.action == "manual":
m.init()
# Terminal init stuff found on stackoverflow (SlashV)
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
try:
speed_percent = 0
while (True):
try:
c = sys.stdin.read(1)
except IOError:
c = ''
if c == "-":
speed_percent = speed_percent - 1 if speed_percent > 1 else 0
elif c == "+":
speed_percent = speed_percent + 1 if speed_percent < 100 else 0
pos = m.set_speed(speed_percent/100.0)
sys.stdout.write("\r%d%% (%d)" % (speed_percent, pos))
sys.stdout.flush()
#time.sleep(.1)
except: pass
finally:
# Reset terminal
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
m.reset()
license: bsd-3-clause | hash: 6,159,371,416,633,251,000 | line_mean: 22.174603 | line_max: 71 | alpha_frac: 0.670548 | autogenerated: false | ratio: 2.775665 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: catiabandeiras/StemFactory | path: expansion_tech.py | copies: 1 | size: 7506
#Imports the generic Python packages
import simpy
import random
import math
#IMPORT THE ANCILLARY METHODS MODULES
from get_places import *
def expansion_tech_run(env,et,donor,lab,gui,int_db):
#First, try to find an available worker
while True:
if lab.occupied_workers < gui.TOTAL_WORKERS:
worker_index = 0
while worker_index < gui.TOTAL_WORKERS:
#Select a worker
worker = lab.list_of_workers[worker_index]
if worker.count < worker.capacity:
with worker.request() as request:
yield request | env.timeout(0.0001)
donor.worker_queue = worker
lab.occupied_workers = sum([lab.list_of_workers[worker_index].count for worker_index in range(gui.TOTAL_WORKERS)])
#yield env.timeout(0.0001)
#print('Lab workers at %.5f seen by %s in the beginning of seeding are %d' % (env.now,et.full_name,lab.occupied_workers))
#2) If worker is available, calls the seeding block
procedure = 'seeding'
bsc_procedure = bsc_et(env,et,donor,lab,procedure,gui,int_db)
env.process(bsc_procedure)
yield env.timeout((donor.worker_queue.count/donor.worker_queue.capacity)*(lab.seeding_time))
#print('Seeding of %s finished at %.5f' % (et.full_name,env.now))
break
else:
worker_index += 1
break
else:
yield env.timeout(0.0001)
continue
# Expand until the harvest density is reached, or stop earlier if the remaining dose demand needs fewer cells
while et.no_cells < min(et.harvest_density*et.area,gui.CELL_NUMBER_PER_DOSE*(gui.ANNUAL_DEMAND-lab.total_doses)):
#Chooses if the process goes to a bioreactor system or is in the incubator only
if et.base_name[0] == 'b':
incubation = get_place_bioreactor(env,et,donor,lab,gui,int_db)
else:
incubation = get_place_incubator(env,et,donor,lab,gui,int_db)
env.process(incubation)
#print('Incubation of %s started at %.5f' % (et.full_name,env.now))
yield env.timeout(lab.incubation_time)
#print('Incubation of %s finished at %.5f' % (et.full_name,env.now))
if et.no_cells >= et.harvest_density*et.area:
'''Sent for harvesting when the number of cells in the flask to harvest is reached'''
print('%s is sent for harvesting at %.4f' % (et.full_name,env.now))
break
else:
'''Undergoes feeding when the period of incubation is reached'''
while True:
if lab.occupied_workers < gui.TOTAL_WORKERS:
worker_index = 0
while worker_index < gui.TOTAL_WORKERS:
#Select a worker
worker = lab.list_of_workers[worker_index]
if worker.count < worker.capacity:
with worker.request() as request:
yield request | env.timeout(0.0001)
donor.worker_queue = worker
lab.occupied_workers = sum([lab.list_of_workers[worker_index].count for worker_index in range(gui.TOTAL_WORKERS)])
#print('Feeding block initialized for %s at %.5f' % (et.full_name,env.now))
procedure = 'feeding'
#print('Feeding of %s started at %.5f' % (et.full_name,env.now))
bsc_procedure = bsc_et(env,et,donor,lab,procedure,gui,int_db)
env.process(bsc_procedure)
yield env.timeout((donor.worker_queue.count/donor.worker_queue.capacity)*(lab.feeding_time))
#print('Feeding of %s finished at %.5f' % (et.full_name,env.now))
#print('Feeding block terminated for %s at %.5f' % (et.full_name,env.now))
break
else:
worker_index += 1
break
else:
yield env.timeout(0.0001)
continue
# print(lab.reagent_volumes)
#4) Check that the bsc and worker are not busy before going to harvesting
while True:
'''Launches the harvesting steps'''
worker_index = 0
harvested = 0
while worker_index < gui.TOTAL_WORKERS:
#Select a worker
worker = lab.list_of_workers[worker_index]
#print(lab.list_of_workers)
if worker.count < worker.capacity:
# print('Stats before harvesting queue request of %s' % et.full_name)
# print(donor.donor_index)
# print(lab.occupied_workers)
# print(env.now)
with worker.request() as request:
yield request | env.timeout(0.0001)
donor.worker_queue = worker
lab.occupied_workers = sum([lab.list_of_workers[worker_index].count for worker_index in range(gui.TOTAL_WORKERS)])
#yield env.timeout(0.0001)
#print('Lab workers at %.5f seen by %s in the beginning of harvesting are %d' % (env.now,et.full_name,lab.occupied_workers))
#print('Harvesting block initialized for %s at %.5f' % (et.full_name,env.now))
procedure = 'harvesting'
# print('Harvested flasks per passage at %.5f' % env.now)
# print(donor.harvested_per_passage[donor.passage_no-1])
bsc_procedure = bsc_et(env,et,donor,lab,procedure,gui,int_db)
env.process(bsc_procedure)
#print('Harvesting of %s started at %.5f' % (et.full_name,env.now))
yield env.timeout((donor.worker_queue.count/donor.worker_queue.capacity)*(lab.harvesting_time)+int_db.FIXED_HARVESTING_TIME)
#print('Harvesting of %s finished at %.5f' % (et.full_name,env.now))
#print('Harvesting block terminated for %s at %.5f' % (et.full_name,env.now))
harvested = 1
break
else:
worker_index += 1
if harvested == 1:
break
else:
yield env.timeout(0.0001)
continue
# else:
# yield env.timeout(0.0001)
# continue
# print('Worker queue right before finishing the processing')
# print(et.full_name)
# worker_counts = [lab.list_of_workers[worker_index].count for worker_index in range(TOTAL_WORKERS)]
# print(worker_counts)
# print(env.now)
# print('Harvested flasks per passage at %.5f' % env.now)
# print(donor.harvested_per_passage[donor.passage_no-1])
env.exit()
license: mit | hash: -4,032,397,442,333,823,000 | line_mean: 28.762295 | line_max: 154 | alpha_frac: 0.504396 | autogenerated: false | ratio: 3.996805 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: saifuddin778/LDA | path: test.py | copies: 1 | size: 1104
from __future__ import division
import sys
import copy
sys.dont_write_bytecode = True
"""
Testing LDA
"""
def test_LDA():
from LDA import LDA
x = [
[2.95, 6.63],
[2.53, 7.79],
[3.57, 5.65],
[3.16, 5.47],
[2.58, 4.46],
[2.16, 6.22],
[3.27, 3.52]
]
e = copy.deepcopy(x)
y = [1,1,1,1,2,2,2]
t = LDA(x, y)
for a in e:
r = t.predict(a)
print max(r, key=r.get)
"""
Testing multiclass LDA
"""
def test_multiclass_LDA():
from LDA import multiclass_LDA
from sklearn import datasets
print 'data loaded..'
iris = datasets.load_iris()
x = iris['data']
y = iris['target']
l = copy.deepcopy(x)
m = copy.deepcopy(y)
t = multiclass_LDA(x, y)
for a,b in zip(l, m):
print t.predict(a), b
#t = test_LDA()
#t = test_multiclass_LDA()
if __name__ == '__main__' and len(sys.argv) == 2:
print sys.argv
method_to_test = sys.argv[1]
if method_to_test == 'LDA':
test_LDA()
elif method_to_test == 'multiclass_LDA':
test_multiclass_LDA()
license: mit | hash: -6,399,753,658,631,220,000 | line_mean: 19.444444 | line_max: 49 | alpha_frac: 0.521739 | autogenerated: false | ratio: 2.739454 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: michelesr/network-monitor-server | path: src/hardware.py | copies: 1 | size: 2977
#! /usr/bin/env python
from threading import Thread
from sys import exit
from time import sleep
from psutil import cpu_percent, virtual_memory, swap_memory, \
net_io_counters, disk_io_counters
"""
Network monitoring framework
Hardware resources module
This module retrieves the state of the hardware resources from the
operating system, in particular:
- CPU usage;
- main memory usage;
- swap usage;
- disk read and write usage;
- inbound and outbound network bandwidth.
This module relies on the external psutil library, which can be installed
with a Python package manager such as pip.
"""
class Hardware(Thread):
def __init__(self):
"""
Thread constructor: initializes the thread, marks it as a daemon
and initializes the object's attributes
"""
Thread.__init__(self)
self.setDaemon(True)
self.cpu=0
self.cores = len(cpu_percent(percpu=True))
self.ram=0
self.total_ram = virtual_memory().total
self.swap=0
self.total_swap = swap_memory().total
self.read=0
self.write=0
self.net_in=0
self.net_out=0
def run(self):
"""
Run method of the thread: collects hardware information in real
time via psutil.
"""
try:
while True:
# disk, net (temp)
self.read_tmp = disk_io_counters().read_bytes
self.write_tmp = disk_io_counters().write_bytes
self.net_in_tmp = net_io_counters().bytes_recv
self.net_out_tmp = net_io_counters().bytes_sent
# cpu
self.cpu = cpu_percent(interval=1)
# disk
self.read = \
disk_io_counters().read_bytes - self.read_tmp
self.write = \
disk_io_counters().write_bytes - self.write_tmp
# net
self.net_in = \
net_io_counters().bytes_recv - self.net_in_tmp
self.net_out = \
net_io_counters().bytes_sent - self.net_out_tmp
# memories
self.ram = virtual_memory().percent
self.swap = swap_memory().percent
sleep(1)
except:
exit()
def get_results(self):
"""
Returns the hardware information as a dictionary
"""
return {
'cpu': self.cpu,
'cores': self.cores,
'ram': self.ram,
'total_ram': self.total_ram,
'swap': self.swap,
'total_swap': self.total_swap,
'disk_r': self.read,
'disk_w': self.write,
'net_in': self.net_in,
'net_out': self.net_out,
}
license: gpl-3.0 | hash: -2,589,567,619,368,150,500 | line_mean: 25.81982 | line_max: 72 | alpha_frac: 0.543164 | autogenerated: false | ratio: 3.648284 | config_test: false | has_no_keywords: false | few_assignments: false
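A minimal usage sketch for the Hardware monitor class in src/hardware.py above. This is an editor's illustration, not part of the dataset row; it requires psutil and assumes the file is importable as a module named hardware.

# Illustration only: start the sampling thread and read one snapshot.
from time import sleep
from hardware import Hardware  # src/hardware.py from the row above

monitor = Hardware()
monitor.start()        # daemon thread; run() samples via psutil roughly once per second
sleep(3)               # give it time to collect a couple of samples
print(monitor.get_results())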
repo_name: sjmh/cobbler | path: koan/register.py | copies: 1 | size: 5415
"""
registration tool for cobbler.
Copyright 2009 Red Hat, Inc and Others.
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from __future__ import print_function
import os
import traceback
from optparse import OptionParser
import time
import sys
import socket
from . import utils
from cexceptions import InfoException
import string
# usage: cobbler-register [--server=server] [--fqdn=hostname] --profile=foo
def main():
"""
Command line stuff...
"""
p = OptionParser()
p.add_option(
"-s",
"--server",
dest="server",
default=os.environ.get("COBBLER_SERVER", ""),
help="attach to this cobbler server"
)
p.add_option(
"-f",
"--fqdn",
dest="hostname",
default="",
help="override the discovered hostname"
)
p.add_option(
"-p",
"--port",
dest="port",
default="80",
help="cobbler port (default 80)"
)
p.add_option(
"-P",
"--profile",
dest="profile",
default="",
help="assign this profile to this system"
)
p.add_option(
"-b",
"--batch",
dest="batch",
action="store_true",
help="indicates this is being run from a script"
)
(options, args) = p.parse_args()
# if not os.getuid() == 0:
# print("koan requires root access")
# return 3
try:
k = Register()
k.server = options.server
k.port = options.port
k.profile = options.profile
k.hostname = options.hostname
k.batch = options.batch
k.run()
except Exception as e:
(xa, xb, tb) = sys.exc_info()
try:
getattr(e, "from_koan")
print(str(e)[1:-1]) # nice exception, no traceback needed
except:
print(xa)
print(xb)
print(string.join(traceback.format_list(traceback.extract_tb(tb))))
return 1
return 0
class Register:
def __init__(self):
"""
Constructor. Arguments will be filled in by optparse...
"""
self.server = ""
self.port = ""
self.profile = ""
self.hostname = ""
self.batch = ""
def run(self):
"""
Commence with the registration already.
"""
# not really required, but probably best that ordinary users don't try
# to run this not knowing what it does.
if os.getuid() != 0:
raise InfoException("root access is required to register")
print("- preparing to koan home")
self.conn = utils.connect_to_server(self.server, self.port)
reg_info = {}
print("- gathering network info")
netinfo = utils.get_network_info()
reg_info["interfaces"] = netinfo
print("- checking hostname")
sysname = ""
if self.hostname != "" and self.hostname != "*AUTO*":
hostname = self.hostname
sysname = self.hostname
else:
hostname = socket.getfqdn()
if hostname == "localhost.localdomain":
if self.hostname == '*AUTO*':
hostname = ""
sysname = str(time.time())
else:
raise InfoException(
"must specify --fqdn, could not discover")
if sysname == "":
sysname = hostname
if self.profile == "":
raise InfoException("must specify --profile")
# we'll do a profile check here just to avoid some log noise on the remote end.
# network duplication checks and profile checks also happen on the
# remote end.
avail_profiles = self.conn.get_profiles()
matched_profile = False
for x in avail_profiles:
if x.get("name", "") == self.profile:
matched_profile = True
break
reg_info['name'] = sysname
reg_info['profile'] = self.profile
reg_info['hostname'] = hostname
if not matched_profile:
raise InfoException(
"no such remote profile, see 'koan --list-profiles'")
if not self.batch:
self.conn.register_new_system(reg_info)
print("- registration successful, new system name: %s" % sysname)
else:
try:
self.conn.register_new_system(reg_info)
print("- registration successful, new system name: %s"
% sysname)
except:
traceback.print_exc()
print("- registration failed, ignoring because of --batch")
return
if __name__ == "__main__":
main()
license: gpl-2.0 | hash: -5,991,250,616,883,523,000 | line_mean: 27.650794 | line_max: 87 | alpha_frac: 0.563804 | autogenerated: false | ratio: 4.352894 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: Azure/azure-sdk-for-python | path: sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/aio/operations/_storage_accounts_operations.py | copies: 1 | size: 44288
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
"""StorageAccountsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def check_name_availability(
self,
account_name: "_models.StorageAccountCheckNameAvailabilityParameters",
**kwargs
) -> "_models.CheckNameAvailabilityResult":
"""Checks that the storage account name is valid and is not already in use.
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountCheckNameAvailabilityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs
) -> Optional["_models.StorageAccount"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccount"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs
) -> AsyncLROPoller["_models.StorageAccount"]:
"""Asynchronously creates a new storage account with the specified parameters. If an account is
already created and a subsequent create request is issued with different properties, the
account properties will be updated. If an account is already created and a subsequent create or
update request is issued with the exact same set of properties, the request will succeed.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either StorageAccount or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2018_02_01.models.StorageAccount]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> None:
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def get_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.StorageAccount":
"""Returns the properties for the specified storage account including but not limited to name, SKU
name, location, and account status. The ListKeys operation should be used to retrieve storage
keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs
) -> "_models.StorageAccount":
"""The update operation can be used to update the SKU, encryption, access tier, or tags for a
storage account. It can also be used to map the account to a custom domain. Only one custom
domain is supported per storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must be cleared/unregistered
before a new value can be set. The update of multiple properties is supported. This call does
not change the storage keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage account cannot be
changed after creation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the subscription. Note that storage keys are not
returned; use the ListKeys operation for this.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2018_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the given resource group. Note that storage keys
are not returned; use the ListKeys operation for this.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2018_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
async def list_keys(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.StorageAccountListKeysResult":
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'} # type: ignore
async def regenerate_key(
self,
resource_group_name: str,
account_name: str,
regenerate_key: "_models.StorageAccountRegenerateKeyParameters",
**kwargs
) -> "_models.StorageAccountListKeysResult":
"""Regenerates one of the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param regenerate_key: Specifies name of the key which should be regenerated -- key1 or key2.
:type regenerate_key: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountRegenerateKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_key.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'} # type: ignore
async def list_account_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.AccountSasParameters",
**kwargs
) -> "_models.ListAccountSasResponse":
"""List SAS credentials of a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list SAS credentials for the storage account.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.AccountSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListAccountSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.ListAccountSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAccountSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_account_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListAccountSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_account_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'} # type: ignore
async def list_service_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.ServiceSasParameters",
**kwargs
) -> "_models.ListServiceSasResponse":
"""List service SAS credentials of a specific resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list service SAS credentials.
:type parameters: ~azure.mgmt.storage.v2018_02_01.models.ServiceSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListServiceSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_02_01.models.ListServiceSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListServiceSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_service_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListServiceSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_service_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'} # type: ignore
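# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the generated client.  It assumes the
# multi-API azure-mgmt-storage package exposes an async StorageManagementClient
# (with this operations class mounted as ``client.storage_accounts``) and that
# azure-identity is installed; the resource group, account name and location
# below are placeholders.
async def _example_create_and_list_keys(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.storage.v2018_02_01 import models
    from azure.mgmt.storage.v2018_02_01.aio import StorageManagementClient
    async with DefaultAzureCredential() as credential:
        async with StorageManagementClient(credential, subscription_id) as client:
            # begin_create returns an AsyncLROPoller; await it, then await result()
            poller = await client.storage_accounts.begin_create(
                "example-rg",
                "examplestorage001",
                models.StorageAccountCreateParameters(
                    sku=models.Sku(name="Standard_LRS"),
                    kind="StorageV2",
                    location="westus",
                ),
            )
            account = await poller.result()
            # list_keys is a plain coroutine returning StorageAccountListKeysResult
            keys = await client.storage_accounts.list_keys("example-rg", account.name)
            print(account.id, len(keys.keys))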
|
mit
| 5,031,194,970,118,649,000
| 51.849642
| 198
| 0.658395
| false
| 4.311946
| true
| false
| false
|
hmenke/espresso
|
testsuite/python/coulomb_cloud_wall_duplicated.py
|
1
|
4487
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import numpy as np
import espressomd
import espressomd.cuda_init
import espressomd.electrostatics
from espressomd import scafacos
from tests_common import abspath
class CoulombCloudWall(ut.TestCase):
if "ELECTROSTATICS" in espressomd.features():
"""This compares p3m, p3m_gpu, scafacos_p3m and scafacos_p2nfft
electrostatic forces and energy against stored data."""
S = espressomd.System(box_l=[1.0, 1.0, 1.0])
S.seed = S.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(S.seed)
forces = {}
tolerance = 1E-3
# Reference energy from p3m in the tcl test case
reference_energy = 2. * 148.94229549
def setUp(self):
self.S.box_l = (10, 10, 20)
self.S.time_step = 0.01
self.S.cell_system.skin = 0.4
# Clear actors that might be left from prev tests
if self.S.actors:
del self.S.actors[0]
self.S.part.clear()
data = np.genfromtxt(
abspath("data/coulomb_cloud_wall_duplicated_system.data"))
# Add particles to system and store reference forces in hash
# Input format: id pos q f
for particle in data:
id = particle[0]
pos = particle[1:4]
q = particle[4]
f = particle[5:]
self.S.part.add(id=int(id), pos=pos, q=q)
self.forces[id] = f
def compare(self, method_name, energy=True):
# Compare forces and energy now in the system to stored ones
# Force
force_abs_diff = 0.
for p in self.S.part:
force_abs_diff += abs(
np.sqrt(sum((p.f - self.forces[p.id])**2)))
force_abs_diff /= len(self.S.part)
# Energy
if energy:
energy_abs_diff = abs(
self.S.analysis.energy()["total"] - self.reference_energy)
self.assertTrue(energy_abs_diff <= self.tolerance, "Absolute energy difference " +
str(energy_abs_diff) + " too large for " + method_name)
self.assertTrue(force_abs_diff <= self.tolerance, "Absolute force difference " +
str(force_abs_diff) + " too large for method " + method_name)
# Tests for individual methods
if "P3M" in espressomd.features():
def test_p3m(self):
self.S.actors.add(
espressomd.electrostatics.P3M(
prefactor=1, r_cut=1.001, accuracy=1e-3,
mesh=[64, 64, 128], cao=7, alpha=2.70746, tune=False))
self.S.integrator.run(0)
self.compare("p3m", energy=True)
if espressomd.has_features(["ELECTROSTATICS", "CUDA"]) and not \
str(espressomd.cuda_init.CudaInitHandle().device_list[0]) == "Device 687f":
def test_p3m_gpu(self):
self.S.actors.add(
espressomd.electrostatics.P3MGPU(
prefactor=1,
r_cut=1.001,
accuracy=1e-3,
mesh=[64, 64, 128],
cao=7,
alpha=2.70746,
tune=False))
self.S.integrator.run(0)
self.compare("p3m_gpu", energy=False)
def test_zz_deactivation(self):
        # Energy should be 0 if no methods are active
self.assertTrue(self.S.analysis.energy()["total"] == 0.0)
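# Illustrative sketch, not part of the test: the force check in compare()
# averages, over all particles, the Euclidean norm of the deviation between
# computed and stored reference forces.  A standalone example with made-up values:
def _example_force_metric():
    f_computed = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
    f_reference = np.array([[1.1, 0.0, 0.0], [0.0, 1.9, 0.0]])
    # mean of the two per-particle deviation norms -> 0.1, far above the
    # 1E-3 tolerance, so such a run would fail the assertion
    return np.mean(np.linalg.norm(f_computed - f_reference, axis=1))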
if __name__ == "__main__":
ut.main()
|
gpl-3.0
| 4,426,372,993,255,538,700
| 37.350427
| 98
| 0.560508
| false
| 3.729842
| true
| false
| false
|
mwoc/pydna
|
dna/components/heatex.py
|
1
|
14848
|
import scipy
import scipy.optimize
import warnings
# Some short-hands:
from dna.states import state
from dna.iterate import IterateParamHelper
from dna.component import Component
from dna.vendor import refprop as rp
class ConvergenceError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PinchCalc:
def __init__ (self, n1, n2, n3, n4, Nseg, dTmin):
self.n1 = n1
self.n2 = n2
self.n3 = n3
self.n4 = n4
self.Nseg = Nseg
self.dTmin = dTmin
def check(self, n1, n2, n3, n4):
dH_H = (n1['h']-n2['h'])/self.Nseg
dH_C = (n4['h']-n3['h'])/self.Nseg
dT_left = n1['t'] - n4['t']
dT_right = n2['t'] - n3['t']
dT_pinch = min(dT_left, dT_right)
pinch_pos = 0
Th = []
Tc = []
n1_2 = {
'media': n1['media'],
'y': n1['y'],
'cp': n1['cp'],
'p': n1['p'],
'h': n1['h']
}
n3_4 = {
'media': n3['media'],
'y': n3['y'],
'cp': n3['cp'],
'p': n3['p'],
'h': n4['h'] # Note n4 usage
}
for i in range(self.Nseg+1):
# Be explicit about the copying
n2_ = n1_2.copy()
n3_ = n3_4.copy()
n2_['h'] = n1['h'] - dH_H*i
n3_['h'] = n4['h'] - dH_C*i
T2_ = state(n2_)['t']
Th.append(T2_)
T3_ = state(n3_)['t']
Tc.append(T3_)
if T2_ - T3_ < dT_pinch:
pinch_pos = i
dT_pinch = T2_ - T3_
# Get effectiveness from NTU method
Q_max_cold = n3['mdot'] * (n1['h'] - n3['h'])
Q_max_hot = n1['mdot'] * (n1['h'] - n3['h'])
Q_max = min(abs(Q_max_cold), abs(Q_max_hot))
Q = n1['mdot'] * (n1['h'] - n2['h'])
if Q > 0 and Q_max > 0:
# Guard against division by zero
eff = Q / Q_max
else:
eff = 0
return {'dTmin':dT_pinch, 'Th':Th, 'Tc':Tc, 'percent': pinch_pos / self.Nseg, 'eff': eff, 'Q': Q}
def iterate(self, side=1):
'''
        Try to find the optimal configuration of the heat exchanger which satisfies
        the pinch point and keeps the exergy loss as low as possible.
Ideally, the pinch point is close to the hot side, so the cold flow
is heated up maximally.
'''
dTmin = self.dTmin
# Try pinch at cold side (cold in, hot out)
# Iteration params
tol = 0.1
delta = 1
convergence = 1
currIter = IterateParamHelper()
i = 0
dT_left = dTmin
result = {}
find_mdot = False
find_mdot1 = False
find_mdot3 = False
        # If enough info is known about the heat transfer, we can deduce an mdot
if not 'mdot' in self.n1:
find_mdot = find_mdot1 = True
#
elif not 'mdot' in self.n3:
find_mdot = find_mdot3 = True
#
print('n1 = ', self.n1['t'])
print('n3 = ', self.n3['t'])
        # Tolerance of 0.1 K is close enough
        # do NOT alter the convergence rate parameter; too high a value breaks the design
while abs(delta) > tol and i < 20:
# Make local copies of input
_n1 = self.n1.copy()
_n2 = self.n2.copy()
_n3 = self.n3.copy()
_n4 = self.n4.copy()
if not find_mdot and (_n1['mdot'] <= 0 or _n3['mdot'] <= 0):
# No iteration possible, early return
result['pinch'] = self.check(_n1, _n2, _n3, _n4)
return result['pinch']
if len(currIter.x) > 0:
dT_left = currIter.optimize(dT_left, manual = True)
else:
if side == 1:
dT_left = - 0.25 * (_n1['t'] - _n3['t'])
else:
dT_left = dTmin
if side == 1:
# Side 1 is hot side, 1 and 4
_n4['t'] = _n1['t'] + dT_left
if _n4['t'] > _n1['t']:
_n4['t'] = _n1['t'] - 2*dTmin
dT_left = -2*dTmin
state(_n4)
print('n4 = ', _n4['t'])
_n2['h'] = (_n1['h'] * _n1['mdot'] - (_n3['mdot'] * (_n4['h'] - _n3['h']))) / _n1['mdot']
state(_n2)
if _n2['t'] < _n3['t']:
print('Pretty sure this should be analysed from side 2')
print('n2 = ', _n2['t'])
# Update looping parameters
delta = _n2['t'] - (_n3['t'] + dTmin)
elif side == 2:
# Side 2 is cold side, 2 and 3
_n2['t'] = _n3['t'] - dT_left
if _n2['t'] < _n3['t']:
_n2['t'] = _n3['t'] - dTmin
dT_left = dTmin
state(_n2)
print('n2 = ', _n2['t'])
_n4['h'] = (_n3['h'] * _n3['mdot'] + (_n1['mdot'] * (_n1['h'] - _n2['h']))) / _n3['mdot']
state(_n4)
print('n4 = ', _n4['t'])
if _n4['t'] > _n1['t']:
print('Pretty sure this should be analysed from side 1')
# Update looping parameters
delta = _n1['t'] - (_n4['t'] + dTmin)
else:
# Assume one side is fixed, depending on if find_mdot1 or find_mdot3 is set
if find_mdot1:
# t2 and m1 unknown
_n2['t'] = _n3['t'] - dT_left
if _n2['t'] < _n3['t']:
_n2['t'] = _n3['t'] - dTmin
dT_left = dTmin
if 'tmin' in _n1 and _n2['t'] < _n1['tmin']:
_n2['t'] = _n1['tmin']
dT_left = _n3['t'] - _n2['t']
state(_n2)
_n1['mdot'] = ((_n4['h'] - _n3['h']) * _n3['mdot']) / (_n1['h'] - _n2['h'])
delta = _n1['t'] - (_n4['t'] + dTmin)
elif find_mdot3:
# t4 and m3 unknown
raise Exception('Not implemented')
#n3['mdot'] = ((n1['h'] - n2['h']) * n1['mdot']) / (n4['h'] - n3['h'])
else:
print(_n1)
print(_n2)
print(_n3)
print(_n4)
raise Exception('Wrong unknowns')
# Only accept positive delta for internal pinch calculation
if delta >= 0 - tol:
# At least the pinch at in/outlets is ok. Now check
# it internally
try:
# Check internal pinch too
result['pinch'] = self.check(_n1, _n2, _n3, _n4)
except rp.RefpropError as e:
# Ignore me
print(e)
print('Next')
else:
# Calculation succeeded
delta = result['pinch']['dTmin'] - dTmin
currIter.delta = delta # commented out to prevent IterateParamHelper from guessing
currIter.append(dT_left, delta)
i = i + 1
print('Iteration: ', i, '. Residual: ', currIter.y[-1])
if abs(delta) > tol:
print(delta, convergence, i)
raise ConvergenceError('No convergence reached')
if not 'pinch' in result:
warnings.warn('No pinch solution found', RuntimeWarning)
return False
else:
self.n1.update(_n1)
self.n2.update(_n2)
self.n3.update(_n3)
self.n4.update(_n4)
return result['pinch']
class PinchHex(Component):
def nodes(self, in1, out1, in2, out2):
self.addInlet(in1)
self.addInlet(in2)
self.addOutlet(out1)
self.addOutlet(out2)
return self
def calc(self, Nseg = 11, dTmin = 5, Q = False):
n = self.getNodes()
n1 = n['i'][0]
n2 = n['o'][0]
n3 = n['i'][1]
n4 = n['o'][1]
# Find states for all known inputs:
state(n1) # Hot inlet
state(n3) # Cold inlet
n2['p'] = n1['p']
n2['y'] = n1['y']
if 'media' in n1:
n2['media'] = n1['media']
if 'cp' in n2:
n2['cp'] = n1['cp']
n4['p'] = n3['p']
n4['y'] = n3['y']
if 'media' in n3:
n4['media'] = n3['media']
if 'cp' in n3:
n4['cp'] = n3['cp']
if 'mdot' in n1:
n2['mdot'] = n1['mdot']
if 'mdot' in n3:
n4['mdot'] = n3['mdot']
if n1['t'] < n3['t']:
# Act as if this component is bypassed
n2['t'] = n1['t']
state(n2)
n4['t'] = n3['t']
state(n4)
            warnings.warn(self.name + " - cold inlet has a higher temperature than the hot inlet; this is not possible, so heat exchange is set to 0", RuntimeWarning)
return self
calc = False
if 'q' in n2 or 't' in n2:
n2h = state(n2.copy())['h']
# Enthalpy in hot fluid cannot increase
if n2h >= n1['h']:
n2['h'] = n1['h']
state(n2)
if 't' in n4 or 'q' in n4:
n4h = state(n4.copy())['h']
# Enthalpy in cold fluid cannot decrease
if n4h <= n3['h']:
n4['h'] = n3['h']
state(n4) # Cold outlet
# Initiate pincher for later use
pincher = PinchCalc(n1, n2, n3, n4, Nseg, dTmin)
if 'h' in n1 and 'h' in n2 and 'mdot' in n1:
Q = n1['mdot'] * (n1['h'] - n2['h'])
if 'h' in n3 and 'h' in n4 and 'mdot' in n3:
Q = n3['mdot'] * (n4['h'] - n3['h'])
# Find any unknown inputs:
if not 't' in n2 and not 't' in n4:
# Find pinch by iteration, for given mass flow rates and inlet temperatures
calc = True
if n1['mdot'] <= 0 or n3['mdot'] <= 0:
# No heat exchange at all
n2['t'] = n1['t']
state(n2)
n4['t'] = n3['t']
state(n4)
else:
# First try one side of the HEX
try:
pinch = pincher.iterate(side = 1)
except RuntimeError as e:
print('First side failed, trying second. Reason:')
print(e)
# If that failed, try from the other
try:
pinch = pincher.iterate(side = 2)
except rp.RefpropError as e:
print('Second side iteration also failed.')
raise Exception(e)
except rp.RefpropError as e:
print('First side failed, trying second. Reason:')
print(e)
# If that failed, try from the other
try:
pinch = pincher.iterate(side = 2)
except rp.RefpropError as e:
print('Second side iteration also failed.')
raise Exception(e)
except ConvergenceError as e:
print('Convergence failed, trying other side', e)
try:
pinch = pincher.iterate(side = 2)
except rp.RefpropError as e:
print('Second side iteration also failed.')
raise Exception(e)
except Exception as e:
print('Unexpected exception: ', e)
raise(e)
finally:
print('Pinch - {} - following outlet temperatures found:'.format(self.name))
print('T2: ', n2['t'], ' T4: ', n4['t'])
elif not 'h' in n4:
# Calculate T4 for given mass flow rates and other temperatures
calc = True
if 'mdot' in n1 and 'mdot' in n3:
n4['h'] = (n3['h'] * n3['mdot'] + (n1['mdot'] * (n1['h'] - n2['h']))) / n3['mdot']
state(n4)
else:
n1['mdot'] = Q / (n1['h'] - n2['h'])
try:
pinch = pincher.iterate(side = False)
except Exception as e:
raise(e)
elif not 'h' in n2:
# Calculate T2 for given mass flow rates and other temperatures
calc = True
if 'mdot' in n1 and 'mdot' in n3:
n2['h'] = (n1['h'] * n1['mdot'] - (n3['mdot'] * (n4['h'] - n3['h']))) / n1['mdot']
state(n2)
else:
n3['mdot'] = Q / (n4['h'] - n3['h'])
try:
pinch = pincher.iterate(side = False)
except Exception as e:
raise(e)
if not 'mdot' in n3:
# Calculate m3 for given m1 or Q, and given temperatures
calc = True
if not 'mdot' in n1:
n1['mdot'] = Q / (n1['h'] - n2['h'])
n3['mdot'] = ((n1['h'] - n2['h']) * n1['mdot']) / (n4['h'] - n3['h'])
elif not 'mdot' in n1:
# Calculate m1 for given m3 or Q, and given temperatures
calc = True
if not 'mdot' in n3:
n3['mdot'] = Q / (n4['h'] - n3['h'])
n1['mdot'] = ((n4['h'] - n3['h']) * n3['mdot']) / (n1['h'] - n2['h'])
if calc == False:
print('Model overly specified for heatex `{}`'.format(self.name))
n2['mdot'] = n1['mdot']
n4['mdot'] = n3['mdot']
# Find the pinch point
pinch = pincher.check(n1, n2, n3, n4)
self.storeResult(pinch)
if abs(pinch['dTmin'] - dTmin) > 0.1:
print('Pinch - {} - value {:.2f} not enforced, found {:.2f} from conditions'.format(self.name, dTmin, pinch['dTmin']))
return self
class Condenser(Component):
def nodes(self, in1, out1):
self.addInlet(in1)
self.addOutlet(out1)
return self
def calc(self):
n = self.getNodes()
n1 = n['i'][0]
n2 = n['o'][0]
if 'media' in n1:
n2['media'] = n1['media']
n2['p'] = n1['p']
n2['y'] = n1['y']
n2['mdot'] = n1['mdot']
# If it is subcooled liquid entering the condenser, pass it through unamended
Tsat = state({'p': n1['p'], 'y': n1['y'], 'q': 0})['t']
if Tsat > n1['t']:
n2['t'] = n1['t']
else:
n2['t'] = Tsat
state(n2)
return self
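# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module: the effectiveness bookkeeping in
# PinchCalc.check() compares the actual duty against the smaller of the two
# theoretical maxima.  A standalone numeric example with made-up enthalpies
# [kJ/kg] and mass flows [kg/s]:
def _example_effectiveness():
    h1, h2, h3 = 2800.0, 2100.0, 400.0     # hot in, hot out, cold in
    mdot_hot, mdot_cold = 1.0, 2.5
    q_max_cold = mdot_cold * (h1 - h3)     # cold stream heated up to the hot inlet enthalpy
    q_max_hot = mdot_hot * (h1 - h3)       # hot stream cooled down to the cold inlet enthalpy
    q_max = min(abs(q_max_cold), abs(q_max_hot))
    q = mdot_hot * (h1 - h2)               # actual duty transferred
    eff = q / q_max if q > 0 and q_max > 0 else 0
    return q, q_max, eff                   # -> (700.0, 2400.0, ~0.29)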
|
bsd-3-clause
| 5,614,497,316,062,480,000
| 29.240326
| 160
| 0.420999
| false
| 3.360036
| false
| false
| false
|
Nic30/hwtLib
|
hwtLib/cesnet/mi32/intf.py
|
1
|
5517
|
from hwt.hdl.constants import READ, WRITE, READ_WRITE
from hwt.interfaces.agents.handshaked import HandshakedAgent
from hwt.interfaces.agents.vldSynced import VldSyncedAgent
from hwt.interfaces.std import VectSignal, Signal
from hwt.simulator.agentBase import SyncAgentBase
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
from hwtLib.avalon.mm import AvalonMmAddrAgent
from ipCorePackager.constants import DIRECTION
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.hdlSimulator import HdlSimulator
class Mi32(Interface):
"""
Simple memory interface similar to AvalonMM
:ivar ~.addr: r/w address
:ivar ~.rd: read enable
:ivar ~.wr: write enable
:ivar ~.ardy: slave address channel ready
:ivar ~.be: data byte mask for write
:ivar ~.dwr: write data
:ivar ~.drd: read data
:ivar ~.drdy: read data valid
.. hwt-autodoc::
"""
def _config(self):
self.DATA_WIDTH = Param(32)
self.ADDR_WIDTH = Param(32)
def _declr(self):
self.addr = VectSignal(self.ADDR_WIDTH)
self.rd = Signal()
self.wr = Signal()
self.ardy = Signal(masterDir=DIRECTION.IN)
self.be = VectSignal(self.DATA_WIDTH // 8)
self.dwr = VectSignal(self.DATA_WIDTH)
self.drd = VectSignal(self.DATA_WIDTH, masterDir=DIRECTION.IN)
self.drdy = Signal(masterDir=DIRECTION.IN)
def _getWordAddrStep(self):
"""
:return: size of one word in unit of address
"""
return int(self.DATA_WIDTH) // self._getAddrStep()
def _getAddrStep(self):
"""
:return: how many bits is one unit of address
(e.g. 8 bits for char * pointer, 36 for 36 bit bram)
"""
return 8
def _initSimAgent(self, sim: HdlSimulator):
self._ag = Mi32Agent(sim, self)
class Mi32Agent(SyncAgentBase):
"""
Simulation agent for Mi32 bus interface
:ivar ~.requests: request data, items are tuples (READ, address)
or (WRITE, address, data, be_mask)
:ivar ~.rData: data read from interface
"""
def __init__(self, sim: HdlSimulator, intf: Mi32, allowNoReset=False):
SyncAgentBase.__init__(self, sim, intf, allowNoReset=allowNoReset)
self.addrAg = Mi32AddrAgent(sim, intf, allowNoReset=allowNoReset)
self.dataAg = Mi32DataAgent(sim, intf, allowNoReset=allowNoReset)
def requests_get(self):
return self.addrAg.data
def requests_set(self, v):
self.addrAg.data = v
requests = property(requests_get, requests_set)
def r_data_get(self):
return self.dataAg.data
def r_data_set(self, v):
self.dataAg.data = v
r_data = property(r_data_get, r_data_set)
def getDrivers(self):
self.setEnable = self.setEnable_asDriver
return (self.dataAg.getMonitors()
+self.addrAg.getDrivers())
def getMonitors(self):
self.setEnable = self.setEnable_asMonitor
return (self.dataAg.getDrivers()
+self.addrAg.getMonitors())
class Mi32AddrAgent(HandshakedAgent):
"""
:ivar ~.requests: request data, items are tuples (READ, address)
or (WRITE, address, data, be_mask)
:note: two valid signals "read", "write"
    :note: one ready signal "ardy"
    :note: on write, set data and byteenable as well
"""
@classmethod
def get_ready_signal(cls, intf):
return intf.ardy
@classmethod
def get_valid_signal(cls, intf):
return (intf.rd, intf.wr)
def get_valid(self):
r = self._vld[0].read()
w = self._vld[1].read()
r.val = r.val | w.val
r.vld_mask = r.vld_mask & w.vld_mask
return r
def set_valid(self, val):
AvalonMmAddrAgent.set_valid(self, val)
def get_data(self):
intf = self.intf
address = intf.addr.read()
byteEnable = intf.be.read()
read = bool(intf.rd.read())
write = bool(intf.wr.read())
wdata = intf.dwr.read()
if read and write:
rw = READ_WRITE
elif read:
rw = READ
elif write:
rw = WRITE
else:
raise AssertionError("This funtion should not be called when data"
"is not ready on interface")
return (rw, address, wdata, byteEnable)
def set_data(self, data):
intf = self.intf
if data is None:
intf.addr.write(None)
intf.be.write(None)
intf.rd.write(0)
intf.wr.write(0)
else:
rw = data[0]
if rw is READ:
_, address = data
rd, wr = 1, 0
be = mask(intf.DATA_WIDTH // 8)
wdata = None
elif rw is WRITE:
rd, wr = 0, 1
_, address, wdata, be = data
elif rw is READ_WRITE:
rd, wr = 1, 1
_, address, wdata, be = data
else:
raise TypeError(f"rw is in invalid format {rw}")
intf.addr.write(address)
intf.rd.write(rd)
intf.wr.write(wr)
intf.be.write(be)
intf.dwr.write(wdata)
class Mi32DataAgent(VldSyncedAgent):
@classmethod
def get_valid_signal(cls, intf: Mi32):
return intf.drdy
def get_data(self):
return self.intf.drd.read()
def set_data(self, data):
self.intf.drd.write(data)
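# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module: request tuples queued on
# Mi32Agent.requests follow the format documented above, (READ, address) or
# (WRITE, address, data, byte_enable_mask).  The helper is hypothetical and
# assumes ``mi32`` is an Mi32 interface whose simulation agent has already
# been initialized (e.g. inside a hwt simulation).
def _example_queue_requests(mi32: Mi32):
    full_be = mask(mi32.DATA_WIDTH // 8)    # enable all bytes of one word
    mi32._ag.requests.extend([
        (WRITE, 0x0, 0xDEADBEEF, full_be),  # write one word at address 0
        (READ, 0x0),                        # read the same word back
    ])
    # after the simulation has run, read data is collected in mi32._ag.r_data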
|
mit
| 5,637,182,407,054,393,000
| 27.734375
| 78
| 0.590357
| false
| 3.52074
| false
| false
| false
|
closeio/nylas
|
inbox/models/contact.py
|
1
|
5371
|
from sqlalchemy import Column, Integer, String, Enum, Text, Index, BigInteger, \
ForeignKey
from sqlalchemy.orm import relationship, backref, validates
from sqlalchemy.schema import UniqueConstraint
from inbox.sqlalchemy_ext.util import MAX_TEXT_CHARS
from inbox.models.mixins import (HasPublicID, HasEmailAddress, HasRevisions,
UpdatedAtMixin, DeletedAtMixin)
from inbox.models.base import MailSyncBase
from inbox.models.event import Event
from inbox.models.message import Message
from inbox.models.namespace import Namespace
from inbox.util.encoding import unicode_safe_truncate
class Contact(MailSyncBase, HasRevisions, HasPublicID, HasEmailAddress,
UpdatedAtMixin, DeletedAtMixin):
"""Data for a user's contact."""
API_OBJECT_NAME = 'contact'
namespace_id = Column(BigInteger, nullable=False, index=True)
namespace = relationship(
Namespace,
primaryjoin='foreign(Contact.namespace_id) == remote(Namespace.id)',
load_on_pending=True)
# A server-provided unique ID.
# NB: We specify the collation here so that the test DB gets setup correctly.
uid = Column(String(64, collation='utf8mb4_bin'), nullable=False)
# A constant, unique identifier for the remote backend this contact came
# from. E.g., 'google', 'eas', 'inbox'
provider_name = Column(String(64))
name = Column(Text)
raw_data = Column(Text)
# A score to use for ranking contact search results. This should be
# precomputed to facilitate performant search.
score = Column(Integer)
# Flag to set if the contact is deleted in a remote backend.
# (This is an unmapped attribute, i.e., it does not correspond to a
# database column.)
deleted = False
__table_args__ = (UniqueConstraint('uid', 'namespace_id',
'provider_name'),
Index('idx_namespace_created', 'namespace_id',
'created_at'),
Index('ix_contact_ns_uid_provider_name',
'namespace_id', 'uid', 'provider_name'))
@validates('raw_data')
def validate_text_column_length(self, key, value):
if value is None:
return None
return unicode_safe_truncate(value, MAX_TEXT_CHARS)
@property
def versioned_relationships(self):
return ['phone_numbers']
def merge_from(self, new_contact):
# This must be updated when new fields are added to the class.
merge_attrs = ['name', 'email_address', 'raw_data']
for attr in merge_attrs:
if getattr(self, attr) != getattr(new_contact, attr):
setattr(self, attr, getattr(new_contact, attr))
class PhoneNumber(MailSyncBase, UpdatedAtMixin, DeletedAtMixin):
STRING_LENGTH = 64
contact_id = Column(BigInteger, index=True)
contact = relationship(
Contact,
primaryjoin='foreign(PhoneNumber.contact_id) == remote(Contact.id)',
backref=backref('phone_numbers', cascade='all, delete-orphan'))
type = Column(String(STRING_LENGTH), nullable=True)
number = Column(String(STRING_LENGTH), nullable=False)
class MessageContactAssociation(MailSyncBase):
"""Association table between messages and contacts.
Examples
--------
If m is a message, get the contacts in the to: field with
[assoc.contact for assoc in m.contacts if assoc.field == 'to_addr']
If c is a contact, get messages sent to contact c with
[assoc.message for assoc in c.message_associations if assoc.field ==
... 'to_addr']
"""
contact_id = Column(BigInteger, primary_key=True, index=True)
message_id = Column(ForeignKey(Message.id, ondelete='CASCADE'),
primary_key=True)
field = Column(Enum('from_addr', 'to_addr',
'cc_addr', 'bcc_addr', 'reply_to'))
# Note: The `cascade` properties need to be a parameter of the backref
# here, and not of the relationship. Otherwise a sqlalchemy error is thrown
# when you try to delete a message or a contact.
contact = relationship(
Contact,
primaryjoin='foreign(MessageContactAssociation.contact_id) == '
'remote(Contact.id)',
backref=backref('message_associations', cascade='all, delete-orphan'))
message = relationship(
Message,
backref=backref('contacts', cascade='all, delete-orphan'))
class EventContactAssociation(MailSyncBase):
"""Association table between event participants and contacts."""
contact_id = Column(BigInteger, primary_key=True, index=True)
event_id = Column(ForeignKey(Event.id, ondelete='CASCADE'),
primary_key=True)
field = Column(Enum('participant', 'title', 'description', 'owner'))
# Note: The `cascade` properties need to be a parameter of the backref
# here, and not of the relationship. Otherwise a sqlalchemy error is thrown
# when you try to delete an event or a contact.
contact = relationship(
Contact,
primaryjoin='foreign(EventContactAssociation.contact_id) == '
'remote(Contact.id)',
backref=backref('event_associations', cascade='all, delete-orphan'))
event = relationship(
Event,
backref=backref('contacts', cascade='all, delete-orphan'))
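# Illustrative sketch, not part of the module: the association pattern from the
# MessageContactAssociation docstring, wrapped in a helper.  ``session`` is a
# hypothetical SQLAlchemy session; it is not provided by this module.
def _example_to_recipients(session, message_id):
    message = session.query(Message).get(message_id)
    return [assoc.contact for assoc in message.contacts
            if assoc.field == 'to_addr']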
|
agpl-3.0
| 1,267,026,911,800,225,000
| 40
| 81
| 0.656675
| false
| 4.179767
| false
| false
| false
|
samsu/api_client
|
api_client/eventlet_client.py
|
1
|
7266
|
# Copyright 2015 Fortinet, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
eventlet.monkey_patch(thread=False, socket=False)
import atexit
import time
try:
import Queue
except Exception:
import queue as Queue
from oslo_log import log as logging
from . import base
from . import eventlet_request
from ._i18n import _LE
LOG = logging.getLogger(__name__)
class EventletApiClient(base.ApiClientBase):
"""Eventlet-based implementation of FortiOS ApiClient ABC."""
def __init__(self, api_providers, user, password,
key_file=None, cert_file=None, ca_file=None, ssl_sni=None,
concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
gen_timeout=base.GENERATION_ID_TIMEOUT,
use_https=True,
connect_timeout=base.DEFAULT_CONNECT_TIMEOUT,
singlethread=False):
'''Constructor
:param api_providers: a list of tuples of the form: (host, port,
is_ssl).
:param user: login username.
:param password: login password.
:param concurrent_connections: total number of concurrent connections.
:param use_https: whether or not to use https for requests.
:param connect_timeout: connection timeout in seconds.
:param gen_timeout controls how long the generation id is kept
if set to -1 the generation id is never timed out
'''
if not api_providers:
api_providers = []
self._singlethread = singlethread
self._api_providers = set([tuple(p) for p in api_providers])
self._api_provider_data = {} # tuple(semaphore, session_cookie|auth)
for p in self._api_providers:
self._set_provider_data(p, self.get_default_data())
self._user = user
self._password = password
self._key_file = key_file
self._cert_file = cert_file
self._ca_file = ca_file
# SSL server_name_indication
self._ssl_sni = ssl_sni
self._concurrent_connections = concurrent_connections
self._use_https = use_https
self._connect_timeout = connect_timeout
self._config_gen = None
self._config_gen_ts = None
self._gen_timeout = gen_timeout
# Connection pool is a list of queues.
if self._singlethread:
_queue = Queue.PriorityQueue
else:
_queue = eventlet.queue.PriorityQueue
self._conn_pool = _queue()
self._next_conn_priority = 1
for host, port, is_ssl in api_providers:
for __ in range(concurrent_connections):
conn = self._create_connection(host, port, is_ssl)
self._conn_pool.put((self._next_conn_priority, conn))
self._next_conn_priority += 1
atexit.register(self.close_connection)
def get_default_data(self):
if self._singlethread:
return None, None
else:
return eventlet.semaphore.Semaphore(1), None
def acquire_redirect_connection(self, conn_params, auto_login=True,
headers=None):
""" Check out or create connection to redirected API server.
Args:
conn_params: tuple specifying target of redirect, see
self._conn_params()
auto_login: returned connection should have valid session cookie
headers: headers to pass on if auto_login
Returns: An available HTTPConnection instance corresponding to the
specified conn_params. If a connection did not previously
                     exist, new connections are created with the highest priority
                     in the connection pool and one of these new connections is
                     returned.
"""
result_conn = None
data = self._get_provider_data(conn_params)
if data:
# redirect target already exists in provider data and connections
# to the provider have been added to the connection pool. Try to
# obtain a connection from the pool, note that it's possible that
# all connection to the provider are currently in use.
conns = []
while not self._conn_pool.empty():
priority, conn = self._conn_pool.get_nowait()
if not result_conn and self._conn_params(conn) == conn_params:
conn.priority = priority
result_conn = conn
else:
conns.append((priority, conn))
for priority, conn in conns:
self._conn_pool.put((priority, conn))
# hack: if no free connections available, create new connection
# and stash "no_release" attribute (so that we only exceed
# self._concurrent_connections temporarily)
if not result_conn:
conn = self._create_connection(*conn_params)
conn.priority = 0 # redirect connections have highest priority
conn.no_release = True
result_conn = conn
else:
# redirect target not already known, setup provider lists
self._api_providers.update([conn_params])
self._set_provider_data(conn_params, self.get_default_data())
            # redirects occur during cluster upgrades, i.e. requests to the old
            # target are redirected to the new one, so give redirect targets highest priority
priority = 0
for i in range(self._concurrent_connections):
conn = self._create_connection(*conn_params)
conn.priority = priority
if i == self._concurrent_connections - 1:
break
self._conn_pool.put((priority, conn))
result_conn = conn
if result_conn:
result_conn.last_used = time.time()
if auto_login and self.auth_data(conn) is None:
self._wait_for_login(result_conn, headers)
return result_conn
def _login(self, conn=None, headers=None):
'''Issue login request and update authentication cookie.'''
cookie = None
g = eventlet_request.LoginRequestEventlet(
self, self._user, self._password, conn, headers)
g.start()
ret = g.join()
if ret:
if isinstance(ret, Exception):
LOG.error(_LE('Login error "%s"'), ret)
raise ret
cookie = ret.getheader("Set-Cookie")
if cookie:
LOG.debug("Saving new authentication cookie '%s'", cookie)
return cookie
# Register as subclass.
base.ApiClientBase.register(EventletApiClient)
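# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module: a minimal instantiation of the
# client.  The host, port and credentials are placeholders; api_providers
# entries follow the (host, port, is_ssl) tuples documented in the constructor.
def _example_client():
    return EventletApiClient(
        api_providers=[('192.0.2.10', 443, True)],
        user='admin',
        password='secret',
        concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
        use_https=True,
    )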
|
apache-2.0
| -4,340,461,156,337,409,500
| 38.923077
| 79
| 0.604872
| false
| 4.529925
| false
| false
| false
|
naototty/vagrant-lxc-ironic
|
ironic/conductor/task_manager.py
|
1
|
14091
|
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A context manager to perform a series of tasks on a set of resources.
:class:`TaskManager` is a context manager, created on-demand to allow
synchronized access to a node and its resources.
The :class:`TaskManager` will, by default, acquire an exclusive lock on
a node for the duration that the TaskManager instance exists. You may
create a TaskManager instance without locking by passing "shared=True"
when creating it, but certain operations on the resources held by such
an instance of TaskManager will not be possible. Requiring this exclusive
lock guards against parallel operations interfering with each other.
A shared lock is useful when performing non-interfering operations,
such as validating the driver interfaces.
An exclusive lock is stored in the database to coordinate between
:class:`ironic.conductor.manager` instances, that are typically deployed on
different hosts.
:class:`TaskManager` methods, as well as driver methods, may be decorated to
determine whether their invocation requires an exclusive lock.
The TaskManager instance exposes certain node resources and properties as
attributes that you may access:
task.context
The context passed to TaskManager()
task.shared
False if Node is locked, True if it is not locked. (The
        'shared' kwarg of TaskManager())
task.node
The Node object
task.ports
Ports belonging to the Node
task.driver
The Driver for the Node, or the Driver based on the
'driver_name' kwarg of TaskManager().
Example usage:
::
with task_manager.acquire(context, node_id) as task:
task.driver.power.power_on(task.node)
If you need to execute task-requiring code in a background thread, the
TaskManager instance provides an interface to handle this for you, making
sure to release resources when the thread finishes (successfully or if
an exception occurs). Common use of this is within the Manager like so:
::
with task_manager.acquire(context, node_id) as task:
<do some work>
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
All exceptions that occur in the current GreenThread as part of the
spawn handling are re-raised. You can specify a hook to execute custom
code when such exceptions occur. For example, the hook is a more elegant
solution than wrapping the "with task_manager.acquire()" with a
try..exception block. (Note that this hook does not handle exceptions
raised in the background thread.):
::
def on_error(e):
if isinstance(e, Exception):
...
with task_manager.acquire(context, node_id) as task:
<do some work>
task.set_spawn_error_hook(on_error)
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import retrying
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common.i18n import _LW
from ironic.common import states
from ironic import objects
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def require_exclusive_lock(f):
"""Decorator to require an exclusive lock.
Decorated functions must take a :class:`TaskManager` as the first
parameter. Decorated class methods should take a :class:`TaskManager`
as the first parameter after "self".
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
task = args[0] if isinstance(args[0], TaskManager) else args[1]
if task.shared:
raise exception.ExclusiveLockRequired()
return f(*args, **kwargs)
return wrapper
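# Editor's illustration (not part of Ironic): the decorator contract is that
# the wrapped callable receives the TaskManager either as its first argument
# or as the first argument after `self`; calling it with a shared task raises
# ExclusiveLockRequired.
#
#     @require_exclusive_lock
#     def set_power_state(task, new_state):
#         ...  # only reachable when task.shared is False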
def acquire(context, node_id, shared=False, driver_name=None):
"""Shortcut for acquiring a lock on a Node.
:param context: Request context.
:param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: Name of Driver. Default: None.
:returns: An instance of :class:`TaskManager`.
"""
return TaskManager(context, node_id, shared=shared,
driver_name=driver_name)
class TaskManager(object):
"""Context manager for tasks.
This class wraps the locking, driver loading, and acquisition
of related resources (eg, Node and Ports) when beginning a unit of work.
"""
def __init__(self, context, node_id, shared=False, driver_name=None):
"""Create a new TaskManager.
Acquire a lock on a node. The lock can be either shared or
exclusive. Shared locks may be used for read-only or
non-disruptive actions only, and must be considerate to what
other threads may be doing on the same node at the same time.
:param context: request context
:param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: The name of the driver to load, if different
from the Node's current driver.
:raises: DriverNotFound
:raises: NodeNotFound
:raises: NodeLocked
"""
self._spawn_method = None
self._on_error_method = None
self.context = context
self.node = None
self.shared = shared
self.fsm = states.machine.copy()
# NodeLocked exceptions can be annoying. Let's try to alleviate
# some of that pain by retrying our lock attempts. The retrying
# module expects a wait_fixed value in milliseconds.
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
def reserve_node():
LOG.debug("Attempting to reserve node %(node)s",
{'node': node_id})
self.node = objects.Node.reserve(context, CONF.host, node_id)
try:
if not self.shared:
reserve_node()
else:
self.node = objects.Node.get(context, node_id)
self.ports = objects.Port.list_by_node_id(context, self.node.id)
self.driver = driver_factory.get_driver(driver_name or
self.node.driver)
# NOTE(deva): this handles the Juno-era NOSTATE state
# and should be deleted after Kilo is released
if self.node.provision_state is states.NOSTATE:
self.node.provision_state = states.AVAILABLE
self.node.save()
self.fsm.initialize(self.node.provision_state)
except Exception:
with excutils.save_and_reraise_exception():
self.release_resources()
def spawn_after(self, _spawn_method, *args, **kwargs):
"""Call this to spawn a thread to complete the task.
The specified method will be called when the TaskManager instance
exits.
:param _spawn_method: a method that returns a GreenThread object
:param args: args passed to the method.
:param kwargs: additional kwargs passed to the method.
"""
self._spawn_method = _spawn_method
self._spawn_args = args
self._spawn_kwargs = kwargs
def set_spawn_error_hook(self, _on_error_method, *args, **kwargs):
"""Create a hook to handle exceptions when spawning a task.
Create a hook that gets called upon an exception being raised
from spawning a background thread to do a task.
:param _on_error_method: a callable object, it's first parameter
should accept the Exception object that was raised.
:param args: additional args passed to the callable object.
:param kwargs: additional kwargs passed to the callable object.
"""
self._on_error_method = _on_error_method
self._on_error_args = args
self._on_error_kwargs = kwargs
def release_resources(self):
"""Unlock a node and release resources.
If an exclusive lock is held, unlock the node. Reset attributes
to make it clear that this instance of TaskManager should no
longer be accessed.
"""
if not self.shared:
try:
if self.node:
objects.Node.release(self.context, CONF.host, self.node.id)
except exception.NodeNotFound:
# squelch the exception if the node was deleted
# within the task's context.
pass
self.node = None
self.driver = None
self.ports = None
self.fsm = None
def _thread_release_resources(self, t):
"""Thread.link() callback to release resources."""
self.release_resources()
def process_event(self, event, callback=None, call_args=None,
call_kwargs=None, err_handler=None):
"""Process the given event for the task's current state.
:param event: the name of the event to process
:param callback: optional callback to invoke upon event transition
:param call_args: optional \*args to pass to the callback method
:param call_kwargs: optional \**kwargs to pass to the callback method
:param err_handler: optional error handler to invoke if the
callback fails, eg. because there are no workers available
(err_handler should accept arguments node, prev_prov_state, and
prev_target_state)
:raises: InvalidState if the event is not allowed by the associated
state machine
"""
# Advance the state model for the given event. Note that this doesn't
# alter the node in any way. This may raise InvalidState, if this event
# is not allowed in the current state.
self.fsm.process_event(event)
# stash current states in the error handler if callback is set,
# in case we fail to get a worker from the pool
if err_handler and callback:
self.set_spawn_error_hook(err_handler, self.node,
self.node.provision_state,
self.node.target_provision_state)
self.node.provision_state = self.fsm.current_state
self.node.target_provision_state = self.fsm.target_state
# set up the async worker
if callback:
# clear the error if we're going to start work in a callback
self.node.last_error = None
if call_args is None:
call_args = ()
if call_kwargs is None:
call_kwargs = {}
self.spawn_after(callback, *call_args, **call_kwargs)
# publish the state transition by saving the Node
self.node.save()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None and self._spawn_method is not None:
# Spawn a worker to complete the task
# The linked callback below will be called whenever:
# - background task finished with no errors.
# - background task has crashed with exception.
# - callback was added after the background task has
# finished or crashed. While eventlet currently doesn't
# schedule the new thread until the current thread blocks
# for some reason, this is true.
# All of the above are asserted in tests such that we'll
# catch if eventlet ever changes this behavior.
thread = None
try:
thread = self._spawn_method(*self._spawn_args,
**self._spawn_kwargs)
# NOTE(comstud): Trying to use a lambda here causes
# the callback to not occur for some reason. This
# also makes it easier to test.
thread.link(self._thread_release_resources)
# Don't unlock! The unlock will occur when the
                # thread finishes.
return
except Exception as e:
with excutils.save_and_reraise_exception():
try:
# Execute the on_error hook if set
if self._on_error_method:
self._on_error_method(e, *self._on_error_args,
**self._on_error_kwargs)
except Exception:
LOG.warning(_LW("Task's on_error hook failed to "
"call %(method)s on node %(node)s"),
{'method': self._on_error_method.__name__,
'node': self.node.uuid})
if thread is not None:
# This means the link() failed for some
# reason. Nuke the thread.
thread.cancel()
self.release_resources()
self.release_resources()
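# Editor's illustration (not part of Ironic): a typical conductor-side flow
# combining acquire(), process_event() and spawn_after(); the callback, worker
# and error-handler names are placeholders for conductor code.
#
#     with task_manager.acquire(context, node_id, shared=False) as task:
#         task.process_event(
#             'deploy',
#             callback=self._spawn_worker,
#             call_args=(do_node_deploy, task),
#             err_handler=provisioning_error_handler)
#
# The lock is released, or handed to the spawned worker, when the `with`
# block exits, as implemented in __exit__ above.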
|
apache-2.0
| 1,835,137,189,771,582,200
| 38.033241
| 79
| 0.623944
| false
| 4.491871
| false
| false
| false
|
bxm156/yelpy
|
yelpy/yelpy_signer.py
|
1
|
1078
|
import oauth2
import os
class YelpySigner(object):
def __init__(self, consumer_key=None, consumer_secret=None, token=None, token_secret=None):
super(YelpySigner, self).__init__()
self.consumer_key = consumer_key or os.environ['YELPY_CONSUMER_KEY']
self.consumer_secret = consumer_secret or os.environ['YELPY_CONSUMER_SECRET']
self.token = token or os.environ['YELPY_TOKEN']
self.token_secret = token_secret or os.environ['YELPY_TOKEN_SECRET']
def sign(self, url):
consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
oauth_request = oauth2.Request('GET', url, {})
oauth_request.update({
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': oauth2.generate_timestamp(),
'oauth_token': self.token,
'oauth_consumer_key': self.consumer_key,
})
token = oauth2.Token(self.token, self.token_secret)
oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
return oauth_request.to_url()
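# Editor's usage sketch: the credentials below are placeholders; in normal use
# they come from the YELPY_* environment variables read in __init__.
if __name__ == '__main__':
    signer = YelpySigner(consumer_key='my-key', consumer_secret='my-secret',
                         token='my-token', token_secret='my-token-secret')
    # sign() returns the original URL with OAuth 1.0a query parameters appended.
    print(signer.sign('http://api.yelp.com/v2/search?term=food&location=SF'))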
|
gpl-2.0
| 6,199,489,522,802,149,000
| 43.916667
| 95
| 0.648423
| false
| 3.546053
| false
| false
| false
|
Pulgama/supriya
|
supriya/examples/grey_wash/sessions/chants.py
|
1
|
4782
|
import supriya
from .. import project_settings, synthdefs
class SessionFactory(supriya.nonrealtime.SessionFactory):
### CLASS VARIABLES ###
release_time = 15
### SESSION ###
def __session__(self, initial_seed=0, layer_count=10, minutes=2, **kwargs):
self.buffers = []
session = supriya.Session(
input_bus_channel_count=self.input_bus_channel_count,
output_bus_channel_count=self.output_bus_channel_count,
)
with session.at(0):
for say in self.libretto:
buffer_ = session.add_buffer(channel_count=1, file_path=say)
self.buffers.append(buffer_)
for i in range(layer_count):
session.inscribe(
self.global_pattern, duration=60 * minutes, seed=initial_seed + i
)
with session.at(0):
session.add_synth(
synthdef=synthdefs.compressor_synthdef,
add_action="ADD_TO_TAIL",
duration=session.duration + self.release_time,
pregain=0,
)
session.set_rand_seed(initial_seed)
return session
@property
def libretto(self):
libretto = []
text = "videoconferencing"
for voice in ["Daniel", "Tessa", "Karen", "Thomas"]:
libretto.append(supriya.Say(text, voice=voice))
return libretto
### GLOBAL PATTERN ###
@property
def global_pattern(self):
global_pattern = supriya.patterns.Pgpar(
[self.source_pattern, self.effect_pattern], release_time=self.release_time
)
global_pattern = global_pattern.with_bus(release_time=self.release_time)
return global_pattern
### SOURCE PATTERNS ###
@property
def source_pattern(self):
source_pattern = self.one_shot_player_pattern
source_pattern = source_pattern.with_group(release_time=self.release_time)
source_pattern = source_pattern.with_effect(
synthdef=synthdefs.compressor_synthdef,
release_time=self.release_time,
pregain=3,
)
return source_pattern
@property
def one_shot_player_pattern(self):
return supriya.patterns.Pbind(
synthdef=synthdefs.one_shot_player_synthdef,
add_action=supriya.AddAction.ADD_TO_HEAD,
buffer_id=supriya.patterns.Prand(self.buffers, repetitions=None),
delta=5,
duration=0,
gain=supriya.patterns.Pwhite(-12, 12),
pan=supriya.patterns.Pwhite(-1, 1.0),
rate=2 ** supriya.patterns.Pwhite(-1, 0.25),
)
### EFFECT PATTERNS ###
@property
def chorus_pattern(self):
return supriya.patterns.Pbindf(
self.fx_pattern,
synthdef=synthdefs.windowed_chorus_factory.build(
name="chorus8", iterations=8
),
frequency=supriya.patterns.Pwhite() * 2,
gain=3,
)
@property
def effect_pattern(self):
effect_pattern = supriya.patterns.Ppar(
[
self.chorus_pattern,
self.freeverb_pattern,
self.chorus_pattern,
self.pitchshift_pattern,
]
)
effect_pattern = effect_pattern.with_group(release_time=self.release_time)
effect_pattern = effect_pattern.with_effect(
synthdef=synthdefs.compressor_synthdef,
release_time=self.release_time,
pregain=3,
)
return effect_pattern
@property
def freeverb_pattern(self):
return supriya.patterns.Pbindf(
self.fx_pattern,
synthdef=synthdefs.windowed_freeverb_synthdef,
damping=supriya.patterns.Pwhite() ** 0.25,
gain=3,
level=supriya.patterns.Pwhite(0.0, 0.25),
room_size=supriya.patterns.Pwhite() ** 0.25,
)
@property
def fx_pattern(self):
return supriya.patterns.Pbind(
add_action=supriya.AddAction.ADD_TO_TAIL,
delta=supriya.patterns.Pwhite(0, 10),
duration=supriya.patterns.Pwhite(5, 30),
level=supriya.patterns.Pwhite(0.25, 1.0),
)
@property
def pitchshift_pattern(self):
return supriya.patterns.Pbindf(
self.fx_pattern,
synthdef=synthdefs.windowed_pitchshift_synthdef,
gain=3,
pitch_dispersion=supriya.patterns.Pwhite(0.0, 0.02),
pitch_shift=supriya.patterns.Pwhite(-12.0, 12.0),
time_dispersion=supriya.patterns.Pwhite(),
window_size=supriya.patterns.Pwhite(0.1, 2.0),
)
chants = SessionFactory.from_project_settings(project_settings)
|
mit
| -1,790,003,712,891,903,000
| 31.753425
| 86
| 0.580301
| false
| 3.642041
| false
| false
| false
|
Lencerf/BiliDan
|
bilidan.py
|
1
|
24251
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Biligrab-Danmaku2ASS
#
# Author: Beining@ACICFG https://github.com/cnbeining
# Author: StarBrilliant https://github.com/m13253
#
# Biligrab is licensed under MIT licence
# Permission has been granted for the use of Danmaku2ASS in Biligrab
#
# Copyright (c) 2014
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
if sys.version_info < (3, 0):
sys.stderr.write('ERROR: Python 3.0 or newer version is required.\n')
sys.exit(1)
import argparse
import gzip
import json
import hashlib
import io
import logging
import math
import os
import re
import subprocess
import tempfile
import urllib.parse
import urllib.request
import xml.dom.minidom
import zlib
USER_AGENT_PLAYER = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0.2) Gecko/20100101 Firefox/6.0.2 Fengfan/1.0'
USER_AGENT_API = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0.2) Gecko/20100101 Firefox/6.0.2 Fengfan/1.0'
APPKEY = '452d3958f048c02a' # From some source
APPSEC = '' # We shall not release this from now
BILIGRAB_HEADER = {'User-Agent': USER_AGENT_API, 'Cache-Control': 'no-cache', 'Pragma': 'no-cache'}
def biligrab(url, *, debug=False, verbose=False, media=None, comment=None, cookie=None, quality=None, source=None, keep_fps=False, mpvflags=[], d2aflags={}, fakeip=None):
url_get_metadata = 'http://api.bilibili.com/view?'
url_get_comment = 'http://comment.bilibili.com/%(cid)s.xml'
if source == 'overseas':
url_get_media = 'http://interface.bilibili.com/v_cdn_play?'
else:
url_get_media = 'http://interface.bilibili.com/playurl?'
def parse_url(url):
'''Parse a bilibili.com URL
Return value: (aid, pid)
'''
regex = re.compile('(http:/*[^/]+/video/)?av(\\d+)(/|/index.html|/index_(\\d+).html)?(\\?|#|$)')
regex_match = regex.match(url)
if not regex_match:
raise ValueError('Invalid URL: %s' % url)
aid = regex_match.group(2)
pid = regex_match.group(4) or '1'
return aid, pid
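    # Editor's example: parse_url('http://www.bilibili.com/video/av8000000/')
    # returns ('8000000', '1'), while '.../av8000000/index_2.html' returns
    # ('8000000', '2'); the part number defaults to '1' when absent.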
def fetch_video_metadata(aid, pid):
'''Fetch video metadata
Arguments: aid, pid
Return value: {'cid': cid, 'title': title}
'''
req_args = {'type': 'json', 'appkey': APPKEY, 'id': aid, 'page': pid}
#req_args['sign'] = bilibili_hash(req_args)
req_args['sign'] = ''
_, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
try:
response = dict(json.loads(response.decode('utf-8', 'replace')))
except (TypeError, ValueError):
raise ValueError('Can not get \'cid\' from %s' % url)
if 'error' in response:
logging.error('Error message: %s' % response.get('error'))
if 'cid' not in response:
raise ValueError('Can not get \'cid\' from %s' % url)
return response
def get_media_urls(cid, *, fuck_you_bishi_mode=False):
'''Request the URLs of the video
Arguments: cid
Return value: [media_urls]
'''
if source in {None, 'overseas'}:
user_agent = USER_AGENT_API if not fuck_you_bishi_mode else USER_AGENT_PLAYER
req_args = {'appkey': APPKEY, 'cid': cid}
if quality is not None:
req_args['quality'] = quality
#req_args['sign'] = bilibili_hash(req_args)
req_args['sign'] = ''
_, response = fetch_url(url_get_media+urllib.parse.urlencode(req_args), user_agent=user_agent, cookie=cookie, fakeip=fakeip)
media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
if not fuck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
logging.error('Detected User-Agent block. Switching to fuck-you-bishi mode.')
return get_media_urls(cid, fuck_you_bishi_mode=True)
elif source == 'html5':
req_args = {'aid': aid, 'page': pid}
logging.warning('HTML5 video source is experimental and may not always work.')
_, response = fetch_url('http://www.bilibili.com/m/html5?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
response = json.loads(response.decode('utf-8', 'replace'))
media_urls = [dict.get(response, 'src')]
if not media_urls[0]:
media_urls = []
if not fuck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
logging.error('Failed to request HTML5 video source. Retrying.')
return get_media_urls(cid, fuck_you_bishi_mode=True)
elif source == 'flvcd':
req_args = {'kw': url}
if quality is not None:
if quality == 3:
req_args['quality'] = 'high'
elif quality >= 4:
req_args['quality'] = 'super'
_, response = fetch_url('http://www.flvcd.com/parse.php?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
resp_match = re.search('<input type="hidden" name="inf" value="([^"]+)"', response.decode('gbk', 'replace'))
if resp_match:
media_urls = resp_match.group(1).rstrip('|').split('|')
else:
media_urls = []
elif source == 'bilipr':
req_args = {'cid': cid}
quality_arg = '1080' if quality is not None and quality >= 4 else '720'
logging.warning('BilibiliPr video source is experimental and may not always work.')
resp_obj, response = fetch_url('http://pr.lolly.cc/P%s?%s' % (quality_arg, urllib.parse.urlencode(req_args)), user_agent=USER_AGENT_PLAYER)
if resp_obj.getheader('Content-Type', '').startswith('text/xml'):
media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
else:
media_urls = []
else:
assert source in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}
if len(media_urls) == 0 or media_urls == ['http://static.hdslb.com/error.mp4']:
raise ValueError('Can not get valid media URLs.')
return media_urls
def get_video_size(media_urls):
'''Determine the resolution of the video
Arguments: [media_urls]
Return value: (width, height)
'''
try:
if media_urls[0].startswith('http:') or media_urls[0].startswith('https:'):
ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '-timeout', '60000000', '-user-agent', USER_AGENT_PLAYER, '--', media_urls[0]]
else:
ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '--', media_urls[0]]
log_command(ffprobe_command)
ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE)
try:
ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace'))
except KeyboardInterrupt:
logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.')
ffprobe_process.terminate()
return 0, 0
width, height, widthxheight = 0, 0, 0
for stream in dict.get(ffprobe_output, 'streams') or []:
if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight:
width, height = dict.get(stream, 'width'), dict.get(stream, 'height')
return width, height
except Exception as e:
log_or_raise(e, debug=debug)
return 0, 0
def convert_comments(cid, video_size):
'''Convert comments to ASS subtitle format
Arguments: cid
Return value: comment_out -> file
'''
_, resp_comment = fetch_url(url_get_comment % {'cid': cid}, cookie=cookie)
comment_in = io.StringIO(resp_comment.decode('utf-8', 'replace'))
comment_out = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8-sig', newline='\r\n', prefix='tmp-danmaku2ass-', suffix='.ass', delete=False)
logging.info('Invoking Danmaku2ASS, converting to %s' % comment_out.name)
d2a_args = dict({'stage_width': video_size[0], 'stage_height': video_size[1], 'font_face': 'PingFangSC-Regular', 'font_size': math.ceil(video_size[1]/23), 'text_opacity': 0.8, 'duration_marquee': min(max(6.75*video_size[0]/video_size[1]-4, 3.0), 8.0), 'duration_still': 5.0}, **d2aflags)
for i, j in ((('stage_width', 'stage_height', 'reserve_blank'), int), (('font_size', 'text_opacity', 'comment_duration', 'duration_still', 'duration_marquee'), float)):
for k in i:
if k in d2aflags:
d2a_args[k] = j(d2aflags[k])
try:
danmaku2ass.Danmaku2ASS([comment_in], comment_out, **d2a_args)
except Exception as e:
log_or_raise(e, debug=debug)
logging.error('Danmaku2ASS failed, comments are disabled.')
comment_out.flush()
comment_out.close() # Close the temporary file early to fix an issue related to Windows NT file sharing
return comment_out
def launch_player(video_metadata, media_urls, comment_out, is_playlist=False, increase_fps=True):
'''Launch MPV media player
Arguments: video_metadata, media_urls, comment_out
Return value: player_exit_code -> int
'''
mpv_version_master = tuple(check_env.mpv_version.split('-', 1)[0].split('.'))
mpv_version_gte_0_10 = mpv_version_master >= ('0', '10') or (len(mpv_version_master) >= 2 and len(mpv_version_master[1]) >= 3) or mpv_version_master[0] == 'git'
mpv_version_gte_0_6 = mpv_version_gte_0_10 or mpv_version_master >= ('0', '6') or (len(mpv_version_master) >= 2 and len(mpv_version_master[1]) >= 2) or mpv_version_master[0] == 'git'
mpv_version_gte_0_4 = mpv_version_gte_0_6 or mpv_version_master >= ('0', '4') or (len(mpv_version_master) >= 2 and len(mpv_version_master[1]) >= 2) or mpv_version_master[0] == 'git'
logging.debug('Compare mpv version: %s %s 0.10' % (check_env.mpv_version, '>=' if mpv_version_gte_0_10 else '<'))
logging.debug('Compare mpv version: %s %s 0.6' % (check_env.mpv_version, '>=' if mpv_version_gte_0_6 else '<'))
logging.debug('Compare mpv version: %s %s 0.4' % (check_env.mpv_version, '>=' if mpv_version_gte_0_4 else '<'))
if increase_fps: # If hardware decoding (without -copy suffix) is used, do not increase fps
for i in mpvflags:
i = i.split('=', 1)
if 'vdpau' in i or 'vaapi' in i or 'vda' in i:
increase_fps = False
break
command_line = ['mpv']
if video_resolution[0] >= 1280 or video_resolution[1] >= 720:
command_line += ['--fs', '--autofit', '950x540']
if mpv_version_gte_0_6:
command_line += ['--cache-file', 'TMP']
if increase_fps and mpv_version_gte_0_6: # Drop frames at vo side but not at decoder side to prevent A/V sync issues
command_line += ['--framedrop', 'vo']
command_line += ['--http-header-fields', 'User-Agent: '+USER_AGENT_PLAYER.replace(',', '\\,')]
if mpv_version_gte_0_6:
if mpv_version_gte_0_10:
command_line += ['--force-media-title', video_metadata.get('title', url)]
else:
command_line += ['--media-title', video_metadata.get('title', url)]
if is_playlist or len(media_urls) > 1:
command_line += ['--merge-files']
if mpv_version_gte_0_4:
command_line += ['--no-video-aspect', '--sub-ass', '--sub-file', comment_out.name]
else:
command_line += ['--no-aspect', '--ass', '--sub', comment_out.name]
if increase_fps:
if mpv_version_gte_0_6:
command_line += ['--vf', 'lavfi="fps=fps=60:round=down"']
else: # Versions < 0.6 have an A/V sync related issue
command_line += ['--vf', 'lavfi="fps=fps=50:round=down"']
command_line += mpvflags
if is_playlist:
command_line += ['--playlist']
else:
command_line += ['--']
command_line += media_urls
log_command(command_line)
player_process = subprocess.Popen(command_line)
try:
player_process.wait()
except KeyboardInterrupt:
logging.info('Terminating media player...')
try:
player_process.terminate()
try:
player_process.wait(timeout=2)
except subprocess.TimeoutExpired:
logging.info('Killing media player by force...')
player_process.kill()
except Exception:
pass
raise
return player_process.returncode
aid, pid = parse_url(url)
logging.info('Loading video info...')
video_metadata = fetch_video_metadata(aid, pid)
logging.info('Got video cid: %s' % video_metadata['cid'])
logging.info('Loading video content...')
if media is None:
media_urls = get_media_urls(video_metadata['cid'])
else:
media_urls = [media]
logging.info('Got media URLs:'+''.join(('\n %d: %s' % (i+1, j) for i, j in enumerate(media_urls))))
logging.info('Determining video resolution...')
video_size = get_video_size(media_urls)
video_resolution = video_size # backup original resolution
logging.info('Video resolution: %sx%s' % video_size)
if video_size[0] > 0 and video_size[1] > 0:
video_size = (video_size[0]*1080/video_size[1], 1080) # Simply fix ASS resolution to 1080p
else:
log_or_raise(ValueError('Can not get video size. Comments may be wrongly positioned.'), debug=debug)
video_size = (1920, 1080)
logging.info('Loading comments...')
if comment is None:
comment_out = convert_comments(video_metadata['cid'], video_size)
else:
comment_out = open(comment, 'r')
comment_out.close()
logging.info('Launching media player...')
player_exit_code = launch_player(video_metadata, media_urls, comment_out, increase_fps=not keep_fps)
if comment is None and player_exit_code == 0:
os.remove(comment_out.name)
return player_exit_code
def fetch_url(url, *, user_agent=USER_AGENT_PLAYER, cookie=None, fakeip=None):
'''Fetch HTTP URL
Arguments: url, user_agent, cookie
Return value: (response_object, response_data) -> (http.client.HTTPResponse, bytes)
'''
logging.debug('Fetch: %s' % url)
req_headers = {'User-Agent': user_agent, 'Accept-Encoding': 'gzip, deflate'}
if cookie:
req_headers['Cookie'] = cookie
if fakeip:
req_headers['X-Forwarded-For'] = fakeip
req_headers['Client-IP'] = fakeip
req = urllib.request.Request(url=url, headers=req_headers)
response = urllib.request.urlopen(req, timeout=120)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = gzip.GzipFile(fileobj=response).read()
elif content_encoding == 'deflate':
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
data = decompressobj.decompress(response.read())+decompressobj.flush()
else:
data = response.read()
return response, data
def bilibili_hash(args):
'''Calculate API signature hash
Arguments: {request_paramter: value}
Return value: hash_value -> str
'''
return hashlib.md5((urllib.parse.urlencode(sorted(args.items()))+APPSEC).encode('utf-8')).hexdigest() # Fuck you bishi
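# Editor's note: for the request parameters used above, e.g.
# {'appkey': APPKEY, 'cid': '12345'}, the signed string is the sorted,
# urlencoded form 'appkey=452d3958f048c02a&cid=12345' with APPSEC appended,
# and the signature is its md5 hexdigest.  Since APPSEC is intentionally left
# empty in this release, the callers above skip bilibili_hash() and send an
# empty 'sign' value instead.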
def check_env(debug=False):
'''Check the system environment to make sure dependencies are set up correctly
Return value: is_successful -> bool
'''
global danmaku2ass, requests
retval = True
try:
import danmaku2ass
except ImportError as e:
danmaku2ass_filename = os.path.abspath(os.path.join(__file__, '..', 'danmaku2ass.py'))
logging.error('Automatically downloading \'danmaku2ass.py\'\n from https://github.com/m13253/danmaku2ass\n to %s' % danmaku2ass_filename)
try:
danmaku2ass_downloaded = fetch_url('https://github.com/m13253/danmaku2ass/raw/master/danmaku2ass.py')
with open(danmaku2ass_filename, 'wb') as f:
f.write(danmaku2ass_downloaded[1])
del danmaku2ass_downloaded
except Exception as e:
logging.error('Can not download Danmaku2ASS module automatically (%s), please get it yourself.' % e)
retval = False
if retval:
try:
import danmaku2ass
danmaku2ass.Danmaku2ASS
except (AttributeError, ImportError) as e:
logging.error('Danmaku2ASS module is not working (%s), please update it at https://github.com/m13253/danmaku2ass' % e)
retval = False
try:
mpv_process = subprocess.Popen(('mpv', '--version'), stdout=subprocess.PIPE, env=dict(os.environ, MPV_VERBOSE='-1'))
mpv_output = mpv_process.communicate()[0].decode('utf-8', 'replace').splitlines()
for line in mpv_output:
if line.startswith('[cplayer] mpv '):
check_env.mpv_version = line.split(' ', 3)[2]
logging.debug('Detected mpv version: %s' % check_env.mpv_version)
break
else:
log_or_raise(RuntimeError('Can not detect mpv version.'), debug=debug)
check_env.mpv_version = 'git-'
except OSError as e:
logging.error('Please install \'mpv\' as the media player.')
retval = False
try:
mpv_process = subprocess.Popen(('mpv', '--vf', 'lavfi=help'), stdout=subprocess.DEVNULL)
mpv_process.wait()
if mpv_process.returncode != 0:
logging.error('mpv is not configured to enable \'lavfi\' filter. (mpv or ffmpeg may be too old)')
retval = False
except OSError as e:
logging.error('mpv is not configured to enable \'lavfi\' filter. (mpv or ffmpeg may be too old)')
retval = False
try:
subprocess.Popen(('ffprobe', '-version'), stdout=subprocess.DEVNULL)
except OSError as e:
logging.error('Please install \'ffprobe\' from FFmpeg ultilities.')
retval = False
return retval
def log_command(command_line):
'''Log the command line to be executed, escaping correctly
'''
logging.debug('Executing: '+' '.join('\''+i+'\'' if ' ' in i or '?' in i or '&' in i or '"' in i else i for i in command_line))
def log_or_raise(exception, debug=False):
'''Log exception if debug == False, or raise it if debug == True
'''
if debug:
raise exception
else:
logging.error(str(exception))
class MyArgumentFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
'''Patch the default argparse.HelpFormatter so that '\\n' is correctly handled
'''
return [i for line in text.splitlines() for i in argparse.HelpFormatter._split_lines(self, line, width)]
def main():
if len(sys.argv) == 1:
sys.argv.append('--help')
parser = argparse.ArgumentParser(formatter_class=MyArgumentFormatter)
parser.add_argument('-c', '--cookie', help='Import Cookie at bilibili.com, type document.cookie at JavaScript console to acquire it')
parser.add_argument('-d', '--debug', action='store_true', help='Stop execution immediately when an error occures')
parser.add_argument('-m', '--media', help='Specify local media file to play with remote comments')
parser.add_argument('--comment', help='Specify local ASS comment file to play with remote media')
parser.add_argument('-q', '--quality', type=int, help='Specify video quality, -q 1 for the lowest, -q 4 for HD')
parser.add_argument('-s', '--source', help='Specify the source of video provider.\n' +
'Available values:\n' +
'default: Default source\n' +
'overseas: CDN acceleration for users outside china\n' +
'flvcd: Video parsing service provided by FLVCD.com\n' +
'html5: Low quality video provided by m.acg.tv for mobile users')
parser.add_argument('-f', '--fakeip', help='Fake ip for bypassing restrictions.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print more debugging information')
parser.add_argument('--hd', action='store_true', help='Shorthand for -q 4')
parser.add_argument('--keep-fps', action='store_true', help='Use the same framerate as the video to animate comments, instead of increasing to 60 fps')
parser.add_argument('--mpvflags', metavar='FLAGS', default='', help='Parameters passed to mpv, formed as \'--option1=value1 --option2=value2\'')
parser.add_argument('--d2aflags', '--danmaku2assflags', metavar='FLAGS', default='', help='Parameters passed to Danmaku2ASS, formed as \'option1=value1,option2=value2\'')
parser.add_argument('url', metavar='URL', nargs='+', help='Bilibili video page URL (http://www.bilibili.com/video/av*/)')
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if args.verbose else logging.INFO)
if not check_env(debug=args.debug):
return 2
quality = args.quality if args.quality is not None else 4 if args.hd else None
source = args.source if args.source != 'default' else None
if source not in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}:
raise ValueError('invalid value specified for --source, see --help for more information')
mpvflags = args.mpvflags.split()
d2aflags = dict((i.split('=', 1) if '=' in i else [i, ''] for i in args.d2aflags.split(','))) if args.d2aflags else {}
fakeip = args.fakeip if args.fakeip else None
retval = 0
for url in args.url:
try:
retval = retval or biligrab(url, debug=args.debug, verbose=args.verbose, media=args.media, comment=args.comment, cookie=args.cookie, quality=quality, source=source, keep_fps=args.keep_fps, mpvflags=mpvflags, d2aflags=d2aflags, fakeip=args.fakeip)
except OSError as e:
logging.error(e)
retval = retval or e.errno
if args.debug:
raise
except Exception as e:
logging.error(e)
retval = retval or 1
if args.debug:
raise
return retval
if __name__ == '__main__':
sys.exit(main())
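# Editor's note: typical invocations, per the argparse setup above (the URLs
# are illustrative):
#
#     python3 bilidan.py http://www.bilibili.com/video/av8000000/
#     python3 bilidan.py --hd --mpvflags '--volume=50' http://www.bilibili.com/video/av8000000/index_2.html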
|
mit
| 6,329,866,537,232,969,000
| 48.27439
| 295
| 0.610857
| false
| 3.61566
| false
| false
| false
|
DayGitH/Python-Challenges
|
DailyProgrammer/20120405A.py
|
1
|
1079
|
"""
1000 Lockers Problem.
In an imaginary high school there exist 1000 lockers labelled 1, 2, ..., 1000. All of them are closed. 1000 students
are to "toggle" a locker's state. * The first student toggles all of them * The second one toggles every other one
(i.e, 2, 4, 6, ...) * The third one toggles the multiples of 3 (3, 6, 9, ...) and so on until all students have
finished.
To toggle means to close the locker if it is open, and to open it if it's closed.
How many and which lockers are open in the end?
Thanks to ladaghini for submitting this challenge to /r/dailyprogrammer_ideas!
"""
import math
N = 1000
working_list = [False] * N
for i in range(1,1000+1):
for n, j in enumerate(working_list):
if n%i == 0:
working_list[n] = not working_list[n]
for n, j in enumerate(working_list):
if j:
print(n)
print(working_list.count(True))
"""
/u/prophile's solution
requires dev to already know that the solution is all squares between 0 and N
"""
opens = [n*n for n in range(int(math.sqrt(N)) + 1) if n*n < N]
print(len(opens), opens)
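# Editor's note: only perfect squares stay open because locker k is toggled
# once per divisor of k, and divisors pair up as (d, k/d) except when k is a
# perfect square, so only squares get an odd number of toggles.  For N = 1000
# these are the 31 lockers 1, 4, 9, ..., 961.  The comprehension above also
# yields 0, which is not a real locker, so its length prints as 32 while the
# brute-force pass reports 31.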
|
mit
| 1,479,764,557,927,629,600
| 30.735294
| 116
| 0.68304
| false
| 3.100575
| false
| false
| false
|
callowayproject/django-objectpermissions
|
example/simpleapp/models.py
|
1
|
1253
|
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
class SimpleText(models.Model):
"""A Testing app"""
firstname = models.CharField(blank=True, max_length=255)
lastname = models.CharField(blank=True, max_length=255)
favorite_color = models.CharField(blank=True, max_length=255)
def __unicode__(self):
return self.firstname
class SimpleTaggedItem(models.Model):
tag = models.SlugField()
simple_text = models.ForeignKey(SimpleText)
def __unicode__(self):
return self.tag
import objectpermissions
permissions = ['perm1', 'perm2', 'perm3', 'perm4']
objectpermissions.register(SimpleText, permissions)
objectpermissions.register(SimpleTaggedItem, permissions)
from django.contrib import admin
from objectpermissions.admin import TabularUserPermInline, StackedUserPermInline
class SimpleTaggedItemInline(admin.TabularInline):
model = SimpleTaggedItem
class SimpleTextAdmin(admin.ModelAdmin):
list_display = ('firstname','lastname','favorite_color')
inlines = [SimpleTaggedItemInline, TabularUserPermInline, ]
admin.site.register(SimpleText, SimpleTextAdmin)
|
apache-2.0
| 3,383,614,336,839,751,000
| 32.891892
| 80
| 0.769354
| false
| 4.028939
| false
| false
| false
|
thorwhalen/ut
|
parse/web/parsing_templates.py
|
1
|
2359
|
__author__ = 'thor'
import ut as ms
import re
import requests
from bs4 import BeautifulSoup
import os
import ut.pfile.to
def get_multiple_template_dicts(source):
templates = dict()
if isinstance(source, str):
        if not re.compile('\n|\t').search(source) and len(source) < 150: # assume it's a filepath or url...
if os.path.exists(source):
source = ms.pfile.to.string(source)
else:
source = requests.get(source).text # ... and get the html
soup = BeautifulSoup(source)
table_soup_list = soup.find_all('table')
print("Found %d tables..." % len(table_soup_list))
for table_soup in table_soup_list:
try:
tt = mk_simple_template_dict(table_soup)
templates[tt['table']['id']] = tt
except Exception:
raise
print("... could extract a template from %d of these" % len(templates))
return templates
def mk_simple_template_dict(table_soup):
'''
Tries to create a template dict from html containing a table (should feed it with soup.find('table') for example)
    This function assumes that all thead cells are formatted the same, and all tbody rows are formatted the same
'''
# global table attributes
bb = table_soup
glob = dict()
glob['id'] = bb.attrs.get('id')
glob['summary'] = ''
glob['style'] = parse_style(bb.attrs.get('style'))
# thead attributes
bb = table_soup.find('thead').find('th')
thead = dict()
thead['scope'] = bb.attrs.get('scope')
thead['style'] = parse_style(bb.attrs.get('style'))
# tbody attributes
bb = table_soup.find('tbody').find('tr').find('td')
tbody = dict()
tbody['style'] = parse_style(bb.attrs.get('style'))
return {'table': glob, 'thead': thead, 'tbody': tbody}
def parse_style(style_string):
if style_string:
style_dict = dict()
t = re.compile('[^:]+:[^;]+;').findall(style_string.replace('\n','').replace('\t',''))
t = [x.replace(';','') for x in t]
t = [x.split(':') for x in t]
for i in range(len(t)):
for ii in range(len(t[i])):
t[i][ii] = t[i][ii].strip()
style_dict = dict()
for ti in t:
style_dict[ti[0]] = ti[1]
return style_dict
else:
return None
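# Editor's worked example for parse_style(); note that each declaration must
# end with ';' to be picked up by the regex above.
#
#     parse_style('color: red; font-size: 12px;')
#     # -> {'color': 'red', 'font-size': '12px'}
#     parse_style(None)
#     # -> None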
|
mit
| -3,465,566,294,807,742,500
| 30.891892
| 117
| 0.577363
| false
| 3.531437
| false
| false
| false
|
jyr/japos-client
|
views/login.py
|
1
|
4357
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
from platform import Platform
from openings import Opening_view
from controllers.auth import Auth_controller
# begin wxGlade: extracode
# end wxGlade
class Login_view(wx.Frame):
def __init__(self, parent, id):
# begin wxGlade: Login.__init__
img = Platform("/img/logo.png")
wx.Frame.__init__(self, parent, id, style=wx.DEFAULT_FRAME_STYLE ^(wx.MAXIMIZE_BOX))
self.controller = Auth_controller()
self.p_data = wx.Panel(self, -1)
self.s_username_staticbox = wx.StaticBox(self.p_data, -1, "")
self.s_password_staticbox = wx.StaticBox(self.p_data, -1, "")
self.s_connect_staticbox = wx.StaticBox(self.p_data, -1, "")
self.s_data_staticbox = wx.StaticBox(self.p_data, -1, "")
self.p_header = wx.Panel(self, -1)
self.img_logo = wx.StaticBitmap(self.p_header, -1, wx.Bitmap(img.string, wx.BITMAP_TYPE_ANY))
self.l_japos = wx.StaticText(self.p_header, -1, "JAPOS", style=wx.ALIGN_CENTRE)
self.static_line_1 = wx.StaticLine(self.p_header, -1, style=wx.LI_VERTICAL)
self.l_username = wx.StaticText(self.p_data, -1, "Username: ")
self.cb_username = wx.ComboBox(self.p_data, -1, choices=[], style=wx.CB_DROPDOWN)
self.l_password = wx.StaticText(self.p_data, -1, "Password: ")
self.tc_password = wx.TextCtrl(self.p_data, -1, "", style=wx.TE_PASSWORD)
self.b_login = wx.Button(self.p_data, -1, "Login")
self.Bind(wx.EVT_BUTTON, self.OnAuth, id = self.b_login.GetId())
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: Login.__set_properties
self.SetTitle("Login")
self.l_japos.SetForegroundColour(wx.Colour(255, 255, 255))
self.l_japos.SetFont(wx.Font(20, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.static_line_1.SetMinSize((251, 1))
self.static_line_1.SetBackgroundColour(wx.Colour(255, 255, 255))
self.p_header.SetBackgroundColour(wx.Colour(47, 47, 47))
self.l_username.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.l_password.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: Login.__do_layout
self.s_login = s_login = wx.BoxSizer(wx.VERTICAL)
s_data = wx.StaticBoxSizer(self.s_data_staticbox, wx.VERTICAL)
s_connect = wx.StaticBoxSizer(self.s_connect_staticbox, wx.HORIZONTAL)
s_password = wx.StaticBoxSizer(self.s_password_staticbox, wx.HORIZONTAL)
s_username = wx.StaticBoxSizer(self.s_username_staticbox, wx.HORIZONTAL)
s_header = wx.BoxSizer(wx.VERTICAL)
s_header.Add(self.img_logo, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 10)
s_header.Add(self.l_japos, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
s_header.Add(self.static_line_1, 0, wx.ALL|wx.EXPAND, 5)
self.p_header.SetSizer(s_header)
s_login.Add(self.p_header, 0, wx.EXPAND, 0)
s_username.Add(self.l_username, 0, 0, 0)
s_username.Add(self.cb_username, 1, 0, 0)
s_data.Add(s_username, 1, wx.EXPAND, 0)
s_password.Add(self.l_password, 0, 0, 0)
s_password.Add(self.tc_password, 1, 0, 0)
s_data.Add(s_password, 1, wx.EXPAND, 0)
s_connect.Add(self.b_login, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 1)
s_data.Add(s_connect, 1, wx.EXPAND, 0)
self.p_data.SetSizer(s_data)
s_login.Add(self.p_data, 1, wx.EXPAND, 0)
self.SetSizer(s_login)
s_login.Fit(self)
self.Layout()
self.Centre()
# end wxGlade
def OnAuth(self, evt):
username = self.cb_username.GetValue().encode('utf-8')
password = self.tc_password.GetValue().encode('utf-8')
        # print password  # debug leftover; avoid printing credentials
try:
self.valid = self.controller.auth(username, password)
if self.valid:
self.p_data.Destroy()
self.p_header.Destroy()
opening = Opening_view(self, -1)
else:
self.controller.error()
except: #japos.crews.models.DoesNotExist:
self.controller.error()
def main():
app = wx.PySimpleApp(0)
f_login = Login_view(None, -1)
f_login.Show()
app.MainLoop()
# end of class Login
|
gpl-2.0
| -9,159,458,240,173,351,000
| 40.903846
| 102
| 0.626348
| false
| 2.943919
| false
| false
| false
|
ethereum/dapp-bin
|
scrypt/scrypt.se.py
|
1
|
6861
|
data smix_intermediates[2**160](pos, stored[1024][4], state[8])
event TestLog6(h:bytes32)
macro blockmix($_inp):
with inp = $_inp:
with X = string(64):
mcopy(X, inp + 64, 64)
X[0] = ~xor(X[0], inp[0])
X[1] = ~xor(X[1], inp[1])
log(type=TestLog, 1, msg.gas)
X = salsa20(X)
log(type=TestLog, 2, msg.gas)
inp[4] = X[0]
inp[5] = X[1]
X[0] = ~xor(X[0], inp[2])
X[1] = ~xor(X[1], inp[3])
X = salsa20(X)
inp[6] = X[0]
inp[7] = X[1]
inp[0] = inp[4]
inp[1] = inp[5]
inp[2] = inp[6]
inp[3] = inp[7]
inp
macro endianflip($x):
with $y = string(len($x)):
with $i = 0:
with $L = len($y):
while $i < $L:
with $d = mload($x - 28 + $i):
mcopylast4($y + $i - 28, byte(31, $d) * 2**24 + byte(30, $d) * 2**16 + byte(29, $d) * 2**8 + byte(28, $d))
$i += 4
$y
macro mcopylast4($to, $frm):
~mstore($to, (~mload($to) & sub(0, 2**32)) + ($frm & 0xffffffff))
roundz = text("\x04\x00\x0c\x07\x08\x04\x00\x09\x0c\x08\x04\x0d\x00\x0c\x08\x12\x09\x05\x01\x07\x0d\x09\x05\x09\x01\x0d\x09\x0d\x05\x01\x0d\x12\x0e\x0a\x06\x07\x02\x0e\x0a\x09\x06\x02\x0e\x0d\x0a\x06\x02\x12\x03\x0f\x0b\x07\x07\x03\x0f\x09\x0b\x07\x03\x0d\x0f\x0b\x07\x12\x01\x00\x03\x07\x02\x01\x00\x09\x03\x02\x01\x0d\x00\x03\x02\x12\x06\x05\x04\x07\x07\x06\x05\x09\x04\x07\x06\x0d\x05\x04\x07\x12\x0b\x0a\x09\x07\x08\x0b\x0a\x09\x09\x08\x0b\x0d\x0a\x09\x08\x12\x0c\x0f\x0e\x07\x0d\x0c\x0f\x09\x0e\x0d\x0c\x0d\x0f\x0e\x0d\x12")
macro salsa20($x):
with b = string(64):
b[0] = $x[0]
b[1] = $x[1]
b = endianflip(b)
with x = string(64):
x[0] = b[0]
x[1] = b[1]
with i = 0:
with refpos = roundz:
while i < 4:
with destination = x + (~mload(refpos - 31) & 255) * 4 - 28:
with bb = ~mload(refpos - 28) & 255:
with a = (mload(x + (~mload(refpos-30) & 255) * 4 - 28) + mload(x + (~mload(refpos-29) & 255) * 4 - 28)) & 0xffffffff:
with oldval = mload(destination):
mcopylast4(destination, ~xor(oldval, ~or(a * 2**bb, a / 2**(32 - bb))))
refpos += 4
if refpos == roundz + 128:
i += 1
refpos = roundz
i = 0
while i < 64:
oldval = mload(b + i - 28) & 0xffffffff
newval = (oldval + mload(x + i - 28)) & 0xffffffff
mcopylast4(b + i - 28, newval)
i += 4
endianflip(b)
event TestLog(a:uint256, b:uint256)
event TestLog2(a:str)
event TestLog3(a:str, x:uint256)
event TestLog4(a:str, x:uint256, y:uint256)
event TestLog5(x:uint256, v1:bytes32, v2:bytes32, v3:bytes32, v4:bytes32)
def smix(b:str):
with h = mod(sha3(b:str), 2**160):
with x = string(256):
mcopy(x, b, 128)
with i = self.smix_intermediates[h].pos:
k = 0
while k < if(i > 0, 8, 0):
x[k] = self.smix_intermediates[h].state[k]
k += 1
while i < 2048 and msg.gas > 450000:
if i < 1024:
self.smix_intermediates[h].stored[i][0] = x[0]
self.smix_intermediates[h].stored[i][1] = x[1]
self.smix_intermediates[h].stored[i][2] = x[2]
self.smix_intermediates[h].stored[i][3] = x[3]
x = blockmix(x)
# if i == 1023:
# log(type=TestLog2, x)
else:
j = div(x[2], 256**31) + (div(x[2], 256**30) & 3) * 256
x[0] = ~xor(x[0], self.smix_intermediates[h].stored[j][0])
x[1] = ~xor(x[1], self.smix_intermediates[h].stored[j][1])
x[2] = ~xor(x[2], self.smix_intermediates[h].stored[j][2])
x[3] = ~xor(x[3], self.smix_intermediates[h].stored[j][3])
x = blockmix(x)
i += 1
k = 0
while k < 8:
self.smix_intermediates[h].state[k] = x[k]
k += 1
self.smix_intermediates[h].pos = i
# log(type=TestLog2, x)
if i == 2048:
with b = string(128):
mcopy(b, x, 128)
return(b:str)
else:
return(text(""):str)
event BlockMixInput(data:str)
def scrypt(pass:str): #implied: pass=salt, n=1024, r=1, p=1, dklen=32
b = self.pbkdf2(pass, pass, 128, outchars=128)
b = self.smix(b, outchars=128)
if not len(b):
return(0:bytes32)
o = self.pbkdf2(pass, b, 32, outchars=32)
return(o[0]:bytes32)
macro hmac_sha256($_key, $message): #implied: c=1, hash=sha256
with key = $_key:
if len(key) > 64:
key = [sha256(key:str)]
key[-1] = 32
if len(key) < 64:
with _o = string(64):
mcopy(_o, key, len(key))
key = _o
with o_key_pad_left = ~xor(0x5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c, key[0]):
with o_key_pad_right = ~xor(0x5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c5c, key[1]):
with padded_msg = string(len($message) + 64):
padded_msg[0] = ~xor(0x3636363636363636363636363636363636363636363636363636363636363636, key[0])
padded_msg[1] = ~xor(0x3636363636363636363636363636363636363636363636363636363636363636, key[1])
mcopy(padded_msg + 64, $message, len($message))
sha256([o_key_pad_left, o_key_pad_right, sha256(padded_msg:str)]:arr)
def hmac_sha256(key:str, msg:str):
return(hmac_sha256(key, msg):bytes32)
def pbkdf2(pass:str, salt:str, dklen): #implied: c=1, hash=sha256
o = string(dklen)
i = 0
while i * 32 < len(o):
o[i] = chain_prf(pass, salt, i + 1)
i += 1
return(o:str)
macro chain_prf($pass, $salt, $i):
with ext_salt = string(len($salt) + 4):
$j = $i
mcopy(ext_salt, $salt, len($salt))
mcopy(ext_salt + len($salt), ref($j) + 28, 4)
hmac_sha256($pass, ext_salt)
|
mit
| 6,643,061,788,805,022,000
| 41.351852
| 529
| 0.457805
| false
| 2.686374
| true
| false
| false
|
Migwi-Ndungu/bc-9-Pomodoro-Timer
|
dbase/db_script.py
|
1
|
2781
|
import sqlite3 as lite
import sys
#statusuuid
# active = 37806757-4391-4c40-8cae-6bbfd71e893e
# pending = 0eaec4f3-c524-40ab-b295-2db5cb7a0770
# finished = f82db8cc-a969-4495-bffd-bb0ce0ba877a
# running = 6c25b6d2-75cc-42c3-9c8c-ccf7b54ba585
#sounduuid
# on = 510b9503-7899-4d69-83c0-690342daf271
# off = 05797a63-51f5-4c1d-9068-215c593bba8d
def initialize_n_create_db():
'''
This is a script that creates a database called pomodoro with a table
    called timer_details. The timer_details table is populated with dummy
data that will be used for testing
'''
try:
        print 'Initializing database creation'
con = lite.connect(r'pomodoro.db')
cur = con.cursor()
cur.executescript("""
DROP TABLE IF EXISTS timer_details;
CREATE TABLE timer_details(uuid TEXT PRIMARY KEY, title TEXT,
start_time INTEGER, duration INTEGER,
shortbreak INTEGER, longbreak INTEGER,
cycle INTEGER, statusuuid TEXT, sounduuid TEXT);
INSERT INTO timer_details VALUES('12f63828-e21a-40c1-ab43-5f4dd5a5dd8a',
'presentn1', 1472004636, -9300, -10600, -10200, 1,
'0eaec4f3-c524-40ab-b295-2db5cb7a0770',
'510b9503-7899-4d69-83c0-690342daf271');
INSERT INTO timer_details VALUES('d57037fe-df12-4ca5-abff-1dd626cba2b5',
'presentn2', 1472015436, -9000, -10500, -9960, 2,
'37806757-4391-4c40-8cae-6bbfd71e893e',
'510b9503-7899-4d69-83c0-690342daf271');
INSERT INTO timer_details VALUES('8cb1795f-a50b-40a6-b2b7-6843602ad95c',
'exercise', 1472015536, -10200, -10560, -9600, 0,
'0eaec4f3-c524-40ab-b295-2db5cb7a0770',
'05797a63-51f5-4c1d-9068-215c593bba8d');
INSERT INTO timer_details VALUES('78d9d2bc-6fd3-4fad-94cc-b706aa91f57e',
'learning', 1472015636, -9900, -10500, -9900, 2,
'37806757-4391-4c40-8cae-6bbfd71e893e',
'510b9503-7899-4d69-83c0-690342daf271');
INSERT INTO timer_details VALUES('9bffb77d-569f-491e-8713-7bad9adfefa6',
'revision', 1472015736, -10500, -10440, -9900, 1,
'f82db8cc-a969-4495-bffd-bb0ce0ba877a',
'05797a63-51f5-4c1d-9068-215c593bba8d');
""")
con.commit()
        print 'Database timer_details creation finished successfully!!'
    except lite.Error, e:
        print 'Error %s occurred : Database creation failed!!!' % e.args[0]
if __name__ == '__main__':
initialize_n_create_db()
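# Editor's sketch: reading the seeded rows back after the script has run;
# the database file 'pomodoro.db' is created in the working directory.
#
#     con = lite.connect('pomodoro.db')
#     for row in con.execute('SELECT title, duration, statusuuid FROM timer_details'):
#         print row
#     con.close()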
|
mit
| -8,329,316,937,531,565,000
| 40.507463
| 85
| 0.588637
| false
| 2.942857
| false
| false
| false
|
Nuevosmedios/ADL_LRS
|
lrs/util/Authorization.py
|
1
|
4915
|
import base64
from functools import wraps
from django.conf import settings
from django.contrib.auth import authenticate
from vendor.xapi.lrs.exceptions import Unauthorized, OauthUnauthorized, BadRequest
from vendor.xapi.lrs.models import Token, Agent
from vendor.xapi.oauth_provider.utils import send_oauth_error
from vendor.xapi.oauth_provider.consts import ACCEPTED
from django.contrib.auth.models import User
# A decorator that can be used to authenticate requests at the site.
def auth(func):
@wraps(func)
def inner(request, *args, **kwargs):
        # Note: The cases involving OAUTH_ENABLED are here in case OAUTH_ENABLED is switched
        # from true to false after a client has performed the handshake. (Not likely to happen, but possible.)
auth_type = request['auth']['type']
# There is an http auth_type request and http auth is enabled
if auth_type == 'http' and settings.HTTP_AUTH_ENABLED:
http_auth_helper(request)
# There is an http auth_type request and http auth is not enabled
elif auth_type == 'http' and not settings.HTTP_AUTH_ENABLED:
raise BadRequest("HTTP authorization is not enabled. To enable, set the HTTP_AUTH_ENABLED flag to true in settings")
# There is an oauth auth_type request and oauth is enabled
elif auth_type == 'oauth' and settings.OAUTH_ENABLED:
oauth_helper(request)
# There is an oauth auth_type request and oauth is not enabled
elif auth_type == 'oauth' and not settings.OAUTH_ENABLED:
raise BadRequest("OAuth is not enabled. To enable, set the OAUTH_ENABLED flag to true in settings")
# There is no auth_type request and there is some sort of auth enabled
elif auth_type == 'none' and (settings.HTTP_AUTH_ENABLED or settings.OAUTH_ENABLED):
raise Unauthorized("Auth is enabled but no authentication was sent with the request.")
# There is no auth_type request and no auth is enabled
elif auth_type == 'none' and not (settings.HTTP_AUTH_ENABLED or settings.OAUTH_ENABLED):
request['auth'] = None
return func(request, *args, **kwargs)
return inner
def http_auth_helper(request):
if request['headers'].has_key('Authorization'):
auth = request['headers']['Authorization'].split()
if not request['is_authenticated']:
if len(auth) == 2:
if auth[0].lower() == 'basic':
# Currently, only basic http auth is used.
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user:
# If the user successfully logged in, then add/overwrite
# the user object of this request.
request['auth']['id'] = user
else:
raise Unauthorized("Authorization failed, please verify your username and password")
else:
user = User.objects.get(username = request['logged_user'])
request['auth']['id'] = user
else:
        # The Authorization header was not provided.
raise Unauthorized("Authorization header missing")
def oauth_helper(request):
consumer = request['auth']['oauth_consumer']
token = request['auth']['oauth_token']
# Make sure consumer has been accepted by system
if consumer.status != ACCEPTED:
raise OauthUnauthorized(send_oauth_error("%s has not been authorized" % str(consumer.name)))
# make sure the token is an approved access token
if token.token_type != Token.ACCESS or not token.is_approved:
raise OauthUnauthorized(send_oauth_error("The access token is not valid"))
user = token.user
user_name = user.username
if user.email.startswith('mailto:'):
user_email = user.email
else:
user_email = 'mailto:%s' % user.email
consumer = token.consumer
members = [
{
"account":{
"name":consumer.key,
"homePage":"lrs://XAPI/OAuth/token/"
},
"objectType": "Agent",
"oauth_identifier": "anonoauth:%s" % (consumer.key)
},
{
"name":user_name,
"mbox":user_email,
"objectType": "Agent"
}
]
kwargs = {"objectType":"Group", "member":members,"oauth_identifier": "anongroup:%s-%s" % (consumer.key, user_email)}
# create/get oauth group and set in dictionary
oauth_group, created = Agent.objects.oauth_group(**kwargs)
request['auth']['id'] = oauth_group
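# Editor's note: http_auth_helper() expects the standard HTTP Basic scheme,
# i.e. a header of the form "Authorization: Basic <base64('username:password')>",
# which a client could build like this (credentials are placeholders):
#
#     import base64
#     token = base64.b64encode('myuser:mypass')
#     headers = {'Authorization': 'Basic ' + token}
#
# Because the decoded value is split on ':', usernames or passwords that
# themselves contain a colon cannot be authenticated through this path.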
|
apache-2.0
| -1,585,675,415,629,788,400
| 48.153061
| 128
| 0.59939
| false
| 4.496798
| false
| false
| false
|
artemrizhov/django-mail-templated
|
docs/conf.py
|
1
|
9652
|
# -*- coding: utf-8 -*-
#
# Django Mail Templated documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 17 21:51:15 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxcontrib.napoleon',
'djangodocs',
]
intersphinx_mapping = {
'django': ('http://django.readthedocs.org/en/stable', None),
'python': ('https://docs.python.org/3', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Mail Templated'
copyright = u'2016, Artem Rizhov'
author = u'Artem Rizhov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.4'
# The full version, including alpha/beta/rc tags.
release = u'2.4.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoMailTemplateddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoMailTemplated.tex', u'Django Mail Templated Documentation',
u'Artem Rizhov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangomailtemplated', u'Django Mail Templated Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoMailTemplated', u'Django Mail Templated Documentation',
author, 'DjangoMailTemplated', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| -4,400,840,093,290,225,700
| 31.718644
| 83
| 0.706486
| false
| 3.678354
| true
| false
| false
|
gem/oq-engine
|
openquake/hazardlib/gsim/bindi_2011.py
|
1
|
14198
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`BindiEtAl2011`.
"""
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
def _compute_distance(rup, dists, C):
"""
    Compute the second term of equation 1, described in paragraph 3:
``c1 + c2 * (M-Mref) * log(sqrt(Rjb ** 2 + h ** 2)/Rref) -
c3*(sqrt(Rjb ** 2 + h ** 2)-Rref)``
"""
mref = 5.0
rref = 1.0
rval = np.sqrt(dists.rjb ** 2 + C['h'] ** 2)
return (C['c1'] + C['c2'] * (rup.mag - mref)) *\
np.log10(rval / rref) - C['c3'] * (rval - rref)
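# Illustrative sketch, not part of the original GMPE module: evaluating the
# distance term with the PGA coefficients from the COEFFS table below, for a
# made-up magnitude and a couple of Joyner-Boore distances.
def _distance_term_example():
    C = {'c1': -1.9400, 'c2': 0.4130, 'h': 10.322, 'c3': 0.000134}  # PGA row
    mag = 6.0
    rjb = np.array([10.0, 30.0])
    rval = np.sqrt(rjb ** 2 + C['h'] ** 2)
    return (C['c1'] + C['c2'] * (mag - 5.0)) * np.log10(rval / 1.0) \
        - C['c3'] * (rval - 1.0)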
def _compute_magnitude(rup, C):
"""
    Compute the third term of equation 1:
e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh
e1 + b3 * (M-Mh) otherwise
"""
m_h = 6.75
b_3 = 0.0
if rup.mag <= m_h:
return C["e1"] + (C['b1'] * (rup.mag - m_h)) +\
(C['b2'] * (rup.mag - m_h) ** 2)
else:
return C["e1"] + (b_3 * (rup.mag - m_h))
def _get_delta(coeffs, imt, mag):
# Get the coefficients needed to compute the delta used for scaling
tmp = coeffs['a']*mag**2. + coeffs['b']*mag + coeffs['c']
return tmp
def _get_fault_type_dummy_variables(rup):
"""
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
    Rake angles within 30 degrees of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given.
"""
U, SS, NS, RS = 0, 0, 0, 0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
SS = 1
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
RS = 1
else:
# normal
NS = 1
return U, SS, NS, RS
def _get_mechanism(rup, C):
"""
    Compute the fifth term of equation 1 (the style-of-faulting term):
    get the fault type dummy variables, see Table 1.
"""
U, SS, NS, RS = _get_fault_type_dummy_variables(rup)
return C['f1'] * NS + C['f2'] * RS + C['f3'] * SS
def _get_site_amplification(sites, C):
"""
    Compute the fourth term of equation 1 (the site amplification term):
The functional form Fs in Eq. (1) represents the site amplification and
it is given by FS = sj Cj , for j = 1,...,5, where sj are the
coefficients to be determined through the regression analysis,
while Cj are dummy variables used to denote the five different EC8
site classes
"""
ssa, ssb, ssc, ssd, sse = _get_site_type_dummy_variables(sites)
return (C['sA'] * ssa) + (C['sB'] * ssb) + (C['sC'] * ssc) + \
(C['sD'] * ssd) + (C['sE'] * sse)
def _get_site_type_dummy_variables(sites):
"""
Get site type dummy variables, five different EC8 site classes
    The recording sites are classified into 5 classes,
based on the shear wave velocity intervals in the uppermost 30 m, Vs30,
according to the EC8 (CEN 2003):
class A: Vs30 > 800 m/s
class B: Vs30 = 360 − 800 m/s
class C: Vs30 = 180 - 360 m/s
class D: Vs30 < 180 m/s
class E: 5 to 20 m of C- or D-type alluvium underlain by
stiffer material with Vs30 > 800 m/s.
"""
ssa = np.zeros(len(sites.vs30))
ssb = np.zeros(len(sites.vs30))
ssc = np.zeros(len(sites.vs30))
ssd = np.zeros(len(sites.vs30))
sse = np.zeros(len(sites.vs30))
# Class E Vs30 = 0 m/s. We fixed this value to define class E
idx = (np.fabs(sites.vs30) < 1E-10)
sse[idx] = 1.0
# Class D; Vs30 < 180 m/s.
idx = (sites.vs30 >= 1E-10) & (sites.vs30 < 180.0)
ssd[idx] = 1.0
    # Class C; 180 m/s <= Vs30 < 360 m/s.
idx = (sites.vs30 >= 180.0) & (sites.vs30 < 360.0)
ssc[idx] = 1.0
    # Class B; 360 m/s <= Vs30 < 800 m/s.
idx = (sites.vs30 >= 360.0) & (sites.vs30 < 800)
ssb[idx] = 1.0
    # Class A; Vs30 >= 800 m/s.
idx = (sites.vs30 >= 800.0)
ssa[idx] = 1.0
return ssa, ssb, ssc, ssd, sse
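# Illustrative sketch, not part of the original GMPE module: mapping a few
# Vs30 values to the EC8 class dummy variables. The stand-in sites object is
# a hypothetical container exposing only a `vs30` array.
def _site_class_dummy_example():
    class _FakeSites(object):
        vs30 = np.array([900.0, 500.0, 250.0, 100.0, 0.0])
    ssa, ssb, ssc, ssd, sse = _get_site_type_dummy_variables(_FakeSites())
    # ssa flags class A (900 m/s), ssb class B (500), ssc class C (250),
    # ssd class D (100) and sse class E (the Vs30 == 0 convention above).
    return ssa, ssb, ssc, ssd, sse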
def _get_stddevs(C, stddev_types, num_sites):
"""
Return standard deviations as defined in table 1.
"""
stddevs = []
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
stddevs.append(C['SigmaTot'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(C['SigmaW'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['SigmaB'] + np.zeros(num_sites))
return stddevs
class BindiEtAl2011(GMPE):
"""
Implements GMPE developed by D.Bindi, F.Pacor, L.Luzi, R.Puglia,
M.Massa, G. Ameri, R. Paolucci and published as "Ground motion
prediction equations derived from the Italian strong motion data",
Bull Earthquake Eng, DOI 10.1007/s10518-011-9313-z.
SA are given up to 2 s.
The regressions are developed considering the geometrical mean of the
as-recorded horizontal components
"""
#: Supported tectonic region type is 'active shallow crust' because the
#: equations have been derived from data from Italian database ITACA, as
#: explained in the 'Introduction'.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Set of :mod:`intensity measure types <openquake.hazardlib.imt>`
#: this GSIM can calculate. A set should contain classes from module
#: :mod:`openquake.hazardlib.imt`.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, PGV, SA}
#: Supported intensity measure component is the geometric mean of two
#: horizontal components
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation types are inter-event, intra-event
#: and total, page 1904
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
#: Required site parameter is only Vs30
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameters are magnitude and rake (eq. 1).
REQUIRES_RUPTURE_PARAMETERS = {'rake', 'mag'}
    #: Required distance measure is Rjb (eq. 1).
REQUIRES_DISTANCES = {'rjb'}
sgn = 0
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
imean = (_compute_magnitude(rup, C) +
_compute_distance(rup, dists, C) +
_get_site_amplification(sites, C) +
_get_mechanism(rup, C))
istddevs = _get_stddevs(C, stddev_types, len(sites.vs30))
# Convert units to g,
# but only for PGA and SA (not PGV):
if imt.string.startswith(('PGA', 'SA')):
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
# Return stddevs in terms of natural log scaling
stddevs = np.log(10.0 ** np.array(istddevs))
# mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g)
if self.sgn:
mean += self.sgn * _get_delta(self.COEFFS_DELTA[imt], imt, rup.mag)
return mean, stddevs
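        # Illustrative note, not part of the original module: the regression
        # predicts log10 of acceleration in cm/s^2, so the conversion above is
        # ln((10 ** (imean - 2)) / g): divide by 100 to get m/s^2, divide by g
        # to express the value in units of g, then switch from log10 to the
        # natural log. For a made-up imean of 2.5, np.log((10.0 ** 0.5) / g)
        # is roughly -1.13.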
#: Coefficients from SA from Table 1
#: Coefficients from PGA e PGV from Table 5
COEFFS = CoeffsTable(sa_damping=5, table="""
IMT e1 c1 c2 h c3 b1 b2 sA sB sC sD sE f1 f2 f3 f4 SigmaB SigmaW SigmaTot
pgv 2.305 -1.5170 0.3260 7.879 0.000000 0.2360 -0.00686 0.0 0.2050 0.269 0.321 0.428 -0.0308 0.0754 -0.0446 0.0 0.194 0.270 0.332
pga 3.672 -1.9400 0.4130 10.322 0.000134 -0.2620 -0.07070 0.0 0.1620 0.240 0.105 0.570 -0.0503 0.1050 -0.0544 0.0 0.172 0.290 0.337
0.04 3.725 -1.9760 0.4220 9.445 0.000270 -0.3150 -0.07870 0.0 0.1610 0.240 0.060 0.614 -0.0442 0.1060 -0.0615 0.0 0.154 0.307 0.343
0.07 3.906 -2.0500 0.4460 9.810 0.000758 -0.3750 -0.07730 0.0 0.1540 0.235 0.057 0.536 -0.0454 0.1030 -0.0576 0.0 0.152 0.324 0.358
0.10 3.796 -1.7940 0.4150 9.500 0.002550 -0.2900 -0.06510 0.0 0.1780 0.247 0.037 0.599 -0.0656 0.1110 -0.0451 0.0 0.154 0.328 0.363
0.15 3.799 -1.5210 0.3200 9.163 0.003720 -0.0987 -0.05740 0.0 0.1740 0.240 0.148 0.740 -0.0755 0.1230 -0.0477 0.0 0.179 0.318 0.365
0.20 3.750 -1.3790 0.2800 8.502 0.003840 0.0094 -0.05170 0.0 0.1560 0.234 0.115 0.556 -0.0733 0.1060 -0.0328 0.0 0.209 0.320 0.382
0.25 3.699 -1.3400 0.2540 7.912 0.003260 0.0860 -0.04570 0.0 0.1820 0.245 0.154 0.414 -0.0568 0.1100 -0.0534 0.0 0.212 0.308 0.374
0.30 3.753 -1.4140 0.2550 8.215 0.002190 0.1240 -0.04350 0.0 0.2010 0.244 0.213 0.301 -0.0564 0.0877 -0.0313 0.0 0.218 0.290 0.363
0.35 3.600 -1.3200 0.2530 7.507 0.002320 0.1540 -0.04370 0.0 0.2200 0.257 0.243 0.235 -0.0523 0.0905 -0.0382 0.0 0.221 0.283 0.359
0.40 3.549 -1.2620 0.2330 6.760 0.002190 0.2250 -0.04060 0.0 0.2290 0.255 0.226 0.202 -0.0565 0.0927 -0.0363 0.0 0.210 0.279 0.349
0.45 3.550 -1.2610 0.2230 6.775 0.001760 0.2920 -0.03060 0.0 0.2260 0.271 0.237 0.181 -0.0597 0.0886 -0.0289 0.0 0.204 0.284 0.350
0.50 3.526 -1.1810 0.1840 5.992 0.001860 0.3840 -0.02500 0.0 0.2180 0.280 0.263 0.168 -0.0599 0.0850 -0.0252 0.0 0.203 0.283 0.349
0.60 3.561 -1.2300 0.1780 6.382 0.001140 0.4360 -0.02270 0.0 0.2190 0.296 0.355 0.142 -0.0559 0.0790 -0.0231 0.0 0.203 0.283 0.348
0.70 3.485 -1.1720 0.1540 5.574 0.000942 0.5290 -0.01850 0.0 0.2100 0.303 0.496 0.134 -0.0461 0.0896 -0.0435 0.0 0.212 0.283 0.354
0.80 3.325 -1.1150 0.1630 4.998 0.000909 0.5450 -0.02150 0.0 0.2100 0.304 0.621 0.150 -0.0457 0.0795 -0.0338 0.0 0.213 0.284 0.355
0.90 3.318 -1.1370 0.1540 5.231 0.000483 0.5630 -0.02630 0.0 0.2120 0.315 0.680 0.154 -0.0351 0.0715 -0.0364 0.0 0.214 0.286 0.357
1.00 3.264 -1.1140 0.1400 5.002 0.000254 0.5990 -0.02700 0.0 0.2210 0.332 0.707 0.152 -0.0298 0.0660 -0.0362 0.0 0.222 0.283 0.360
1.25 2.896 -0.9860 0.1730 4.340 0.000783 0.5790 -0.03360 0.0 0.2440 0.365 0.717 0.183 -0.0207 0.0614 -0.0407 0.0 0.227 0.290 0.368
1.50 2.675 -0.9600 0.1920 4.117 0.000802 0.5750 -0.03530 0.0 0.2510 0.375 0.667 0.203 -0.0140 0.0505 -0.0365 0.0 0.218 0.303 0.373
1.75 2.584 -1.0060 0.2050 4.505 0.000427 0.5740 -0.03710 0.0 0.2520 0.357 0.593 0.220 0.00154 0.0370 -0.0385 0.0 0.219 0.305 0.376
2.00 2.537 -1.0090 0.1930 4.373 0.000164 0.5970 -0.03670 0.0 0.2450 0.352 0.540 0.226 0.00512 0.0350 -0.0401 0.0 0.211 0.308 0.373
2.50 2.425 -1.0290 0.1790 4.484 -0.000348 0.6550 -0.02620 0.0 0.2440 0.336 0.460 0.229 0.00561 0.0275 -0.0331 0.0 0.212 0.309 0.375
2.75 2.331 -1.0430 0.1830 4.581 -0.000617 0.6780 -0.01820 0.0 0.2320 0.335 0.416 0.232 0.01350 0.0263 -0.0398 0.0 0.203 0.310 0.370
4.00 2.058 -1.0840 0.2000 4.876 -0.000843 0.6740 -0.00621 0.0 0.1950 0.300 0.350 0.230 0.02950 0.0255 -0.0550 0.0 0.197 0.300 0.359
""")
COEFFS_DELTA = CoeffsTable(sa_damping=5, table="""
imt a b c
pga 0.101 -1.136 3.555
pgv 0.066 -0.741 2.400
0.05 0.105 -1.190 3.691
0.1 0.112 -1.284 4.001
0.15 0.094 -1.033 3.177
0.2 0.085 -0.907 2.831
0.3 0.086 -0.927 2.869
0.4 0.088 -0.974 3.076
0.5 0.083 -0.916 2.933
0.75 0.073 -0.808 2.628
1.00 0.066 -0.736 2.420
2.00 0.041 -0.512 1.888
3.00 0.050 -0.616 2.193
4.00 0.076 -0.906 3.046
""")
class BindiEtAl2011Ita19Low(BindiEtAl2011):
"""
Implements the lower term of the ITA19 backbone model.
"""
sgn = -1
class BindiEtAl2011Ita19Upp(BindiEtAl2011):
"""
Implements the upper term of the ITA19 backbone model.
"""
sgn = +1
|
agpl-3.0
| 1,384,862,206,094,783,200
| 46.006623
| 194
| 0.551564
| false
| 2.61581
| false
| false
| false
|
yuichi-nadawaki/sakurakocity
|
sakurakocity/plugins/listen.py
|
1
|
3366
|
# -*- coding: utf-8 -*-
from slackbot.bot import listen_to
from slackbot.bot import respond_to
import random
from .dictionaries import *
import datetime
@listen_to('らこしてぃ|さく|らこすて')
def rakosute(message):
message.send(random.choice(['なんだ?', 'よんだ?']))
@listen_to('よしよし')
def yoshiyoshi(message):
message.send(random.choice(['よしよしまきゎ']))
@listen_to('ちゎ|ちわ|ちぁ|ちあ')
def chiwa(message):
message.send(random.choice(['ちゎ!']))
@listen_to('のゎ|まきゎ|まきわ|のわ|のゎ|ちゎしてぃ|のゎしてぃ|のゎたしてぃ')
def nowa(message):
message.send(random.choice(['ちゎしてぃ!']))
@listen_to('らふこふ')
def listen(message):
message.send('らこしてぃだぞ')
@listen_to('ありがと')
def thankyou(message):
message.react('まきちゎ')
@listen_to('user_info')
def user_info(message):
user = get_user(message)
message.send(str(user))
@listen_to('しごおわ')
def shigoowa(message):
user = get_user(message)
message.send(user_dict[user['name']] + 'おつかれさまきゎだぞ。:こちたまん:')
@listen_to('結婚して|けっこんして|marrige')
@respond_to('結婚して|けっこんして|marrige')
def marrige_count(message):
diff_d = diff_day(day_dict['marrige_day'], datetime.date.today())
message.send('結婚して' + str(diff_d) + u'日だぞ。')
print(diff_year(day_dict['marrige_day'], datetime.date.today()))
@listen_to('付き合って|つきあって|couple|カップル')
@respond_to('付き合って|つきあって|couple|カップル')
def couple_count(message):
diff_d = diff_day(day_dict['couple_day'], datetime.date.today())
message.send('付き合って' + str(diff_d) + u'日だぞ。')
@listen_to('何の日|なんのひ')
@respond_to('何の日|なんのひ')
def what_day(message):
today = datetime.date.today()
if today.month == 3 and today.day == 7:
message.send('記念日だぞ')
if today.month == 10 and today.day == 10:
message.send('プロポーズの日だぞ')
if today.month == 2 and today.day == 4:
message.send('結婚式の日だぞ')
if today.month == 1 and today.day == 1:
message.send('まきちゎの誕生日だぞ')
if today.month == 1 and today.day == 13:
message.send('ゆきちゎの誕生日だぞ')
else:
message.send('ん?')
@listen_to('anniv')
@respond_to('anniv')
def anniversary(message):
message.send(str(day_dict))
@listen_to('何日目')
@respond_to('何日目')
def day_count(message):
diff_couple = diff_day(day_dict['couple_day'], datetime.date.today())
diff_marrige = diff_day(day_dict['marrige_day'], datetime.date.today())
message.send('付き合って' + str(diff_couple + 1) + u'日目、結婚して' + str(diff_marrige + 1) + u'日目だぞ。')
def diff_day(d1: datetime.date, d2: datetime.date) -> int:
if d1 > d2:
d1, d2 = d2, d1
return (d2 - d1).days
def diff_month(d1: datetime.date, d2: datetime.date) -> int:
if d1 > d2:
d1, d2 = d2, d1
return (d2.year - d1.year) * 12 + d2.month - d1.month
def diff_year(d1: datetime.date, d2: datetime.date) -> float:
if d1 > d2:
d1, d2 = d2, d1
diff_m = (d2.year - d1.year) * 12 + d2.month - d1.month
return diff_m/12
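# Illustrative sketch, not part of the original plugin: the three diff helpers
# above applied to a pair of made-up dates, showing the day/month/year
# arithmetic (diff_year returns a fraction of a year, not an integer).
def _diff_example():
    d1 = datetime.date(2020, 1, 15)
    d2 = datetime.date(2021, 3, 10)
    return diff_day(d1, d2), diff_month(d1, d2), diff_year(d1, d2)
    # -> (420, 14, 1.1666...)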
|
apache-2.0
| 2,115,304,670,349,832,200
| 28.061224
| 96
| 0.638343
| false
| 2.048921
| false
| false
| false
|
hillwithsmallfields/qs
|
financial/finperiodic.py
|
1
|
2706
|
#!/usr/bin/python
# Program to detect periodic payments and spot gaps in them
import argparse
import csv
import datetime
import os
import qsutils
# See notes in finconv.py for config file format
secs_per_day = 24 * 60 * 60
def finperiodic_setup(app_data, input_format):
return ['payee'], {}
def finperiodic_row(timestamp, row, output_rows, scratch):
timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
payee = row['payee']
amount = row.get('amount',
row.get('debits',
row.get('credits')))
if payee in scratch:
scratch[payee][timestamp] = amount
else:
scratch[payee] = {timestamp: amount}
def finperiodic_tidyup(columns, rows, scratch):
for payee, transactions in scratch.iteritems():
# print payee, transactions
dates = sorted(transactions.keys())
prev = dates[0]
intervals = []
for when in dates[1:]:
interval = int((when-prev).total_seconds() / secs_per_day)
if interval > 0: # ignore further transactions on the same day
intervals.append(interval)
prev = when
if len(intervals) > 1:
counts = {}
for interval in intervals:
counts[interval] = counts.get(interval, 0) + 1
print payee
for k in sorted(counts.keys()):
print " ", k, counts[k]
            total = float(sum(counts.values()))
            approx_weekly = sum(counts.get(k, 0) for k in range(6, 8)) / total
            approx_monthly = sum(counts.get(k, 0) for k in range(26, 34)) / total
print "approx_weekly", approx_weekly
print "approx_monthly", approx_monthly
return None, None
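# Illustrative sketch, not part of the original script: how the interval
# histogram built above behaves on a toy series of transaction dates. The
# dates are made up; a roughly monthly payee clusters around 29-31 days.
def _interval_histogram_example():
    dates = [datetime.datetime(2016, m, 1) for m in range(1, 6)]
    intervals = [int((b - a).total_seconds() / secs_per_day)
                 for a, b in zip(dates, dates[1:])]
    counts = {}
    for interval in intervals:
        counts[interval] = counts.get(interval, 0) + 1
    return counts  # -> {31: 2, 29: 1, 30: 1}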
def main():
parser = qsutils.program_argparser()
parser.add_argument("input_files", nargs='*')
args = parser.parse_args()
config = qsutils.program_load_config(args)
parser.add_argument("-o", "--output")
parser.add_argument("-f", "--format",
default=None)
parser.add_argument("input_file")
args = parser.parse_args()
# todo: deduce format of input file; should normally be financisto, or have similar data
qsutils.process_fin_csv({'args': args,
'config': qsutils.load_config(
args.verbose,
None,
None,
qsutils.program_load_config(args),
*args.config or ())},
finperiodic_setup,
finperiodic_row,
finperiodic_tidyup)
if __name__ == "__main__":
main()
|
gpl-3.0
| -6,405,568,724,214,841,000
| 32.825
| 92
| 0.538803
| false
| 4.208398
| true
| false
| false
|
channprj/wiki.chann.kr-source
|
plugin/pelican-page-hierarchy/page_hierarchy.py
|
1
|
3165
|
from pelican import signals, contents
import os.path
from copy import copy
from itertools import chain
'''
This plugin creates a URL hierarchy for pages that matches the
directory hierarchy of their sources.
'''
class UnexpectedException(Exception): pass
def get_path(page, settings):
''' Return the dirname relative to PAGE_PATHS prefix. '''
path = os.path.split(page.get_relative_source_path())[0] + '/'
path = path.replace( os.path.sep, '/' )
# Try to lstrip the longest prefix first
for prefix in sorted(settings['PAGE_PATHS'], key=len, reverse=True):
if not prefix.endswith('/'): prefix += '/'
if path.startswith(prefix):
return path[len(prefix):-1]
raise UnexpectedException('Page outside of PAGE_PATHS ?!?')
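# Illustrative sketch, not part of the original plugin: what get_path()
# returns for a hypothetical page source living under PAGE_PATHS = ['pages'].
# The fake page object only needs get_relative_source_path(), which is all
# get_path() uses.
def _get_path_example():
    class _FakePage(object):
        @staticmethod
        def get_relative_source_path():
            return 'pages/guides/install.md'
    return get_path(_FakePage(), {'PAGE_PATHS': ['pages']})  # -> 'guides'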
def in_default_lang(page):
# page.in_default_lang property is undocumented (=unstable) interface
return page.lang == page.settings['DEFAULT_LANG']
def override_metadata(content_object):
if type(content_object) is not contents.Page:
return
page = content_object
path = get_path(page, page.settings)
def _override_value(page, key):
metadata = copy(page.metadata)
# We override the slug to include the path up to the filename
metadata['slug'] = os.path.join(path, page.slug)
        # We have to account for non-default languages and pick the matching
        # setting, e.g. PAGE_SAVE_AS or PAGE_LANG_SAVE_AS
infix = '' if in_default_lang(page) else 'LANG_'
return page.settings['PAGE_' + infix + key.upper()].format(**metadata)
for key in ('save_as', 'url'):
if not hasattr(page, 'override_' + key):
setattr(page, 'override_' + key, _override_value(page, key))
def set_relationships(generator):
def _all_pages():
return chain(generator.pages, generator.translations)
# initialize parents and children lists
for page in _all_pages():
page.parent = None
page.parents = []
page.children = []
# set immediate parents and children
for page in _all_pages():
# Parent of /a/b/ is /a/, parent of /a/b.html is /a/
parent_url = os.path.dirname(page.url[:-1])
if parent_url: parent_url += '/'
for page2 in _all_pages():
if page2.url == parent_url and page2 != page:
page.parent = page2
page2.children.append(page)
# If no parent found, try the parent of the default language page
if not page.parent and not in_default_lang(page):
for page2 in generator.pages:
if (page.slug == page2.slug and
os.path.dirname(page.source_path) ==
os.path.dirname(page2.source_path)):
# Only set the parent but not the children, obviously
page.parent = page2.parent
# set all parents (ancestors)
for page in _all_pages():
p = page
while p.parent:
page.parents.insert(0, p.parent)
p = p.parent
def register():
signals.content_object_init.connect(override_metadata)
signals.page_generator_finalized.connect(set_relationships)
|
mit
| -176,065,135,195,253,900
| 36.235294
| 78
| 0.624013
| false
| 3.897783
| false
| false
| false
|
epinna/weevely3
|
tests/test_file_read.py
|
1
|
2238
|
from tests.base_test import BaseTest
from testfixtures import log_capture
from tests import config
from core.sessions import SessionURL
from core import modules
from core import messages
import subprocess
import tempfile
import datetime
import logging
import os
def setUpModule():
subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_read/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER"
echo -n 'OK' > "$BASE_FOLDER/ok.test"
echo -n 'KO' > "$BASE_FOLDER/ko.test"
# Set ko.test to ---x--x--x 0111 execute, should be no readable
chmod 0111 "$BASE_FOLDER/ko.test"
""".format(
config = config
), shell=True)
class FileRead(BaseTest):
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
self.run_argv = modules.loaded['file_read'].run_argv
def test_read_php(self):
# Simple download
self.assertEqual(self.run_argv(['test_file_read/ok.test']), b'OK')
        # Download binary. Skip the content check since the remote content is unknown, and
# the md5 check is already done inside file_download.
self.assertTrue(self.run_argv(['/bin/ls']))
# Download of an unreadable file
self.assertEqual(self.run_argv(['test_file_read/ko.test']), None)
        # Download of a nonexistent remote file
self.assertEqual(self.run_argv(['bogus']), None)
def test_read_allvectors(self):
for vect in modules.loaded['file_download'].vectors.get_names():
self.assertEqual(self.run_argv(['-vector', vect, 'test_file_read/ok.test']), b'OK')
def test_read_sh(self):
# Simple download
self.assertEqual(self.run_argv(['-vector', 'base64', 'test_file_read/ok.test']), b'OK')
        # Download binary. Skip the content check since the remote content is unknown, and
# the md5 check is already done inside file_download.
self.assertTrue(self.run_argv(['-vector', 'base64', '/bin/ls']))
# Download of an unreadable file
self.assertEqual(self.run_argv(['-vector', 'base64', 'test_file_read/ko.test']), None)
        # Download of a nonexistent remote file
self.assertEqual(self.run_argv(['-vector', 'base64', 'bogus']), None)
|
gpl-3.0
| 322,385,826,608,075,500
| 31.434783
| 95
| 0.663539
| false
| 3.586538
| true
| false
| false
|
valdt/tallefjant
|
labbar/lab4/tictactoe_functions.py
|
1
|
1496
|
# -*- coding: utf-8 -*-
import random,sys,time
def isBoxBusy(gamePlan,row, col, EMPTY):
if gamePlan[row][col] == EMPTY:
return False
return True
def computerSelectABox(gamePlan,sign,EMPTY):
size = len(gamePlan)
print("\n---Datorns tur ("+str(sign)+")---")
row = random.randrange(0,size)
col = random.randrange(0,size)
while isBoxBusy(gamePlan, row, col,EMPTY):
row = random.randrange(0,size)
col = random.randrange(0,size)
print("Ange raden:",end = " ")
sys.stdout.flush()
time.sleep(0.6)
print(row)
print("Ange kolumnen:",end = " ")
sys.stdout.flush()
time.sleep(1)
print(col)
time.sleep(0.6)
return row,col
def count(spelplan,x,y, xr, yr, tecken):
if -1<x+xr<len(spelplan) and -1<y+yr<len(spelplan):
if spelplan[x+xr][y+yr] != tecken :
return 0
else:
return 1+count(spelplan,x+xr,y+yr,xr,yr,tecken)
else:
return 0
def lookForWinner(spelplan,x,y,VINRAD):
t=spelplan[x][y]
if (count(spelplan,x,y,1,0,t) + count(spelplan,x,y,-1,0,t)+1>=VINRAD):
return True
if (count(spelplan,x,y,0,1,t) + count(spelplan,x,y,0,-1,t)+1>=VINRAD):
return True
if (count(spelplan,x,y,1,1,t) + count(spelplan,x,y,-1,-1,t)+1>=VINRAD):
return True
if (count(spelplan,x,y,-1,1,t) + count(spelplan,x,y,1,-1,t)+1>=VINRAD):
return True
else: return False
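# Illustrative sketch, not part of the original lab code: how count() and
# lookForWinner() detect a horizontal three-in-a-row. The board, coordinates
# and win length below are made-up example values.
def _look_for_winner_example():
    EMPTY = ' '
    plan = [['X', 'X', 'X'],
            [EMPTY, 'O', EMPTY],
            ['O', EMPTY, 'O']]
    # From the middle 'X' at row 0, column 1, each horizontal ray counts one
    # more 'X', so 1 + 1 + 1 >= 3 and lookForWinner reports a win.
    return lookForWinner(plan, 0, 1, 3)  # -> True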
if __name__=="__main__":
pass
|
gpl-3.0
| 6,601,901,868,171,264,000
| 27.788462
| 79
| 0.576872
| false
| 2.633803
| false
| false
| false
|
Schluucht/Destiny
|
destiny/test/test_api_call.py
|
1
|
3437
|
import destiny.settings as settings
from destiny.main.api_call import do_query, get_challenger, get_league_by_summoner, get_acount_id, get_matchlist, \
get_match, get_timeline, get_champion
import pytest
from destiny.main.destinyexception import DestinyApiCallException
@pytest.fixture
def id_summoner():
return 56947948
@pytest.fixture
def id_account():
return 209493252
@pytest.fixture
def id_match():
return 3181575441
def test_do_query():
"""
Tests `api_call.do_query` function.
Use the function against prepared urls and check that the returned results are not empty.
"""
urls = {
"timelines":
settings.REGION + "/lol/match/v3/timelines/by-match/3181575441?api_key=" + settings.API_KEY,
"matches":
settings.REGION + "/lol/match/v3/matches/3181575441?api_key=" + settings.API_KEY,
"summoners":
settings.REGION + "/lol/summoner/v3/summoners/56947948?api_key=" + settings.API_KEY,
"matchlist":
settings.REGION + "/lol/match/v3/matchlists/by-account/209493252/recent?api_key=" + settings.API_KEY
}
for _type, url in urls.items():
assert len(do_query(url)) > 0
with pytest.raises(DestinyApiCallException) as DE401:
url_401 = "https://euw1.api.riotgames.com//lol/unauthorized/"
do_query(url_401)
assert DE401.value.err_code == 401
with pytest.raises(DestinyApiCallException) as DE404:
url_404 = "https://euw1.api.riotgames.com//lol/match/v3/matches/31815751235441?api_key=" + settings.API_KEY
do_query(url_404)
assert DE404.value.err_code == 404
with pytest.raises(DestinyApiCallException) as DE403:
url_403 = "https://euw1.api.riotgames.com//lol/match/v3/matches/31815751235441?api_key=invalid"
do_query(url_403)
assert DE403.value.err_code == 403
def test_get_challenger():
"""
Tests `api_call.get_challenger()` function.
Tests if the returned dict contains something.
:return:
"""
assert len(get_challenger()) > 0
def test_get_league_by_summoner(id_summoner):
"""
API documentation: https://developer.riotgames.com/api-methods/#league-v3/GET_getAllLeaguesForSummoner
:param id_summoner:
:return:
"""
assert len(get_league_by_summoner(id_summoner)) > 0
def test_get_acount_id(id_summoner):
"""
API documentation: https://developer.riotgames.com/api-methods/#summoner-v3/GET_getBySummonerId
:param id_summoner:
:return:
"""
assert len(get_acount_id(id_summoner)) > 0
def test_get_matchlist(id_account):
"""
API documentation: https://developer.riotgames.com/api-methods/#match-v3/GET_getRecentMatchlist
:param id_account:
:return:
"""
assert len(get_matchlist(id_account)) > 0
def test_get_match(id_match):
"""
API documentation: https://developer.riotgames.com/api-methods/#match-v3/GET_getMatch
:param id_match:
:return:
"""
assert len(get_match(id_match)) > 0
def test_get_timeline(id_match):
"""
API documentation: https://developer.riotgames.com/api-methods/#match-v3/GET_getMatchTimeline
:param id_match:
:return:
"""
assert len(get_timeline(id_match)) > 0
def test_get_champion():
"""
API documentation: https://developer.riotgames.com/api-methods/#static-data-v3/GET_getChampionList
:return:
"""
assert len(get_champion()) > 0
|
mit
| -6,181,509,911,528,919,000
| 26.496
| 115
| 0.668606
| false
| 3.06875
| true
| false
| false
|
vassilux/odin
|
pyodin/sys/tools/asterisk.py
|
1
|
1268
|
#
#
#
#
import sys
import os
import socket
import fcntl
import struct
import subprocess
#asterisk bin place
ASTERISK_BIN="/usr/sbin/asterisk"
def _run_asterisk_command(command):
pipe = subprocess.Popen(['/usr/sbin/asterisk', '-nrx', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = pipe.communicate()[0]
try:
pipe.terminate()
except:
pass
return result
def get_asterisk_version():
result = _run_asterisk_command("core show version")
version = result.split(" ")[1]
return version
def get_asterisk_times():
result = _run_asterisk_command("core show uptime")
uptime="0"
reloadtime = "0"
try:
uptime = result.split("\n")[0].split(":")[1]
reloadtime = result.split("\n")[1].split(":")[1]
except Exception, e:
pass
else:
pass
finally:
pass
info = {}
info['uptime'] = uptime
info['reloadtime'] = reloadtime
return info
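# Illustrative sketch, not part of the original module: the kind of text that
# "core show uptime" prints and how the split("\n") / split(":") parsing above
# slices it. The sample output is a made-up example, not captured output.
def _uptime_parse_example():
    result = "System uptime: 2 weeks, 3 days\nLast reload: 1 hour, 5 minutes"
    uptime = result.split("\n")[0].split(":")[1]
    reloadtime = result.split("\n")[1].split(":")[1]
    return uptime, reloadtime  # -> (' 2 weeks, 3 days', ' 1 hour, 5 minutes')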
def get_asterisk_active_channels():
pass
def get_asterisk_calls():
result = _run_asterisk_command("core show calls")
active="-1"
processed="-1"
try:
active = result.split("\n")[0].split(" ")[0]
processed = result.split("\n")[1].split(" ")[0]
except Exception, e:
pass
else:
pass
finally:
pass
info = {}
info['active'] = active
info['processed'] = processed
return info
|
mit
| -5,730,188,208,835,077,000
| 17.376812
| 113
| 0.671136
| false
| 2.786813
| false
| false
| false
|
gnachman/iTerm2
|
api/library/python/iterm2/iterm2/mainmenu.py
|
1
|
23375
|
"""Defines interfaces for accessing menu items."""
import enum
import iterm2.api_pb2
import iterm2.rpc
import typing
class MenuItemException(Exception):
"""A problem was encountered while selecting a menu item."""
class MenuItemState:
"""Describes the current state of a menu item."""
def __init__(self, checked: bool, enabled: bool):
self.__checked = checked
self.__enabled = enabled
@property
def checked(self):
"""Is the menu item checked? A `bool` property."""
return self.__checked
@property
def enabled(self):
"""
Is the menu item enabled (i.e., it can be selected)? A `bool`
property.
"""
return self.__enabled
class MenuItemIdentifier:
def __init__(self, title, identifier):
self.__title = title
self.__identifier = identifier
@property
def title(self) -> str:
return self.__title
@property
def identifier(self) -> typing.Optional[str]:
return self.__identifier
def _encode(self):
# Encodes to a key binding parameter.
if self.__identifier is None:
return self.__title
return self.__title + "\n" + self.__identifier
class MainMenu:
"""Represents the app's main menu."""
@staticmethod
async def async_select_menu_item(connection, identifier: str):
"""Selects a menu item.
:param identifier: A string. See list of identifiers in :doc:`menu_ids`
:throws MenuItemException: if something goes wrong.
.. seealso:: Example ":ref:`zoom_on_screen_example`"
"""
response = await iterm2.rpc.async_menu_item(
connection, identifier, False)
status = response.menu_item_response.status
# pylint: disable=no-member
if status != iterm2.api_pb2.MenuItemResponse.Status.Value("OK"):
raise MenuItemException(
iterm2.api_pb2.MenuItemResponse.Status.Name(status))
@staticmethod
async def async_get_menu_item_state(
connection, identifier: str) -> MenuItemState:
"""Queries a menu item for its state.
:param identifier: A string. See list of identifiers in :doc:`menu_ids`
:throws MenuItemException: if something goes wrong.
"""
response = await iterm2.rpc.async_menu_item(
connection, identifier, True)
status = response.menu_item_response.status
# pylint: disable=no-member
if status != iterm2.api_pb2.MenuItemResponse.Status.Value("OK"):
raise MenuItemException(
iterm2.api_pb2.MenuItemResponse.Status.Name(status))
return iterm2.MenuItemState(
response.menu_item_response.checked,
response.menu_item_response.enabled)
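    # Illustrative usage sketch, not part of the original module: selecting a
    # menu item and reading its state from an iTerm2 Python API script. The
    # menu titles come from the identifier tables below; the entry point shown
    # is the usual iterm2.run_until_complete() pattern.
    #
    #     import iterm2
    #
    #     async def main(connection):
    #         await iterm2.MainMenu.async_select_menu_item(
    #             connection, "Toggle Full Screen")
    #         state = await iterm2.MainMenu.async_get_menu_item_state(
    #             connection, "Use Transparency")
    #         print(state.checked, state.enabled)
    #
    #     iterm2.run_until_complete(main)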
class iTerm2(enum.Enum):
ABOUT_ITERM2 = MenuItemIdentifier("About iTerm2", "About iTerm2")
SHOW_TIP_OF_THE_DAY = MenuItemIdentifier("Show Tip of the Day", "Show Tip of the Day")
CHECK_FOR_UPDATES = MenuItemIdentifier("Check For Updates…", "Check For Updates…")
TOGGLE_DEBUG_LOGGING = MenuItemIdentifier("Toggle Debug Logging", "Toggle Debug Logging")
COPY_PERFORMANCE_STATS = MenuItemIdentifier("Copy Performance Stats", "Copy Performance Stats")
CAPTURE_GPU_FRAME = MenuItemIdentifier("Capture GPU Frame", "Capture Metal Frame")
PREFERENCES = MenuItemIdentifier("Preferences...", "Preferences...")
HIDE_ITERM2 = MenuItemIdentifier("Hide iTerm2", "Hide iTerm2")
HIDE_OTHERS = MenuItemIdentifier("Hide Others", "Hide Others")
SHOW_ALL = MenuItemIdentifier("Show All", "Show All")
SECURE_KEYBOARD_ENTRY = MenuItemIdentifier("Secure Keyboard Entry", "Secure Keyboard Entry")
MAKE_ITERM2_DEFAULT_TERM = MenuItemIdentifier("Make iTerm2 Default Term", "Make iTerm2 Default Term")
MAKE_TERMINAL_DEFAULT_TERM = MenuItemIdentifier("Make Terminal Default Term", "Make Terminal Default Term")
INSTALL_SHELL_INTEGRATION = MenuItemIdentifier("Install Shell Integration", "Install Shell Integration")
QUIT_ITERM2 = MenuItemIdentifier("Quit iTerm2", "Quit iTerm2")
class Shell(enum.Enum):
NEW_WINDOW = MenuItemIdentifier("New Window", "New Window")
NEW_WINDOW_WITH_CURRENT_PROFILE = MenuItemIdentifier("New Window with Current Profile", "New Window with Current Profile")
NEW_TAB = MenuItemIdentifier("New Tab", "New Tab")
NEW_TAB_WITH_CURRENT_PROFILE = MenuItemIdentifier("New Tab with Current Profile", "New Tab with Current Profile")
DUPLICATE_TAB = MenuItemIdentifier("Duplicate Tab", "Duplicate Tab")
SPLIT_HORIZONTALLY_WITH_CURRENT_PROFILE = MenuItemIdentifier("Split Horizontally with Current Profile", "Split Horizontally with Current Profile")
SPLIT_VERTICALLY_WITH_CURRENT_PROFILE = MenuItemIdentifier("Split Vertically with Current Profile", "Split Vertically with Current Profile")
SPLIT_HORIZONTALLY = MenuItemIdentifier("Split Horizontally…", "Split Horizontally…")
SPLIT_VERTICALLY = MenuItemIdentifier("Split Vertically…", "Split Vertically…")
SAVE_SELECTED_TEXT = MenuItemIdentifier("Save Selected Text…", "Save Selected Text…")
CLOSE = MenuItemIdentifier("Close", "Close")
CLOSE_TERMINAL_WINDOW = MenuItemIdentifier("Close Terminal Window", "Close Terminal Window")
CLOSE_ALL_PANES_IN_TAB = MenuItemIdentifier("Close All Panes in Tab", "Close All Panes in Tab")
UNDO_CLOSE = MenuItemIdentifier("Undo Close", "Undo Close")
class BroadcastInput(enum.Enum):
SEND_INPUT_TO_CURRENT_SESSION_ONLY = MenuItemIdentifier("Send Input to Current Session Only", "Broadcast Input.Send Input to Current Session Only")
BROADCAST_INPUT_TO_ALL_PANES_IN_ALL_TABS = MenuItemIdentifier("Broadcast Input to All Panes in All Tabs", "Broadcast Input.Broadcast Input to All Panes in All Tabs")
BROADCAST_INPUT_TO_ALL_PANES_IN_CURRENT_TAB = MenuItemIdentifier("Broadcast Input to All Panes in Current Tab", "Broadcast Input.Broadcast Input to All Panes in Current Tab")
TOGGLE_BROADCAST_INPUT_TO_CURRENT_SESSION = MenuItemIdentifier("Toggle Broadcast Input to Current Session", "Broadcast Input.Toggle Broadcast Input to Current Session")
SHOW_BACKGROUND_PATTERN_INDICATOR = MenuItemIdentifier("Show Background Pattern Indicator", "Broadcast Input.Show Background Pattern Indicator")
class tmux(enum.Enum):
DETACH = MenuItemIdentifier("Detach", "tmux.Detach")
FORCE_DETACH = MenuItemIdentifier("Force Detach", "tmux.Force Detach")
NEW_TMUX_WINDOW = MenuItemIdentifier("New Tmux Window", "tmux.New Tmux Window")
NEW_TMUX_TAB = MenuItemIdentifier("New Tmux Tab", "tmux.New Tmux Tab")
PAUSE_PANE = MenuItemIdentifier("Pause Pane", "trmux.Pause Pane")
DASHBOARD = MenuItemIdentifier("Dashboard", "tmux.Dashboard")
PAGE_SETUP = MenuItemIdentifier("Page Setup...", "Page Setup...")
class Print(enum.Enum):
SCREEN = MenuItemIdentifier("Screen", "Print.Screen")
SELECTION = MenuItemIdentifier("Selection", "Print.Selection")
BUFFER = MenuItemIdentifier("Buffer", "Print.Buffer")
class Edit(enum.Enum):
UNDO = MenuItemIdentifier("Undo", "Undo")
REDO = MenuItemIdentifier("Redo", "Redo")
CUT = MenuItemIdentifier("Cut", "Cut")
COPY = MenuItemIdentifier("Copy", "Copy")
COPY_WITH_STYLES = MenuItemIdentifier("Copy with Styles", "Copy with Styles")
COPY_WITH_CONTROL_SEQUENCES = MenuItemIdentifier("Copy with Control Sequences", "Copy with Control Sequences")
COPY_MODE = MenuItemIdentifier("Copy Mode", "Copy Mode")
PASTE = MenuItemIdentifier("Paste", "Paste")
class PasteSpecial(enum.Enum):
ADVANCED_PASTE = MenuItemIdentifier("Advanced Paste…", "Paste Special.Advanced Paste…")
PASTE_SELECTION = MenuItemIdentifier("Paste Selection", "Paste Special.Paste Selection")
PASTE_FILE_BASE64ENCODED = MenuItemIdentifier("Paste File Base64-Encoded", "Paste Special.Paste File Base64-Encoded")
PASTE_SLOWLY = MenuItemIdentifier("Paste Slowly", "Paste Special.Paste Slowly")
PASTE_FASTER = MenuItemIdentifier("Paste Faster", "Paste Special.Paste Faster")
PASTE_SLOWLY_FASTER = MenuItemIdentifier("Paste Slowly Faster", "Paste Special.Paste Slowly Faster")
PASTE_SLOWER = MenuItemIdentifier("Paste Slower", "Paste Special.Paste Slower")
PASTE_SLOWLY_SLOWER = MenuItemIdentifier("Paste Slowly Slower", "Paste Special.Paste Slowly Slower")
WARN_BEFORE_MULTILINE_PASTE = MenuItemIdentifier("Warn Before Multi-Line Paste", "Paste Special.Warn Before Multi-Line Paste")
PROMPT_TO_CONVERT_TABS_TO_SPACES_WHEN_PASTING = MenuItemIdentifier("Prompt to Convert Tabs to Spaces when Pasting", "Paste Special.Prompt to Convert Tabs to Spaces when Pasting")
LIMIT_MULTILINE_PASTE_WARNING_TO_SHELL_PROMPT = MenuItemIdentifier("Limit Multi-Line Paste Warning to Shell Prompt", "Paste Special.Limit Multi-Line Paste Warning to Shell Prompt")
WARN_BEFORE_PASTING_ONE_LINE_ENDING_IN_A_NEWLINE_AT_SHELL_PROMPT = MenuItemIdentifier("Warn Before Pasting One Line Ending in a Newline at Shell Prompt", "Paste Special.Warn Before Pasting One Line Ending in a Newline at Shell Prompt")
OPEN_SELECTION = MenuItemIdentifier("Open Selection", "Open Selection")
JUMP_TO_SELECTION = MenuItemIdentifier("Jump to Selection", "Find.Jump to Selection")
SELECT_ALL = MenuItemIdentifier("Select All", "Select All")
SELECTION_RESPECTS_SOFT_BOUNDARIES = MenuItemIdentifier("Selection Respects Soft Boundaries", "Selection Respects Soft Boundaries")
SELECT_OUTPUT_OF_LAST_COMMAND = MenuItemIdentifier("Select Output of Last Command", "Select Output of Last Command")
SELECT_CURRENT_COMMAND = MenuItemIdentifier("Select Current Command", "Select Current Command")
class Find(enum.Enum):
FIND = MenuItemIdentifier("Find...", "Find.Find...")
FIND_NEXT = MenuItemIdentifier("Find Next", "Find.Find Next")
FIND_PREVIOUS = MenuItemIdentifier("Find Previous", "Find.Find Previous")
USE_SELECTION_FOR_FIND = MenuItemIdentifier("Use Selection for Find", "Find.Use Selection for Find")
FIND_GLOBALLY = MenuItemIdentifier("Find Globally...", "Find.Find Globally...")
FIND_URLS = MenuItemIdentifier("Find URLs", "Find.Find URLs")
class MarksandAnnotations(enum.Enum):
SET_MARK = MenuItemIdentifier("Set Mark", "Marks and Annotations.Set Mark")
JUMP_TO_MARK = MenuItemIdentifier("Jump to Mark", "Marks and Annotations.Jump to Mark")
NEXT_MARK = MenuItemIdentifier("Next Mark", "Marks and Annotations.Next Mark")
PREVIOUS_MARK = MenuItemIdentifier("Previous Mark", "Marks and Annotations.Previous Mark")
ADD_ANNOTATION_AT_CURSOR = MenuItemIdentifier("Add Annotation at Cursor", "Marks and Annotations.Add Annotation at Cursor")
NEXT_ANNOTATION = MenuItemIdentifier("Next Annotation", "Marks and Annotations.Next Annotation")
PREVIOUS_ANNOTATION = MenuItemIdentifier("Previous Annotation", "Marks and Annotations.Previous Annotation")
class Alerts(enum.Enum):
ALERT_ON_NEXT_MARK = MenuItemIdentifier("Alert on Next Mark", "Marks and Annotations.Alerts.Alert on Next Mark")
SHOW_MODAL_ALERT_BOX = MenuItemIdentifier("Show Modal Alert Box", "Marks and Annotations.Alerts.Show Modal Alert Box")
POST_NOTIFICATION = MenuItemIdentifier("Post Notification", "Marks and Annotations.Alerts.Post Notification")
CLEAR_BUFFER = MenuItemIdentifier("Clear Buffer", "Clear Buffer")
CLEAR_SCROLLBACK_BUFFER = MenuItemIdentifier("Clear Scrollback Buffer", "Clear Scrollback Buffer")
CLEAR_TO_START_OF_SELECTION = MenuItemIdentifier("Clear to Start of Selection", "Clear to Start of Selection")
CLEAR_TO_LAST_MARK = MenuItemIdentifier("Clear to Last Mark", "Clear to Last Mark")
class View(enum.Enum):
SHOW_TABS_IN_FULLSCREEN = MenuItemIdentifier("Show Tabs in Fullscreen", "Show Tabs in Fullscreen")
TOGGLE_FULL_SCREEN = MenuItemIdentifier("Toggle Full Screen", "Toggle Full Screen")
USE_TRANSPARENCY = MenuItemIdentifier("Use Transparency", "Use Transparency")
ZOOM_IN_ON_SELECTION = MenuItemIdentifier("Zoom In on Selection", "Zoom In on Selection")
ZOOM_OUT = MenuItemIdentifier("Zoom Out", "Zoom Out")
FIND_CURSOR = MenuItemIdentifier("Find Cursor", "Find Cursor")
SHOW_CURSOR_GUIDE = MenuItemIdentifier("Show Cursor Guide", "Show Cursor Guide")
SHOW_TIMESTAMPS = MenuItemIdentifier("Show Timestamps", "Show Timestamps")
SHOW_ANNOTATIONS = MenuItemIdentifier("Show Annotations", "Show Annotations")
AUTO_COMMAND_COMPLETION = MenuItemIdentifier("Auto Command Completion", "Auto Command Completion")
COMPOSER = MenuItemIdentifier("Composer", "Composer")
OPEN_QUICKLY = MenuItemIdentifier("Open Quickly", "Open Quickly")
MAXIMIZE_ACTIVE_PANE = MenuItemIdentifier("Maximize Active Pane", "Maximize Active Pane")
MAKE_TEXT_BIGGER = MenuItemIdentifier("Make Text Bigger", "Make Text Bigger")
MAKE_TEXT_NORMAL_SIZE = MenuItemIdentifier("Make Text Normal Size", "Make Text Normal Size")
RESTORE_TEXT_AND_SESSION_SIZE = MenuItemIdentifier("Restore Text and Session Size", "Restore Text and Session Size")
MAKE_TEXT_SMALLER = MenuItemIdentifier("Make Text Smaller", "Make Text Smaller")
SIZE_CHANGES_UPDATE_PROFILE = MenuItemIdentifier("Size Changes Update Profile", "Size Changes Update Profile")
START_INSTANT_REPLAY = MenuItemIdentifier("Start Instant Replay", "Start Instant Replay")
class Session(enum.Enum):
EDIT_SESSION = MenuItemIdentifier("Edit Session…", "Edit Session…")
RUN_COPROCESS = MenuItemIdentifier("Run Coprocess…", "Run Coprocess…")
STOP_COPROCESS = MenuItemIdentifier("Stop Coprocess", "Stop Coprocess")
RESTART_SESSION = MenuItemIdentifier("Restart Session", "Restart Session")
OPEN_AUTOCOMPLETE = MenuItemIdentifier("Open Autocomplete…", "Open Autocomplete…")
OPEN_COMMAND_HISTORY = MenuItemIdentifier("Open Command History…", "Open Command History…")
OPEN_RECENT_DIRECTORIES = MenuItemIdentifier("Open Recent Directories…", "Open Recent Directories…")
OPEN_PASTE_HISTORY = MenuItemIdentifier("Open Paste History…", "Open Paste History…")
class Triggers(enum.Enum):
ADD_TRIGGER = MenuItemIdentifier("Add Trigger…", "Add Trigger")
EDIT_TRIGGERS = MenuItemIdentifier("Edit Triggers", "Edit Triggers")
ENABLE_TRIGGERS_IN_INTERACTIVE_APPS = MenuItemIdentifier("Enable Triggers in Interactive Apps", "Enable Triggers in Interactive Apps")
ENABLE_ALL = MenuItemIdentifier("Enable All", "Triggers.Enable All")
DISABLE_ALL = MenuItemIdentifier("Disable All", "Triggers.Disable All")
RESET = MenuItemIdentifier("Reset", "Reset")
RESET_CHARACTER_SET = MenuItemIdentifier("Reset Character Set", "Reset Character Set")
class Log(enum.Enum):
LOG_TO_FILE = MenuItemIdentifier("Log to File", "Log.Toggle")
IMPORT_RECORDING = MenuItemIdentifier("Import Recording", "Log.ImportRecording")
EXPORT_RECORDING = MenuItemIdentifier("Export Recording", "Log.ExportRecording")
SAVE_CONTENTS = MenuItemIdentifier("Save Contents…", "Log.SaveContents")
class TerminalState(enum.Enum):
ALTERNATE_SCREEN = MenuItemIdentifier("Alternate Screen", "Alternate Screen")
FOCUS_REPORTING = MenuItemIdentifier("Focus Reporting", "Focus Reporting")
MOUSE_REPORTING = MenuItemIdentifier("Mouse Reporting", "Mouse Reporting")
PASTE_BRACKETING = MenuItemIdentifier("Paste Bracketing", "Paste Bracketing")
APPLICATION_CURSOR = MenuItemIdentifier("Application Cursor", "Application Cursor")
APPLICATION_KEYPAD = MenuItemIdentifier("Application Keypad", "Application Keypad")
STANDARD_KEY_REPORTING_MODE = MenuItemIdentifier("Standard Key Reporting Mode", "Terminal State.Standard Key Reporting")
MODIFYOTHERKEYS_MODE_1 = MenuItemIdentifier("modifyOtherKeys Mode 1", "Terminal State.Report Modifiers like xterm 1")
MODIFYOTHERKEYS_MODE_2 = MenuItemIdentifier("modifyOtherKeys Mode 2", "Terminal State.Report Modifiers like xterm 2")
CSI_U_MODE = MenuItemIdentifier("CSI u Mode", "Terminal State.Report Modifiers with CSI u")
RAW_KEY_REPORTING_MODE = MenuItemIdentifier("Raw Key Reporting Mode", "Terminal State.Raw Key Reporting")
RESET = MenuItemIdentifier("Reset", "Reset Terminal State")
BURY_SESSION = MenuItemIdentifier("Bury Session", "Bury Session")
class Scripts(enum.Enum):
class Manage(enum.Enum):
NEW_PYTHON_SCRIPT = MenuItemIdentifier("New Python Script", "New Python Script")
OPEN_PYTHON_REPL = MenuItemIdentifier("Open Python REPL", "Open Interactive Window")
MANAGE_DEPENDENCIES = MenuItemIdentifier("Manage Dependencies…", "Manage Dependencies")
INSTALL_PYTHON_RUNTIME = MenuItemIdentifier("Install Python Runtime", "Install Python Runtime")
REVEAL_SCRIPTS_IN_FINDER = MenuItemIdentifier("Reveal Scripts in Finder", "Reveal in Finder")
IMPORT = MenuItemIdentifier("Import…", "Import Script")
EXPORT = MenuItemIdentifier("Export…", "Export Script")
CONSOLE = MenuItemIdentifier("Console", "Script Console")
class Profiles(enum.Enum):
OPEN_PROFILES = MenuItemIdentifier("Open Profiles…", "Open Profiles…")
PRESS_OPTION_FOR_NEW_WINDOW = MenuItemIdentifier("Press Option for New Window", "Press Option for New Window")
OPEN_IN_NEW_WINDOW = MenuItemIdentifier("Open In New Window", "Open In New Window")
class Toolbelt(enum.Enum):
SHOW_TOOLBELT = MenuItemIdentifier("Show Toolbelt", "Show Toolbelt")
SET_DEFAULT_WIDTH = MenuItemIdentifier("Set Default Width", "Set Default Width")
class Window(enum.Enum):
MINIMIZE = MenuItemIdentifier("Minimize", "Minimize")
ZOOM = MenuItemIdentifier("Zoom", "Zoom")
EDIT_TAB_TITLE = MenuItemIdentifier("Edit Tab Title", "Edit Tab Title")
EDIT_WINDOW_TITLE = MenuItemIdentifier("Edit Window Title", "Edit Window Title")
class WindowStyle(enum.Enum):
NORMAL = MenuItemIdentifier("Normal", "Window Style.Normal")
FULL_SCREEN = MenuItemIdentifier("Full Screen", "Window Style.Full Screen")
MAXIMIZED = MenuItemIdentifier("Maximized", "Window Style.Maximized")
NO_TITLE_BAR = MenuItemIdentifier("No Title Bar", "Window Style.No Title Bar")
FULLWIDTH_BOTTOM_OF_SCREEN = MenuItemIdentifier("Full-Width Bottom of Screen", "Window Style.FullWidth Bottom of Screen")
FULLWIDTH_TOP_OF_SCREEN = MenuItemIdentifier("Full-Width Top of Screen", "Window Style.FullWidth Top of Screen")
FULLHEIGHT_LEFT_OF_SCREEN = MenuItemIdentifier("Full-Height Left of Screen", "Window Style..FullHeight Left of Screen")
FULLHEIGHT_RIGHT_OF_SCREEN = MenuItemIdentifier("Full-Height Right of Screen", "Window Style.FullHeight Right of Screen")
BOTTOM_OF_SCREEN = MenuItemIdentifier("Bottom of Screen", "Window Style.Bottom of Screen")
TOP_OF_SCREEN = MenuItemIdentifier("Top of Screen", "Window Style.Top of Screen")
LEFT_OF_SCREEN = MenuItemIdentifier("Left of Screen", "Window Style.Left of Screen")
RIGHT_OF_SCREEN = MenuItemIdentifier("Right of Screen", "Window Style.Right of Screen")
MERGE_ALL_WINDOWS = MenuItemIdentifier("Merge All Windows", "Merge All Windows")
ARRANGE_WINDOWS_HORIZONTALLY = MenuItemIdentifier("Arrange Windows Horizontally", "Arrange Windows Horizontally")
ARRANGE_SPLIT_PANES_EVENLY = MenuItemIdentifier("Arrange Split Panes Evenly", "Arrange Split Panes Evenly")
MOVE_SESSION_TO_WINDOW = MenuItemIdentifier("Move Session to Window", "Move Session to Window")
SAVE_WINDOW_ARRANGEMENT = MenuItemIdentifier("Save Window Arrangement", "Save Window Arrangement")
SAVE_CURRENT_WINDOW_AS_ARRANGEMENT = MenuItemIdentifier("Save Current Window as Arrangement", "Save Current Window as Arrangement")
class SelectSplitPane(enum.Enum):
SELECT_PANE_ABOVE = MenuItemIdentifier("Select Pane Above", "Select Split Pane.Select Pane Above")
SELECT_PANE_BELOW = MenuItemIdentifier("Select Pane Below", "Select Split Pane.Select Pane Below")
SELECT_PANE_LEFT = MenuItemIdentifier("Select Pane Left", "Select Split Pane.Select Pane Left")
SELECT_PANE_RIGHT = MenuItemIdentifier("Select Pane Right", "Select Split Pane.Select Pane Right")
NEXT_PANE = MenuItemIdentifier("Next Pane", "Select Split Pane.Next Pane")
PREVIOUS_PANE = MenuItemIdentifier("Previous Pane", "Select Split Pane.Previous Pane")
class ResizeSplitPane(enum.Enum):
MOVE_DIVIDER_UP = MenuItemIdentifier("Move Divider Up", "Resize Split Pane.Move Divider Up")
MOVE_DIVIDER_DOWN = MenuItemIdentifier("Move Divider Down", "Resize Split Pane.Move Divider Down")
MOVE_DIVIDER_LEFT = MenuItemIdentifier("Move Divider Left", "Resize Split Pane.Move Divider Left")
MOVE_DIVIDER_RIGHT = MenuItemIdentifier("Move Divider Right", "Resize Split Pane.Move Divider Right")
class ResizeWindow(enum.Enum):
DECREASE_HEIGHT = MenuItemIdentifier("Decrease Height", "Resize Window.Decrease Height")
INCREASE_HEIGHT = MenuItemIdentifier("Increase Height", "Resize Window.Increase Height")
DECREASE_WIDTH = MenuItemIdentifier("Decrease Width", "Resize Window.Decrease Width")
INCREASE_WIDTH = MenuItemIdentifier("Increase Width", "Resize Window.Increase Width")
SELECT_NEXT_TAB = MenuItemIdentifier("Select Next Tab", "Select Next Tab")
SELECT_PREVIOUS_TAB = MenuItemIdentifier("Select Previous Tab", "Select Previous Tab")
MOVE_TAB_LEFT = MenuItemIdentifier("Move Tab Left", "Move Tab Left")
MOVE_TAB_RIGHT = MenuItemIdentifier("Move Tab Right", "Move Tab Right")
PASSWORD_MANAGER = MenuItemIdentifier("Password Manager", "Password Manager")
PIN_HOTKEY_WINDOW = MenuItemIdentifier("Pin Hotkey Window", "Pin Hotkey Window")
BRING_ALL_TO_FRONT = MenuItemIdentifier("Bring All To Front", "Bring All To Front")
class Help(enum.Enum):
ITERM2_HELP = MenuItemIdentifier("iTerm2 Help", "iTerm2 Help")
COPY_MODE_SHORTCUTS = MenuItemIdentifier("Copy Mode Shortcuts", "Copy Mode Shortcuts")
OPEN_SOURCE_LICENSES = MenuItemIdentifier("Open Source Licenses", "Open Source Licenses")
GPU_RENDERER_AVAILABILITY = MenuItemIdentifier("GPU Renderer Availability", "GPU Renderer Availability")
|
gpl-2.0
| -4,293,014,743,592,645,000
| 64.867232
| 247
| 0.692799
| false
| 3.9208
| false
| false
| false
|
hoxmark/TDT4501-Specialization-Project
|
reinforcement/datasets/digit/model.py
|
1
|
4930
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from pprint import pprint
import time
from utils import pairwise_distances, batchify
from config import opt, data, loaders
class SimpleClassifier(nn.Module):
def __init__(self):
super(SimpleClassifier, self).__init__()
self.input_size = 64
# TODO params
self.hidden_size = 256
self.output_size = 10
self.relu = nn.ReLU()
self.fc1 = nn.Linear(self.input_size, self.hidden_size)
self.fc3 = nn.Linear(self.hidden_size, self.output_size)
self.reset()
if opt.cuda:
self.cuda()
def reset(self):
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc3.weight)
def forward(self, inp):
if opt.cuda:
inp = inp.cuda()
output = self.fc1(inp)
output = self.relu(output)
output = self.fc3(output)
return output
def train_model(self, train_data, epochs):
optimizer = optim.Adadelta(self.parameters(), 0.1)
criterion = nn.CrossEntropyLoss()
self.train()
size = len(train_data[0])
if size > 0:
for e in range(epochs):
avg_loss = 0
corrects = 0
for i, (features, targets) in enumerate(batchify(train_data)):
features = torch.FloatTensor(features)
targets = torch.LongTensor(targets)
if opt.cuda:
features, targets = features.cuda(), targets.cuda()
output = self.forward(features)
optimizer.zero_grad()
loss = criterion(output, targets)
loss.backward()
optimizer.step()
avg_loss += loss.item()
corrects += (torch.max(output, 1)
[1].view(targets.size()) == targets).sum()
avg_loss = avg_loss / opt.batch_size
accuracy = 100.0 * corrects / size
def predict_prob(self, inp):
with torch.no_grad():
output = self.forward(inp)
output = torch.nn.functional.softmax(output, dim=1)
return output
def validate(self, data):
corrects, avg_loss = 0, 0
with torch.no_grad():
for i, (features, targets) in enumerate(batchify(data)):
features = torch.FloatTensor(features)
targets = torch.LongTensor(targets)
if opt.cuda:
features = features.cuda()
targets = targets.cuda()
logit = self.forward(features)
loss = torch.nn.functional.cross_entropy(logit, targets, size_average=False)
avg_loss += loss.item()
corrects += (torch.max(logit, 1)[1].view(targets.size()) == targets).sum()
size = len(data[0])
avg_loss = avg_loss / size
accuracy = 100.0 * float(corrects) / float(size)
metrics = {
'accuracy': accuracy,
'avg_loss': avg_loss,
'performance': accuracy
}
return metrics
def performance_validate(self, data):
return self.validate(data)
def get_state(self, index):
img = torch.Tensor(data["train"][0][index])
if opt.cuda:
img = img.cuda()
preds = self.forward(img)
state = torch.cat((img, preds)).view(1, -1)
return state
def encode_episode_data(self):
pass
# images = []
# # for i, (features, targets) in enumerate(loaders["train_loader"]):
# all_states = torch.Tensor(data["train"][0])
# for i, (features, targets) in enumerate(batchify(data["train"])):
# features = Variable(torch.FloatTensor(features))
# preds = self.predict_prob(features)
# images.append(preds)
#
# images = torch.cat(images, dim=0)
#
# # data["all_predictions"] = images
# data["all_states"] = torch.cat((all_states, images.cpu()), dim=1)
def query(self, index):
# current_state = data["all_states"][index].view(1, -1)
# all_states = data["all_states"]
# current_all_dist = pairwise_distances(current_state, all_states)
# similar_indices = torch.topk(current_all_dist, opt.selection_radius, 1, largest=False)[1]
# similar_indices = similar_indices.data[0].cpu().numpy()
# for idx in similar_indices:
self.add_index(index)
return [index]
def add_index(self, index):
image = data["train"][0][index]
caption = data["train"][1][index]
data["active"][0].append(image)
data["active"][1].append(caption)
|
mit
| -8,798,934,896,517,320,000
| 33.236111
| 99
| 0.541988
| false
| 4.027778
| false
| false
| false
|
qgis/QGIS-Django
|
qgis-app/styles/migrations/0002_auto_20201108_0521.py
|
1
|
1337
|
# Generated by Django 2.2 on 2020-11-08 05:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('styles', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='style',
managers=[
],
),
migrations.CreateModel(
name='StyleReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review_date', models.DateTimeField(auto_now_add=True, help_text='The review date. Automatically added on style review.', verbose_name='Reviewed on')),
('reviewer', models.ForeignKey(help_text='The user who reviewed this style.', on_delete=django.db.models.deletion.CASCADE, related_name='styles_reviewed_by', to=settings.AUTH_USER_MODEL, verbose_name='Reviewed by')),
('style', models.ForeignKey(blank=True, help_text='The type of this style, this will automatically be read from the XML file.', null=True, on_delete=django.db.models.deletion.CASCADE, to='styles.Style', verbose_name='Style')),
],
),
]
|
gpl-2.0
| -1,898,174,964,722,884,000
| 43.566667
| 242
| 0.642483
| false
| 4.152174
| false
| false
| false
|
SuperSuperSuperSuper5/everyweak-auto-punch
|
test/mobile_module.py
|
1
|
3052
|
#!/usr/bin/python
#coding:utf-8
import time
import requests
import send_mail
import random_position
import all_headers
import sys
def main(status):
"""
The main function
"""
mail_addr = ""
name = ""
host_addr = "m17708851481@163.com"
# Now we get the token and userid first
get_token_headers = all_headers.gth("get_headers")
#get_token_headers["Content-Length"] = ""
#get_token_headers["User-Agent"] = ""
get_token_data = all_headers.gth("get_data")
#get_token_data["system"] = ""
#get_token_data["password"] = ""
#get_token_data["account"] = ""
#get_token_data["serialNumber"] = ""
#get_token_data["version"] = ""
#get_token_data["model"] = ""
token_req = requests.post("http://www.ddtech.com.cn:7777/mobile/login", headers=get_token_headers, data=get_token_data)
#print(token_req.status_code)
if token_req.status_code == 200:
#print("Get the token is ok")
token = token_req.json()['data'][0]['token']
userid = token_req.json()['data'][0]['userid']
else:
send_mail.send_mail(to_addr=mail_addr, subject="The program want login but failed", text="LOGIN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), token_req.status_code))
send_mail.send_mail(to_addr=host_addr, subject="%s program want login but failed" % name, text="LOGIN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), token_req.status_code))
return 1
# Now we send the clock-in ("da ka") request
pos_headers = all_headers.gth("pos_headers")
#pos_headers["Content-Length"] = ""
#pos_headers["User-Agent"] = ""
position = random_position.get_position()
pos_data = all_headers.gth("pos_data")
pos_data["token"] = token
pos_data["userId"] = userid
pos_data["longitude"] = position[0]
pos_data["latitude"] = position[1]
#pos_data["isStart"] = "%s" % status
#pos_data["from"] = "IOS"
pos_req = requests.post("http://www.ddtech.com.cn:7777/mobile/busUserClock/saveOrUpdateNewUserClock", headers=pos_headers, data=pos_data)
if pos_req.status_code == 200:
send_mail.send_mail(to_addr=mail_addr, subject="Checked in success", text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
send_mail.send_mail(to_addr=host_addr, subject="%s checked in success" % name, text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
else:
send_mail.send_mail(to_addr=mail_addr, subject="Checked in failure", text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
send_mail.send_mail(to_addr=host_addr, subject="%s checked in failure" % name, text="CHECK IN TIME: %s\nHTTP code: %d" % (time.strftime('%Y-%m-%d-%H:%M:%S'), pos_req.status_code))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit(0)
status = sys.argv[1]
if status == "up":
main("1")
elif status == "down":
main("0")
|
gpl-3.0
| -8,474,352,799,188,116,000
| 38.128205
| 197
| 0.612713
| false
| 3.018793
| false
| false
| false
|
kuscsik/naclports
|
lib/naclports/package_index.py
|
1
|
4237
|
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import hashlib
import configuration
import naclports
import package
EXTRA_KEYS = [ 'BIN_URL', 'BIN_SIZE', 'BIN_SHA1' ]
VALID_KEYS = naclports.binary_package.VALID_KEYS + EXTRA_KEYS
REQUIRED_KEYS = naclports.binary_package.REQUIRED_KEYS + EXTRA_KEYS
DEFAULT_INDEX = os.path.join(naclports.NACLPORTS_ROOT, 'lib', 'prebuilt.txt')
def VerifyHash(filename, sha1):
"""Return True if the sha1 of the given file match the sha1 passed in."""
with open(filename) as f:
file_sha1 = hashlib.sha1(f.read()).hexdigest()
return sha1 == file_sha1
def WriteIndex(index_filename, binaries):
"""Create a package index file from set of binaries on disk.
Returns:
A PackageIndex object based on the contents of the newly written file.
"""
# Write index to a temporary file and then rename it, to avoid
# leaving a partial index file on disk.
tmp_name = index_filename + '.tmp'
with open(tmp_name, 'w') as output_file:
for i, (filename, url) in enumerate(binaries):
package = naclports.binary_package.BinaryPackage(filename)
with open(filename) as f:
sha1 = hashlib.sha1(f.read()).hexdigest()
if i != 0:
output_file.write('\n')
output_file.write(package.GetPkgInfo())
output_file.write('BIN_URL=%s\n' % url)
output_file.write('BIN_SIZE=%s\n' % os.path.getsize(filename))
output_file.write('BIN_SHA1=%s\n' % sha1)
os.rename(tmp_name, index_filename)
return IndexFromFile(index_filename)
def IndexFromFile(filename):
with open(filename) as f:
contents = f.read()
return PackageIndex(filename, contents)
def GetCurrentIndex():
return IndexFromFile(DEFAULT_INDEX)
class PackageIndex(object):
"""In memory representation of a package index file.
This class is used to read a package index off disk and store
it in memory as a dictionary keyed on package name + configuration.
"""
def __init__(self, filename, index_data):
self.filename = filename
self.packages = {}
self.ParseIndex(index_data)
def Contains(self, package_name, config):
"""Returns True if the index contains the given package in the given
configuration, False otherwise."""
return (package_name, config) in self.packages
def Installable(self, package_name, config):
"""Returns True if the index contains the given package and it is
installable in the currently configured SDK."""
info = self.packages.get((package_name, config))
if not info:
return False
version = naclports.GetSDKVersion()
if info['BUILD_SDK_VERSION'] != version:
naclports.Trace('Prebuilt package was built with different SDK version: '
'%s vs %s' % (info['BUILD_SDK_VERSION'], version))
return False
return True
def Download(self, package_name, config):
PREBUILT_ROOT = os.path.join(package.PACKAGES_ROOT, 'prebuilt')
if not os.path.exists(PREBUILT_ROOT):
os.makedirs(PREBUILT_ROOT)
info = self.packages[(package_name, config)]
filename = os.path.join(PREBUILT_ROOT, os.path.basename(info['BIN_URL']))
if os.path.exists(filename):
if VerifyHash(filename, info['BIN_SHA1']):
return filename
naclports.Log('Downloading prebuilt binary ...')
naclports.DownloadFile(filename, info['BIN_URL'])
if not VerifyHash(filename, info['BIN_SHA1']):
raise naclports.Error('Unexpected SHA1: %s' % filename)
return filename
def ParseIndex(self, index_data):
if not index_data:
return
for pkg_info in index_data.split('\n\n'):
info = naclports.ParsePkgInfo(pkg_info, self.filename,
VALID_KEYS, EXTRA_KEYS)
debug = info['BUILD_CONFIG'] == 'debug'
config = configuration.Configuration(info['BUILD_ARCH'],
info['BUILD_TOOLCHAIN'],
debug)
key = (info['NAME'], config)
if key in self.packages:
raise naclports.Error('package index contains duplicate: %s' % str(key))
self.packages[key] = info
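# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A hedged example of building an index from (package filename, URL) pairs and then
# querying it. The filename and URL are placeholders; the Configuration arguments
# follow the (arch, toolchain, debug) order used by ParseIndex above.
if __name__ == '__main__':
    demo_binaries = [('out/packages/zlib_1.2.8_x86-64_newlib.tar.bz2',
                      'http://example.com/prebuilt/zlib_1.2.8_x86-64_newlib.tar.bz2')]
    demo_index = WriteIndex('out/prebuilt.txt', demo_binaries)
    demo_config = configuration.Configuration('x86-64', 'newlib', False)
    print(demo_index.Contains('zlib', demo_config))
    print(demo_index.Installable('zlib', demo_config))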
|
bsd-3-clause
| -2,090,408,763,854,018,000
| 34.605042
| 79
| 0.666981
| false
| 3.729754
| true
| false
| false
|
puruckertom/poptox
|
poptox/loons/loons_description.py
|
1
|
1357
|
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import os
class loonsDescriptionPage(webapp.RequestHandler):
def get(self):
text_file2 = open('loons/loons_text.txt','r')
xx = text_file2.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'loons','page':'description'})
html = html + template.render (templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04ubertext_start.html', {
'model_page':'',
'model_attributes':'Loons Population Model','text_paragraph':xx})
html = html + template.render(templatepath + '04ubertext_end.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', loonsDescriptionPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
|
unlicense
| 8,686,122,703,163,753,000
| 45.793103
| 133
| 0.638172
| false
| 3.657682
| false
| false
| false
|
nop33/indico-plugins
|
livesync/indico_livesync/simplify.py
|
1
|
6947
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import itertools
from collections import defaultdict
from sqlalchemy.orm import joinedload
from indico.core.db import db
from indico.modules.categories.models.categories import Category
from indico.modules.events.models.events import Event
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.util.struct.enum import IndicoEnum
from indico_livesync.models.queue import ChangeType, EntryType
class SimpleChange(int, IndicoEnum):
deleted = 1
created = 2
updated = 4
def process_records(records):
"""Converts queue entries into object changes.
:param records: an iterable containing `LiveSyncQueueEntry` objects
:return: a dict mapping object references to `SimpleChange` bitsets
"""
changes = defaultdict(int)
cascaded_update_records = set()
cascaded_delete_records = set()
for record in records:
if record.change != ChangeType.deleted and record.object is None:
# Skip entries which are not deletions but have no corresponding objects.
# Probably they are updates for objects that got deleted afterwards.
continue
if record.change == ChangeType.created:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.created
elif record.change == ChangeType.deleted:
assert record.type != EntryType.category
cascaded_delete_records.add(record)
elif record.change in {ChangeType.moved, ChangeType.protection_changed}:
cascaded_update_records.add(record)
elif record.change == ChangeType.data_changed:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.updated
for obj in _process_cascaded_category_contents(cascaded_update_records):
changes[obj] |= SimpleChange.updated
for obj in _process_cascaded_event_contents(cascaded_delete_records):
changes[obj] |= SimpleChange.deleted
return changes
def _process_cascaded_category_contents(records):
"""
Travel from categories to subcontributions, flattening the whole event structure.
Yields everything that it finds (except for elements whose protection has changed
but are not inheriting their protection settings from anywhere).
:param records: queue records to process
"""
category_prot_records = {rec.category_id for rec in records if rec.type == EntryType.category
and rec.change == ChangeType.protection_changed}
category_move_records = {rec.category_id for rec in records if rec.type == EntryType.category
and rec.change == ChangeType.moved}
changed_events = set()
category_prot_records -= category_move_records # A move already implies sending the whole record
# Protection changes are handled differently, as there may not be the need to re-generate the record
if category_prot_records:
for categ in Category.find(Category.id.in_(category_prot_records)):
cte = categ.get_protection_parent_cte()
# Update only children that inherit
inheriting_categ_children = (Event.query
.join(cte, db.and_((Event.category_id == cte.c.id),
(cte.c.protection_parent == categ.id))))
inheriting_direct_children = Event.find((Event.category_id == categ.id) & Event.is_inheriting)
changed_events.update(itertools.chain(inheriting_direct_children, inheriting_categ_children))
# Add move operations and explicitly-passed event records
if category_move_records:
changed_events.update(Event.find(Event.category_chain_overlaps(category_move_records)))
for elem in _process_cascaded_event_contents(records, additional_events=changed_events):
yield elem
def _process_cascaded_event_contents(records, additional_events=None):
"""
Flatten a series of records into its most basic elements (subcontribution level).
Yields results.
:param records: queue records to process
:param additional_events: events whose content will be included in addition to those
found in records
"""
changed_events = additional_events or set()
changed_contributions = set()
changed_subcontributions = set()
session_records = {rec.session_id for rec in records if rec.type == EntryType.session}
contribution_records = {rec.contrib_id for rec in records if rec.type == EntryType.contribution}
subcontribution_records = {rec.subcontrib_id for rec in records if rec.type == EntryType.subcontribution}
event_records = {rec.event_id for rec in records if rec.type == EntryType.event}
if event_records:
changed_events.update(Event.find(Event.id.in_(event_records)))
for event in changed_events:
yield event
# Sessions are added (explicitly changed only, since they don't need to be sent anywhere)
if session_records:
changed_contributions.update(Contribution
.find(Contribution.session_id.in_(session_records), ~Contribution.is_deleted))
# Contributions are added (implicitly + explicitly changed)
changed_event_ids = {ev.id for ev in changed_events}
condition = Contribution.event_id.in_(changed_event_ids) & ~Contribution.is_deleted
if contribution_records:
condition = db.or_(condition, Contribution.id.in_(contribution_records))
contrib_query = Contribution.find(condition).options(joinedload('subcontributions'))
for contribution in contrib_query:
yield contribution
changed_subcontributions.update(contribution.subcontributions)
# Same for subcontributions
if subcontribution_records:
changed_subcontributions.update(SubContribution
.find(SubContribution.contribution_id.in_(subcontribution_records)))
for subcontrib in changed_subcontributions:
yield subcontrib
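# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# process_records() returns a dict mapping objects to SimpleChange bitsets, so a
# consumer (e.g. a livesync backend agent) tests the flags bitwise. A hedged sketch,
# assuming `queue_entries` is an iterable of LiveSyncQueueEntry objects fetched
# elsewhere and that the index_*/remove_* helpers are hypothetical backend hooks:
#
#     changes = process_records(queue_entries)
#     for obj, flags in changes.iteritems():
#         if flags & SimpleChange.deleted:
#             remove_object(obj)
#         elif flags & SimpleChange.created:
#             index_object(obj)
#         elif flags & SimpleChange.updated:
#             reindex_object(obj)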
|
gpl-3.0
| -887,382,400,869,119,900
| 42.149068
| 115
| 0.702174
| false
| 4.285626
| false
| false
| false
|
stxent/kmodgen
|
packages/sop.py
|
1
|
3193
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sop.py
# Copyright (C) 2016 xent
# Project is distributed under the terms of the GNU General Public License v3.0
import math
from wrlconv import model
import primitives
class SOP:
BODY_CHAMFER = primitives.hmils(0.1)
BODY_OFFSET_Z = primitives.hmils(0.1)
BAND_OFFSET = primitives.hmils(0.0)
BAND_WIDTH = primitives.hmils(0.1)
CHAMFER_RESOLUTION = 1
LINE_RESOLUTION = 1
EDGE_RESOLUTION = 3
@staticmethod
def generate_package_pins(pattern, count, size, offset, pitch):
def make_pin(x, y, angle, number): # pylint: disable=invalid-name
pin = model.Mesh(parent=pattern, name='Pin{:d}'.format(number))
pin.translate([x, y, 0.0])
pin.rotate([0.0, 0.0, 1.0], angle - math.pi / 2.0)
return pin
rows = int(count / 2)
pins = []
# Pins
y_offset = size[1] / 2.0 + offset
for i in range(0, rows):
x_offset = pitch * (i - (rows - 1) / 2.0)
pins.append(make_pin(x_offset, y_offset, math.pi, i + 1 + rows))
pins.append(make_pin(-x_offset, -y_offset, 0.0, i + 1))
return pins
def generate(self, materials, _, descriptor):
body_size = primitives.hmils(descriptor['body']['size'])
pin_height = body_size[2] / 2.0 + SOP.BODY_OFFSET_Z
pin_shape = primitives.hmils(descriptor['pins']['shape'])
band_width_proj = SOP.BAND_WIDTH * math.sqrt(0.5)
body_slope = math.atan(2.0 * band_width_proj / body_size[2])
pin_offset = pin_shape[1] * math.sin(body_slope) / 2.0
body_transform = model.Transform()
body_transform.rotate([0.0, 0.0, 1.0], math.pi)
body_transform.translate([0.0, 0.0, pin_height])
body_mesh = primitives.make_sloped_box(
size=body_size,
chamfer=SOP.BODY_CHAMFER,
slope=math.pi / 4.0,
slope_height=body_size[2] / 5.0,
edge_resolution=SOP.EDGE_RESOLUTION,
line_resolution=SOP.LINE_RESOLUTION,
band=SOP.BAND_OFFSET,
band_width=SOP.BAND_WIDTH)
if 'Body' in materials:
body_mesh.appearance().material = materials['Body']
body_mesh.apply(body_transform)
body_mesh.rename('Body')
pin_mesh = primitives.make_pin_mesh(
pin_shape_size=pin_shape,
pin_height=pin_height + pin_shape[1] * math.cos(body_slope) / 2.0,
pin_length=primitives.hmils(descriptor['pins']['length']) + pin_offset,
pin_slope=math.pi * (10.0 / 180.0),
end_slope=body_slope,
chamfer_resolution=SOP.CHAMFER_RESOLUTION,
edge_resolution=SOP.EDGE_RESOLUTION)
if 'Pin' in materials:
pin_mesh.appearance().material = materials['Pin']
pins = SOP.generate_package_pins(
pattern=pin_mesh,
count=descriptor['pins']['count'],
size=body_size,
offset=band_width_proj - pin_offset,
pitch=primitives.hmils(descriptor['pins']['pitch']))
return pins + [body_mesh]
types = [SOP]
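# --- Illustrative descriptor sketch (editor addition, not part of the original module) ---
# SOP.generate() only reads the descriptor keys used above, so a minimal input looks
# roughly like the following; all dimension values are arbitrary placeholders and are
# converted through primitives.hmils() exactly as in generate():
#
#     demo_descriptor = {
#         'body': {'size': [390, 990, 150]},
#         'pins': {'shape': [42, 20], 'length': 100, 'count': 14, 'pitch': 127},
#     }
#     demo_meshes = SOP().generate({}, None, demo_descriptor)   # returns pins + [body_mesh]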
|
gpl-3.0
| -857,548,429,318,372,200
| 32.968085
| 87
| 0.571876
| false
| 3.244919
| false
| false
| false
|
liberation/django-registration
|
registration/forms.py
|
1
|
5027
|
"""
Forms and validation code for user registration.
Note that all of these forms assume Django's bundled default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
label=_("Username"),
error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_("Email address"))
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput,
label=_("Password (again)"))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
if existing.exists():
raise forms.ValidationError(_("A user with that username already exists."))
else:
return self.cleaned_data['username']
def clean(self):
"""
Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput,
label=_(u'I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
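# --- Illustrative subclass sketch (editor addition, not part of the original module) ---
# As the RegistrationForm docstring notes, subclasses may add fields and validation but
# should not define save(). A hypothetical example layered on the unique-email variant:
class RegistrationFormWithDisplayName(RegistrationFormUniqueEmail):
    """
    Hypothetical subclass, shown for illustration only: requires a display name in
    addition to the unique-email check.
    """
    display_name = forms.CharField(widget=forms.TextInput(attrs=attrs_dict),
                                   label=_("Display name"),
                                   max_length=50)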
|
bsd-3-clause
| -6,836,387,276,208,603,000
| 37.968992
| 141
| 0.623831
| false
| 4.680633
| false
| false
| false
|
Symbian9/ysfs_2_0
|
explode_srf.py
|
1
|
5198
|
import os
import bpy
import bmesh
import mathutils
from bpy.props import (BoolProperty, FloatProperty, StringProperty, EnumProperty)
from bpy_extras.io_utils import (ImportHelper, ExportHelper, unpack_list, unpack_face_list, axis_conversion)
# Information
bl_info = {
'name' : 'YSFS 2.0 - DNM Parts as SRF file',
'description': 'YSFlight scripts | Export all objects in scene to DNM with separated parts as SRF files.',
'author' : 'Symbian9, Mr Mofumofu',
'version' : (2, 0, 1),
'blender' : (2, 75, 0),
'location' : 'File > Import-Export',
'warning' : '',
'wiki_url' : '',
'tracker_url': 'http://github.com/Symbian9/ysfs_2_0/issues/new',
'category' : 'Airplanes 3D',
}
# Export Form
class ExplodeSRF(bpy.types.Operator, ExportHelper):
# Settings
bl_idname = 'export_model.expsrf'
bl_label = 'Export DNM Parts(SURF)'
filter_glob = StringProperty(
default = '*.srf',
options = {'HIDDEN'},
)
check_extension = True
filename_ext = '.srf'
# On Click Save Button
def execute(self, context):
# ==============================
# Getting Data
# ==============================
# Currently Scene
scene = context.scene
# Rotation(Option)
global_matrix = mathutils.Matrix((
(-1.0, 0.0, 0.0, 0.0),
( 0.0, 0.0, 1.0, 0.0),
( 0.0, -1.0, 0.0, 0.0),
( 0.0, 0.0, 0.0, 1.0),
))
# Selected Object
for object in scene.objects:
export(object, self.filepath, global_matrix)
return {'FINISHED'}
def export(object, filepath, global_matrix):
me = object.data
for objects in object.children:
export(objects, filepath, global_matrix)
if isinstance(me, bpy.types.Mesh):
# Convert to BMesh(For N-Sided Polygon)
bm = bmesh.new()
bm.from_mesh(me)
# Rotation(Option)
bm.transform(global_matrix * object.matrix_world)
bm.normal_update()
# Vertexs and Faces
verts = bm.verts
faces = bm.faces
# ==============================
# Output
# ==============================
# Save File
filepath = '{0}/{1}.srf'.format(os.path.dirname(filepath), object.name)
filepath = os.fsencode(filepath)
fp = open(filepath, 'w')
# For Transparent
za = ''
zacount = 0
# Header
fp.write('SURF\n')
# Vertexs
for vert in verts:
fp.write('V {:.4f} {:.4f} {:.4f} '.format(*vert.co))
# Smoothing
smooth = True
for edge in vert.link_edges:
if edge.smooth == False:
smooth = False
break
if smooth:
for face in vert.link_faces:
if face.smooth:
fp.write('R')
break
fp.write('\n')
# Faces
for face in faces:
fp.write('F\n')
# Has Material?
if len(object.material_slots):
# Getting Material
material = object.material_slots[face.material_index].material
# Color
color = material.diffuse_color * 255.0
fp.write('C {:.0f} {:.0f} {:.0f}\n'.format(*color))
# Lighting
if material.emit > 0.0:
fp.write('B\n')
# Transparent
if material.alpha < 1.0:
if zacount == 0:
za = 'ZA {:d} {:.0f}'.format(face.index, (1.0 - material.alpha) * 228.0)
elif zacount % 8 == 0:
za += '\nZA {:d} {:.0f}'.format(face.index, (1.0 - material.alpha) * 228.0)
else:
za += ' {:d} {:.0f}'.format(face.index, (1.0 - material.alpha) * 228.0)
zacount = zacount + 1
# Median and Normal
median = face.calc_center_median_weighted()
normal = -face.normal
fp.write('N {:.4f} {:.4f} {:.4f} '.format(*median))
fp.write('{:.4f} {:.4f} {:.4f}\n'.format(*normal))
# Vertexs consist Face
fp.write('V')
for vid in face.verts:
fp.write(' {:d}'.format(vid.index))
fp.write('\n')
fp.write('E\n')
# Footer
fp.write('E\n')
# For Transparent
if za != '':
fp.write(za + '\n')
# ==============================
# Close
# ==============================
fp.close()
bm.free()
return {'FINISHED'}
# Menu Button
def menu_func_export(self, context):
self.layout.operator(ExplodeSRF.bl_idname, text = 'DNM Parts (.srf)')
# Regist
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export)
# Unregist
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == '__main__':
register()
|
mit
| -1,173,587,664,364,313,600
| 29.940476
| 110
| 0.481147
| false
| 3.602218
| false
| false
| false
|
linsalrob/PyFBA
|
PyFBA/cmd/gapfill_from_roles.py
|
1
|
22366
|
"""
Given a set of roles (e.g. from a genome annotation) can we gap fill those?
Largely based on From_functional_roles_to_gap-filling
"""
import os
import sys
import PyFBA
import argparse
import copy
from PyFBA import log_and_message
def run_eqn(why, md, r2r, med, bme, verbose=False):
"""
Run the fba
:param why: why are we doing this
:param md: modeldata
:param r2r: reactions to run
:param med: media object
:param bme: biomass equation
:param verbose: more output
:type verbose: bool
:return: (value, growth)
"""
status, value, growth = PyFBA.fba.run_fba(md, r2r, med, bme)
log_and_message(f"FBA run {why} has a biomass flux value of {value} --> Growth: {growth}", stderr=verbose)
return value, growth
def minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation, verbose=False):
"""
Sort through all the added reactions and return a dict of new reactions
:param original_reactions_to_run: the original set from our genome
:type original_reactions_to_run: set(PyFBA.metabolism.Reaction)
:param added_reactions: new reactions we need
:type added_reactions: list[(str, set(str)]
:param modeldata: our modeldata object
:type modeldata: PyFBA.model_seed.ModelData
:param media: our media object
:type media: set[PyFBA.metabolism.Compound]
:param biomass_equation: our biomass equation
:type biomass_equation: PyFBA.metabolism.Reaction
:param verbose: more output
:type verbose: bool
:return: A dict of the minimal set of reactions and their source
:rtype: dict[str, str]
"""
reqd_additional = set()
print(f"Before we began, we had {len(original_reactions_to_run)} reactions")
rxn_source = {}
while added_reactions:
ori = copy.deepcopy(original_reactions_to_run)
ori.update(reqd_additional)
# Test next set of gap-filled reactions
# Each set is based on a method described above
how, new = added_reactions.pop()
sys.stderr.write(f"Testing reactions from {how}\n")
# Get all the other gap-filled reactions we need to add
for tple in added_reactions:
ori.update(tple[1])
for r in new:
# remember the source; it doesn't matter if we overwrite here, since earlier methods simply overwrite later ones
rxn_source[r] = how
# Use minimization function to determine the minimal
# set of gap-filled reactions from the current method
new_essential = PyFBA.gapfill.minimize_additional_reactions(ori, new, modeldata, media, biomass_equation,
verbose=True)
log_and_message(f"Saved {len(new_essential)} reactions from {how}", stderr=verbose)
# Record the method used to determine
# how the reaction was gap-filled
for new_r in new_essential:
modeldata.reactions[new_r].is_gapfilled = True
modeldata.reactions[new_r].gapfill_method = how
reqd_additional.update(new_essential)
# add the original set too
for r in original_reactions_to_run:
rxn_source[r] = 'genome prediction'
# Combine old and new reactions and add the source, to return a dict
return {r: rxn_source[r] for r in original_reactions_to_run.union(reqd_additional)}
def roles_to_reactions_to_run(roles, orgtype='gramnegative', verbose=False):
roles_to_reactions = PyFBA.filters.roles_to_reactions(roles, organism_type=orgtype, verbose=verbose)
reactions_to_run = set()
for role in roles_to_reactions:
reactions_to_run.update(roles_to_reactions[role])
log_and_message(f"There are {len(reactions_to_run)} unique reactions associated with this genome", stderr=verbose)
return reactions_to_run
def read_media(mediafile, modeldata, verbose=False):
"""
Read the media file and return a set of compounds
:param modeldata: the modeldata object
:type modeldata: PyFBA.model_seed.ModelData
:param mediafile: the media file to read
:param verbose: more output
:type verbose: bool
:return: a set of media compounds
:rtype: Set[PyFBA.metabolism.Compound]
"""
if mediafile in PyFBA.parse.media_files():
log_and_message(f"parsing media directly from {mediafile}", stderr=verbose)
# pyfba media already corrects the names, so we can just return it.
return PyFBA.parse.pyfba_media(mediafile, modeldata)
elif os.path.exists(mediafile):
log_and_message(f"parsing media file {mediafile}", stderr=verbose)
media = PyFBA.parse.read_media_file(mediafile)
elif 'PYFBA_MEDIA_DIR' in os.environ and os.path.exists(os.path.join(os.environ['PYFBA_MEDIA_DIR'], mediafile)):
log_and_message(f"parsing media file {os.path.join(os.environ['PYFBA_MEDIA_DIR'], mediafile)}", stderr=verbose)
media = PyFBA.parse.read_media_file(os.path.join(os.environ['PYFBA_MEDIA_DIR'], mediafile))
else:
log_and_message(f"Can't figure out how to parse media from {mediafile}", stderr=True, loglevel="CRITICAL")
sys.exit(-1)
return PyFBA.parse.correct_media_names(media, modeldata.compounds)
def update_r2r(old, new, why, verbose=False):
"""
Update the reactions to run and log the changes
:param old: the initial reactions to run
:param new: the new reactions to add
:param why: the step we are at
:param verbose: more output
:return: a set of reactions to run
:rtype: set[str]
"""
before = len(old)
old.update(new)
msg = f"Before updating reactions from {why}: {before} reactions, after {len(old)} reactions"
log_and_message(msg, stderr=verbose)
return old
def run_gapfill_from_roles(roles, reactions_to_run, modeldata, media, orgtype='gramnegative', close_orgs=None,
close_genera=None, verbose=False):
"""
gapfill growth from a set of roles in the genome
:param close_genera: the list of roles in close genera
:param close_orgs: the list of roles in close organisms
:param roles: The set of roles in this genome
:type roles: set[str]
:param reactions_to_run: The reactions to run
:type reactions_to_run: set[str]
:param modeldata: the modeldata object
:type modeldata: PyFBA.model_seed.ModelData
:param media: a set of media compounds
:type media: Set[PyFBA.metabolism.Compound]
:param orgtype: the organism type for the model
:type orgtype: str
:param verbose: more output
:type verbose: bool
:return: a dict of the reactions and what step they were added at
:rtype: dict[str, str]
"""
tempset = set()
for r in reactions_to_run:
if r in modeldata.reactions:
tempset.add(r)
else:
log_and_message(f"Reaction ID {r} is not in our reactions list. Skipped", stderr=verbose)
reactions_to_run = tempset
biomass_equation = PyFBA.metabolism.biomass_equation(orgtype)
run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
added_reactions = []
original_reactions_to_run = copy.deepcopy(reactions_to_run)
#############################################################################################
# Gapfilling #
# #
# We do this in the order: #
# essential reactions: because you need to have these, but it is stronger evidence if #
# your friends have it too! #
# media: because you should be importing everything in the media #
# linked_reactions: because they make sense! #
# closely related organisms: because you should have roles your friends have #
# subsystems: to complete things you already have #
# orphans: to make sure everything is produced/consumed #
# probability: because there are other reactions we can add #
# reactions with proteins: to make sure you can at least grow on the media #
# #
#############################################################################################
#############################################################################################
# ESSENTIAL PROTEINS #
#############################################################################################
log_and_message("Gap filling from Essential Reactions", stderr=verbose)
essential_reactions = PyFBA.gapfill.suggest_essential_reactions()
for r in essential_reactions:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("essential", essential_reactions))
reactions_to_run = update_r2r(reactions_to_run, essential_reactions, "ESSENTIAL REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# LINKED REACTIONS #
#############################################################################################
log_and_message("Gap filling from Linked Reactions", stderr=verbose)
linked_reactions = PyFBA.gapfill.suggest_linked_reactions(modeldata, reactions_to_run)
for r in linked_reactions:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("linked_reactions", linked_reactions))
reactions_to_run = update_r2r(reactions_to_run, linked_reactions, "LINKED REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# EC NUMBERS #
#############################################################################################
log_and_message("Gap filling from limited EC numbers", stderr=verbose)
ecnos = PyFBA.gapfill.suggest_reactions_using_ec(roles, modeldata, reactions_to_run, verbose=verbose)
for r in ecnos:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("ec_numbers_brief", ecnos))
reactions_to_run = update_r2r(reactions_to_run, ecnos, "EC Numbers")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Media import reactions #
#############################################################################################
log_and_message("Gap filling from MEDIA", stderr=verbose)
media_reactions = PyFBA.gapfill.suggest_from_media(modeldata, reactions_to_run, media, verbose=verbose)
added_reactions.append(("media", media_reactions))
reactions_to_run = update_r2r(reactions_to_run, media_reactions, "MEDIA REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Other genomes and organisms #
#############################################################################################
log_and_message("Gap filling from CLOSE GENOMES", stderr=verbose)
if close_orgs:
# add reactions from roles in close genomes
close_reactions = PyFBA.gapfill.suggest_from_roles(close_orgs, modeldata.reactions, threshold=0,
verbose=verbose)
close_reactions.difference_update(reactions_to_run)
added_reactions.append(("close genomes ", close_reactions))
reactions_to_run = update_r2r(reactions_to_run, close_reactions, "CLOSE ORGANISMS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
if close_genera:
# add reactions from roles in similar genera
genus_reactions = PyFBA.gapfill.suggest_from_roles(close_genera, modeldata.reactions, threshold=0,
verbose=verbose)
genus_reactions.difference_update(reactions_to_run)
added_reactions.append(("other genera", genus_reactions))
reactions_to_run = update_r2r(reactions_to_run, genus_reactions, "CLOSE GENERA")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Subsystems #
#############################################################################################
log_and_message("Gap filling from SUBSYSTEMS", stderr=verbose)
subsystem_reactions = PyFBA.gapfill.suggest_reactions_from_subsystems(modeldata.reactions, reactions_to_run,
organism_type=orgtype, threshold=0.5,
verbose=verbose)
added_reactions.append(("subsystems", subsystem_reactions))
reactions_to_run = update_r2r(reactions_to_run, subsystem_reactions, "SUBSYSTEMS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
#############################################################################################
# Orphan compounds #
#############################################################################################
log_and_message("Gap filling from ORPHANS", stderr=verbose)
orphan_compounds = PyFBA.gapfill.suggest_by_compound(modeldata, reactions_to_run, 1)
added_reactions.append(("orphans", orphan_compounds))
reactions_to_run = update_r2r(reactions_to_run, orphan_compounds, "ORPHANS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
# ## Revisit EC Numbers
#
# When we added the EC numbers before, we were a little conservative, only adding those EC numbers that appeared in
# two or fewer (by default) reactions. If we get here, let's be aggressive and add any EC number regardless of how
# many reactions we add. We set the `maxnumrx` variable to 0
#############################################################################################
# EC NUMBERS #
#############################################################################################
log_and_message("Gap filling from limited EC numbers", stderr=verbose)
ecnos = PyFBA.gapfill.suggest_reactions_using_ec(roles, modeldata, reactions_to_run, maxnumrx=0, verbose=verbose)
for r in ecnos:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("ec_numbers_full", ecnos))
reactions_to_run = update_r2r(reactions_to_run, ecnos, "EC Numbers")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
# We revisit linked reactions once more, because now we have many more reactions in our set to run!
#############################################################################################
# LINKED REACTIONS #
#############################################################################################
log_and_message("Gap filling from Linked Reactions", stderr=verbose)
linked_reactions = PyFBA.gapfill.suggest_linked_reactions(modeldata, reactions_to_run)
for r in linked_reactions:
modeldata.reactions[r].reset_bounds()
added_reactions.append(("linked_reactions_full", linked_reactions))
reactions_to_run = update_r2r(reactions_to_run, linked_reactions, "LINKED REACTIONS")
value, growth = run_eqn("Initial", modeldata, reactions_to_run, media, biomass_equation, verbose=verbose)
if growth:
return minimize_reactions(original_reactions_to_run, added_reactions, modeldata, media, biomass_equation,
verbose=verbose)
log_and_message(f"FATAL: After compiling {len(reactions_to_run)} reactions, we still could not get growth",
stderr=True, loglevel='CRITICAL')
return set()
def gapfill_from_roles():
"""
Parse the arguments and start the gapfilling.
"""
orgtypes = ['gramnegative', 'grampositive', 'microbial', 'mycobacteria', 'plant']
parser = argparse.ArgumentParser(description='Run Flux Balance Analysis on a set of gapfilled functional roles')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-r', '--roles', help='A list of functional roles in this genome, one per line')
group.add_argument('-a', '--assigned_functions', help='RAST assigned functions (tab separated PEG/Functional Role)')
group.add_argument('-f', '--features', help='PATRIC features.txt file (with 5 columns)')
parser.add_argument('-o', '--output', help='file to save new reaction list to', required=True)
parser.add_argument('-m', '--media', help='media name', required=True)
parser.add_argument('-t', '--type', default='gramnegative',
help=f'organism type for the model (currently allowed are {orgtypes}). Default=gramnegative')
parser.add_argument('-c', '--close', help='a file with roles from close organisms')
parser.add_argument('-g', '--genera', help='a file with roles from similar genera')
parser.add_argument('-v', '--verbose', help='verbose output', action='store_true')
args = parser.parse_args(sys.argv[2:])
log_and_message(f"Running PyFBA with the parameters: {sys.argv}\n", quiet=True)
model_data = PyFBA.parse.model_seed.parse_model_seed_data(args.type)
if args.roles:
if not os.path.exists(args.roles):
sys.stderr.write(f"FATAL: {args.roles} does not exist. Please check your files\n")
sys.exit(1)
log_and_message(f"Getting the roles from {args.roles}", stderr=args.verbose)
roles = PyFBA.parse.read_functional_roles(args.roles, args.verbose)
elif args.assigned_functions:
if not os.path.exists(args.assigned_functions):
sys.stderr.write(f"FATAL: {args.assigned_functions} does not exist. Please check your files\n")
sys.exit(1)
log_and_message(f"Getting the roles from {args.assigned_functions}", stderr=args.verbose)
roles = PyFBA.parse.assigned_functions_set(args.assigned_functions)
elif args.features:
if not os.path.exists(args.features):
sys.stderr.write(f"FATAL: {args.features} does not exist. Please check your files\n")
sys.exit(1)
log_and_message(f"Getting the roles from {args.features}", stderr=args.verbose)
roles = PyFBA.parse.read_features_file(args.features, args.verbose)
else:
sys.stderr.write("FATAL. Either a roles or functions file must be provided")
sys.exit(1)
reactions_to_run = roles_to_reactions_to_run(roles, args.type, args.verbose)
media = read_media(args.media, model_data, args.verbose)
new_reactions = run_gapfill_from_roles(roles=roles, reactions_to_run=reactions_to_run, modeldata=model_data,
media=media, orgtype=args.type, close_orgs=args.close,
close_genera=args.genera, verbose=args.verbose)
if new_reactions:
with open(args.output, 'w') as out:
for r in new_reactions:
out.write(f"{r}\t{new_reactions[r]}\n")
if __name__ == "__main__":
gapfill_from_roles()
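# --- Illustrative invocation sketch (editor addition, not part of the original module) ---
# argparse above reads sys.argv[2:], i.e. this module expects to be dispatched as a
# subcommand of the PyFBA command-line entry point. A hedged example of the flags it
# accepts (the subcommand name, file names and media name are placeholders):
#
#     pyfba gapfill_roles -r my_genome.roles -m ArgonneLB -t gramnegative \
#         -o gapfilled_reactions.tsv -v
#
# The output file receives one "reaction_id<TAB>source" line per reaction returned by
# run_gapfill_from_roles().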
|
mit
| 6,733,707,812,166,747,000
| 51.874704
| 120
| 0.573281
| false
| 4.036456
| false
| false
| false
|
damianpv/sfotipy
|
sfotipy/urls.py
|
1
|
1434
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from artists.views import ArtistDetailView, ArtistListView
from django.contrib import admin
admin.autodiscover()
from rest_framework import routers
from artists.views import ArtistViewSet
from albums.views import AlbumViewSet
from tracks.views import TrackViewSet
router = routers.DefaultRouter()
router.register(r'artists', ArtistViewSet)
router.register(r'albums', AlbumViewSet)
router.register(r'tracks', TrackViewSet)
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'sfotipy.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
url(r'^admin/', include(admin.site.urls)),
url(r'^tracks/(?P<title>[\w\-\W]+)/', 'tracks.views.track_view', name='track_view'),
#url(r'^tracks/(?P<title>[\w\-]+)/', 'tracks.views.track_view', name='track_view'),
url(r'^signup/', 'userprofiles.views.signup', name='signup'),
url(r'^signin/', 'userprofiles.views.signin', name='signin'),
url(r'^artists/(?P<pk>[\d]+)', ArtistDetailView.as_view()),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, }),
)
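# --- Illustrative note (editor addition, not part of the original module) ---
# The DefaultRouter registrations above expose the standard DRF list/detail routes,
# plus a browsable API root at /api/:
#
#     /api/artists/    /api/artists/<pk>/
#     /api/albums/     /api/albums/<pk>/
#     /api/tracks/     /api/tracks/<pk>/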
|
mit
| -1,419,620,475,134,443,000
| 37.756757
| 108
| 0.684798
| false
| 3.342657
| false
| true
| false
|
thoas/django-sequere
|
setup.py
|
1
|
1329
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
version = __import__('sequere').__version__
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, 'README.rst')) as f:
README = f.read()
setup(
name='django-sequere',
version=version,
description='A Django application to implement a follow system and a timeline using multiple backends (db, redis, etc.)',
long_description=README,
author='Florent Messa',
author_email='florent.messa@gmail.com',
url='http://github.com/thoas/django-sequere',
zip_safe=False,
include_package_data=True,
keywords='django libraries settings redis follow timeline'.split(),
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
extras_require={
'redis': ['redis'],
'nydus': ['nydus'],
},
install_requires=['six'],
tests_require=['coverage', 'exam', 'celery', 'nydus'],
packages=find_packages(exclude=['tests']),
)
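# --- Illustrative note (editor addition, not part of the original file) ---
# The extras_require entries above let the optional backends be installed on demand,
# for example:
#
#     pip install django-sequere[redis]
#     pip install django-sequere[nydus]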
|
mit
| 5,351,943,238,205,519,000
| 31.414634
| 125
| 0.623777
| false
| 3.818966
| false
| true
| false
|
our-city-app/oca-backend
|
src/solutions/common/integrations/cirklo/api.py
|
1
|
13752
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import cloudstorage
import logging
from babel.dates import format_datetime
from datetime import datetime
from google.appengine.ext import ndb, deferred, db
from typing import List
from xlwt import Worksheet, Workbook, XFStyle
from mcfw.cache import invalidate_cache
from mcfw.consts import REST_TYPE_TO
from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from rogerthat.bizz.gcs import get_serving_url
from rogerthat.bizz.service import re_index_map_only
from rogerthat.consts import FAST_QUEUE, HIGH_LOAD_WORKER_QUEUE
from rogerthat.models import ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.rpc.users import get_current_session
from rogerthat.utils import parse_date
from rogerthat.utils.cloud_tasks import schedule_tasks, create_task
from rogerthat.utils.service import create_service_identity_user
from shop.models import Customer
from solutions import translate
from solutions.common.bizz import SolutionModule, broadcast_updates_pending
from solutions.common.bizz.campaignmonitor import send_smart_email_without_check
from solutions.common.consts import OCA_FILES_BUCKET
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \
list_whitelisted_merchants, list_cirklo_cities
from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \
SignupMails, CirkloAppInfo
from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \
WhitelistVoucherServiceTO
from solutions.common.restapi.services import _check_is_city
def _check_permission(city_sln_settings):
if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules:
raise HttpForbiddenException()
if len(city_sln_settings.modules) != 1:
_check_is_city(city_sln_settings.service_user)
@rest('/common/vouchers/cities', 'get', silent_result=True)
@returns([dict])
@arguments(staging=bool)
def api_list_cirklo_cities(staging=False):
return list_cirklo_cities(staging)
@rest('/common/vouchers/services', 'get', silent_result=True)
@returns(CirkloVoucherListTO)
@arguments()
def get_cirklo_vouchers_services():
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
to = CirkloVoucherListTO()
to.total = 0
to.results = []
to.cursor = None
to.more = False
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())
if not cirklo_city:
return to
cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id)
cirklo_dict = {}
cirklo_emails = []
for merchant in cirklo_merchants:
if merchant['email'] in cirklo_emails:
logging.error('Duplicate found %s', merchant['email'])
continue
cirklo_emails.append(merchant['email'])
cirklo_dict[merchant['email']] = merchant
qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant]
osa_merchants = []
merchants_to_put = []
for merchant in qry:
if merchant.service_user_email:
osa_merchants.append(merchant)
else:
cirklo_merchant = cirklo_dict.get(merchant.data['company']['email'])
changed = merchant.populate_from_cirklo(cirklo_merchant)
if changed:
merchants_to_put.append(merchant)
if cirklo_merchant:
if merchant.data['company']['email'] in cirklo_emails:
cirklo_emails.remove(merchant.data['company']['email'])
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
to.results.append(
CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant.registered, u'Cirklo signup'))
if osa_merchants:
customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants]
customers_dict = {customer.id: customer for customer in db.get(customer_to_get)}
info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT)
for merchant in osa_merchants]
models = ndb.get_multi(info_keys)
for service_info, merchant in zip(models, osa_merchants):
customer = customers_dict[merchant.customer_id]
if not customer.service_user:
merchant.key.delete()
continue
cirklo_merchant = cirklo_dict.get(customer.user_email)
changed = merchant.populate_from_cirklo(cirklo_merchant)
if changed:
merchants_to_put.append(merchant)
if cirklo_merchant:
if customer.user_email in cirklo_emails:
cirklo_emails.remove(customer.user_email)
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant.registered, u'OSA signup')
service_to.populate_from_info(service_info, customer)
to.results.append(service_to)
if merchants_to_put:
logging.debug('Updating merchants: %s', merchants_to_put)
ndb.put_multi(merchants_to_put)
tasks = [create_task(re_index_map_only, create_service_identity_user(users.User(merchant.service_user_email)))
for merchant in merchants_to_put if merchant.service_user_email]
schedule_tasks(tasks, HIGH_LOAD_WORKER_QUEUE)
for email in cirklo_emails:
cirklo_merchant = cirklo_dict[email]
to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant))
return to
@rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO)
@returns(CirkloVoucherServiceTO)
@arguments(data=WhitelistVoucherServiceTO)
def whitelist_voucher_service(data):
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity
if not cirklo_city:
raise HttpNotFoundException('No cirklo settings found.')
is_cirklo_only_merchant = '@' not in data.id
if is_cirklo_only_merchant:
merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant
language = merchant.get_language()
else:
merchant = CirkloMerchant.create_key(data.id).get()
language = get_solution_settings(users.User(merchant.service_user_email)).main_language
if data.accepted:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
raise HttpBadRequestException(
'The "Signup accepted" email for the language %s is not configured yet' % language)
whitelist_merchant(cirklo_city.city_id, data.email)
else:
email_id = cirklo_city.get_signup_denied_mail(language)
if not email_id:
raise HttpBadRequestException(
'The "Signup denied" email for the language %s is not configured yet' % language)
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None
if not is_cirklo_only_merchant:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get()
customer = Customer.get_by_id(merchant.customer_id) # type: Customer
if data.accepted:
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup')
to.populate_from_info(service_info, customer)
return to
else:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup')
@rest('/common/vouchers/cirklo', 'get')
@returns(CirkloCityTO)
@arguments()
def api_vouchers_get_cirklo_settings():
service_user = users.get_current_user()
city = CirkloCity.get_by_service_email(service_user.email())
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo', 'put')
@returns(CirkloCityTO)
@arguments(data=CirkloCityTO)
def api_vouchers_save_cirklo_settings(data):
service_user = users.get_current_user()
if not get_current_session().shop:
lang = get_solution_settings(service_user).main_language
raise HttpForbiddenException(translate(lang, 'no_permission'))
other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity
if not data.city_id:
if other_city:
other_city.key.delete()
return CirkloCityTO.from_model(None)
key = CirkloCity.create_key(data.city_id)
city = key.get()
if not city:
city = CirkloCity(key=key, service_user_email=service_user.email())
elif city.service_user_email != service_user.email():
raise HttpBadRequestException('City id %s is already in use by another service' % data.city_id)
if other_city and other_city.key != key:
other_city.key.delete()
invalidate_cache(get_city_id_by_service_email, service_user.email())
city.logo_url = data.logo_url
city.signup_enabled = data.signup_enabled
city.signup_logo_url = data.signup_logo_url
city.signup_names = None
city.signup_mail = SignupMails.from_to(data.signup_mail)
if data.signup_name_nl and data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_fr)
elif data.signup_name_nl:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_nl)
elif data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr,
fr=data.signup_name_fr)
og_info = city.app_info and city.app_info.to_dict()
info = CirkloAppInfo(enabled=data.app_info.enabled,
title=data.app_info.title,
buttons=data.app_info.buttons)
sln_settings = get_solution_settings(service_user)
if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only():
city.app_info = info
sln_settings.updates_pending = True
sln_settings.put()
broadcast_updates_pending(sln_settings)
city.put()
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo/export', 'post')
@returns(dict)
@arguments()
def api_export_cirklo_services():
service_user = users.get_current_user()
city_sln_settings = get_solution_settings(service_user)
_check_permission(city_sln_settings)
all_services = get_cirklo_vouchers_services()
if all_services.cursor:
raise NotImplementedError()
book = Workbook(encoding='utf-8')
sheet = book.add_sheet('Cirklo') # type: Worksheet
language = city_sln_settings.main_language
sheet.write(0, 0, translate(language, 'reservation-name'))
sheet.write(0, 1, translate(language, 'Email'))
sheet.write(0, 2, translate(language, 'address'))
sheet.write(0, 3, translate(language, 'Phone number'))
sheet.write(0, 4, translate(language, 'created'))
sheet.write(0, 5, translate(language, 'merchant_registered'))
date_format = XFStyle()
date_format.num_format_str = 'dd/mm/yyyy'
row = 0
for service in all_services.results:
row += 1
sheet.write(row, 0, service.name)
sheet.write(row, 1, service.email)
sheet.write(row, 2, service.address)
sheet.write(row, 3, service.phone_number)
sheet.write(row, 4, parse_date(service.creation_date), date_format)
sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No'))
date = format_datetime(datetime.now(), format='medium', locale='en_GB')
gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-'))
content_type = 'application/vnd.ms-excel'
with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file:
book.save(gcs_file)
deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400)
return {
'url': get_serving_url(gcs_path),
}
|
apache-2.0
| -1,141,183,746,215,933,700
| 42.109718
| 120
| 0.686009
| false
| 3.368937
| false
| false
| false
|
kirankaranth1/ShareIt
|
myproject/myapp/views.py
|
1
|
2533
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from myproject.myapp.forms import EmailForm
from myproject.myapp.models import Document
from myproject.myapp.forms import DocumentForm
from django.shortcuts import render, get_object_or_404, redirect
from django.core.mail import send_mail
def list(request):
# Handle file upload
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Document(docfile = request.FILES['docfile'])
newdoc.save()
url=newdoc.docfile.url
request.session['file_url'] = url
# Redirect to the document list after POST
return HttpResponseRedirect(reverse('myproject.myapp.views.email_url'))
else:
        form = DocumentForm() # An empty, unbound form
# Load documents for the list page
documents = Document.objects.all()
# Render list page with the documents and the form
return render_to_response(
'myapp/list.html',
{'documents': documents, 'form': form},
context_instance=RequestContext(request)
)
def send_url(email,name,url):
#Need to put mail function here
#send_mail('Subject here', 'Here is the message.', 'messanger@localhost.com',['any@email.com'], fail_silently=False)
print("Sharing %s with %s as %s" %(url,email,name))
def email_url(request):
file_url = request.session.get('file_url')
hostname = request.get_host()
file_url = str(hostname) + str(file_url)
eform = EmailForm(request.POST or None)
if eform.is_valid():
email = eform.cleaned_data["email"]
name = eform.cleaned_data["name"]
send_url(email,name,file_url)
request.session['recipentEmail'] = email
request.session['name'] = name
request.session['file_url'] = file_url
return HttpResponseRedirect(reverse('myproject.myapp.views.thank_you'))
context = { "eform": eform, "file_url":file_url,}
return render(request,"myapp/email_share.html",context)
def thank_you(request):
recipentEmail = request.session.get('recipentEmail')
recipentName = request.session.get('name')
file_url = request.session.get('file_url')
context = { "recipentName": recipentName,"recipentEmail": recipentEmail, "file_url":file_url}
return render(request,"myapp/thank_you.html",context)
|
gpl-2.0
| -4,222,012,508,288,221,000
| 37.393939
| 121
| 0.680616
| false
| 3.708638
| false
| false
| false
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.5-py2.5.egg/sqlalchemy/util.py
|
1
|
35617
|
# util.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import inspect, itertools, new, sets, sys, warnings, weakref
import __builtin__
types = __import__('types')
from sqlalchemy import exceptions
try:
import thread, threading
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
try:
Set = set
set_types = set, sets.Set
except NameError:
set_types = sets.Set,
# layer some of __builtin__.set's binop behavior onto sets.Set
class Set(sets.Set):
def _binary_sanity_check(self, other):
pass
def issubset(self, iterable):
other = type(self)(iterable)
return sets.Set.issubset(self, other)
def __le__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__le__(self, other)
def issuperset(self, iterable):
other = type(self)(iterable)
return sets.Set.issuperset(self, other)
def __ge__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__ge__(self, other)
# lt and gt still require a BaseSet
def __lt__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__lt__(self, other)
def __gt__(self, other):
sets.Set._binary_sanity_check(self, other)
return sets.Set.__gt__(self, other)
def __ior__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__ior__(self, other)
def __iand__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__iand__(self, other)
def __ixor__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__ixor__(self, other)
def __isub__(self, other):
if not isinstance(other, sets.BaseSet):
return NotImplemented
return sets.Set.__isub__(self, other)
try:
import cPickle as pickle
except ImportError:
import pickle
try:
reversed = __builtin__.reversed
except AttributeError:
def reversed(seq):
i = len(seq) -1
while i >= 0:
yield seq[i]
i -= 1
raise StopIteration()
try:
# Try the standard decimal for > 2.3 or the compatibility module
# for 2.3, if installed.
from decimal import Decimal
decimal_type = Decimal
except ImportError:
def Decimal(arg):
if Decimal.warn:
warn("True Decimal types not available on this Python, "
"falling back to floats.")
Decimal.warn = False
return float(arg)
Decimal.warn = True
decimal_type = float
try:
from operator import attrgetter
except:
def attrgetter(attribute):
return lambda value: getattr(value, attribute)
if sys.version_info >= (2, 5):
class PopulateDict(dict):
"""a dict which populates missing values via a creation function.
note the creation function takes a key, unlike collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
else:
class PopulateDict(dict):
"""a dict which populates missing values via a creation function."""
def __init__(self, creator):
self.creator = creator
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = value = self.creator(key)
return value
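# Illustrative sketch, not part of the original module: a minimal use of
# PopulateDict. Unlike collections.defaultdict, the creator receives the
# missing key itself. The helper below is never called at import time.
def _populate_dict_example():
    lengths = PopulateDict(lambda key: len(key))
    assert lengths['hello'] == 5   # computed by the creator on first access
    assert 'hello' in lengths      # ...and cached for subsequent lookups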
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.iteritems()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
try:
from collections import deque
except ImportError:
class deque(list):
def appendleft(self, x):
self.insert(0, x)
def extendleft(self, iterable):
self[0:0] = list(iterable)
def popleft(self):
return self.pop(0)
def rotate(self, n):
for i in xrange(n):
self.appendleft(self.pop())
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def array_as_starargs_decorator(func):
"""Interpret a single positional array argument as
*args for the decorated method.
"""
def starargs_as_list(self, *args, **kwargs):
if len(args) == 1:
return func(self, *to_list(args[0], []), **kwargs)
else:
return func(self, *args, **kwargs)
return starargs_as_list
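# Illustrative sketch, not part of the original module: with
# array_as_starargs_decorator a method accepts either one list/tuple or the
# same values as separate positional arguments.
def _starargs_decorator_example():
    class Demo(object):
        @array_as_starargs_decorator
        def collect(self, *items):
            return list(items)
    assert Demo().collect([1, 2, 3]) == [1, 2, 3]
    assert Demo().collect(1, 2, 3) == [1, 2, 3]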
def to_set(x):
if x is None:
return Set()
if not isinstance(x, Set):
return Set(to_list(x))
else:
return x
def to_ascii(x):
"""Convert Unicode or a string with unknown encoding into ASCII."""
if isinstance(x, str):
return x.encode('string_escape')
elif isinstance(x, unicode):
return x.encode('unicode_escape')
else:
raise TypeError
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class ArgSingleton(type):
instances = weakref.WeakValueDictionary()
def dispose(cls):
for key in list(ArgSingleton.instances):
if key[0] is cls:
del ArgSingleton.instances[key]
dispose = staticmethod(dispose)
def __call__(self, *args):
hashkey = (self, args)
try:
return ArgSingleton.instances[hashkey]
except KeyError:
instance = type.__call__(self, *args)
ArgSingleton.instances[hashkey] = instance
return instance
def get_cls_kwargs(cls):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a **kwargs catch-all, then the constructor is presumed to
    pass along unrecognized keywords to its base classes, and the collection
process is repeated recursively on each of the bases.
"""
for c in cls.__mro__:
if '__init__' in c.__dict__:
stack = Set([c])
break
else:
return []
args = Set()
while stack:
class_ = stack.pop()
ctr = class_.__dict__.get('__init__', False)
if not ctr or not isinstance(ctr, types.FunctionType):
continue
names, _, has_kw, _ = inspect.getargspec(ctr)
args.update(names)
if has_kw:
stack.update(class_.__bases__)
args.discard('self')
return list(args)
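# Illustrative sketch, not part of the original module: get_cls_kwargs collects
# constructor keyword names across the MRO; a **kwargs catch-all causes the
# base classes to be probed as well.
def _get_cls_kwargs_example():
    class Base(object):
        def __init__(self, color=None):
            self.color = color
    class Derived(Base):
        def __init__(self, size=None, **kw):
            Base.__init__(self, **kw)
            self.size = size
    assert sorted(get_cls_kwargs(Derived)) == ['color', 'size']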
def get_func_kwargs(func):
"""Return the full set of legal kwargs for the given `func`."""
return inspect.getargspec(func)[0]
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not required."""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.im_self:
return func_or_cls.im_func
else:
return func_or_cls
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, (str, unicode)):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and type(kw[key]) is not type_ and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
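# Illustrative sketch, not part of the original module: coerce_kw_type converts
# string-valued kwargs in place, with '0' treated as False when coercing bools.
def _coerce_kw_type_example():
    kw = {'echo': '0', 'pool_size': '5'}
    coerce_kw_type(kw, 'echo', bool)
    coerce_kw_type(kw, 'pool_size', int)
    coerce_kw_type(kw, 'missing', int)   # absent keys are left untouched
    assert kw == {'echo': False, 'pool_size': 5}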
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: util.Set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set_types)):
return Set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list): return list
if isa(specimen, set_types): return Set
if isa(specimen, dict): return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return Set
elif hasattr(specimen, 'set'):
return dict
else:
return default
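# Illustrative sketch, not part of the original module: duck_type_collection
# guesses which basic collection a specimen acts like, from __emulates__ or
# from the presence of append/add/set methods.
def _duck_type_collection_example():
    class ListLike(object):
        def append(self, item):
            pass
    assert duck_type_collection([]) is list
    assert duck_type_collection(dict) is dict          # classes work too
    assert duck_type_collection(ListLike()) is list    # guessed from .append()
    assert duck_type_collection(42, default='scalar') == 'scalar'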
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter([(key, getter(key)) for key in dictlike.keys()])
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exceptions.ArgumentError("Argument '%s' is expected to be one of type %s, got '%s'" % (name, ' or '.join(["'%s'" % str(a) for a in argtype]), str(type(arg))))
else:
raise exceptions.ArgumentError("Argument '%s' is expected to be of type '%s', got '%s'" % (name, str(argtype), str(type(arg))))
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to a warning."""
try:
return func(*args, **kwargs)
except:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
spec = inspect.getargspec(getattr(from_cls, method))
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
exec py in env
setattr(into_cls, method, env[method])
class SimpleProperty(object):
"""A *default* property accessor."""
def __init__(self, key):
self.key = key
def __set__(self, obj, value):
setattr(obj, self.key, value)
def __delete__(self, obj):
delattr(obj, self.key)
def __get__(self, obj, owner):
if obj is None:
return self
else:
return getattr(obj, self.key)
class NotImplProperty(object):
"""a property that raises ``NotImplementedError``."""
def __init__(self, doc):
self.__doc__ = doc
def __set__(self, obj, value):
raise NotImplementedError()
def __delete__(self, obj):
raise NotImplementedError()
def __get__(self, obj, owner):
if obj is None:
return self
else:
raise NotImplementedError()
class OrderedProperties(object):
"""An object that maintains the order in which attributes are set upon it.
Also provides an iterator and a very basic getitem/setitem
interface to those attributes.
(Not really a dict, since it iterates over values, not keys. Not really
a list, either, since each value must have a key associated; hence there is
no append or extend.)
"""
def __init__(self):
self.__dict__['_data'] = OrderedDict()
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def has_key(self, key):
return self._data.has_key(key)
def clear(self):
self._data.clear()
class OrderedDict(dict):
"""A Dictionary that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter(self.values())
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
self._list.append(key)
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key):
value = dict.pop(self, key)
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
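# Illustrative sketch, not part of the original module: this OrderedDict
# preserves insertion order; re-assigning an existing key keeps its position.
def _ordered_dict_example():
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    d['b'] = 3
    assert d.keys() == ['b', 'a']
    assert d.values() == [3, 2]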
try:
from threading import local as ThreadLocal
except ImportError:
try:
from dummy_threading import local as ThreadLocal
except ImportError:
class ThreadLocal(object):
"""An object in which attribute access occurs only within the context of the current thread."""
def __init__(self):
self.__dict__['_tdict'] = {}
def __delattr__(self, key):
try:
del self._tdict[(thread.get_ident(), key)]
except KeyError:
raise AttributeError(key)
def __getattr__(self, key):
try:
return self._tdict[(thread.get_ident(), key)]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self._tdict[(thread.get_ident(), key)] = value
class OrderedSet(Set):
def __init__(self, d=None):
Set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, key):
if key not in self:
self._list.append(key)
Set.add(self, key)
def remove(self, element):
Set.remove(self, element)
self._list.remove(element)
def discard(self, element):
try:
Set.remove(self, element)
except KeyError:
pass
else:
self._list.remove(element)
def clear(self):
Set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
add = self.add
for i in iterable:
add(i)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = Set(other)
return self.__class__([a for a in self if a in other])
__and__ = intersection
def symmetric_difference(self, other):
other = Set(other)
result = self.__class__([a for a in self if a not in other])
result.update([a for a in other if a not in self])
return result
__xor__ = symmetric_difference
def difference(self, other):
other = Set(other)
return self.__class__([a for a in self if a not in other])
__sub__ = difference
def intersection_update(self, other):
other = Set(other)
Set.intersection_update(self, other)
self._list = [ a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
Set.symmetric_difference_update(self, other)
self._list = [ a for a in self._list if a in self]
self._list += [ a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
Set.difference_update(self, other)
self._list = [ a for a in self._list if a in self]
return self
__isub__ = difference_update
if hasattr(Set, '__getstate__'):
def __getstate__(self):
base = Set.__getstate__(self)
return base, self._list
def __setstate__(self, state):
Set.__setstate__(self, state[0])
self._list = state[1]
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
This strategy has edge cases for builtin types- it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = Set
def __init__(self, iterable=None):
self._members = _IterableUpdatableDict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools.ifilterfalse(other._members.has_key,
self._members.iterkeys()):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools.ifilterfalse(self._members.has_key,
other._members.iterkeys()):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).union(_iter_id(iterable)))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).difference(_iter_id(iterable)))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).intersection(_iter_id(iterable)))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._members.iteritems()).symmetric_difference(_iter_id(iterable)))
return result
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
        self.symmetric_difference_update(other)
return self
def copy(self):
return type(self)(self._members.itervalues())
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return self._members.itervalues()
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._members.values())
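# Illustrative sketch, not part of the original module: IdentitySet keeps both
# of two equal-but-distinct objects, because membership is based on id().
def _identity_set_example():
    a, b = [1, 2], [1, 2]
    ids = IdentitySet([a, b])
    assert len(ids) == 2
    ids.add(a)                       # re-adding the same object is a no-op
    assert len(ids) == 2
    assert a in ids and [1, 2] not in ids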
if sys.version_info >= (2, 4):
_IterableUpdatableDict = dict
else:
class _IterableUpdatableDict(dict):
"""A dict that can update(iterable) like Python 2.4+'s dict."""
def update(self, __iterable=None, **kw):
if __iterable is not None:
if not isinstance(__iterable, dict):
__iterable = dict(__iterable)
dict.update(self, __iterable)
if kw:
dict.update(self, **kw)
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class UniqueAppender(object):
"""Only adds items to a collection once.
Additional appends() of the same object are ignored. Membership is
    determined by identity (``is``), not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = IdentitySet()
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
# TODO: we think its a set here. bypass unneeded uniquing logic ?
self._data_appender = data.add
def append(self, item):
if item not in self._unique:
self._data_appender(item)
self._unique.add(item)
def __iter__(self):
return iter(self.data)
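# Illustrative sketch, not part of the original module: UniqueAppender forwards
# each distinct object to the target collection exactly once.
def _unique_appender_example():
    target = []
    appender = UniqueAppender(target)
    item = {'name': 'x'}
    appender.append(item)
    appender.append(item)            # second append of the same object ignored
    assert target == [{'name': 'x'}]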
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on a per-thread scoped basis, or on a customized scope.
createfunc
a callable that returns a new object to be placed in the registry
scopefunc
a callable that will return a key to store/retrieve an object,
defaults to ``thread.get_ident`` for thread-local objects. Use
a value like ``lambda: True`` for application scope.
"""
def __init__(self, createfunc, scopefunc=None):
self.createfunc = createfunc
if scopefunc is None:
self.scopefunc = thread.get_ident
else:
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self._get_key()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
return self._get_key() in self.registry
def set(self, obj):
self.registry[self._get_key()] = obj
def clear(self):
try:
del self.registry[self._get_key()]
except KeyError:
pass
def _get_key(self):
return self.scopefunc()
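# Illustrative sketch, not part of the original module: a ScopedRegistry with a
# constant scopefunc acts as an application-scoped singleton factory; the
# default scopefunc (thread.get_ident) yields one instance per thread instead.
def _scoped_registry_example():
    registry = ScopedRegistry(createfunc=dict, scopefunc=lambda: True)
    first = registry()
    assert registry() is first       # same scope key -> same cached object
    registry.clear()
    assert registry() is not first   # cleared scope creates a fresh object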
class _symbol(object):
def __init__(self, name):
"""Construct a new named symbol."""
assert isinstance(name, str)
self.name = name
def __reduce__(self):
return symbol, (self.name,)
def __repr__(self):
return "<symbol '%s>" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
<symbol 'foo>
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
"""
symbols = {}
_lock = threading.Lock()
def __new__(cls, name):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name)
return sym
finally:
symbol._lock.release()
def function_named(fn, name):
"""Return a function with a given __name__.
Will assign to __name__ and return the original function if possible on
the Python implementation, otherwise a new function will be constructed.
"""
try:
fn.__name__ = name
except TypeError:
fn = new.function(fn.func_code, fn.func_globals, name,
fn.func_defaults, fn.func_closure)
return fn
def conditional_cache_decorator(func):
"""apply conditional caching to the return value of a function."""
return cache_decorator(func, conditional=True)
def cache_decorator(func, conditional=False):
"""apply caching to the return value of a function."""
name = '_cached_' + func.__name__
def do_with_cache(self, *args, **kwargs):
if conditional:
cache = kwargs.pop('cache', False)
if not cache:
return func(self, *args, **kwargs)
try:
return getattr(self, name)
except AttributeError:
value = func(self, *args, **kwargs)
setattr(self, name, value)
return value
return do_with_cache
def reset_cached(instance, name):
try:
delattr(instance, '_cached_' + name)
except AttributeError:
pass
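# Illustrative sketch, not part of the original module: cache_decorator stores
# the first result on the instance as _cached_<name>; reset_cached discards it.
def _cache_decorator_example():
    class Report(object):
        calls = 0
        @cache_decorator
        def total(self):
            Report.calls += 1
            return 42
    r = Report()
    assert r.total() == 42 and r.total() == 42
    assert Report.calls == 1          # second call served from the cache
    reset_cached(r, 'total')
    assert r.total() == 42 and Report.calls == 2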
def warn(msg):
if isinstance(msg, basestring):
warnings.warn(msg, exceptions.SAWarning, stacklevel=3)
else:
warnings.warn(msg, stacklevel=3)
def warn_deprecated(msg):
warnings.warn(msg, exceptions.SADeprecationWarning, stacklevel=3)
def deprecated(message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
message
If provided, issue message in the warning. A sensible default
is used if not provided.
add_deprecation_to_docstring
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = message is not None and message or 'Deprecated.'
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exceptions.SADeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
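# Illustrative sketch, not part of the original module: applying deprecated()
# wraps the function so each call emits an SADeprecationWarning, and the
# message is prepended to the wrapped function's docstring.
def _deprecated_example():
    @deprecated("Use new_helper() instead.")
    def old_helper():
        """Compute something."""
        return 1
    return old_helper()   # warns, then returns 1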
def pending_deprecation(version, message=None,
add_deprecation_to_docstring=True):
"""Decorates a function and issues a pending deprecation warning on use.
version
An approximate future version at which point the pending deprecation
will become deprecated. Not used in messaging.
message
If provided, issue message in the warning. A sensible default
is used if not provided.
add_deprecation_to_docstring
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = message is not None and message or 'Deprecated.'
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exceptions.SAPendingDeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
def _decorate_with_warning(func, wtype, message, docstring_header=None):
"""Wrap a function with a warnings.warn and augmented docstring."""
def func_with_warning(*args, **kwargs):
warnings.warn(wtype(message), stacklevel=2)
return func(*args, **kwargs)
doc = func.__doc__ is not None and func.__doc__ or ''
if docstring_header is not None:
doc = '\n'.join((docstring_header.rstrip(), doc))
func_with_warning.__doc__ = doc
func_with_warning.__dict__.update(func.__dict__)
return function_named(func_with_warning, func.__name__)
|
bsd-3-clause
| -2,789,202,377,800,578,600
| 29.05654
| 176
| 0.574613
| false
| 4.170609
| false
| false
| false
|
dj95/telegram-autokick
|
autokick.py
|
1
|
3167
|
#!/bin/env python3
#
# Telegram Auto Kick
#
# © 2015 Daniel Jankowski
import subprocess
import os
import re
import json
import sqlite3
import db
from threading import Thread,Event
DATABASE = '/home/neo/Projekte/Python/telegram-autokick/banned_usernames.db'
GROUP_NAME = 'Linux_statt_Windows'
class kicker(Thread):
def __init__(self):
super().__init__()
self.stop_event = Event()
self.__db = db.db_handler(DATABASE)
self.__db.create_table()
self.__db.close_database()
def add_username(self, username):
self.__db.add_user(username)
def remove_shit(self, data):
data = re.sub('\r', '', data)
data = re.sub('\x1b\[K', '', data)
data = re.sub('>\s*', '', data)
data = re.sub('All done\. Exit\n', '', data)
data = re.sub('halt\n*', '', data)
data = re.sub('\n*$', '', data)
return json.loads(data)
def run(self):
while not self.stop_event.is_set():
# get data from telegram-cli
cmd = ['telegram-cli','-b','-W','-D','--json','-e chat_info ' + GROUP_NAME]
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
data = s.communicate()[0].decode('utf-8')
data = self.remove_shit(data)
# processing data
members = data['members']
self.__db = db.db_handler(DATABASE)
banned_users = self.__db.get_banned_usernames()
self.__db.close_database()
banned_usernames = []
banned_ids = []
for user in banned_users:
banned_usernames.append(user[0])
banned_ids.append(user[1])
for member in members:
if 'username' in member:
if member['username'] in banned_usernames:
if member['id'] not in banned_ids:
self.__db = db.db_handler(DATABASE)
self.__db.add_user_id(member['id'], member['username'])
self.__db.close_database()
if 'print_name' in member:
if member['print_name'] in banned_usernames:
if member['id'] not in banned_ids:
self.__db = db.db_handler(DATABASE)
self.__db.add_user_id(member['id'], member['username'])
self.__db.close_database()
if member['id'] in banned_ids:
cmd = ['telegram-cli','-b','-W','-D','--json','-e chat_del_user ' + GROUP_NAME + ' ' + member['print_name']]
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
data = s.communicate()[0].decode('utf-8')
data = self.remove_shit(data)
self.stop_event.wait(1.0)
def stop(self):
self.stop_event.set()
def main():
print('Telegram Auto Kick')
bot = kicker()
bot.start()
inp = ''
while inp != "exit":
inp = input()
bot.stop()
bot.join()
return
if __name__ == '__main__':
main()
|
lgpl-3.0
| -4,750,373,982,801,555,000
| 30.66
| 128
| 0.49463
| false
| 3.819059
| false
| false
| false
|
joelmpiper/bill_taxonomy
|
src/report/make_roc_curve.py
|
1
|
1830
|
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
import pickle
from src.utils.get_time_stamp import get_time_stamp
from sklearn.grid_search import GridSearchCV
def make_roc_curve(pipeline, X, y, train_frac, subject, cfg):
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=train_frac,
random_state=1,
stratify=y)
grid_search = GridSearchCV(pipeline, {}, scoring=cfg['scoring'],
verbose=10)
grid_search.fit(X_train, y_train)
y_pred_class = grid_search.predict(X_test)
y_pred_prob = grid_search.predict_proba(X_test)[:, 1]
acc_score = metrics.accuracy_score(y_test, y_pred_class)
print(acc_score)
conf_mat = metrics.confusion_matrix(y_test, y_pred_class)
print(conf_mat)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
plt.title(subject + '\nReceiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
fig_dir = cfg['fig_dir']
plt.savefig(fig_dir + '/roc_curve_' + subject.lower() +
'_' + get_time_stamp() + '.png')
results_save = (grid_search, X_test, y_test, acc_score, conf_mat,
y_pred_class, y_pred_prob)
pickle.dump(results_save, open(fig_dir + '/split_data_' + subject.lower() +
'_' + get_time_stamp() + '.p', 'wb'))
|
mit
| -2,897,342,823,128,817,000
| 42.571429
| 79
| 0.562295
| false
| 3.388889
| true
| false
| false
|
sfcta/CountDracula
|
scripts/updateCountsWorkbooks.py
|
1
|
9336
|
"""
Created on Jul 25, 2011
@author: lmz
This script reads counts data from input Excel workbooks and inserts the info into the CountDracula database.
"""
import getopt, logging, os, re, shutil, sys, time, traceback, xlrd, xlwt
libdir = os.path.realpath(os.path.join(os.path.split(__file__)[0], "..", "geodjango"))
sys.path.append(libdir)
os.environ['DJANGO_SETTINGS_MODULE'] = 'geodjango.settings'
from django.core.management import setup_environ
from geodjango import settings
from django.contrib.auth.models import User
import countdracula.models
from countdracula.parsers.CountsWorkbookParser import CountsWorkbookParser
USAGE = """
python updateCountsWorkbooks.py v1.0_toprocess_dir v1.0_outdated_dir v1.1_new_dir
"""
DATE_REGEX = re.compile(r"(\d\d\d\d)\.(\d{1,2})\.(\d{1,2})")
MAINLINE_NODES = re.compile(r"(\d{5,6}) (\d{5,6})")
CALIBRI_10PT = xlwt.easyxf('font: name Calibri, height 200;')
CALIBRI_10PT_RED = xlwt.easyxf('font: name Calibri, height 200, color-index red;')
CALIBRI_10PT_ORANGE_CENTER = xlwt.easyxf('font: name Calibri, height 200; pattern: pattern solid, fore_color 0x33; alignment: horz center;')
CALIBRI_10PT_LIME_CENTER = xlwt.easyxf('font: name Calibri, height 200; pattern: pattern solid, fore_color 0x32; alignment: horz center;')
def copysheet(rb, r_sheet, wb):
w_sheet = wb.add_sheet(r_sheet.name)
for rownum in range(r_sheet.nrows):
for colnum in range(r_sheet.ncols):
w_sheet.write(rownum, colnum, r_sheet.cell_value(rownum,colnum), CALIBRI_10PT_RED)
def isRowEmpty(r_sheet, r_rownum):
"""
    Is the row empty? (aside from the first column)
"""
for colnum in range(1,r_sheet.ncols):
# logger.debug("cell_type=%d cell_value=[%s]" % (r_sheet.cell_type(r_rownum,colnum), str(r_sheet.cell_value(r_rownum,colnum))))
if r_sheet.cell_type(r_rownum,colnum) in [xlrd.XL_CELL_BLANK,xlrd.XL_CELL_EMPTY]:
continue
if r_sheet.cell_value(r_rownum,colnum) != "":
# found something!
return False
return True # didn't find anything
def isColumnZeros(r_sheet, colnum):
"""
Starts at row 2. Breaks on empty row.
"""
for r_rownum in range(2,r_sheet.nrows):
if r_sheet.cell_type(r_rownum,colnum) in [xlrd.XL_CELL_BLANK,xlrd.XL_CELL_EMPTY]: break
elif r_sheet.cell_type(r_rownum,colnum) in [xlrd.XL_CELL_NUMBER]:
if float(r_sheet.cell_value(r_rownum,colnum)) > 0.0: return False
else:
raise Exception("Didn't understand cell value at (%d,%d)" % (r_rownum, colnum))
return True
def updateWorkbook(logger, DIR_TOPROCESS, DIR_OLDV10, DIR_NEWV11, file, mainline_or_turns):
"""
    Converts a v1.0 workbook to a v1.1 workbook. For anything unexpected, logs an error and returns.
For success only, the new workbook will be placed in *DIR_NEWV11* and the old one will be placed in *DIR_OLDV10*.
"""
assert(mainline_or_turns in ["MAINLINE","TURNS"])
rb = xlrd.open_workbook(os.path.join(DIR_TOPROCESS, file), formatting_info=True)
wb = xlwt.Workbook(encoding='utf-8')
# go through the sheets
for sheet_idx in range(rb.nsheets):
r_sheet = rb.sheet_by_index(sheet_idx)
sheet_name = r_sheet.name
        logger.info(" Reading sheet [%s]" % sheet_name)
# just copy the source sheet
if sheet_name == "source":
copysheet(rb, r_sheet, wb)
continue
match_obj = re.match(DATE_REGEX, sheet_name)
if match_obj.group(0) != sheet_name:
logger.error("Sheetname [%s] is not the standard date format! Skipping this workbook." % sheet_name)
return
w_sheet = wb.add_sheet(sheet_name)
# check what we're copying over
for colnum in range(r_sheet.ncols):
if mainline_or_turns == "MAINLINE":
# nodes ok
if r_sheet.cell_type(1,colnum) == xlrd.XL_CELL_TEXT and re.match(MAINLINE_NODES, str(r_sheet.cell_value(1,colnum))) != None:
continue
if r_sheet.cell_value(1,colnum) not in [1.0, 2.0, ""]:
logger.warn("Unexpected MAINLINE row 1 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(1,colnum))
return
if mainline_or_turns == "TURNS" and colnum==0 and r_sheet.cell_value(1,colnum) not in [3.0, 4.0, ""]:
logger.warn("Unexpected TURNS row 1 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(1,colnum))
return
        # copy first line down; make sure it's MAINLINE|TURNS, [dir1], [dir2], ...
for colnum in range(r_sheet.ncols):
if colnum == 0 and r_sheet.cell_value(0, colnum) != mainline_or_turns:
logger.warn("Unexpected row 0 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(0,colnum))
return
if mainline_or_turns == "MAINLINE" and colnum > 0 and r_sheet.cell_value(0,colnum) not in ["NB","SB","EB","WB", ""]:
logger.warn("Unexpected mainline row 0 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(0,colnum))
return
if mainline_or_turns == "TURNS" and colnum > 0 and r_sheet.cell_value(0,colnum) not in ["NBLT", "NBRT", "NBTH",
"SBLT", "SBRT", "SBTH",
"EBLT", "EBRT", "EBTH",
"WBLT", "WBRT", "WBTH"]:
logger.warn("Unexpected turns row 0 cell value = [%s]! Skipping this workbook." % r_sheet.cell_value(0,colnum))
return
w_sheet.write(1, colnum, r_sheet.cell_value(0,colnum), CALIBRI_10PT_ORANGE_CENTER)
if colnum != 0: w_sheet.write(0, colnum, "")
w_sheet.write(0,0, "All", CALIBRI_10PT_LIME_CENTER)
# mainline - copy over non-empty rows
if mainline_or_turns == "MAINLINE":
w_rownum = 2
for r_rownum in range(2,r_sheet.nrows):
# don't copy the empty rows
if isRowEmpty(r_sheet, r_rownum): continue
# copy this row
for colnum in range(r_sheet.ncols):
w_sheet.write(w_rownum, colnum, r_sheet.cell_value(r_rownum,colnum), CALIBRI_10PT)
w_rownum += 1
        # turns - abort on all-zero columns
else:
# look for zero columns and abort if found
for colnum in range(1,r_sheet.ncols):
if isColumnZeros(r_sheet, colnum):
logger.warn("Zero column found! Skipping this workbook.")
return
# copy over everything
for r_rownum in range(2,r_sheet.nrows):
for colnum in range(r_sheet.ncols):
w_sheet.write(r_rownum, colnum, r_sheet.cell_value(r_rownum,colnum), CALIBRI_10PT)
if os.path.exists(os.path.join(DIR_NEWV11, file)):
logger.warn("File %s already exists! Skipping." % os.path.join(DIR_NEWV11, file))
return
wb.default_style.font.height = 20*10
wb.save(os.path.join(DIR_NEWV11, file))
# move the old one to the deprecated dir
shutil.move(os.path.join(DIR_TOPROCESS,file),
os.path.join(DIR_OLDV10,file))
if __name__ == '__main__':
optlist, args = getopt.getopt(sys.argv[1:], '')
if len(args) < 2:
print USAGE
sys.exit(2)
if len(args) != 3:
print USAGE
sys.exit(2)
DIR_TOPROCESS = args[0]
DIR_OLDV10 = args[1]
DIR_NEWV11 = args[2]
logger = logging.getLogger('countdracula')
logger.setLevel(logging.DEBUG)
consolehandler = logging.StreamHandler()
consolehandler.setLevel(logging.DEBUG)
consolehandler.setFormatter(logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
logger.addHandler(consolehandler)
debugFilename = "updateCountsWorkbooks.DEBUG.log"
debugloghandler = logging.StreamHandler(open(debugFilename, 'w'))
debugloghandler.setLevel(logging.DEBUG)
debugloghandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M'))
logger.addHandler(debugloghandler)
files_to_process = sorted(os.listdir(DIR_TOPROCESS))
for file in files_to_process:
if file[-4:] !='.xls':
print "File suffix is not .xls: %s -- skipping" % file[-4:]
continue
logger.info("")
logger.info("Processing file %s" % file)
streetlist = CountsWorkbookParser.parseFilename(file)
# mainline
if len(streetlist) in [2,3]:
updateWorkbook(logger, DIR_TOPROCESS, DIR_OLDV10, DIR_NEWV11, file, "MAINLINE" if len(streetlist)==3 else "TURNS")
else:
logger.info(" Invalid workbook name %s" % file)
|
gpl-3.0
| -5,795,683,601,665,664,000
| 41.244344
| 140
| 0.576478
| false
| 3.366751
| false
| false
| false
|
pombredanne/django-rest-framework-fine-permissions
|
rest_framework_fine_permissions/fields.py
|
1
|
1121
|
# -*- coding: utf-8 -*-
"""
"""
import logging
import six
import collections
from django.db import models
from rest_framework import serializers
from rest_framework.fields import Field, empty
from .utils import get_serializer
logger = logging.getLogger(__name__)
class ModelPermissionsField(Field):
""" Field that acts as a ModelPermissionsSerializer for relations. """
def __init__(self, serializer, **kwargs):
self.serializer = get_serializer(serializer)
super(ModelPermissionsField, self).__init__(**kwargs)
def to_representation(self, obj):
""" Represent data for the field. """
many = isinstance(obj, collections.Iterable) \
or isinstance(obj, models.Manager) \
and not isinstance(obj, dict)
serializer_cls = get_serializer(self.serializer)
assert serializer_cls is not None \
and issubclass(serializer_cls, serializers.ModelSerializer), (
"Bad serializer defined %s" % serializer_cls
)
ser = self.serializer(obj, context=self.context, many=many)
return ser.data
|
gpl-2.0
| -2,109,886,766,022,636,800
| 26.341463
| 74
| 0.659233
| false
| 4.43083
| false
| false
| false
|
ucdavis-bioinformatics/proc10xG
|
profile_mapping.py
|
1
|
10092
|
#!/usr/bin/env python
"""
Copyright 2017 Matt Settles
Created June 8, 2017
"""
from optparse import OptionParser
from collections import Counter
import os
import sys
import time
import traceback
import signal
from subprocess import Popen
from subprocess import PIPE
# Handle PE:
# logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped
if (flag & 0x1): # PE READ
if (not (flag & 0x4) and not (flag & 0x8)): # both pairs mapped
if (flag & 0x40): # is this PE1 (first segment in template)
# PE1 read, check that PE2 is in dict
ID = line2[0]
if ID in PE2:
if mq >= self.minMQ and int(PE2[ID].strip().split()[4]) >= self.minMQ: # check MQ of both reads
self.ok_bc_lines.append(line)
self.ok_bc_lines.append(PE2[ID])
del PE2[ID]
# TODO: NEED to determine read cloud for read
mapped_pairs_count += 1
else:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r1 = '\n'.join(['@' + line2[0] + ' 1:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl2 = PE2[ID].strip().split()
if (int(rl2[1]) & 0x10): # reverse complement
rl2[9] = reverseComplement(rl2[9])
rl2[10] = reverse(rl2[10])
r2 = '\n'.join(['@' + rl2[0] + ' 2:N:O', rl2[9], '+', rl2[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE2[ID]
remapped_pairs_count += 1
else:
PE1[ID] = line
elif (flag & 0x80): # is this PE2 (last segment in template)
# PE2 read, check that PE1 is in dict and write out
ID = line2[0]
if ID in PE1:
if mq >= self.minMQ and int(PE1[ID].strip().split()[4]) >= self.minMQ: # check MQ of both reads
self.ok_bc_lines.append(line)
self.ok_bc_lines.append(PE1[ID])
del PE1[ID]
# TODO: NEED to determine read cloud for read
mapped_pairs_count += 1
else:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r2 = '\n'.join(['@' + line2[0] + ' 2:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl1 = PE1[ID].strip().split()
if (int(rl1[1]) & 0x10): # reverse complement
rl1[9] = reverseComplement(rl1[9])
rl1[10] = reverse(rl1[10])
r1 = '\n'.join(['@' + rl1[0] + ' 1:N:O', rl1[9], '+', rl1[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE1[ID]
remapped_pairs_count += 1
else:
PE2[ID] = line
else: # an 'unmapped' pair, at least 1 unmapped
if (flag & 0x40): # is this PE1 (first segment in template)
# PE1 read, check that PE2 is in dict and write out
ID = line2[0]
if ID in PE2:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r1 = '\n'.join(['@' + line2[0] + ' 1:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl2 = PE2[ID].strip().split()
if (int(rl2[1]) & 0x10): # reverse complement
rl2[9] = reverseComplement(rl2[9])
rl2[10] = reverse(rl2[10])
r2 = '\n'.join(['@' + rl2[0] + ' 2:N:O', rl2[9], '+', rl2[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE2[ID]
remapped_pairs_count += 1
else:
PE1[ID] = line
elif (flag & 0x80): # is this PE2 (last segment in template)
# PE2 read, check that PE1 is in dict and write out
ID = line2[0]
if ID in PE1:
if (flag & 0x10): # reverse complement
line2[9] = reverseComplement(line2[9])
line2[10] = reverse(line2[10])
r1 = '\n'.join(['@' + line2[0] + ' 1:N:O', line2[9], '+', line2[10]]) # sequence + qual
rl2 = PE2[ID].strip().split()
if (int(rl2[1]) & 0x10): # reverse complement
rl2[9] = reverseComplement(rl2[9])
rl2[10] = reverse(rl2[10])
r2 = '\n'.join(['@' + rl2[0] + ' 2:N:O', rl2[9], '+', rl2[10]]) # sequence + qual
self.addRead('\n'.join([r1, r2]))
del PE2[ID]
remapped_pairs_count += 1
else:
PE2[ID] = line
def main(insam, outsam, output_all, verbose):
global file_path
refDict = {}
bcDict = {}
line_count = 0
bc_count = 0
for line in insam:
# Comment/header lines start with @
if line[0] == "@":
# pass header directly to output
if line[0:3] == "@SQ":
# reference sequence id
sp = line.split()
refDict[sp[1][3:]] = int(sp[2][3:])
elif line[0] != "@" and len(line.strip().split()) > 2:
line_count += 1
bc = line.split(":")[0]
# instead check the ST:Z:GOOD for GOOD or MATCH or MISMATCH1
if line.split()[15][5:] not in ['GOOD', 'MATCH', 'MISMATCH1']:
# if seqToHash(bc) not in gbcDict:
# barcode does not match whitelist
if output_all:
# if output_all pass line directly to output
outsam.write(line)
elif bc == current_bc:
# add line to bc processing
proc_bc.addLine(line)
current_bc_count += 1
elif current_bc is None:
current_bc = bc
# add line to bc processing
proc_bc.addLine(line)
current_bc_count += 1
else:
# this is a new barcode
# can add a check to see if seen bc before, which is a no-no
# process the bc
proc_bc.process()
# record having processed the barcode
# output to sam file
bc_count += 1
proc_bc.clearbc()
current_bc = bc
# add line to bc processing
current_bc_count = 1
proc_bc.addLine(line)
else:
# Not sure what happened
sys.stderr.write("Unknown line: %s" % line)
if line_count % 100000 == 0 and line_count > 0 and verbose:
print "Records processed: %s" % (line_count)
#####################################
# Parse options and setup #
usage = "usage %prog -o [output file prefix (path + name)] -(a) --quiet samfile"
usage += "\n%prog will process the alignment file produced by processing_10xReads and profile each barcode"
parser = OptionParser(usage=usage, version="%prog 0.0.1")
parser.add_option('-o', '--output', help="Directory + filename to output bc stats",
action="store", type="str", dest="outfile", default="bc_profile.txt")
parser.add_option('-a', '--all', help="output all barcodes, not just those with valid gem barcode (STATUS is UNKNOWN, or AMBIGUOUS)",
action="store_true", dest="output_all", default=False)
parser.add_option('--quiet', help="turn off verbose output",
action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args()
if len(args) == 1:
infile = args[0]
# Start opening input/output files:
if not os.path.exists(infile):
sys.exit("Error, can't find input file %s" % infile)
insam = open(infile, 'r')
else:
# reading from stdin
insam = sys.stdin
outfile = options.outfile
if outfile == "stdout":
outf = sys.stdout
else:
outf = open(outfile, 'w')
output_all = options.output_all
verbose = options.verbose
# need to check, can write to output folder
# global variables
file_path = os.path.dirname(os.path.realpath(__file__))
stime = time.time()
main(insam, outf, output_all, verbose)
sys.exit(0)
|
apache-2.0
| -3,586,046,802,234,102,300
| 46.380282
| 133
| 0.419144
| false
| 4.247475
| false
| false
| false
|
prophittcorey/Artificial-Intelligence
|
assign2/search.py
|
1
|
1618
|
#!/usr/bin/env python
'''
File: search.py
Author: Corey Prophitt <prophitt.corey@gmail.com>
Class: CS440, Colorado State University.
License: GPLv3, see license.txt for more details.
Description:
The iterative deepening search algorithm.
'''
#
# Standard module imports
#
from copy import copy
def depthLimitedSearchHelper(state, actionsF, takeActionF, goalTestF, depthLimit):
'''A helper function for the iterative deepening search. Does the recursive
calls. This is almost verbatim from the class notes.
'''
if goalTestF(state):
return state
if depthLimit == 0:
return "cutoff"
cutoffOccurred = False
for action in actionsF(state):
childState = takeActionF(copy(state), action)
result = depthLimitedSearchHelper(childState, actionsF, takeActionF, goalTestF, depthLimit-1) # <-- Some problem here
if result == "cutoff":
cutoffOccurred = True
elif result != "failure":
return result # <-- Some problem here
if cutoffOccurred:
return "cutoff"
else:
return "failure"
def iterativeDeepeningSearch(startState, actionsF, takeActionF, goalTestF, maxDepth):
'''The iterative portion of the search. Iterates through the possible "depths".
This is almost verbatim from the class notes.'''
for depth in range(maxDepth):
result = depthLimitedSearchHelper(startState, actionsF, takeActionF, goalTestF, depth)
if result != "cutoff":
return [result]
return "cutoff"
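# Illustrative usage (a toy problem, not part of the assignment): a state is an
# integer, the two actions add or subtract one, and the goal is to reach 3 from 0.
# The lambdas below are hypothetical stand-ins for a real problem definition.
#
# actionsF = lambda s: ['+1', '-1']
# takeActionF = lambda s, a: s + 1 if a == '+1' else s - 1
# goalTestF = lambda s: s == 3
# iterativeDeepeningSearch(0, actionsF, takeActionF, goalTestF, 10) # -> [3]
# # returns "cutoff" instead if maxDepth is too small to reach the goal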
if __name__ == '__main__':
pass
|
gpl-3.0
| -6,677,673,130,061,893,000
| 28.981481
| 126
| 0.645859
| false
| 3.908213
| false
| false
| false
|
QingkaiLu/RAMCloud
|
scripts/smux.py
|
1
|
5252
|
#!/usr/bin/python
# Copyright (c) 2010-2014 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import sys
import time
totalWindows = 0
MAX_WINDOWS=500
def tcmd(cmd):
os.system("tmux %s" % cmd)
def splitWindow():
global totalWindows
global MAX_WINDOWS
if totalWindows < MAX_WINDOWS:
tcmd("split-window -d -h")
totalWindows += 1
def newWindow():
global totalWindows
global MAX_WINDOWS
if totalWindows < MAX_WINDOWS:
tcmd("new-window")
totalWindows += 1
def carvePanes(numPerWindow, layout):
for i in xrange(numPerWindow - 1):
splitWindow()
tcmd("select-layout %s" % layout)
tcmd("select-layout %s" % layout)
def sendCommand(cmd, pane = 0, ex = True):
time.sleep(0.1)
if ex:
tcmd("send-keys -t %d '%s ' Enter" % (pane,cmd))
else:
tcmd("send-keys -t %d '%s'" % (pane,cmd))
# Commands is a list of lists, where each list is a sequence of
# commands to give to particular window.
# executeBeforeAttach is a function that a client can pass in to be executed
# before the attach (assuming we are not inside a tmux already), because
# nothing can be run after the attach.
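# Illustrative call (hypothetical commands, not part of this script):
# create(2, [['cd /var/log', 'ls'], ['htop'], ['tail -f syslog']])
# carves two panes per window, runs the first two command lists in window 1 and
# opens a second window for the remaining list.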
def create(numPanesPerWindow, commands, layout = 'tiled', executeBeforeAttach = None):
# Defend against forkbombs
if not numPanesPerWindow > 0:
print "Forkbomb attempt detected!"
return
if numPanesPerWindow > 30:
print "Number per window must be less than 30!"
return
tmux = True
if not os.environ.get('TMUX'): # not already inside a tmux session, so create one
tcmd("new-session -d")
tmux = False
else:
newWindow()
panesNeeded = len(commands)
index = 0
while panesNeeded > 0:
carvePanes(numPanesPerWindow, layout)
panesNeeded -= numPanesPerWindow
# Send the commands in with CR
for i in xrange(min(numPanesPerWindow, len(commands))):
print i
for x in commands[i]:
sendCommand(x,i)
# Pop off the commands we just finished with
for i in xrange(min(numPanesPerWindow, len(commands))):
commands.pop(0)
# Create a new window if necessary
if panesNeeded > 0:
newWindow()
if executeBeforeAttach: executeBeforeAttach()
if not tmux:
tcmd("attach-session")
def startSession(file):
cmds = []
# default args in place
args = {"PANES_PER_WINDOW" : "4", "LAYOUT" : "tiled"}
cur_cmds = None
for line in file:
line = line.strip()
# comments
if line == '' or line.startswith("#"): continue
# Start a new pane specification
if line.startswith("---"):
if cur_cmds is not None:
cmds.append(cur_cmds)
cur_cmds = []
continue
# Configuration part
if cur_cmds == None:
try:
left,right = line.split('=',1)
args[left.strip()] = right.strip()
except:
print "Argument '%s' ignored" % line
print "Arguments must be in the form of key = value"
continue
else: # Actual session is being added to
cur_cmds.append(line.strip())
if cur_cmds:
cmds.append(cur_cmds)
# Start the sessions
create(int(args['PANES_PER_WINDOW']), cmds, args['LAYOUT'])
def usage():
doc_string = '''
smux.py <session_spec_file>
The format of session_spec_file consists of ini-style parameters followed by
lists of commands delimited by lines beginning with '---'.
Any line starting with a # is considered a comment and ignored.
Currently there are two supported parameters.
PANES_PER_WINDOW,
The number of panes that each window will be carved into
LAYOUT,
One of the five standard tmux layouts, given below.
even-horizontal, even-vertical, main-horizontal, main-vertical, tiled.
Sample Input File:
# This is a comment
PANES_PER_WINDOW = 4
LAYOUT = tiled
----------
echo 'This is pane 1'
cat /proc/cpuinfo | less
----------
echo 'This is pane 2'
cat /proc/meminfo
----------
echo 'This is pane 3'
uname -a
----------
echo "This is pane 4"
cat /etc/issue
----------
'''
print doc_string
sys.exit(1)
def main():
if len(sys.argv) < 2 or sys.argv[1] in ['--help', '-h','-?'] : usage()
try:
with open(sys.argv[1]) as f:
startSession(f)
except:
print >>sys.stderr, 'File "%s" does not exist.' % sys.argv[1]
sys.exit(2)
if __name__ == "__main__": main()
|
isc
| -7,239,412,484,352,983,000
| 27.085561
| 86
| 0.624334
| false
| 3.727466
| false
| false
| false
|
linuxrocks123/MailTask
|
mt_utils.py
|
1
|
13214
|
#! /usr/bin/env python
# MailTask Alpha: The Email Manager
# Copyright (C) 2015 Patrick Simmons
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import email
import email.parser
import email.utils
import sys
import time
#Dead-Simple CaseInsensitiveList class
class CaseInsensitiveList(list):
def index(self,key):
lowered_key = key.lower()
for i in range(len(self)):
if self[i].lower()==lowered_key:
return i
raise ValueError
def __contains__(self, key):
try:
self.index(key)
except ValueError:
return False
return True
##Stupidly simple method to turn a sequence type's .index() method into .find()
def find(seqobj, needle):
try:
return seqobj.index(needle)
except ValueError:
return -1
##Returns a date/time string appropriate for use in email main browser
def browser_time(tstr,formatstr="%m/%d/%Y %H:%M"):
tztmt = email.utils.parsedate_tz(tstr)
if tztmt!=None:
return time.strftime(formatstr,time.localtime(email.utils.mktime_tz(tztmt)))
else:
return time.strftime(formatstr,time.localtime(0))
##Given an X-MailTask-Date-Info string, return a 1-tuple of the epoch time deadline for a Deadline task, and a 2-tuple of the beginning epoch time and ending epoch time of a Meeting task.
def gtstfxmdis(dinfo_str):
dinfo = dinfo_str.split("/")
if len(dinfo)==1: #(Deadline)
return (email.utils.mktime_tz(email.utils.parsedate_tz(dinfo[0])),)
else: #len(dinfo)==2 (Meeting)
return (email.utils.mktime_tz(email.utils.parsedate_tz(dinfo[0])),
email.utils.mktime_tz(email.utils.parsedate_tz(dinfo[1])))
##Given an email header, find all instances of commas in nicknames and turn them into
# ASCII character Device Control 1 (0x11)
def decomma(tstr):
to_return=""
in_quotes=False
prev_char_backslash=False
for char in tstr:
if prev_char_backslash and not in_quotes:
if char==',':
to_return+='\x11'
else:
to_return+=char
prev_char_backslash=False
elif char=='\\':
prev_char_backslash=True
elif char=='"':
in_quotes = not in_quotes
elif in_quotes and char==',':
to_return+='\x11'
else:
to_return+=char
return to_return
def recomma(tstr):
return tstr.replace('\x11',',')
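# Illustrative example (hypothetical header value): in
# '"Doe, John" <jdoe@example.com>, "Roe, Jane" <jroe@example.com>'
# decomma() replaces the commas inside the quoted nicknames (and backslash-escaped
# commas) with \x11, so a later split(',') separates the two addresses correctly;
# recomma() puts the commas back.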
##Return the MIME Message-ID, or generate and return NONMIME ID from timestamp.
def get_message_id(msg,folder):
if "Message-ID" in msg:
return msg['Message-ID'].replace(' ','').replace('\t','').replace('\r','').replace('\n','')
else: #generate nonmime-id
sanitized_folder=folder.replace('/','-')
return "<NONMIME-"+base64.b32encode(repr(hash(msg.as_string())))+"@"+sanitized_folder+".mailtask"
##Generate a unique Message-ID for given message
def gen_message_id(msg,params):
messageID = ("<"+base64.b32encode(repr(hash(msg.as_string())))+"@"+base64.b32encode(repr(hash(msg["From"])))+repr(int(params[0]))+".mailtask"+">").replace("=","")
del msg["Message-ID"]
msg["Message-ID"]=messageID
##Get list of MIME IDs of related messages
def get_related_ids(msg):
return msg["References"].replace('\t',' ').replace('\r',' ').replace('\n',' ').replace(',',' ').split() if 'References' in msg else []
##Set "References" header to specified list of MIME IDs
def set_related_ids(msg,related_list):
del msg["References"]
msg["References"]=",".join(set(related_list))
##Encapsulates a message object into an RFC822 attachment
def rfc822_encapsulate(msg,filename=""):
lines = msg.as_string().splitlines()
for header in ("Content-Type","Content-Transfer-Encoding"):
splitpoint=-1
for i in range(len(lines)):
if lines[i]=="":
splitpoint=i
break
for i in range(splitpoint):
if lines[i].lower().find((header+": ").lower())==0:
lines.insert(splitpoint,lines[i])
del lines[i]
#Handle multi-line Content-Type/Content-Transfer-Encoding headers
while len(lines[i]) and lines[i][0] in (' ','\t'):
lines.insert(splitpoint,lines[i])
del lines[i]
break
for i in range(len(lines)):
if lines[i].lower().find("Content-Type: ".lower())==0:
lines.insert(i,"")
break
return email.parser.Parser().parsestr('Content-Type: message/rfc822'+('; name="'+filename+'"' if filename!="" else "")+'\n'+"\n".join(lines))
##Attaches a message object to the payload of another message object
# If the parent message object is not of multipart type, restructure
# the message such that its current payload is the first subpayload of
# the parent message, and change the parent payload's content type to
# multipart/mixed.
def attach_payload(parent,child):
#message/rfc822 encapsulation requires the payload's sole list element to be
#the target of the attachment instead of the encapsulated message
if parent.get_content_type()=="message/rfc822":
attach_payload(parent.get_payload()[0],child)
return
if 'X-MailTask-Virgin' in parent:
del parent['X-MailTask-Virgin']
if 'Content-Type' not in child:
child.set_type("text/plain")
if ('To' in child or 'Cc' in child or 'Bcc' in child or 'Message-ID' in child) and child.get_content_type()!="message/rfc822":
child = rfc822_encapsulate(child)
if isinstance(parent.get_payload(),str):
first_payload = email.message.Message()
first_payload['Content-Type']=parent['Content-Type']
first_payload['Content-Transfer-Encoding']=parent['Content-Transfer-Encoding']
if 'Content-Disposition' in parent:
first_payload['Content-Disposition']=parent['Content-Disposition']
first_payload.set_payload(parent.get_payload())
parent.set_type("multipart/mixed")
parent.set_payload([first_payload])
parent.attach(child)
##Take a message embedded in another message (such as a message of type
# multipart/x.MailTask) and delete the message/rfc822 header. Replace
# it with the message internal header. This is complicated by the fact
# that the message's internal header must be moved up to before the
# Message-ID header in order to be accepted.
# Precondition: message must already have Message-ID header
def unrfc822(message):
msgstr = message.as_string()
msg_parts = msgstr.split("\n")
del msg_parts[0]
insert_idx = -1
fields_to_move = set(["Content-Type","MIME-Version"])
for i in range(len(msg_parts)):
if msg_parts[i].lower().find("Message-ID".lower())==0 and insert_idx==-1:
insert_idx=i
move_this_line = False
for field in fields_to_move:
if msg_parts[i].lower().find(field.lower())==0:
move_this_line = True
fields_to_move.remove(field)
break
if move_this_line:
if insert_idx!=-1:
magic_str = msg_parts[i]
del msg_parts[i]
msg_parts.insert(insert_idx,magic_str)
else:
print "BUG: Content-Type before Message-ID in unrfc822"
return email.parser.Parser().parsestr("\n".join(msg_parts))
##Flatten a message according to RFC2822 by stupidly inserting newlines everywhere.
# Do the minimum necessary because this is asinine but Microsoft SMTP seems to require it.
# I DON'T CARE if it's the standard IT'S 2015 AND ARBITRARY LINE LENGTH LIMITS MAKE NO SENSE!
def rfc2822_flatten(mstring):
to_return=""
for line in mstring.split("\n"):
if len(line)<998:
to_return+=line+"\n"
else:
to_dispose = line
while len(to_dispose):
if len(to_dispose)<998:
to_return+=to_dispose+"\n"
to_dispose=""
else:
if to_dispose[:998].rfind("\n")!=-1:
split_idx = to_dispose[:998].rfind("\n")
else:
split_idx = 998
to_return+=to_dispose[:split_idx]+"\n"
to_dispose = to_dispose[split_idx:]
return to_return
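# Illustrative effect (numbers chosen for the example): a single 3000-character body
# line is emitted as four physical lines of 998, 998, 998 and 6 characters, while
# lines already shorter than 998 characters pass through unchanged.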
##Deletes the passed object from the payload of message object.
# Handles changing message content type from multipart to single-part if necessary
def delete_payload_component(parent,child):
if parent.get_content_type()=="message/rfc822":
delete_payload_component(parent.get_payload()[0],child)
return
payload = parent.get_payload()
del payload[payload.index(child)]
if len(payload)==1:
sole_component = payload[0]
parent.set_payload(sole_component.get_payload())
if 'Content-Type' in sole_component:
parent.replace_header('Content-Type',sole_component['Content-Type'])
else:
parent.set_type("text/plain")
if 'Content-Transfer-Encoding' in sole_component:
del parent['Content-Transfer-Encoding']
parent['Content-Transfer-Encoding']=sole_component['Content-Transfer-Encoding']
#Get best submessage from an email to use as a body. Return it
def get_body(msg):
#Internal method to rank content types of bodies
def rank_body(ctype):
TYPE_RANKING = ["text/plain","text/html","text/"]
for i in range(len(TYPE_RANKING)):
if ctype.get_content_type().find(TYPE_RANKING[i])==0:
return i
return len(TYPE_RANKING)
full_payload = msg.get_payload()
if isinstance(full_payload,str):
return msg
#Best body found so far
best_body = None
best_body_ranking = sys.maxint
#Check all direct payload subcomponents
for candidate in full_payload:
if 'Content-Type' in candidate and not ('Content-Disposition' in candidate and candidate['Content-Disposition'].lower().find("attachment")!=-1):
if rank_body(candidate) < best_body_ranking:
best_body = candidate
best_body_ranking = rank_body(candidate)
#Check if we have multipart/alternative subpart. Examine it if so.
for node in full_payload:
if 'Content-Type' in node and node.get_content_type().find("multipart/")==0:
subpayload = node.get_payload()
for candidate in subpayload:
if 'Content-Type' in candidate and not ('Content-Disposition' in candidate and candidate['Content-Disposition'].find("attachment")!=-1):
if rank_body(candidate) < best_body_ranking:
best_body = candidate
best_body_ranking = rank_body(candidate)
return best_body
##Returns string representing which type of task we are
def get_task_type(task):
if 'X-MailTask-Date-Info' not in task:
return "Checklist"
elif task['X-MailTask-Date-Info'].find("/")==-1:
return "Deadline"
else:
return "Meeting"
#Search message cache for specific MIDs
def search_cache(mid,cache):
for record in cache:
rdict = record[1]
if 'Message-ID' in rdict and get_message_id(rdict,None)==mid:
return record
return None
##Walk the body of a message and process each submessage
def walk_attachments(submsg,process_single_submsg,force_decomp=False):
if not isinstance(submsg.get_payload(),str) and (force_decomp or submsg.get_content_type().find("multipart/")==0):
for component in submsg.get_payload():
if component.get_content_type().find("multipart/")==0:
for subsubmsg in component.get_payload():
walk_attachments(subsubmsg,process_single_submsg)
else:
process_single_submsg(component)
else:
process_single_submsg(submsg)
##Gets MIME type of file
# Uses magic if available, otherwise mimetypes
try:
import magic
has_magic=True
except ImportError:
has_magic=False
import mimetypes
mimetypes.init()
def get_mime_type(fname):
if has_magic:
return magic.from_file(fname,mime=True)
else:
if fname.find(".")!=-1:
simple_suffix=fname.rsplit(".", 1)[1]
simple_name=fname.split(".")[0]+"."+simple_suffix
else:
simple_name=fname
to_return = mimetypes.guess_type(simple_name,strict=False)[0]
if to_return==None:
to_return = "application/octet-stream"
return to_return
|
gpl-3.0
| 5,728,171,730,024,221,000
| 38.681682
| 187
| 0.629484
| false
| 3.814665
| false
| false
| false
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/util/stl2.py
|
1
|
2253
|
from __future__ import absolute_import
import sys
import os
import struct
import time
from Cura.util import mesh2
class stlModel(mesh2.mesh):
def __init__(self):
super(stlModel, self).__init__()
def load(self, filename):
f = open(filename, "rb")
if f.read(5).lower() == "solid":
self._loadAscii(f)
if self.vertexCount < 3:
f.seek(5, os.SEEK_SET)
self._loadBinary(f)
else:
self._loadBinary(f)
f.close()
self._postProcessAfterLoad()
return self
def _loadAscii(self, f):
cnt = 0
for lines in f:
for line in lines.split('\r'):
if 'vertex' in line:
cnt += 1
self._prepareVertexCount(int(cnt))
f.seek(5, os.SEEK_SET)
cnt = 0
for lines in f:
for line in lines.split('\r'):
if 'vertex' in line:
data = line.split()
self.addVertex(float(data[1]), float(data[2]), float(data[3]))
def _loadBinary(self, f):
#Skip the header
f.read(80-5)
faceCount = struct.unpack('<I', f.read(4))[0]
self._prepareVertexCount(faceCount * 3)
for idx in xrange(0, faceCount):
data = struct.unpack("<ffffffffffffH", f.read(50))
self.addVertex(data[3], data[4], data[5])
self.addVertex(data[6], data[7], data[8])
self.addVertex(data[9], data[10], data[11])
def saveAsSTL(mesh, filename):
f = open(filename, 'wb')
#Write the STL binary header. This can contain any info, except for "SOLID" at the start.
f.write(("CURA BINARY STL EXPORT. " + time.strftime('%a %d %b %Y %H:%M:%S')).ljust(80, '\000'))
#Next follow 4 binary bytes containing the amount of faces, and then the face information.
f.write(struct.pack("<I", int(mesh.vertexCount / 3)))
for idx in xrange(0, mesh.vertexCount, 3):
v1 = mesh.origonalVertexes[idx]
v2 = mesh.origonalVertexes[idx+1]
v3 = mesh.origonalVertexes[idx+2]
f.write(struct.pack("<fff", 0.0, 0.0, 0.0))
f.write(struct.pack("<fff", v1[0], v1[1], v1[2]))
f.write(struct.pack("<fff", v2[0], v2[1], v2[2]))
f.write(struct.pack("<fff", v3[0], v3[1], v3[2]))
f.write(struct.pack("<H", 0))
f.close()
if __name__ == '__main__':
for filename in sys.argv[1:]:
m = stlModel().load(filename)
print("Loaded %d faces" % (m.vertexCount / 3))
parts = m.splitToParts()
for p in parts:
saveAsSTL(p, "export_%i.stl" % parts.index(p))
|
agpl-3.0
| -5,708,727,742,411,427,000
| 28.25974
| 96
| 0.637372
| false
| 2.595622
| false
| false
| false
|
meain/bridge
|
docs/report/urls.py
|
1
|
1263
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name="index"),
url(r'^signin/$', views.signin, name='signin'),
url(r'^create_user/(?P<user_id>[0-9]+)/(?P<user_class>([a-z])+)$',
views.create_new_user, name='new_user'),
url(r'^timetable/(?P<user_id>[0-9]+)$', \
views.get_timetable, name='timetable'),
url(r'^notes/(?P<user_id>[0-9]+)$', \
views.get_notes, name='notes'),
url(r'^subject_data/(?P<user_id>[0-9]+)$', \
views.get_sub_data, name='subject_data'),
url(r'^events/(?P<user_id>[0-9]+)$', \
views.get_events_dummy, name='events'),
url(r'^track_data/$', \
views.get_track_data, name='events'),
url(r'^calendar/(?P<user_id>[0-9]+)$', \
views.get_cal_data_dummy, name='events'),
url(r'^subject_attendence/(?P<user_id>[0-9]+)$', \
views.get_attendence, name='get_attendence'),
url(r'^create_user/$', \
views.create_new_user, name='new_user'),
url(r'^update_attendence/$', \
views.update_attendence,\
name='update_attendence'),
url(r'^set_track_data/$', \
views.set_track_data, name='set_track_data'),
]
|
mit
| 7,931,066,556,517,879,000
| 39.741935
| 59
| 0.53365
| false
| 3.08802
| false
| true
| false
|
GoogleCloudPlatform/PerfKitBenchmarker
|
tests/linux_packages/pip_test.py
|
1
|
2020
|
"""Tests for pip."""
from typing import Dict, List
import unittest
from absl.testing import parameterized
import mock
from perfkitbenchmarker.linux_packages import pip
from perfkitbenchmarker.linux_packages import python
from tests import pkb_common_test_case
# executed remote commands
NEED_PIP_27 = [
'curl https://bootstrap.pypa.io/pip/2.7/get-pip.py | sudo python -',
'pip --version',
'mkdir -p /opt/pkb && pip freeze | tee /opt/pkb/requirements.txt'
]
NEED_PIP_38 = [
'curl https://bootstrap.pypa.io/pip/get-pip.py | sudo python3 -',
'pip3 --version',
'mkdir -p /opt/pkb && pip3 freeze | tee /opt/pkb/requirements.txt'
]
EXISTING_PIP_27 = [
'echo \'exec python -m pip "$@"\'| sudo tee /usr/bin/pip && '
'sudo chmod 755 /usr/bin/pip',
'pip --version',
'mkdir -p /opt/pkb && pip freeze | tee /opt/pkb/requirements.txt',
]
EXISTING_PIP_38 = [
'echo \'exec python3 -m pip "$@"\'| sudo tee /usr/bin/pip3 && '
'sudo chmod 755 /usr/bin/pip3',
'pip3 --version',
'mkdir -p /opt/pkb && pip3 freeze | tee /opt/pkb/requirements.txt',
]
PYTHON_38_KWARGS = {'pip_cmd': 'pip3', 'python_cmd': 'python3'}
class PipTest(pkb_common_test_case.PkbCommonTestCase):
@parameterized.named_parameters(
('need_pip_27', False, '2.7', NEED_PIP_27, {}),
('need_pip_38', False, '3.8', NEED_PIP_38, PYTHON_38_KWARGS),
('existing_pip_27', True, '2.7', EXISTING_PIP_27, {}),
('existing_pip_38', True, '3.8', EXISTING_PIP_38, PYTHON_38_KWARGS),
)
def testInstall(self, need_pip: bool, python_version: str,
expected_commands: List[str], install_kwargs: Dict[str, str]):
self.enter_context(
mock.patch.object(
python, 'GetPythonVersion', return_value=python_version))
vm = mock.Mock()
vm.TryRemoteCommand.return_value = need_pip
pip.Install(vm, **install_kwargs)
vm.RemoteCommand.assert_has_calls(
[mock.call(cmd) for cmd in expected_commands])
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -1,458,470,186,040,201,200
| 32.114754
| 80
| 0.640099
| false
| 2.927536
| true
| false
| false
|
berkmancenter/mediacloud
|
apps/common/src/python/mediawords/key_value_store/multiple_stores.py
|
1
|
7164
|
from typing import List, Union
from mediawords.db import DatabaseHandler
from mediawords.key_value_store import KeyValueStore, McKeyValueStoreException
from mediawords.util.perl import decode_object_from_bytes_if_needed
class McMultipleStoresStoreException(McKeyValueStoreException):
"""Multiple stores exception."""
pass
class MultipleStoresStore(KeyValueStore):
"""Key-value store that reads from / writes to multiple stores."""
__slots__ = [
'__stores_for_reading',
'__stores_for_writing',
]
def __init__(self,
stores_for_reading: List[KeyValueStore] = None,
stores_for_writing: List[KeyValueStore] = None):
"""Constructor."""
if stores_for_reading is None:
stores_for_reading = []
if stores_for_writing is None:
stores_for_writing = []
if len(stores_for_reading) + len(stores_for_writing) == 0:
raise McMultipleStoresStoreException("At least one store for reading / writing should be present.")
self.__stores_for_reading = stores_for_reading
self.__stores_for_writing = stores_for_writing
def stores_for_reading(self) -> list:
"""Return list of stores for reading."""
return self.__stores_for_reading
def stores_for_writing(self) -> list:
"""Return list of stores for writing."""
return self.__stores_for_writing
def fetch_content(self, db: DatabaseHandler, object_id: int, object_path: str = None) -> bytes:
"""Fetch content from any of the stores that might have it; raise if none of them do."""
object_id = self._prepare_object_id(object_id)
object_path = decode_object_from_bytes_if_needed(object_path)
if len(self.__stores_for_reading) == 0:
raise McMultipleStoresStoreException("List of stores for reading object ID %d is empty." % object_id)
errors = []
content = None
for store in self.__stores_for_reading:
try:
# MC_REWRITE_TO_PYTHON: use named parameters after Python rewrite
content = store.fetch_content(db, object_id, object_path)
if content is None:
raise McMultipleStoresStoreException("Fetching object ID %d from store %s succeeded, "
"but the returned content is undefined." % (
object_id, str(store),
))
except Exception as ex:
# Silently skip through errors and die() only if content wasn't found anywhere
errors.append("Error fetching object ID %(object_id)d from store %(store)s: %(exception)s" % {
'object_id': object_id,
'store': store,
'exception': str(ex),
})
else:
break
if content is None:
raise McMultipleStoresStoreException(
"All stores failed while fetching object ID %(object_id)d; errors: %(errors)s" % {
'object_id': object_id,
'errors': "\n".join(errors),
}
)
return content
def store_content(self,
db: DatabaseHandler,
object_id: int,
content: Union[str, bytes],
content_type: str='binary/octet-stream') -> str:
"""Store content to all stores; raise if one of them fails."""
object_id = self._prepare_object_id(object_id)
content = self._prepare_content(content)
if len(self.__stores_for_writing) == 0:
raise McMultipleStoresStoreException("List of stores for writing object ID %d is empty." % object_id)
last_store_path = None
for store in self.__stores_for_writing:
try:
# MC_REWRITE_TO_PYTHON: use named parameters after Python rewrite
last_store_path = store.store_content(db, object_id, content)
if last_store_path is None:
raise McMultipleStoresStoreException(
"Storing object ID %d to %s succeeded, but the returned path is empty." % (object_id, store,)
)
except Exception as ex:
raise McMultipleStoresStoreException(
"Error while saving object ID %(object_id)d to store %(store)s: %(exception)s" % {
'object_id': object_id,
'store': str(store),
'exception': str(ex)
}
)
if last_store_path is None:
raise McMultipleStoresStoreException(
"Storing object ID %d to all stores succeeded, but the returned path is empty." % object_id
)
return last_store_path
def remove_content(self, db: DatabaseHandler, object_id: int, object_path: str = None) -> None:
"""Remove content from all stores; raise if one of them fails."""
object_id = self._prepare_object_id(object_id)
object_path = decode_object_from_bytes_if_needed(object_path)
if len(self.__stores_for_writing) == 0:
raise McMultipleStoresStoreException("List of stores for writing object ID %d is empty." % object_id)
for store in self.__stores_for_writing:
try:
# MC_REWRITE_TO_PYTHON: use named parameters after Python rewrite
store.remove_content(db, object_id, object_path)
except Exception as ex:
raise McMultipleStoresStoreException(
"Error while removing object ID %(object_id)d from store %(store)s: %(exception)s" % {
'object_id': object_id,
'store': str(store),
'exception': str(ex)
}
)
def content_exists(self, db: DatabaseHandler, object_id: int, object_path: str = None) -> bool:
"""Test if content in at least one of the stores."""
object_id = self._prepare_object_id(object_id)
object_path = decode_object_from_bytes_if_needed(object_path)
if len(self.__stores_for_reading) == 0:
raise McMultipleStoresStoreException("List of stores for reading object ID %d is empty." % object_id)
for store in self.__stores_for_reading:
try:
# MC_REWRITE_TO_PYTHON: use named parameters after Python rewrite
exists = store.content_exists(db, object_id, object_path)
except Exception as ex:
raise McMultipleStoresStoreException(
"Error while testing whether object ID %(object_id)d exists in store %(store)s: %(exception)s" % {
'object_id': object_id,
'store': store,
'exception': str(ex),
})
else:
if exists:
return True
return False
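# Illustrative wiring (hypothetical store instances; the concrete KeyValueStore
# implementations live elsewhere in mediawords.key_value_store):
#
# store = MultipleStoresStore(
# stores_for_reading=[s3_store, postgresql_store],
# stores_for_writing=[postgresql_store],
# )
# path = store.store_content(db, object_id=123, content=b'...')
# raw = store.fetch_content(db, object_id=123)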
|
agpl-3.0
| -3,426,115,544,674,871,000
| 37.934783
| 118
| 0.556253
| false
| 4.485911
| false
| false
| false
|
artdavis/pyfred
|
pyfred/webcolors-1.5/webcolors.py
|
1
|
27031
|
"""
Utility functions for working with the color names and color value
formats defined by the HTML and CSS specifications for use in
documents on the Web.
See documentation (in docs/ directory of source distribution) for
details of the supported formats and conversions.
"""
import math
import re
import string
import struct
# Python 2's unichr() is Python 3's chr().
try:
unichr
except NameError:
unichr = chr
# Python 2's unicode is Python 3's str.
try:
unicode
except NameError:
unicode = str
def _reversedict(d):
"""
Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped.
"""
return dict(zip(d.values(), d.keys()))
HEX_COLOR_RE = re.compile(r'^#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$')
SUPPORTED_SPECIFICATIONS = (u'html4', u'css2', u'css21', u'css3')
SPECIFICATION_ERROR_TEMPLATE = u"'%%s' is not a supported specification for color name lookups; \
supported specifications are: %s." % (u', '.join(SUPPORTED_SPECIFICATIONS))
# Mappings of color names to normalized hexadecimal color values.
#################################################################
# The HTML 4 named colors.
#
# The canonical source for these color definitions is the HTML 4
# specification:
#
# http://www.w3.org/TR/html401/types.html#h-6.5
#
# The file tests/definitions.py in the source distribution of this
# module downloads a copy of the HTML 4 standard and parses out the
# color names to ensure the values below are correct.
HTML4_NAMES_TO_HEX = {
u'aqua': u'#00ffff',
u'black': u'#000000',
u'blue': u'#0000ff',
u'fuchsia': u'#ff00ff',
u'green': u'#008000',
u'gray': u'#808080',
u'lime': u'#00ff00',
u'maroon': u'#800000',
u'navy': u'#000080',
u'olive': u'#808000',
u'purple': u'#800080',
u'red': u'#ff0000',
u'silver': u'#c0c0c0',
u'teal': u'#008080',
u'white': u'#ffffff',
u'yellow': u'#ffff00',
}
# CSS 2 used the same list as HTML 4.
CSS2_NAMES_TO_HEX = HTML4_NAMES_TO_HEX
# CSS 2.1 added orange.
CSS21_NAMES_TO_HEX = dict(HTML4_NAMES_TO_HEX, orange=u'#ffa500')
# The CSS 3/SVG named colors.
#
# The canonical source for these color definitions is the SVG
# specification's color list (which was adopted as CSS 3's color
# definition):
#
# http://www.w3.org/TR/SVG11/types.html#ColorKeywords
#
# CSS 3 also provides definitions of these colors:
#
# http://www.w3.org/TR/css3-color/#svg-color
#
# SVG provides the definitions as RGB triplets. CSS 3 provides them
# both as RGB triplets and as hexadecimal. Since hex values are more
# common in real-world HTML and CSS, the mapping below is to hex
# values instead. The file tests/definitions.py in the source
# distribution of this module downloads a copy of the CSS 3 color
# module and parses out the color names to ensure the values below are
# correct.
CSS3_NAMES_TO_HEX = {
u'aliceblue': u'#f0f8ff',
u'antiquewhite': u'#faebd7',
u'aqua': u'#00ffff',
u'aquamarine': u'#7fffd4',
u'azure': u'#f0ffff',
u'beige': u'#f5f5dc',
u'bisque': u'#ffe4c4',
u'black': u'#000000',
u'blanchedalmond': u'#ffebcd',
u'blue': u'#0000ff',
u'blueviolet': u'#8a2be2',
u'brown': u'#a52a2a',
u'burlywood': u'#deb887',
u'cadetblue': u'#5f9ea0',
u'chartreuse': u'#7fff00',
u'chocolate': u'#d2691e',
u'coral': u'#ff7f50',
u'cornflowerblue': u'#6495ed',
u'cornsilk': u'#fff8dc',
u'crimson': u'#dc143c',
u'cyan': u'#00ffff',
u'darkblue': u'#00008b',
u'darkcyan': u'#008b8b',
u'darkgoldenrod': u'#b8860b',
u'darkgray': u'#a9a9a9',
u'darkgrey': u'#a9a9a9',
u'darkgreen': u'#006400',
u'darkkhaki': u'#bdb76b',
u'darkmagenta': u'#8b008b',
u'darkolivegreen': u'#556b2f',
u'darkorange': u'#ff8c00',
u'darkorchid': u'#9932cc',
u'darkred': u'#8b0000',
u'darksalmon': u'#e9967a',
u'darkseagreen': u'#8fbc8f',
u'darkslateblue': u'#483d8b',
u'darkslategray': u'#2f4f4f',
u'darkslategrey': u'#2f4f4f',
u'darkturquoise': u'#00ced1',
u'darkviolet': u'#9400d3',
u'deeppink': u'#ff1493',
u'deepskyblue': u'#00bfff',
u'dimgray': u'#696969',
u'dimgrey': u'#696969',
u'dodgerblue': u'#1e90ff',
u'firebrick': u'#b22222',
u'floralwhite': u'#fffaf0',
u'forestgreen': u'#228b22',
u'fuchsia': u'#ff00ff',
u'gainsboro': u'#dcdcdc',
u'ghostwhite': u'#f8f8ff',
u'gold': u'#ffd700',
u'goldenrod': u'#daa520',
u'gray': u'#808080',
u'grey': u'#808080',
u'green': u'#008000',
u'greenyellow': u'#adff2f',
u'honeydew': u'#f0fff0',
u'hotpink': u'#ff69b4',
u'indianred': u'#cd5c5c',
u'indigo': u'#4b0082',
u'ivory': u'#fffff0',
u'khaki': u'#f0e68c',
u'lavender': u'#e6e6fa',
u'lavenderblush': u'#fff0f5',
u'lawngreen': u'#7cfc00',
u'lemonchiffon': u'#fffacd',
u'lightblue': u'#add8e6',
u'lightcoral': u'#f08080',
u'lightcyan': u'#e0ffff',
u'lightgoldenrodyellow': u'#fafad2',
u'lightgray': u'#d3d3d3',
u'lightgrey': u'#d3d3d3',
u'lightgreen': u'#90ee90',
u'lightpink': u'#ffb6c1',
u'lightsalmon': u'#ffa07a',
u'lightseagreen': u'#20b2aa',
u'lightskyblue': u'#87cefa',
u'lightslategray': u'#778899',
u'lightslategrey': u'#778899',
u'lightsteelblue': u'#b0c4de',
u'lightyellow': u'#ffffe0',
u'lime': u'#00ff00',
u'limegreen': u'#32cd32',
u'linen': u'#faf0e6',
u'magenta': u'#ff00ff',
u'maroon': u'#800000',
u'mediumaquamarine': u'#66cdaa',
u'mediumblue': u'#0000cd',
u'mediumorchid': u'#ba55d3',
u'mediumpurple': u'#9370db',
u'mediumseagreen': u'#3cb371',
u'mediumslateblue': u'#7b68ee',
u'mediumspringgreen': u'#00fa9a',
u'mediumturquoise': u'#48d1cc',
u'mediumvioletred': u'#c71585',
u'midnightblue': u'#191970',
u'mintcream': u'#f5fffa',
u'mistyrose': u'#ffe4e1',
u'moccasin': u'#ffe4b5',
u'navajowhite': u'#ffdead',
u'navy': u'#000080',
u'oldlace': u'#fdf5e6',
u'olive': u'#808000',
u'olivedrab': u'#6b8e23',
u'orange': u'#ffa500',
u'orangered': u'#ff4500',
u'orchid': u'#da70d6',
u'palegoldenrod': u'#eee8aa',
u'palegreen': u'#98fb98',
u'paleturquoise': u'#afeeee',
u'palevioletred': u'#db7093',
u'papayawhip': u'#ffefd5',
u'peachpuff': u'#ffdab9',
u'peru': u'#cd853f',
u'pink': u'#ffc0cb',
u'plum': u'#dda0dd',
u'powderblue': u'#b0e0e6',
u'purple': u'#800080',
u'red': u'#ff0000',
u'rosybrown': u'#bc8f8f',
u'royalblue': u'#4169e1',
u'saddlebrown': u'#8b4513',
u'salmon': u'#fa8072',
u'sandybrown': u'#f4a460',
u'seagreen': u'#2e8b57',
u'seashell': u'#fff5ee',
u'sienna': u'#a0522d',
u'silver': u'#c0c0c0',
u'skyblue': u'#87ceeb',
u'slateblue': u'#6a5acd',
u'slategray': u'#708090',
u'slategrey': u'#708090',
u'snow': u'#fffafa',
u'springgreen': u'#00ff7f',
u'steelblue': u'#4682b4',
u'tan': u'#d2b48c',
u'teal': u'#008080',
u'thistle': u'#d8bfd8',
u'tomato': u'#ff6347',
u'turquoise': u'#40e0d0',
u'violet': u'#ee82ee',
u'wheat': u'#f5deb3',
u'white': u'#ffffff',
u'whitesmoke': u'#f5f5f5',
u'yellow': u'#ffff00',
u'yellowgreen': u'#9acd32',
}
# Mappings of normalized hexadecimal color values to color names.
#################################################################
HTML4_HEX_TO_NAMES = _reversedict(HTML4_NAMES_TO_HEX)
CSS2_HEX_TO_NAMES = HTML4_HEX_TO_NAMES
CSS21_HEX_TO_NAMES = _reversedict(CSS21_NAMES_TO_HEX)
CSS3_HEX_TO_NAMES = _reversedict(CSS3_NAMES_TO_HEX)
# Aliases of the above mappings, for backwards compatibility.
#################################################################
(html4_names_to_hex,
css2_names_to_hex,
css21_names_to_hex,
css3_names_to_hex) = (HTML4_NAMES_TO_HEX,
CSS2_NAMES_TO_HEX,
CSS21_NAMES_TO_HEX,
CSS3_NAMES_TO_HEX)
(html4_hex_to_names,
css2_hex_to_names,
css21_hex_to_names,
css3_hex_to_names) = (HTML4_HEX_TO_NAMES,
CSS2_HEX_TO_NAMES,
CSS21_HEX_TO_NAMES,
CSS3_HEX_TO_NAMES)
# Normalization functions.
#################################################################
def normalize_hex(hex_value):
"""
Normalize a hexadecimal color value to 6 digits, lowercase.
"""
match = HEX_COLOR_RE.match(hex_value)
if match is None:
raise ValueError(
u"'%s' is not a valid hexadecimal color value." % hex_value
)
hex_digits = match.group(1)
if len(hex_digits) == 3:
hex_digits = u''.join(2 * s for s in hex_digits)
return u'#%s' % hex_digits.lower()
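# A couple of concrete values to illustrate the normalization above:
# normalize_hex(u'#0099CC') -> u'#0099cc'
# normalize_hex(u'#09c') -> u'#0099cc' (shorthand digits are doubled)
# anything not matching HEX_COLOR_RE raises ValueError.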
def _normalize_integer_rgb(value):
"""
Internal normalization function for clipping integer values into
the permitted range (0-255, inclusive).
"""
return 0 if value < 0 \
else 255 if value > 255 \
else value
def normalize_integer_triplet(rgb_triplet):
"""
Normalize an integer ``rgb()`` triplet so that all values are
within the range 0-255 inclusive.
"""
return tuple(_normalize_integer_rgb(value) for value in rgb_triplet)
def _normalize_percent_rgb(value):
"""
Internal normalization function for clipping percent values into
the permitted range (0%-100%, inclusive).
"""
percent = value.split(u'%')[0]
percent = float(percent) if u'.' in percent else int(percent)
return u'0%' if percent < 0 \
else u'100%' if percent > 100 \
else u'%s%%' % percent
def normalize_percent_triplet(rgb_triplet):
"""
Normalize a percentage ``rgb()`` triplet so that all values are
within the range 0%-100% inclusive.
"""
return tuple(_normalize_percent_rgb(value) for value in rgb_triplet)
# Conversions from color names to various formats.
#################################################################
def name_to_hex(name, spec=u'css3'):
"""
Convert a color name to a normalized hexadecimal color value.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
When no color of that name exists in the given specification,
``ValueError`` is raised.
"""
if spec not in SUPPORTED_SPECIFICATIONS:
raise ValueError(SPECIFICATION_ERROR_TEMPLATE % spec)
normalized = name.lower()
hex_value = {u'css2': CSS2_NAMES_TO_HEX,
u'css21': CSS21_NAMES_TO_HEX,
u'css3': CSS3_NAMES_TO_HEX,
u'html4': HTML4_NAMES_TO_HEX}[spec].get(normalized)
if hex_value is None:
raise ValueError(
u"'%s' is not defined as a named color in %s." % (name, spec)
)
return hex_value
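# For example, name_to_hex(u'navy') returns u'#000080', while
# name_to_hex(u'goldenrod', spec=u'html4') raises ValueError because HTML 4
# does not define that name.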
def name_to_rgb(name, spec=u'css3'):
"""
Convert a color name to a 3-tuple of integers suitable for use in
an ``rgb()`` triplet specifying that color.
"""
return hex_to_rgb(name_to_hex(name, spec=spec))
def name_to_rgb_percent(name, spec=u'css3'):
"""
Convert a color name to a 3-tuple of percentages suitable for use
in an ``rgb()`` triplet specifying that color.
"""
return rgb_to_rgb_percent(name_to_rgb(name, spec=spec))
# Conversions from hexadecimal color values to various formats.
#################################################################
def hex_to_name(hex_value, spec=u'css3'):
"""
Convert a hexadecimal color value to its corresponding normalized
color name, if any such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
When no color name for the value is found in the given
specification, ``ValueError`` is raised.
"""
if spec not in SUPPORTED_SPECIFICATIONS:
raise ValueError(SPECIFICATION_ERROR_TEMPLATE % spec)
normalized = normalize_hex(hex_value)
name = {u'css2': CSS2_HEX_TO_NAMES,
u'css21': CSS21_HEX_TO_NAMES,
u'css3': CSS3_HEX_TO_NAMES,
u'html4': HTML4_HEX_TO_NAMES}[spec].get(normalized)
if name is None:
raise ValueError(
u"'%s' has no defined color name in %s." % (hex_value, spec)
)
return name
def hex_to_rgb(hex_value):
"""
Convert a hexadecimal color value to a 3-tuple of integers
suitable for use in an ``rgb()`` triplet specifying that color.
"""
hex_value = normalize_hex(hex_value)
hex_value = int(hex_value[1:], 16)
return (hex_value >> 16,
hex_value >> 8 & 0xff,
hex_value & 0xff)
def hex_to_rgb_percent(hex_value):
"""
Convert a hexadecimal color value to a 3-tuple of percentages
suitable for use in an ``rgb()`` triplet representing that color.
"""
return rgb_to_rgb_percent(hex_to_rgb(hex_value))
# Conversions from integer rgb() triplets to various formats.
#################################################################
def rgb_to_name(rgb_triplet, spec=u'css3'):
"""
Convert a 3-tuple of integers, suitable for use in an ``rgb()``
color triplet, to its corresponding normalized color name, if any
such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
If there is no matching name, ``ValueError`` is raised.
"""
return hex_to_name(
rgb_to_hex(
normalize_integer_triplet(rgb_triplet)),
spec=spec)
def rgb_to_hex(rgb_triplet):
"""
Convert a 3-tuple of integers, suitable for use in an ``rgb()``
color triplet, to a normalized hexadecimal value for that color.
"""
return u'#%02x%02x%02x' % normalize_integer_triplet(rgb_triplet)
def rgb_to_rgb_percent(rgb_triplet):
"""
Convert a 3-tuple of integers, suitable for use in an ``rgb()``
color triplet, to a 3-tuple of percentages suitable for use in
representing that color.
This function makes some trade-offs in terms of the accuracy of
the final representation; for some common integer values,
special-case logic is used to ensure a precise result (e.g.,
integer 128 will always convert to '50%', integer 32 will always
convert to '12.5%'), but for all other values a standard Python
``float`` is used and rounded to two decimal places, which may
result in a loss of precision for some values.
"""
# In order to maintain precision for common values,
# special-case them.
specials = {255: u'100%', 128: u'50%', 64: u'25%',
32: u'12.5%', 16: u'6.25%', 0: u'0%'}
return tuple(specials.get(d, u'%.02f%%' % (d / 255.0 * 100))
for d in normalize_integer_triplet(rgb_triplet))
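# For example, rgb_to_rgb_percent((255, 128, 0)) returns (u'100%', u'50%', u'0%')
# via the special-case table, while a value such as 200 falls back to rounding:
# 200 / 255 * 100 -> u'78.43%'.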
# Conversions from percentage rgb() triplets to various formats.
#################################################################
def rgb_percent_to_name(rgb_percent_triplet, spec=u'css3'):
"""
Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
color triplet, to its corresponding normalized color name, if any
such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
If there is no matching name, ``ValueError`` is raised.
"""
return rgb_to_name(
rgb_percent_to_rgb(
normalize_percent_triplet(
rgb_percent_triplet)),
spec=spec)
def rgb_percent_to_hex(rgb_percent_triplet):
"""
Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
color triplet, to a normalized hexadecimal color value for that
color.
"""
return rgb_to_hex(
rgb_percent_to_rgb(
normalize_percent_triplet(rgb_percent_triplet)))
def _percent_to_integer(percent):
"""
Internal helper for converting a percentage value to an integer
between 0 and 255 inclusive.
"""
num = float(percent.split(u'%')[0]) / 100 * 255
e = num - math.floor(num)
return e < 0.5 and int(math.floor(num)) or int(math.ceil(num))
def rgb_percent_to_rgb(rgb_percent_triplet):
"""
Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
color triplet, to a 3-tuple of integers suitable for use in
representing that color.
Some precision may be lost in this conversion. See the note
regarding precision for ``rgb_to_rgb_percent()`` for details.
"""
return tuple(map(_percent_to_integer,
normalize_percent_triplet(rgb_percent_triplet)))
# HTML5 color algorithms.
#################################################################
# These functions are written in a way that may seem strange to
# developers familiar with Python, because they do not use the most
# efficient or idiomatic way of accomplishing their tasks. This is
# because, for compliance, these functions are written as literal
# translations into Python of the algorithms in HTML5.
#
# For ease of understanding, the relevant steps of the algorithm from
# the standard are included as comments interspersed in the
# implementation.
def html5_parse_simple_color(input):
"""
Apply the simple color parsing algorithm from section 2.4.6 of
HTML5.
"""
# 1. Let input be the string being parsed.
#
# 2. If input is not exactly seven characters long, then return an
# error.
if not isinstance(input, unicode) or len(input) != 7:
raise ValueError(
u"An HTML5 simple color must be a Unicode string "
u"exactly seven characters long."
)
# 3. If the first character in input is not a U+0023 NUMBER SIGN
# character (#), then return an error.
if not input.startswith('#'):
raise ValueError(
u"An HTML5 simple color must begin with the "
u"character '#' (U+0023)."
)
# 4. If the last six characters of input are not all ASCII hex
# digits, then return an error.
if not all(c in string.hexdigits for c in input[1:]):
raise ValueError(
u"An HTML5 simple color must contain exactly six ASCII hex digits."
)
# 5. Let result be a simple color.
#
# 6. Interpret the second and third characters as a hexadecimal
# number and let the result be the red component of result.
#
# 7. Interpret the fourth and fifth characters as a hexadecimal
# number and let the result be the green component of result.
#
# 8. Interpret the sixth and seventh characters as a hexadecimal
# number and let the result be the blue component of result.
#
# 9. Return result.
result = (int(input[1:3], 16),
int(input[3:5], 16),
int(input[5:7], 16))
return result
def html5_serialize_simple_color(simple_color):
"""
Apply the serialization algorithm for a simple color from section
2.4.6 of HTML5.
"""
red, green, blue = simple_color
# 1. Let result be a string consisting of a single "#" (U+0023)
# character.
result = u'#'
# 2. Convert the red, green, and blue components in turn to
# two-digit hexadecimal numbers using lowercase ASCII hex
# digits, zero-padding if necessary, and append these numbers
# to result, in the order red, green, blue.
result += (u"%02x" % red).lower()
result += (u"%02x" % green).lower()
result += (u"%02x" % blue).lower()
# 3. Return result, which will be a valid lowercase simple color.
return result
def html5_parse_legacy_color(input):
"""
Apply the legacy color parsing algorithm from section 2.4.6 of
HTML5.
"""
# 1. Let input be the string being parsed.
if not isinstance(input, unicode):
raise ValueError(
u"HTML5 legacy color parsing requires a Unicode string as input."
)
# 2. If input is the empty string, then return an error.
if input == "":
raise ValueError(
u"HTML5 legacy color parsing forbids empty string as a value."
)
# 3. Strip leading and trailing whitespace from input.
input = input.strip()
# 4. If input is an ASCII case-insensitive match for the string
# "transparent", then return an error.
if input.lower() == u"transparent":
raise ValueError(
u'HTML5 legacy color parsing forbids "transparent" as a value.'
)
# 5. If input is an ASCII case-insensitive match for one of the
# keywords listed in the SVG color keywords section of the CSS3
# Color specification, then return the simple color
# corresponding to that keyword.
keyword_hex = CSS3_NAMES_TO_HEX.get(input.lower())
if keyword_hex is not None:
return html5_parse_simple_color(keyword_hex)
# 6. If input is four characters long, and the first character in
# input is a "#" (U+0023) character, and the last three
# characters of input are all ASCII hex digits, then run these
# substeps:
if len(input) == 4 and \
input.startswith(u'#') and \
all(c in string.hexdigits for c in input[1:]):
# 1. Let result be a simple color.
#
# 2. Interpret the second character of input as a hexadecimal
# digit; let the red component of result be the resulting
# number multiplied by 17.
#
# 3. Interpret the third character of input as a hexadecimal
# digit; let the green component of result be the resulting
# number multiplied by 17.
#
# 4. Interpret the fourth character of input as a hexadecimal
# digit; let the blue component of result be the resulting
# number multiplied by 17.
result = (int(input[1], 16) * 17,
int(input[2], 16) * 17,
int(input[3], 16) * 17)
# 5. Return result.
return result
# 7. Replace any characters in input that have a Unicode code
# point greater than U+FFFF (i.e. any characters that are not
# in the basic multilingual plane) with the two-character
# string "00".
# This one's a bit weird due to the existence of multiple internal
# Unicode string representations in different versions and builds
# of Python.
#
# From Python 2.2 through 3.2, Python could be compiled with
# "narrow" or "wide" Unicode strings (see PEP 261). Narrow builds
# handled Unicode strings with two-byte characters and surrogate
# pairs for non-BMP code points. Wide builds handled Unicode
# strings with four-byte characters and no surrogates. This means
# ord() is only sufficient to identify a non-BMP character on a
# wide build.
#
# Starting with Python 3.3, the internal string representation
# (see PEP 393) is now dynamic, and Python chooses an encoding --
# either latin-1, UCS-2 or UCS-4 -- wide enough to handle the
# highest code point in the string.
#
# The code below bypasses all of that for a consistently effective
# method: encode the string to little-endian UTF-32, then perform
# a binary unpack of it as four-byte integers. Those integers will
# be the Unicode code points, and from there filtering out non-BMP
# code points is easy.
encoded_input = input.encode('utf_32_le')
# Format string is '<' (for little-endian byte order), then a
# sequence of 'L' characters (for 4-byte unsigned long integer)
# equal to the length of the original string, which is also
# one-fourth the encoded length. For example, for a six-character
# input the generated format string will be '<LLLLLL'.
format_string = '<' + ('L' * (int(len(encoded_input) / 4)))
codepoints = struct.unpack(format_string, encoded_input)
input = ''.join(u'00' if c > 0xffff
else unichr(c)
for c in codepoints)
# 8. If input is longer than 128 characters, truncate input,
# leaving only the first 128 characters.
if len(input) > 128:
input = input[:128]
# 9. If the first character in input is a "#" (U+0023) character,
# remove it.
if input.startswith(u'#'):
input = input[1:]
# 10. Replace any character in input that is not an ASCII hex
# digit with the character "0" (U+0030).
if any(c for c in input if c not in string.hexdigits):
input = ''.join(c if c in string.hexdigits else u'0' for c in input)
# 11. While input's length is zero or not a multiple of three,
# append a "0" (U+0030) character to input.
while (len(input) == 0) or (len(input) % 3 != 0):
input += u'0'
# 12. Split input into three strings of equal length, to obtain
# three components. Let length be the length of those
# components (one third the length of input).
length = int(len(input) / 3)
red = input[:length]
green = input[length:length*2]
blue = input[length*2:]
# 13. If length is greater than 8, then remove the leading
# length-8 characters in each component, and let length be 8.
if length > 8:
red, green, blue = (red[length-8:],
green[length-8:],
blue[length-8:])
length = 8
# 14. While length is greater than two and the first character in
# each component is a "0" (U+0030) character, remove that
# character and reduce length by one.
while (length > 2) and (red[0] == u'0' and
green[0] == u'0' and
blue[0] == u'0'):
red, green, blue = (red[1:],
green[1:],
blue[1:])
length -= 1
# 15. If length is still greater than two, truncate each
# component, leaving only the first two characters in each.
if length > 2:
red, green, blue = (red[:2],
green[:2],
blue[:2])
# 16. Let result be a simple color.
#
# 17. Interpret the first component as a hexadecimal number; let
# the red component of result be the resulting number.
#
# 18. Interpret the second component as a hexadecimal number; let
# the green component of result be the resulting number.
#
# 19. Interpret the third component as a hexadecimal number; let
# the blue component of result be the resulting number.
result = (int(red, 16),
int(green, 16),
int(blue, 16))
# 20. Return result.
return result
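# Worked example of the legacy algorithm: for the non-color string u'chucknorris',
# the non-hex characters become '0' (step 10), the string is padded and split into
# three components (steps 11-12), and each component is truncated to two hex digits
# (step 15), giving the simple color (192, 0, 0), i.e. #c00000.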
|
gpl-3.0
| -3,413,213,957,325,096,000
| 31.844471
| 97
| 0.610484
| false
| 3.439059
| false
| false
| false
|
vagabondcoder/nm_tools
|
nm_listen.py
|
1
|
8285
|
#!/usr/bin/env python3
""" NetworkManager event listener and calls user scripts
Listens on DBus for NetworkManager events.
When an interface is coming up or coming down, user scripts will be
called.
"""
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import sys
import socket
import struct
import gi.repository.GLib
import daemon
import subprocess
import os
import logging
import pidfile
bin_dir = os.environ['HOME'] + "/.config/nm_listen/" # location of user scripts to be called on NM changes
log_file = "/tmp/nm_listen.log" # logfile location
pid_file = "/tmp/nm_listen.pid" # pid file location
NM_DBUS_SERVICE = "org.freedesktop.NetworkManager"
NM_DBUS_DEVICE = "org.freedesktop.NetworkManager.Device"
NM_DBUS_IP4Config = "org.freedesktop.NetworkManager.IP4Config"
NM_DBUS_INTERFACE = "org.freedesktop.NetworkManager"
NM_DBUS_OPATH = "/org/freedesktop/NetworkManager"
NM_DBUS_SETTINGS = "org.freedesktop.NetworkManager.Settings"
NM_DBUS_SETTINGS_CONN = "org.freedesktop.NetworkManager.Settings.Connection"
DBUS_SERVICE = "org.freedesktop.DBus"
DBUS_PATH = "/org/freedesktop/DBus"
DBUS_PROPS_IFACE = "org.freedesktop.DBus.Properties"
device_states = { 0: "Unknown",
10: "Unmanaged",
20: "Unavailable",
30: "Disconnected",
40: "Prepare",
50: "Config",
60: "Need Auth",
70: "IP Config",
80: "IP Check",
90: "Secondaries",
100: "Activated",
110: "Deactivating",
120: "Failed" }
connectivity_states = { 0: "Unknown",
1: "None",
2: "Portal",
3: "Limited",
4: "Full" }
nm_states = { 0: "Unknown",
10: "Asleep",
20: "Disconnected",
30: "Disconnecting",
40: "Connecting",
50: "Connected local",
60: "Connected site",
70: "Connected global" }
DBusGMainLoop(set_as_default=True)
class utils( object ):
def bytes_to_python( bs ):
return bytes("",'ascii').join(bs).decode('utf-8')
def mac_to_python(mac):
return "%02X:%02X:%02X:%02X:%02X:%02X" % tuple([ord(x) for x in mac])
def addr_to_python(addr):
return socket.inet_ntoa(struct.pack('I', addr))
def mask_to_python(mask):
return int(mask)
def addrconf_to_python(addrconf):
addr, netmask, gateway = addrconf
return [
utils.addr_to_python(addr),
utils.mask_to_python( netmask ),
utils.addr_to_python(gateway) ]
def DeviceAdded( dpath ):
""" Gets called when a device is added.
Just set the callback for state changes
"""
bus = dbus.SystemBus()
dobj = bus.get_object( NM_DBUS_SERVICE, dpath )
iface = dbus.Interface( dobj, dbus_interface=NM_DBUS_DEVICE )
iface.connect_to_signal( "StateChanged", DeviceStateChanged
, sender_keyword='sender'
,interface_keyword='iface'
,path_keyword='path'
,destination_keyword='dest'
,member_keyword='mem' )
dev_props = dbus.Interface( dobj, DBUS_PROPS_IFACE )
dev_type = dev_props.Get( NM_DBUS_DEVICE, "Interface" )
logging.info( "Tracking state changes on : %s " % dev_type )
def NewConnection( cpath ):
"""NM has a new connection. Just keep track of updates
"""
bus = dbus.SystemBus()
conn_obj = bus.get_object( NM_DBUS_SERVICE, cpath )
iface = dbus.Interface( conn_obj, dbus_interface=NM_DBUS_SETTINGS_CONN )
c_settings = iface.GetSettings()
conn = c_settings['connection']
conn_id = conn['id']
logging.info( "Got new connection '%s'" % conn_id )
#if 'autoconnect' in conn:
    c_settings[ 'connection' ][ 'autoconnect' ] = False
logging.info( "Turning off autoconnect for %s" % conn_id )
iface.Update( c_settings )
def DeviceStateChanged(new_state, old_state, reason, sender=None, iface=None, mem=None, path=None, dest=None):
""" The state of one of the devices changed.
If the state is one of the ones we care about, call the user scripts.
If is an interface coming up, add some additional environment
variables.
"""
bus = dbus.SystemBus()
logging.debug( "DeviceStateChanged: '%s' : '%s' : '%s'" %
( device_states[ new_state ]
, device_states[ old_state ]
, reason ) )
try:
dobj = bus.get_object( NM_DBUS_SERVICE, path )
except dbus.exceptions.DBusException as e:
logging.warning( "Error in DeviceStateChanged : ", e )
return
action = None
if new_state == 40: # prepare the connection
action = 'pre-up'
elif new_state == 100: #activated
action = 'up'
elif new_state == 110: #disconnecting
action = 'pre-down'
elif new_state == 30: #disconnected
action = 'down'
if action != None:
# update the environment that will passed into the script
dev_props = dbus.Interface( dobj, DBUS_PROPS_IFACE )
dev_name = dev_props.Get( NM_DBUS_DEVICE, "Interface" )
env = os.environ.copy()
env[ 'DEVICE_IFACE' ] = dev_name
env[ 'DEVICE_IP_IFACE' ] = dev_props.Get( NM_DBUS_DEVICE, "IpInterface" )
if new_state == 100: # activated
ip4ConfigPath = dev_props.Get( NM_DBUS_DEVICE, "Ip4Config" )
ip4Config = bus.get_object( NM_DBUS_SERVICE, ip4ConfigPath )
ip4_props = dbus.Interface( ip4Config, DBUS_PROPS_IFACE )
addrs_dbus = ip4_props.Get( NM_DBUS_IP4Config, "Addresses" )
addrs = [ utils.addrconf_to_python(addr) for addr in addrs_dbus ]
# NM sets these environment variables as well
env[ 'IP4_NUM_ADDRESSES' ] = str( len( addrs ) )
for i in range( 0, len(addrs) ):
a = "%s/%d %s" % ( addrs[i][0], addrs[i][1], addrs[i][2] )
logging.debug( 'Address : %s ' % a )
env[ 'IP4_ADDRESS_%d' % i ] = a
routes = ip4_props.Get( NM_DBUS_IP4Config, 'Routes' )
env[ 'IP4_NUM_ROUTES' ] = str( len( routes ) )
for i in range( 0, len(routes) ):
env[ 'IP4_ROUTE_%d' % i ] = str( routes[i] )
domains_dbus = ip4_props.Get( NM_DBUS_IP4Config, 'Domains' )
domains = [ str(d) for d in domains_dbus ]
env[ 'IP4_DOMAINS' ] = ' '.join(domains)
logging.info( "Action: %s %s" % ( action, dev_name ) )
# actually call the user scripts
files = os.listdir( bin_dir )
files.sort()
for f in files:
full = bin_dir+f
if os.access( full, os.X_OK ):
logging.info( "Running: %s %s %s" % ( full, dev_name, action ) )
subprocess.Popen( [ full, dev_name, action ], env=env )
else:
logging.warning( "Can't execute %s', skipping" % full )
def initialize():
""" Go through the devices and add them so we can listen
for state changes.
"""
try:
logging.info( "Initializing" )
bus = dbus.SystemBus()
nm_obj = bus.get_object(NM_DBUS_SERVICE, NM_DBUS_OPATH)
ds = nm_obj.GetDevices()
for dpath in ds:
DeviceAdded( dpath )
logging.info( "Initialized" )
except dbus.exceptions.DBusException as e:
# this isn't probably a problem. If NM isn't on then
# this exception will trigger. When it comes back
# then DeviceAdded will get called by the signal handler
logging.warn( "Failed to initialize : ", e )
def listen():
"""This just sets up all the callbacks and then
loops on DBus events.
"""
bus = dbus.SystemBus()
bus.add_signal_receiver( DeviceAdded
, dbus_interface=NM_DBUS_INTERFACE
, signal_name="DeviceAdded" )
bus.add_signal_receiver( NewConnection
, dbus_interface=NM_DBUS_SETTINGS
, signal_name="NewConnection" )
initialize()
loop = gi.repository.GLib.MainLoop()
try:
loop.run()
except KeyboardInterrupt:
print( "Keyboard interrupt received...shuting down..." )
loop.quit()
sys.exit(0)
except SystemExit:
logging.info( "Quitting listen.", flush=True )
loop.quit()
sys.exit(0)
except Exception as e:
logging.warning( "Quitting listen.", e, flush=True )
def stop_daemon():
pfile=pidfile.PidFile( pid_file )
pfile.kill()
def restart_daemon():
stop_daemon()
start_daemon()
def start_daemon():
f = open( log_file, "a+" )
context = daemon.DaemonContext( stderr=f, stdout=f, pidfile=pidfile.PidFile( pid_file ) )
try:
context.open()
listen()
except SystemExit as e:
logging.warning( "Quitting : %s " % str( e ), flush=True )
except Exception as e:
logging.warning( "Quitting : %s " % str( e ), flush=True )
context.close()
f.close()
if __name__ == "__main__":
logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.DEBUG )
if len( sys.argv ) > 1 and sys.argv[1] == "-l":
listen()
else:
if len( sys.argv ) > 1:
if sys.argv[1] == "stop":
stop_daemon()
elif sys.argv[1] == "restart":
restart_daemon()
elif sys.argv[1] == "start":
start_daemon()
else:
print( "Usage : %s [-l |start|stop|restart]" % sys.argv[0] )
else:
start_daemon()
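# Usage sketch, inferred from the argument handling above:
#   ./nm_listen.py -l         # run in the foreground, logging to the console
#   ./nm_listen.py start      # daemonize, logging to /tmp/nm_listen.log
#   ./nm_listen.py stop       # kill the daemon via /tmp/nm_listen.pid
#   ./nm_listen.py restart
#   ./nm_listen.py            # no argument also starts the daemon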
|
gpl-2.0
| -1,890,240,044,035,423,000
| 28.275618
| 110
| 0.672179
| false
| 2.846101
| true
| false
| false
|
mensi/pyggi
|
pyggi/lib/filters.py
|
1
|
3924
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2011 by Tobias Heinzen
:license: BSD, see LICENSE for more details
"""
import time
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def force_unicode(txt):
try:
return unicode(txt)
except UnicodeDecodeError:
pass
orig = txt
if type(txt) != str:
txt = str(txt)
for args in [('utf-8',), ('latin1',), ('ascii', 'replace')]:
try:
return txt.decode(*args)
except UnicodeDecodeError:
pass
raise ValueError("Unable to force %s object %r to unicode" % (type(orig).__name__, orig))
def format_datetime(value, format='iso8601'):
# convert format to iso8601 compliant
if format == 'iso8601':
format = "%Y-%m-%d %H:%M:%S"
# convert format to iso8601 compliant (full)
if format == 'iso8601-full':
format = "%a %b %d %H:%M:%S %Z %Y"
# if we have a timestamp, we have to convert it
# to a time_struct
if isinstance(value, int):
from datetime import datetime
value = datetime.fromtimestamp(value).timetuple()
return time.strftime(format, value)
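# Example: format_datetime(time.gmtime(0)) -> '1970-01-01 00:00:00'. An
# integer timestamp is converted to *local* time first, so
# format_datetime(0) depends on the server's timezone.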
def format_diff(value):
# escape HTML, because format_diff shall be used with 'safe'
value = unicode(value, 'utf-8') # correct?
value = "".join(html_escape_table.get(c,c) for c in value)
if value.startswith("+") and not value.startswith("+++"):
return '<li class="diff-add">%s </li>' % value
elif value.startswith("-") and not value.startswith("---"):
return '<li class="diff-remove">%s </li>' % value
elif value.startswith("@@"):
return '<li class="diff-change">%s </li>' % value
return '<li>%s</li>' % value
def humanize_timesince(when):
import datetime
# convert when to datetime
if type(when) == int:
when = datetime.datetime.utcfromtimestamp(when)
else:
when = datetime.datetime(*when[:6])
now = datetime.datetime.utcnow()
difference = now - when
if difference < datetime.timedelta(minutes=2):
return "%s seconds ago" % difference.seconds
elif difference < datetime.timedelta(hours=2):
return "%s minutes ago" % (difference.seconds / 60)
elif difference < datetime.timedelta(days=2):
return "%s hours ago" % (difference.days * 24 + difference.seconds / 3600)
elif difference < datetime.timedelta(days=2*7):
return "%s days ago" % difference.days
elif difference < datetime.timedelta(days=2*30):
return "%s weeks ago" % (difference.days / 7)
elif difference < datetime.timedelta(days=2*365):
return "%s months ago" % (difference.days / 30)
else:
return "%s years ago" % (difference.days / 365)
def is_text(mimetype):
"""
determine if a mimetype holds printable text (ascii)
"""
# all text documents
if mimetype.startswith("text/"):
return True
# xml/html/xhtml documents
if mimetype.startswith("application/") and \
(mimetype.find("html") != -1 or mimetype.find("xml") != -1):
return True
# javascript documents
if mimetype == "application/javascript":
return True
return False
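# Example: is_text('text/plain'), is_text('application/xhtml+xml') and
# is_text('application/javascript') are all True; is_text('image/png') is False.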
def first_line(string):
string = string.replace('\r', '\n', 1)
try:
return string[:string.index('\n')]
except ValueError:
return string
def static_url_for(filename):
from flask import url_for, request
from config import config
import urllib
url_base = request.environ.get('wsgiorg.routing_args', ([], {}))[1].get('static_url_base')
if not url_base and config.has_option('general', 'static_url_base'):
url_base = config.get('general', 'static_url_base')
if url_base:
return url_base.rstrip('/') + '/' + urllib.quote(filename)
else:
return url_for('static', filename=filename)
|
bsd-3-clause
| 1,785,850,873,471,814,100
| 29.65625
| 94
| 0.602701
| false
| 3.787645
| false
| false
| false
|
lorensen/VTKExamples
|
src/Python/GeometricObjects/Quad.py
|
1
|
1673
|
#!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
# Create four points (must be in counter clockwise order)
p0 = [0.0, 0.0, 0.0]
p1 = [1.0, 0.0, 0.0]
p2 = [1.0, 1.0, 0.0]
p3 = [0.0, 1.0, 0.0]
# Add the points to a vtkPoints object
points = vtk.vtkPoints()
points.InsertNextPoint(p0)
points.InsertNextPoint(p1)
points.InsertNextPoint(p2)
points.InsertNextPoint(p3)
# Create a quad on the four points
quad = vtk.vtkQuad()
quad.GetPointIds().SetId(0, 0)
quad.GetPointIds().SetId(1, 1)
quad.GetPointIds().SetId(2, 2)
quad.GetPointIds().SetId(3, 3)
# Create a cell array to store the quad in
quads = vtk.vtkCellArray()
quads.InsertNextCell(quad)
# Create a polydata to store everything in
polydata = vtk.vtkPolyData()
# Add the points and quads to the dataset
polydata.SetPoints(points)
polydata.SetPolys(quads)
# Setup actor and mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d("Silver"))
# Setup render window, renderer, and interactor
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Quad")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("Salmon"))
renderWindow.Render()
renderWindowInteractor.Start()
if __name__ == '__main__':
main()
|
apache-2.0
| -8,918,097,045,932,373,000
| 25.983871
| 61
| 0.676031
| false
| 3.319444
| false
| false
| false
|
pacoqueen/bbinn
|
formularios/resultados_cemento.py
|
1
|
30261
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## resultados_cemento.py - Resultados de pruebas de fibra de cemento.
###################################################################
## NOTAS:
##
###################################################################
## Changelog:
## 18 de mayo de 2006 -> Inicio
## 19 de mayo de 2006 -> Testing
##
###################################################################
## FIXME:
## - Al salir con el evento destroy (bolaspa) pregunta dos veces si
## quiere salir y la segunda vez ignora la respuesta.
##
###################################################################
## NOTAS: Se reusa la misma ventana (glade) de resultados de fibra,
## todo lo relacionado con rizo es humedad en la fibra de cemento.
###################################################################
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, sqlobject
try:
import pclases
except ImportError:
import sys
from os.path import join as pathjoin; sys.path.append(pathjoin("..", "framework"))
import pclases
try:
import geninformes
except ImportError:
import sys
sys.path.append('../informes')
import geninformes
from utils import _float as float
from resultados_fibra import comprobar_y_preguntar_si_guardar
class ResultadosFibra(Ventana):
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
self.usuario = usuario
Ventana.__init__(self, 'resultados_fibra.glade', objeto)
connections = {'b_salir/clicked': self._salir,
'b_lote/clicked': self.set_loteCem,
'b_fecha/clicked': self.fecha,
'b_add/clicked': self.add,
'b_drop/clicked': self.drop,
'sp_tolerancia/value-changed': self.cambiar_tolerancia,
'b_guardar_obs/clicked': self.guardar_obs,
'b_imprimir/clicked': self.imprimir,
'ventana/delete_event': self._salir
}
self.add_connections(connections)
self.activar_widgets(False)
self.inicializar_ventana()
if objeto == None:
self.loteCem = None
else:
self.loteCem = objeto
self.actualizar_ventana()
gtk.main()
def _salir(self, *args, **kw):
"""
Si hay cambios pendientes en observaciones, pregunta.
Después llama a la función salir heredada.
"""
comprobar_y_preguntar_si_guardar(self)
self.salir(*args, **kw)
# --------------- Funciones auxiliares ------------------------------
def activar_widgets(self, valor):
self.ws = ('e_codigo',
'e_nombre',
'e_dtex',
'e_corte',
'e_color',
'e_tenacidad',
'e_elongacion',
'e_rizo',
'e_grasa',
'e_encogimiento',
'tv_pruebas',
'b_add',
'b_drop',
'b_fecha',
'e_media',
'e_desvtipica',
'sp_tolerancia',
'txt_observaciones',
'frame4')
for i in self.ws:
self.wids[i].set_sensitive(valor)
if self.usuario:
try:
ventana = pclases.Ventana.select(pclases.Ventana.q.fichero == "resultados_cemento.py")[0] # OJO: HARCODED
except IndexError:
txt = "resultados_fibra::activar_widgets -> Ventana no encontrada en BD."
self.logger.error(txt)
print txt
else:
permiso = self.usuario.get_permiso(ventana)
if not permiso.escritura and self.usuario.nivel > 1:
self.wids['tv_pruebas'].set_sensitive(False)
self.wids['txt_observaciones'].set_sensitive(False)
if not permiso.nuevo and self.usuario.nivel > 1:
self.wids['b_add'].set_sensitive(False)
def crear_listview(self, tv):
cols = (('Fecha', 'gobject.TYPE_STRING', True, True, True, self.cambiar_fecha),
('Título (DTEX)', 'gobject.TYPE_STRING', True, True, False, self.cambiar_titulo),
('Alargamiento (%)', 'gobject.TYPE_STRING', True, True, False, self.cambiar_alargamiento),
('Tenacidad (cN/tex)', 'gobject.TYPE_STRING', True, True, False, self.cambiar_tenacidad),
('Grasa (%)', 'gobject.TYPE_STRING', True, True, False, self.cambiar_grasa),
('Encogimiento (%)', 'gobject.TYPE_STRING', True, True, False, self.cambiar_encogimiento),
('Humedad (%)', 'gobject.TYPE_STRING', True, True, False, self.cambiar_humedad),
('ID', 'gobject.TYPE_STRING', False, False, False, None)) # Contiene los ID de los resultados separados por ','
utils.preparar_listview(tv, cols)
tv.get_column(1).get_cell_renderers()[0].set_property('xalign', 0.1)
tv.get_column(2).get_cell_renderers()[0].set_property('xalign', 0.1)
tv.get_column(3).get_cell_renderers()[0].set_property('xalign', 0.1)
tv.get_column(4).get_cell_renderers()[0].set_property('xalign', 0.1)
tv.get_column(5).get_cell_renderers()[0].set_property('xalign', 0.1)
tv.get_column(6).get_cell_renderers()[0].set_property('xalign', 0.1)
def inicializar_ventana(self):
"""
Inicializa los widgets de la ventana.
"""
self.crear_listview(self.wids['tv_pruebas'])
self.wids['b_fecha'].set_property("visible", False)
self.wids['l_rizo'].set_label("Humedad: ")
self.wids['txt_observaciones'].get_buffer().connect("changed", lambda txtbuffer: self.wids['b_guardar_obs'].set_sensitive(True))
def func_sort(self, t1, t2):
if t1[0] < t2[0]:
return -1
elif t1[0] > t2[0]:
return 1
else:
return 0
def preparar_pruebas(self):
"""
Devuelve una lista de listas que contiene las pruebas ordenadas del
loteCem por fecha de la forma: [(fecha, prueba título, ..., "id0,id1,...id5")]
"""
res = []
for p in self.loteCem.pruebasTitulo:
res.append([p.fecha, p.resultado, None, None, None, None, None, [p.id, 0, 0, 0, 0, 0]])
for p in self.loteCem.pruebasElongacion:
puesto = False
for fila in res:
if p.fecha == fila[0] and fila[2] == None: # Hay hueco en la fecha
fila[2] = p.resultado
fila[-1][1] = p.id
puesto = True
break
if not puesto:
res.append([p.fecha, None, p.resultado, None, None, None, None, [0, p.id, 0, 0, 0, 0]])
for p in self.loteCem.pruebasTenacidad:
puesto = False
for fila in res:
if p.fecha == fila[0] and fila[3] == None: # Hay hueco en la fecha
fila[3] = p.resultado
fila[-1][2] = p.id
puesto = True
break
if not puesto:
res.append([p.fecha, None, None, p.resultado, None, None, None, [0, 0, p.id, 0, 0, 0]])
for p in self.loteCem.pruebasGrasa:
puesto = False
for fila in res:
if p.fecha == fila[0] and fila[4] == None: # Hay hueco en la fecha
fila[4] = p.resultado
fila[-1][3] = p.id
puesto = True
break
if not puesto:
res.append([p.fecha, None, None, None, p.resultado, None, None, [0, 0, 0, p.id, 0, 0]])
for p in self.loteCem.pruebasEncogimiento:
puesto = False
for fila in res:
if p.fecha == fila[0] and fila[5] == None: # Hay hueco en la fecha
fila[5] = p.resultado
fila[-1][4] = p.id
puesto = True
break
if not puesto:
res.append([p.fecha, None, None, None, None, p.resultado, None, [0, 0, 0, 0, p.id, 0]])
for p in self.loteCem.pruebasHumedad:
puesto = False
for fila in res:
if p.fecha == fila[0] and fila[6] == None: # Hay hueco en la fecha
fila[6] = p.resultado
fila[-1][5] = p.id
puesto = True
break
if not puesto:
res.append([p.fecha, None, None, None, None, None, p.resultado, [0, 0, 0, 0, 0, p.id]])
res.sort(self.func_sort)
res = [(utils.str_fecha(f[0]), \
f[1] and "%.2f" % f[1] or "", \
f[2] and "%.2f" % f[2] or "", \
f[3] and "%.2f" % f[3] or "", \
f[4] and "%.2f" % f[4] or "", \
f[5] and "%.2f" % f[5] or "", \
f[6] and "%.2f" % f[6] or "", \
','.join(map(str, f[7]))) for f in res]
return res
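    # Illustrative return value (hypothetical figures, assuming utils.str_fecha
    # renders dd/mm/yyyy): a lot with one titulo result (id 12) and one
    # tenacidad result (id 7) measured on the same day yields a single row:
    #   ('19/05/2006', '1.70', '', '21.50', '', '', '', '12,0,7,0,0,0')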
def rellenar_pruebas(self):
"""
Introduce en el treeview las pruebas del loteCem seleccionado y
recalcula la característica del loteCem.
"""
model = self.wids['tv_pruebas'].get_model()
model.clear()
self.calcular_caracteristicas()
pruebas = self.preparar_pruebas()
for prueba in pruebas:
model.append(prueba)
def calcular_caracteristicas(self):
"""
Calcula la media, desviación típica y marca los valores según tolerancia.
"""
loteCem = self.loteCem
# La tolerancia depende del tipo de producto:
try:
dtex = loteCem.bigbags[0].articulos[0].productoVenta.camposEspecificosBala.dtex
except:
utils.dialogo_info(titulo = 'ERROR',
texto = 'Ocurrió un error buscando el tipo de fibra.',
padre = self.wids['ventana'])
return
mediatitulo = 0
sumatorio = 0
desvtipica = 0
for p in loteCem.pruebasTitulo:
mediatitulo += p.resultado
sumatorio += p.resultado**2.0
try:
mediatitulo /= len(loteCem.pruebasTitulo)
desvtipica = sumatorio / len(loteCem.pruebasTitulo)
desvtipica -= mediatitulo**2.0
desvtipica = desvtipica**0.5 # ValueError cuando intente hacer raíz de número negativo. No debería ocurrir.
except ZeroDivisionError:
mediatitulo = 0
desvtipica = 0
loteCem.mediatitulo = mediatitulo
self.wids['e_desvtipica'].set_text("%.2f" % desvtipica)
self.marcar_tolerancia(dtex, mediatitulo, loteCem.tolerancia)
self.calcular_caracteristicas_propias()
self.rellenar_info_loteCem()
def calcular_elongacion(self):
"""
Calcula la media de los valores de y elongación.
"""
loteCem = self.loteCem
loteCem.update_valor("elongacion")
def calcular_tenacidad(self):
loteCem = self.loteCem
loteCem.update_valor("tenacidad")
def calcular_grasa(self):
loteCem = self.loteCem
# La elongación depende del tipo de producto:
loteCem.update_valor("grasa")
def calcular_encogimiento(self):
loteCem = self.loteCem
loteCem.update_valor("encogimiento")
def calcular_humedad(self):
loteCem = self.loteCem
loteCem.update_valor("humedad")
def calcular_caracteristicas_propias(self):
self.calcular_elongacion()
self.calcular_tenacidad()
self.calcular_grasa()
self.calcular_encogimiento()
self.calcular_humedad()
self.rellenar_info_loteCem()
def marcar_tolerancia(self, dtex, mediatitulo, tolerancia):
self.wids['ruler'].set_sensitive(False)
diferencia = abs(mediatitulo - dtex)
try:
porcentaje = (diferencia * 100) / dtex # En formato 0 a 100 porque las posiciones del ruler son de -100 a 100
except ZeroDivisionError: # El DTEX del artículo es 0.
porcentaje = 0.0
if mediatitulo < dtex:
porcentaje *= -1
self.wids['ruler'].set_property('position', porcentaje)
difmax = dtex * tolerancia
if round(diferencia,2) > difmax:
self.wids['e_media'].modify_base(gtk.STATE_NORMAL, self.wids['e_media'].get_colormap().alloc_color("red"))
else:
self.wids['e_media'].modify_base(gtk.STATE_NORMAL, self.wids['e_media'].get_colormap().alloc_color("green"))
self.colorear(self.wids['tv_pruebas'], dtex, difmax)
def colorear(self, tv, dtex, diferencia):
"""
diferencia es la diferencia máxima en valor absoluto que debe
haber entre el resultado y el título del artículo.
"""
def cell_func(col, cell, model, itr, (dtex, dif)):
resultado = model[itr][1].replace(" ", "")
if resultado != "":
resultado = float(resultado)
if round(abs(resultado-dtex),2) > dif:
color = "red"
else:
color = "green"
cell.set_property("text", "%.2f" % resultado)
else:
color = "white"
cell.set_property("text", "")
cell.set_property("cell-background", color)
cols = tv.get_columns()
col = cols[1]
cells = col.get_cell_renderers()
for cell in cells:
col.set_cell_data_func(cell, cell_func, (dtex, diferencia))
def actualizar_ventana(self):
"""
Método que sobreescribe el "actualizar_ventana" que hereda de la clase ventana.
PRECONDICION: self.loteCem no puede ser None
"""
try:
self.loteCem.sync()
self.rellenar_widgets()
except sqlobject.SQLObjectNotFound:
utils.dialogo_info(titulo = 'REGISTRO ELIMINADO',
texto = 'El registro ha sido borrado desde otro puesto.',
padre = self.wids['ventana'])
self.loteCem = None
self.activar_widgets(self.loteCem!=None)
# --------------- Manejadores de eventos ----------------------------
def guardar_obs(self, boton):
"""
Guarda el contenido del TextView en el atributo observaciones.
"""
if self.objeto != None:
buffer = self.wids['txt_observaciones'].get_buffer()
self.objeto.observaciones = buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter())
self.wids['b_guardar_obs'].set_sensitive(False)
def add(self, w):
if self.loteCem != None:
model = self.wids['tv_pruebas'].get_model()
model.append((utils.str_fecha(time.localtime()),
"", "", "", "", "", "", "0,0,0,0,0,0"))
else:
print "WARNING: Se ha intentano añadir una prueba con loteCem = None"
def drop(self, w):
"""
Borra una línea completa de resultados.
"""
model, iter = self.wids['tv_pruebas'].get_selection().get_selected()
if iter != None and utils.dialogo(titulo = 'BORRAR PRUEBA', texto = '¿Está seguro?', padre = self.wids['ventana']):
ids = map(int, model[iter][-1].split(','))
for columnaid in range(len(ids)):
id = ids[columnaid]
if id != 0:
clase = self.get_clase(columnaid+1)
prueba = clase.get(id)
prueba.destroySelf()
self.rellenar_pruebas()
def set_loteCem(self, w):
comprobar_y_preguntar_si_guardar(self)
codlote = utils.dialogo_entrada(titulo = 'Nº LOTE',
texto = 'Introduzca número o código de lote de fibra '
'de cemento:',
padre = self.wids['ventana'])
if codlote != None:
numlote = utils.parse_numero(codlote)
loteCems = pclases.LoteCem.select(pclases.OR(
pclases.LoteCem.q.numlote == numlote,
pclases.LoteCem.q.codigo.contains(codlote)))
if loteCems.count() == 0:
utils.dialogo_info(titulo = 'LOTE NO ENCONTRADO',
texto = 'No se encontró ningún lote de fibra de cemento'
' %s.' % (codlote),
padre = self.wids['ventana'])
return
elif loteCems.count() > 1:
filas = [(l.id, l.numlote, l.codigo, l.tenacidad,
l.elongacion, l.humedad, l.encogimiento)
for l in loteCems]
idloteCem = utils.dialogo_resultado(filas,
titulo = 'SELECCIONE LOTE',
cabeceras = ('ID', 'Número', 'Código', 'Tenacidad',
'Elongación', 'Humedad', 'Encogimiento'),
padre = self.wids['ventana'])
if idloteCem < 0:
return
loteCem = pclases.LoteCem.get(idloteCem)
else:
loteCem = loteCems[0]
if len(loteCem.bigbags) == 0:
utils.dialogo_info(titulo = 'LOTE VACÍO',
texto = 'El lote de cemento no contiene bigbags, no '
'puede\nrealizar pruebas sobre un lote vacío.',
padre = self.wids['ventana'])
self.loteCem = None
return
self.loteCem = loteCem
self.actualizar_ventana()
def rellenar_widgets(self):
self.objeto = self.loteCem
self.activar_widgets(self.loteCem != None)
if self.loteCem != None:
self.rellenar_info_loteCem()
self.rellenar_pruebas()
self.rellenar_observaciones()
def rellenar_observaciones(self):
"""
Introduce las observaciones de la partida en el TextView.
"""
self.wids['txt_observaciones'].get_buffer().set_text(self.objeto.observaciones)
self.wids['b_guardar_obs'].set_sensitive(False)
def rellenar_info_loteCem(self):
"""
PRECONDICIÓN: self.loteCem != None y len(self.loteCem.bigbags) > 0
"""
loteCem = self.loteCem
self.wids['e_codigo'].set_text("%d (%s)" % (loteCem.numlote, loteCem.codigo))
self.wids['e_nombre'].set_text(loteCem.bigbags[0].articulos[0].productoVenta.nombre)
self.wids['e_dtex'].set_text("%.1f DTEX" % (loteCem.bigbags[0].articulos[0].productoVenta.camposEspecificosBala.dtex))
self.wids['e_corte'].set_text(`loteCem.bigbags[0].articulos[0].productoVenta.camposEspecificosBala.corte`)
self.wids['e_color'].set_text(loteCem.bigbags[0].articulos[0].productoVenta.camposEspecificosBala.color or '')
self.wids['e_tenacidad'].set_text(loteCem.tenacidad == None and "-" or utils.float2str(loteCem.tenacidad))
self.wids['e_elongacion'].set_text(loteCem.elongacion == None and "-" or utils.float2str(loteCem.elongacion))
self.wids['e_rizo'].set_text(loteCem.humedad == None and "-" or utils.float2str(loteCem.humedad))
self.wids['e_encogimiento'].set_text(loteCem.encogimiento == None and "-" or utils.float2str(loteCem.encogimiento))
self.wids['e_grasa'].set_text(loteCem.grasa == None and "-" or utils.float2str(loteCem.grasa))
self.wids['e_media'].set_text(loteCem.mediatitulo == None and "-" or "%.2f DTEX" % (loteCem.mediatitulo))
try:
self.wids['sp_tolerancia'].set_value(loteCem.tolerancia*100.0)
except:
self.wids['sp_tolerancia'].set_value(20)
loteCem.tolerancia = 0.2
def fecha(self, w):
self.wids['e_fecha'].set_text(utils.str_fecha(utils.mostrar_calendario(fecha_defecto = self.objeto and self.objeto.fecha or None, padre = self.wids['ventana'])))
def cambiar_fecha(self, cell, path, texto):
try:
fecha = time.strptime(texto, '%d/%m/%Y')
except:
utils.dialogo_info('FECHA INCORRECTA',
'La fecha introducida (%s) no es correcta.' % (texto),
padre = self.wids['ventana'])
return
model = self.wids['tv_pruebas'].get_model()
model[path][0] = utils.str_fecha(fecha)
ids = map(int, model[path][-1].split(','))
for col in xrange(6):
if ids[col] != 0:
clase = self.get_clase(col+1)
prueba = clase.get(ids[col])
prueba.fecha = fecha
def get_clase(self, columna):
if columna == 1:
clase = pclases.PruebaTitulo
elif columna == 2:
clase = pclases.PruebaElongacion
elif columna == 3:
clase = pclases.PruebaTenacidad
elif columna == 4:
clase = pclases.PruebaGrasa
elif columna == 5:
clase = pclases.PruebaEncogimiento
elif columna == 6:
clase = pclases.PruebaHumedad
else:
print "WARNING: resultados_fibra.py: No debería entrar aquí."
clase = None
return clase
def cambiar_resultado(self, tv, path, texto, columna):
texto = texto.replace(" ", "")
if texto != "":
try:
resultado = utils._float(texto)
except:
utils.dialogo_info('RESULTADO INCORRECTO',
'El número tecleado (%s) no es correcto.' % (texto),
padre = self.wids['ventana'])
return
clase = self.get_clase(columna)
columnaid = columna-1 # Porque en los IDS empieza por 0
if clase != None:
model = self.wids['tv_pruebas'].get_model()
ids = map(int, model[path][-1].split(','))
id = ids[columnaid]
if id == 0:
if texto != "":
fecha = time.strptime(model[path][0], '%d/%m/%Y')
try:
prueba = clase(fecha = fecha,
resultado = resultado,
loteCem = self.loteCem,
lote = None)
except TypeError: # Es prueba de Humedad, no lleva relación con lote de fibra:
prueba = clase(fecha = fecha,
resultado = resultado,
loteCem = self.loteCem)
ids[columnaid] = prueba.id
model[path][-1] = ','.join(map(str, ids))
model[path][columna] = "%.2f" % resultado
else:
prueba = clase.get(int(id))
if texto == "":
try:
prueba.destroySelf()
except:
utils.dialogo_info(titulo = "ERROR",
texto = "El resultado no se pudo eliminar.",
padre = self.wids['ventana'])
return
model[path][columna] = ""
ids[columnaid] = 0
model[path][-1] = ','.join(map(str, ids))
self.rellenar_pruebas() # Prefiero esto a comprobar si la fila se ha quedado vacía, etc...
else:
prueba.resultado = resultado
if columna != 6:
model[path][columna] = "%.2f" % resultado
else:
model[path][columna] = "%d" % resultado
self.calcular_caracteristicas()
# print model[path][-1]
# self.rellenar_pruebas()
def cambiar_titulo(self, tv ,path, texto):
self.cambiar_resultado(tv, path, texto, 1)
def cambiar_alargamiento(self, tv ,path, texto):
self.cambiar_resultado(tv, path, texto, 2)
def cambiar_tenacidad(self, tv ,path, texto):
self.cambiar_resultado(tv, path, texto, 3)
def cambiar_grasa(self, tv ,path, texto):
self.cambiar_resultado(tv, path, texto, 4)
def cambiar_encogimiento(self, tv ,path, texto):
self.cambiar_resultado(tv, path, texto, 5)
def cambiar_humedad(self, tv ,path, texto):
self.cambiar_resultado(tv, path, texto, 6)
def cambiar_tolerancia(self, sp):
loteCem = self.loteCem
try:
loteCem.tolerancia = float(sp.get_value()) / 100.0
self.calcular_caracteristicas()
except ValueError:
utils.dialogo_info(titulo = 'VALOR INCORRECTO',
texto = 'El valor %s no es correcto.' % (sp.get_value()),
padre = self.wids['ventana'])
def imprimir(self, boton):
"""
Imprime la información en pantalla.
"""
import informes, geninformes
txt = "LOTE: %s\n" % (self.wids['e_codigo'].get_text())
txt += "PRODUCTO: %s\n\n" % (self.wids['e_nombre'].get_text())
txt += "\nCaracterísticas del lote:\n"
txt += " DTEX: %s\n" % (self.wids['e_dtex'].get_text())
txt += " Tenacidad: %s\n" % (self.wids['e_tenacidad'].get_text())
txt += " Alargamiento: %s\n" % (self.wids['e_elongacion'].get_text())
txt += " Corte: %s\n" % (self.wids['e_corte'].get_text())
txt += " Grasa: %s\n" % (self.wids['e_grasa'].get_text())
txt += " Encogimiento: %s\n" % (self.wids['e_encogimiento'].get_text())
txt += " Color: %s\n" % (self.wids['e_color'].get_text())
txt += " Humedad: %s\n" % (self.wids['e_rizo'].get_text())
loteCem = self.loteCem
try:
dtex = loteCem.bigbags[0].articulos[0].productoVenta.camposEspecificosBala.dtex
tolerancia = loteCem.tolerancia
mediatitulo = loteCem.mediatitulo
except:
utils.dialogo_info(titulo = 'ERROR',
texto = 'Ocurrió un error buscando el tipo de fibra.',
padre = self.wids['ventana'])
dtex = 0
tolerancia = 0
mediatitulo = 0
difmax = dtex * tolerancia
diferencia = abs(mediatitulo - dtex)
if round(diferencia, 2) > difmax:
ok = False
else:
ok = True
txt += " Media de título: %s (%s)\n" % (self.wids['e_media'].get_text(),
ok and "dentro del %s%% de tolerancia" % utils.float2str(self.wids['sp_tolerancia'].get_value(), 0)
or "no cumple el %s%% de tolerancia" % utils.float2str(self.wids['sp_tolerancia'].get_value(), 0)
)
txt += " Desviación típica: %s\n" % (self.wids['e_desvtipica'].get_text())
txt += "\nResultados de las pruebas:\n"
model = self.wids['tv_pruebas'].get_model()
for fila in model:
txt += " %s\n" % (fila[0])
txt += " Título (dtex): %s\n" % (fila[1])
txt += " Alargamiento (%%): %s\n" % (fila[2])
txt += " Tenacidad (cN/tex): %s\n" % (fila[3])
txt += " Grasa (%%): %s\n" % (fila[4])
txt += " Encogimiento (%%): %s\n" % (fila[5])
txt += " Humedad (%%): %s\n" % (fila[6])
buffer = self.wids['txt_observaciones'].get_buffer()
txt += "\nObervaciones: %s\n" % buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter())
informes.abrir_pdf(geninformes.texto_libre(txt, "Resultados de laboratorio: %s" % (self.objeto and self.objeto.codigo or "")))
if __name__=='__main__':
a = ResultadosFibra()
|
gpl-2.0
| 6,911,447,172,993,764,000
| 44.227545
| 169
| 0.505197
| false
| 3.312719
| false
| false
| false
|
kevana/corpscores
|
dci_notify/scraper/scraper.py
|
1
|
6017
|
#!/Users/kevanahlquist/Dropbox/dev/dci_notify/env/bin/python
'''
Monitor the dci.org website for new score postings.
'''
from __future__ import print_function
#Initialize Sentry before others, requires SENTRY_DSN environment variable
from raven import Client
client = Client()
# Imports
from bs4 import BeautifulSoup
from datetime import datetime
from email.mime.text import MIMEText
from requests.exceptions import ConnectionError
from socket import error as SocketError
import json
import os
import requests
import smtplib
import time
# Config directives
MAIL_SERVER = 'smtp.mailgun.org'
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME', 'postmaster@example.com')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD', 'example_password')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_DEFAULT_SENDER', 'sms@example.com')
MAIL_SUPPRESS_SEND = os.environ.get('MAIL_SUPPRESS_SEND', False)
APP_SUPPRESS_POST = os.environ.get('APP_SUPPRESS_POST', False)
API_POST_URL = os.environ.get('API_POST_URL', 'http://example.com/events/') # 'http://corpscores.herokuapp.com/events/'
RECIPIENT = 'admin@example.com' # Emails message before sending to SMS.
# May be able to ignore basedir, make file wherever script is running
basedir = os.path.abspath(os.path.dirname(__file__))
OUTFILE = os.path.join(basedir, 'lastscrape.txt')
API_KEY = os.environ.get('API_KEY', 'API_KEY')
# JSONify dates in ISO 8601 format
dthandler = lambda obj: (
obj.isoformat()
if isinstance(obj, datetime)
else json.JSONEncoder().default(obj))
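# Example: json.dumps({'date': datetime(2014, 8, 9, 19, 30)}, default=dthandler)
# -> '{"date": "2014-08-09T19:30:00"}'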
def eqIn(item, iterable):
'''Quick in operator to test for equality instead of identity'''
for thing in iterable:
if item == thing:
return True
return False
def send_email(text):
'''Send the raw event to an admin.'''
msg = MIMEText(text)
msg['Subject'] = 'New Event posted on DCI.org'
msg['From'] = MAIL_DEFAULT_SENDER
msg['To'] = RECIPIENT
if not MAIL_SUPPRESS_SEND:
s = smtplib.SMTP(MAIL_SERVER)
s.login(MAIL_USERNAME, MAIL_PASSWORD)
s.sendmail(MAIL_DEFAULT_SENDER, [RECIPIENT], msg.as_string())
def post_to_app(text):
"""Post event to app, text is a string containing a json object."""
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
r = requests.post(API_POST_URL, data=text, headers=headers)
if r.status_code != 200:
raise IOError('Unable to post event to app: %s' % text)
def process_event(event):
'''Retrieve, parse, and send the scores for the given event UUID.'''
params = {'event': event}
try:
r = requests.get('http://www.dci.org/scores/index.cfm', params=params)
except (SocketError, ConnectionError):
return
if r.status_code != 200:
return
# Get coarse info out of page
soup = BeautifulSoup(r.text)
scoresTable = (soup.find_all('table')[5].
find_all('table')[1])
infoHeader = (soup.find_all('table')[5].
find('h3'))
infoList = list(infoHeader.strings)
# Build a new event structure
thisEvent = {}
thisEvent['date'] = datetime.strptime(infoList[0], '%A, %B %d, %Y')
thisEvent['name'] = infoList[2]
loc = infoList[1].rsplit(' ', 1)
thisEvent['city'] = loc[0].rstrip(',\n\r\t ')
thisEvent['state'] = loc[1]
# Parse scores
rows = scoresTable.findChildren('tr')[2:-2]
eventResults = []
for row in rows:
columns = row.findChildren('td')
cleanColumns = [col.text.strip() for col in columns]
if len(cleanColumns) < 3:
break # Some events have Exhibition/International class labels
result = {}
result['place'] = cleanColumns[0]
result['corps'] = cleanColumns[1]
result['score'] = cleanColumns[2]
eventResults.append(result)
thisEvent['results'] = eventResults
thisEvent['api_key'] = API_KEY
event_text = json.dumps(thisEvent,
sort_keys=True,
indent=2,
default=dthandler)
#send_email(event_text)
add_processed_event(event)
if not APP_SUPPRESS_POST:
post_to_app(event_text)
def set_processed_events(events):
'''Write all processed events out to persistent storage.'''
with open(OUTFILE, 'w') as f:
f.writelines('%s\n' % event for event in events)
def get_processed_events():
'''Retrieve all processed events from persistent storage.'''
try:
with open(OUTFILE, 'r') as f:
ret = f.readlines()
ret = [item.strip() for item in ret]
except IOError:
ret = []
return ret
def add_processed_event(event):
'''Add a single new event to the processed events collection.'''
events = get_processed_events()
if event not in events:
        events.append(event)
set_processed_events(events)
def scrape_func():
'''Entry method when script is run.
Download scores page to obtain list of event UUIDs, compare to previously
processed events, process any new events.
'''
try:
# Base /scores URL redirects to the most recent score data
r = requests.get('http://www.dci.org/scores', allow_redirects=True)
except (SocketError, ConnectionError):
return
if r.status_code != 200:
return
soup = BeautifulSoup(r.text)
try:
options = soup.find('select').findChildren()
except AttributeError:
return None
current_events = [opt['value'] for opt in options]
last_processed_events = get_processed_events()
diff = [item for item in current_events if not eqIn(item,
last_processed_events)]
if diff:
for event in diff:
process_event(event)
if __name__ == '__main__':
while True:
try:
scrape_func()
except Exception as e:
print(e)
time.sleep(60)
|
bsd-3-clause
| 4,312,084,939,726,736,000
| 30.502618
| 119
| 0.633206
| false
| 3.705049
| false
| false
| false
|
OscarES/Differential-Algebra-Tracker
|
diffWithNpyAndDstFiles.py
|
1
|
2191
|
import numpy as np
from IOHandler import loadMultipart
##### Diff between particle data, data should be stored as x column xp column y column yp
firstdata = raw_input('Enter first multipart datafile name:')
seconddata = raw_input('Enter second multipart datafile name:')
firstmultipart = loadMultipart(firstdata)
secondmultipart = loadMultipart(seconddata)
xf = [firstmultipart[i][0][0] for i in xrange(len(firstmultipart))]
xpf = [firstmultipart[i][0][1] for i in xrange(len(firstmultipart))]
yf = [firstmultipart[i][0][2] for i in xrange(len(firstmultipart))]
ypf = [firstmultipart[i][0][3] for i in xrange(len(firstmultipart))]
zf = [firstmultipart[i][0][4] for i in xrange(len(firstmultipart))]
zpf = [firstmultipart[i][0][5] for i in xrange(len(firstmultipart))]
xs = [secondmultipart[i][0][0] for i in xrange(len(secondmultipart))]
xps = [secondmultipart[i][0][1] for i in xrange(len(secondmultipart))]
ys = [secondmultipart[i][0][2] for i in xrange(len(secondmultipart))]
yps = [secondmultipart[i][0][3] for i in xrange(len(secondmultipart))]
zs = [secondmultipart[i][0][4] for i in xrange(len(secondmultipart))]
zps = [secondmultipart[i][0][5] for i in xrange(len(secondmultipart))]
diffx = np.array(xf) - np.array(xs)
diffxp = np.array(xpf) - np.array(xps)
diffy = np.array(yf) - np.array(ys)
diffyp = np.array(ypf) - np.array(yps)
diffz = np.array(zf) - np.array(zs)
diffzp = np.array(zpf) - np.array(zps)
diffx = diffx.astype('float')
diffxp = diffxp.astype('float')
diffy = diffy.astype('float')
diffyp = diffyp.astype('float')
diffz = diffz.astype('float')
diffzp = diffzp.astype('float')
stdx = np.std(diffx)
stdxp = np.std(diffxp)
stdy = np.std(diffy)
stdyp = np.std(diffyp)
stdz = np.std(diffz)
stdzp = np.std(diffzp)
print 'stdx:',stdx
print 'stdxp:',stdxp
print 'stdy:',stdy
print 'stdyp:',stdyp
print 'stdz:',stdz
print 'stdzp:',stdzp
# std for xin,xpin,yin,ypin
#print 'Initial beam std (when firstsdata is the init while and not results...)'
#print 'stdx:',np.std(firstx)
#print 'stdxp:',np.std(firstxp)
#print 'stdy:',np.std(firsty)
#print 'stdyp:',np.std(firstyp)
## TODO:
#1: make the program work by calling something like: python diff.py out.txt out2.txt
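# One possible approach for the TODO above (an untested sketch, not wired in):
# read the file names from sys.argv so the script can be invoked as
# "python diffWithNpyAndDstFiles.py out.txt out2.txt":
#   import sys
#   if len(sys.argv) == 3:
#       firstdata, seconddata = sys.argv[1], sys.argv[2]
#   else:
#       firstdata = raw_input('Enter first multipart datafile name:')
#       seconddata = raw_input('Enter second multipart datafile name:')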
|
gpl-3.0
| -2,406,106,296,024,561,700
| 34.934426
| 89
| 0.715199
| false
| 2.769912
| false
| false
| false
|
coopie/huzzer
|
huzzer/expressions.py
|
1
|
5250
|
from . import BOOL, INT, types
class Expression:
"""
A representation of an expression.
## Methods:
* stringify: function that takes a `namer` as an argument. This function renders the expression as text.
Expressions can be empty or parameterized. An empty expression when called creates a new expression
with the arguments of the invocation.
Expressions with arguments cannot be called.
"""
def __init__(self, type_signiature, stringify_func, args=[]):
assert stringify_func is not None, 'expression must have a way of representing itself as a string'
self.type_signiature = type_signiature
self.stringify_func = stringify_func
self.args = args
def __call__(self, *args):
assert len(self.args) == 0
assert len(args) == len(self.type_signiature[1:])
return Expression(self.type_signiature, self.stringify_func, args)
def stringify(self, namer):
return self.stringify_func(namer, self.args)
class FunctionExpression(Expression):
def __init__(self, type_signiature, function_id, args=[]):
self.type_signiature = type_signiature
self.function_id = function_id
self.args = args
def stringify_func(namer, args):
if len(args) != 0:
args_strings = ' ' + ' '.join([x.stringify(namer) for x in args])
return '({0}{1})'.format(namer.name_function(self), args_strings)
else:
return namer.name_function(self)
self.stringify_func = stringify_func
def __call__(self, *args):
assert len(self.args) == 0
assert len(args) == len(self.type_signiature[1:])
return FunctionExpression(self.type_signiature, self.function_id, args)
class VariableExpression(Expression):
def __init__(self, type_signiature, var_id):
assert type_signiature in types
self.type_signiature = type_signiature
self.var_id = var_id
def stringify(self, namer):
return namer.name_variable(self)
def __call__(self, *args):
raise TypeError('VariableExpression should not be called as it can never be an empty expression')
def stringify_binary_function(function_name):
def stringify_expr(namer, args):
assert len(args) == 2
a, b = [x.stringify(namer) for x in args]
template = '({0} {1} {2})'
return template.format(function_name, a, b)
return stringify_expr
def stringify_infix_function(function_name):
def stringify_expr(namer, args):
assert len(args) == 2
a, b = [x.stringify(namer) for x in args]
return '({1} {0} {2})'.format(function_name, a, b)
return stringify_expr
def stringify_unary_function(function_string):
def stringify_expr(namer, args):
assert len(args) == 1
return function_string.format(args[0].stringify(namer))
return stringify_expr
def type_of_expression(expr):
return expr.type_signiature[-1]
def make_binary_expr(type_signiature, stringify_func):
return Expression(type_signiature, stringify_func)
# empty expressions used for expression generation
div_expr = Expression((INT, INT, INT), stringify_binary_function('div'))
mod_expr = Expression((INT, INT, INT), stringify_binary_function('mod'))
max_expr = Expression((INT, INT, INT), stringify_binary_function('max'))
min_expr = Expression((INT, INT, INT), stringify_binary_function('min'))
plus_expr = Expression((INT, INT, INT), stringify_infix_function('+'))
minus_expr = Expression((INT, INT, INT), stringify_infix_function('-'))
mul_expr = Expression((INT, INT, INT), stringify_infix_function('*'))
eq_expr = Expression((INT, INT, BOOL), stringify_infix_function('=='))
neq_expr = Expression((INT, INT, BOOL), stringify_infix_function('/='))
gt_expr = Expression((INT, INT, BOOL), stringify_infix_function('>'))
gte_expr = Expression((INT, INT, BOOL), stringify_infix_function('>='))
lt_expr = Expression((INT, INT, BOOL), stringify_infix_function('<'))
lte_expr = Expression((INT, INT, BOOL), stringify_infix_function('<='))
or_expr = Expression((BOOL, BOOL, BOOL), stringify_infix_function('||'))
and_expr = Expression((BOOL, BOOL, BOOL), stringify_infix_function('&&'))
not_expr = Expression((BOOL, BOOL), stringify_unary_function('(not {})'))
fromEnum_expr = Expression((BOOL, INT), stringify_unary_function('(fromEnum {})'))
All_BRANCH_EXPRESSIONS = [
div_expr,
mod_expr,
max_expr,
min_expr,
plus_expr,
minus_expr,
mul_expr,
eq_expr,
neq_expr,
gt_expr,
gte_expr,
lt_expr,
lte_expr,
or_expr,
and_expr,
or_expr,
not_expr,
fromEnum_expr
]
BRANCH_EXPRESSIONS = {}
for haskell_type in types:
expressions_of_type = [x for x in All_BRANCH_EXPRESSIONS if type_of_expression(x) == haskell_type]
BRANCH_EXPRESSIONS[haskell_type] = expressions_of_type
def stringify_literal(namer, args):
assert len(args) == 1
return str(args[0])
# these are treated like unary expressions, which take an x and return an x
int_literal = Expression((INT, INT), stringify_literal)
bool_literal = Expression((BOOL, BOOL), stringify_literal)
LITERAL_EXPRESSIONS = {
INT: int_literal,
BOOL: bool_literal
}
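# Minimal usage sketch: the literal and infix expressions above ignore the
# namer argument, so None suffices for this illustration (a real generator
# would pass a namer that names variables and functions).
#   expr = plus_expr(int_literal(1), mul_expr(int_literal(2), int_literal(3)))
#   expr.stringify(None)   # -> '(1 + (2 * 3))'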
|
mit
| 8,976,415,102,576,479,000
| 31.8125
| 108
| 0.659238
| false
| 3.420195
| false
| false
| false
|
simonpessemesse/seguinus
|
easyPoS/Gui/Taches.py
|
1
|
3919
|
from PyQt4 import QtCore, QtGui
from kronos import ThreadedScheduler
import FenetreTache
import taches.views as taaaches
from datetime import datetime, timedelta, date
import preferences
import webbrowser
from chambres.models import joliePeriode
from easyPoS.models import DonneesEntreprise, LigneFacture, PreparationFacture, Produit, LogFacture
from chambres.models import Client
from chambres.models import Reservation, TourOperateur
import traceback
from PyQt4.QtGui import QIcon
from PyQt4.QtCore import QThread, SIGNAL
from easyPoS.models import Facture
import time
import EditionFacture
import sys
import logging
class GuetteNouvelles(QThread):
def __init__(self, parent=None):
QThread.__init__(self, parent)
self.minuteur = None
def run(self):
if self.minuteur:
self.minuteur.stop()
self.minuteur = ThreadedScheduler()
today = date.today()
executions = taaaches.taches(today)
for t in executions:
if (t.rappel):
rap = str(t.rappel).split(":")
diff = datetime(today.year, today.month, today.day, int(rap[0]), int(rap[1])) - datetime.now()
if (abs(diff) == diff):
print(t, " pour ", str(t.rappel))
self.minuteur.add_single_task(self.montreFenetre, "test action 1",
diff.days * 24 * 60 * 60 + diff.seconds, "threaded", [t.id], None)
else:
pass
self.minuteur.add_single_task(self.run, "malin le fauve", 600, "threaded", [], None)
self.minuteur.start()
def montreFenetre(self, id):
print("montre", id)
self.emit(SIGNAL("showTache(int)"), id)
class TachesListe(QtGui.QMainWindow):
def nettoie(self):
toDelete = []
for key, item in self.facturettes.items():
if not item.isVisible():
item.stop()
item.deleteLater()
toDelete.append(key)
for i in toDelete:
del self.facturettes[i]
def montreFenetre(self, plop):
id = plop
if id in self.facturettes:
self.facturettes[id].setFocus()
self.facturettes[id].setWindowState(QtCore.Qt.WindowActive)
self.facturettes[id].activateWindow()
self.facturettes[id].show()
else:
self.facturettes[id] = FenetreTache.EditTache(tache=id, parent=self)
self.facturettes[id].show()
def nouvo(self, idFact=0):
self.montreFenetre(idFact)
return
if idFact < 1:
facture = Facture(etat='B')
c = Client()
c.save()
facture.client = c
facture.save()
self.listePrincipaleModel.ajoute(facture)
else:
facture = Facture.objects.get(pk=idFact)
self.listePrincipaleModel.ajoute(facture)
def montreTache(self):
today = date.today()
executions = taaaches.taches(today)
for t in executions:
if (t.rappel):
rap = str(t.rappel).split(":")
diff = datetime(today.year, today.month, today.day, int(rap[0]), int(rap[1])) - datetime.now()
if (abs(diff) == diff) and t.date == date.today():
pass
else:
if not t.estPeriodique():
self.montreFenetre(t.id)
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.minuteur = None
self.guetteNouvelles = GuetteNouvelles()
self.facturettes = {}
# self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.resize(600, 500)
self.setWindowTitle('Taches')
QtCore.QObject.connect(self.guetteNouvelles, SIGNAL("showTache(int)"), self.nouvo)
self.guetteNouvelles.start()
self.montreTache()
|
gpl-2.0
| 8,621,369,324,232,689,000
| 33.078261
| 116
| 0.586374
| false
| 3.468142
| false
| false
| false
|
asterix24/GestionaleCaldaie
|
gestionale/test/settings.py
|
1
|
5158
|
# Django settings for gestionale project.
from local_settings import *
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = ''
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'it-IT'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Session and login settings
LOGIN_URL = '/login/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Idle time before the session expire in seconds.
SESSION_IDLE_TIMEOUT = 3600
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'main.middleware.LoginRequiredMiddleware',
'main.middleware.SessionIdleTimeout',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'gestionale.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'gestionale.' + APP_PREFIX_NAME + '.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"gestionale.local_env",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'main',
'south',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
#'django.contrib.admindocs',
)
import sys
if DEBUG:
import logging
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%d/%b/%Y %H:%M:%S',
filename=LOCAL_LOG_PATH + "gestionale.log")
logging.getLogger('main.data_render').setLevel(logging.INFO)
logging.getLogger('main.database_manager').setLevel(logging.INFO)
logging.getLogger('main.myfomrs').setLevel(logging.INFO)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
|
gpl-2.0
| 7,529,562,840,878,033,000
| 34.328767
| 108
| 0.73575
| false
| 3.745824
| false
| false
| false
|
cburmeister/fluid
|
app/lib/partial_file.py
|
1
|
1095
|
"""
A wrapper around Flask's send_file implementing the 206 partial protocol.
"""
from flask import Response, request, send_file
import mimetypes
import os
import re
def send(path):
"""Returns a file via the 206 partial protocol."""
range_header = request.headers.get('Range', None)
if not range_header:
return send_file(path) # Client must want the entire file
size = os.path.getsize(path)
start, end = 0, None
    m = re.search(r'(\d+)-(\d*)', range_header)
g = m.groups()
if g[0]:
start = int(g[0])
if g[1]:
end = int(g[1])
length = min(size - start, 5120000)
if end is not None:
        length = end - start + 1  # HTTP byte ranges are inclusive of both ends
data = None
with open(path, 'rb') as f:
f.seek(start)
data = f.read(length)
mimetype, _ = mimetypes.guess_type(path)
rv = Response(data, 206, mimetype=mimetype, direct_passthrough=True)
rv.headers.add('Accept-Ranges', 'bytes')
rv.headers.add(
'Content-Range', 'bytes {0}-{1}/{2}'.format(
start, start + length - 1, size
)
)
return rv
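# Example (assuming a 1 MiB file): a request carrying "Range: bytes=100-199"
# yields start=100, end=199 and a 206 response with 100 bytes of data and the
# header "Content-Range: bytes 100-199/1048576"; a request with no Range
# header is served whole via send_file().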
|
mit
| 5,795,330,658,714,252,000
| 23.333333
| 73
| 0.591781
| false
| 3.487261
| false
| false
| false
|
ardi69/pyload-0.4.10
|
pyload/plugin/hoster/UloziskoSk.py
|
1
|
2443
|
# -*- coding: utf-8 -*-
import re
import urlparse
from pyload.plugin.internal.SimpleHoster import SimpleHoster
class UloziskoSk(SimpleHoster):
__name = "UloziskoSk"
__type = "hoster"
__version = "0.25"
__pattern = r'http://(?:www\.)?ulozisko\.sk/.+'
__config = [("use_premium", "bool", "Use premium account if available", True)]
__description = """Ulozisko.sk hoster plugin"""
__license = "GPLv3"
__authors = [("zoidberg", "zoidberg@mujmail.cz")]
NAME_PATTERN = r'<div class="down1">(?P<N>[^<]+)</div>'
SIZE_PATTERN = ur'Veľkosť súboru: <strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong><br />'
OFFLINE_PATTERN = ur'<span class = "red">Zadaný súbor neexistuje z jedného z nasledujúcich dôvodov:</span>'
LINK_FREE_PATTERN = r'<form name = "formular" action = "(.+?)" method = "post">'
ID_PATTERN = r'<input type = "hidden" name = "id" value = "(.+?)" />'
CAPTCHA_PATTERN = r'<img src="(/obrazky/obrazky\.php\?fid=.+?)" alt="" />'
IMG_PATTERN = ur'<strong>PRE ZVÄČŠENIE KLIKNITE NA OBRÁZOK</strong><br /><a href = "(.+?)">'
def process(self, pyfile):
self.html = self.load(pyfile.url, decode=True)
self.getFileInfo()
m = re.search(self.IMG_PATTERN, self.html)
if m:
self.link = "http://ulozisko.sk" + m.group(1)
else:
self.handle_free(pyfile)
def handle_free(self, pyfile):
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("LINK_FREE_PATTERN not found"))
parsed_url = 'http://www.ulozisko.sk' + m.group(1)
m = re.search(self.ID_PATTERN, self.html)
if m is None:
self.error(_("ID_PATTERN not found"))
id = m.group(1)
self.logDebug("URL:" + parsed_url + ' ID:' + id)
m = re.search(self.CAPTCHA_PATTERN, self.html)
if m is None:
self.error(_("CAPTCHA_PATTERN not found"))
captcha_url = urlparse.urljoin("http://www.ulozisko.sk", m.group(1))
captcha = self.decryptCaptcha(captcha_url, cookies=True)
self.logDebug("CAPTCHA_URL:" + captcha_url + ' CAPTCHA:' + captcha)
self.download(parsed_url,
post={"antispam": captcha,
"id" : id,
"name" : pyfile.name,
"but" : "++++STIAHNI+S%DABOR++++"})
|
gpl-3.0
| -5,508,463,001,809,072,000
| 34.231884
| 111
| 0.546689
| false
| 3.042553
| false
| false
| false
|
googleapis/googleapis-gen
|
google/apps/market/v2/hosted-marketplace-v2-py/ccc/hosted/marketplace_v2/services/license_notification_service/transports/grpc.py
|
1
|
11550
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from ccc.hosted.marketplace_v2.types import resources
from ccc.hosted.marketplace_v2.types import services
from .base import LicenseNotificationServiceTransport, DEFAULT_CLIENT_INFO
class LicenseNotificationServiceGrpcTransport(LicenseNotificationServiceTransport):
"""gRPC backend transport for LicenseNotificationService.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'appsmarket.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'appsmarket.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list(self) -> Callable[
[services.LicenseNotificationListRequest],
resources.LicenseNotificationList]:
r"""Return a callable for the list method over gRPC.
Get a list of licensing notifications with regards to
a given app.
Returns:
Callable[[~.LicenseNotificationListRequest],
~.LicenseNotificationList]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list' not in self._stubs:
self._stubs['list'] = self.grpc_channel.unary_unary(
'/ccc.hosted.marketplace.v2.LicenseNotificationService/List',
request_serializer=services.LicenseNotificationListRequest.serialize,
response_deserializer=resources.LicenseNotificationList.deserialize,
)
return self._stubs['list']
__all__ = (
'LicenseNotificationServiceGrpcTransport',
)
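# A hypothetical construction sketch (not part of the generated file). It
# assumes Application Default Credentials are available in the environment;
# in normal use the generated client class builds this transport for you.
def _example_build_transport():
    transport = LicenseNotificationServiceGrpcTransport(
        host='appsmarket.googleapis.com',
    )
    # `transport.list` is a callable that accepts a
    # services.LicenseNotificationListRequest and returns a
    # resources.LicenseNotificationList.
    return transport.list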
|
apache-2.0
| -7,904,287,990,203,589,000
| 44.833333
| 87
| 0.61013
| false
| 4.834659
| false
| false
| false
|
MartinThoma/hwrt
|
tests/create_ffiles_test.py
|
1
|
4678
|
#!/usr/bin/env python
# Core Library modules
import os
# First party modules
import hwrt.create_ffiles as create_ffiles
import hwrt.data_multiplication as data_multiplication
import hwrt.features as features
import hwrt.utils as utils
import tests.testhelper as th
def test_training_set_multiplication():
"""Test the create_ffiles.training_set_multiplication method."""
sample = th.get_symbol_as_handwriting(292934)
training_set = [
{
"id": 1337,
"is_in_testset": 0,
"formula_id": 42,
"handwriting": sample,
"formula_in_latex": "B",
}
]
mult_queue = [data_multiplication.Multiply()]
create_ffiles.training_set_multiplication(training_set, mult_queue)
def test_execution():
formula_id2index = {1337: 1, 12: 2}
feature_folder = "."
index2latex = {1: "\\alpha", 2: "\\beta"}
create_ffiles._create_index_formula_lookup(
formula_id2index, feature_folder, index2latex
)
def test_prepare_dataset():
"""Test create_ffiles.prepare_dataset."""
dataset = []
for i in range(200):
dataset.append(
{"handwriting": th.get_symbol_as_handwriting(97705), "formula_id": 42}
)
# dataset[-1]['handwriting'].formula_id = 42
formula_id2index = {}
formula_id2index[42] = 1
feature_list = [features.StrokeCount()]
is_traindata = False
create_ffiles.prepare_dataset(dataset, formula_id2index, feature_list, is_traindata)
def test_normalize_features_one():
"""Test create_ffiles._normalize_features with one point."""
feature_list = [features.Width(), features.Height()]
prepared = [([123], 1)]
is_traindata = True
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
assert out == [([0.0], 1)]
def test_normalize_features_two():
"""Test create_ffiles._normalize_features with two points."""
feature_list = [features.Width(), features.Height()]
prepared = [([123], 1), ([100], 1)]
is_traindata = True
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
# Mean: 111.5; Range: 23
assert out == [([0.5], 1), ([-0.5], 1)]
# Now the other set
prepared = [([111.5], 1), ([90], 1), ([180], 1)]
is_traindata = False
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
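    # With is_traindata=False the values are scaled using the stored training
    # mean/range: (111.5 - 111.5) / 23 = 0.0, (90 - 111.5) / 23 ~= -0.9348,
    # (180 - 111.5) / 23 ~= 2.9783, which is what the assertion below checks.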
assert out == [([0.0], 1), ([-0.93478260869565222], 1), ([2.9782608695652173], 1)]
def test_normalize_features_two_feats():
"""Test create_ffiles._normalize_features with two points."""
feature_list = [features.Width(), features.Height()]
prepared = [([123, 123], 1), ([100, 100], 1)]
is_traindata = True
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
# Mean: 111.5; Range: 23
assert out == [([0.5, 0.5], 1), ([-0.5, -0.5], 1)]
# Now the other set
prepared = [([111.5, 111.5], 1), ([146, 146], 1), ([54, 54], 1)]
is_traindata = False
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
assert out == [([0.0, 0.0], 1), ([1.5, 1.5], 1), ([-2.5, -2.5], 1)]
def test_normalize_features_two_feats2():
"""Test create_ffiles._normalize_features with two points."""
feature_list = [features.Width(), features.Height()]
prepared = [([123, 123], 1), ([100, 100], 1)]
is_traindata = True
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
# Mean: 111.5; Range: 23
assert out == [([0.5, 0.5], 1), ([-0.5, -0.5], 1)]
# Now the other set
prepared = [([111.5, 146], 1), ([146, 111.5], 1), ([54, 54], 1)]
is_traindata = False
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
assert out == [([0.0, 1.5], 1), ([1.5, 0.0], 1), ([-2.5, -2.5], 1)]
def test_normalize_features_two_classes():
"""Test create_ffiles._normalize_features with two classes."""
feature_list = [features.Width(), features.Height()]
prepared = [([123], 1), ([100], 1), ([500], 2)]
is_traindata = True
out = create_ffiles._normalize_features(feature_list, prepared, is_traindata)
# Mean: 241; Range: 400
assert out == [([-0.295], 1), ([-0.3525], 1), ([0.6475], 2)]
def test_create_translation_file():
"""Test create_ffiles._create_translation_file."""
feature_folder = os.path.join(
utils.get_project_root(), "feature-files", "small-baseline"
)
dataset_name = "testtestdata"
translation = [(133700, "\\alpha", 42)]
formula_id2index = {42: 1}
create_ffiles._create_translation_file(
feature_folder, dataset_name, translation, formula_id2index
)
|
mit
| -465,068,950,497,233,400
| 34.709924
| 88
| 0.618854
| false
| 3.21512
| true
| false
| false
|
GiulianoFranchetto/zephyr
|
boards/xtensa/intel_s1000_crb/support/messenger.py
|
1
|
8143
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Author: Sathish Kuttan <sathish.k.kuttan@intel.com>
# This file defines a message class that contains functions to create
# commands to the target and to parse responses from the target.
import bitstruct
class Message:
"""
Message class containing the methods to create command messages and
parse response messages.
"""
message_id = {1: 'Control'}
cmd_rsp = {2: 'Load Firmware',
4: 'Mode Select',
0x10: 'Memory Read',
0x11: 'Memory Write',
0x12: 'Memory Block Write',
0x13: 'Execute',
0x14: 'Wait',
0x20: 'Ready'}
tx_data = None
tx_bulk_data = None
tx_index = 0
cmd_word_fmt = 'u1 u1 u1 u5 u16 u8'
cmd_keys = ['cmd', 'rsvd1', 'rsp', 'msg_id', 'rsvd2', 'cmd_rsp']
def __init__(self):
"""
        Initialize a byte array of 64 bytes for command messages
        Initialize another byte array of 4096 bytes for bulk messages
"""
self.tx_data = bytearray(64)
self.tx_bulk_data = bytearray(4096)
def init_tx_data(self):
"""
        Initialize transmit message buffers to zeros
"""
for index in range(len(self.tx_data)):
self.tx_data[index] = 0
self.tx_index = 0
def endian_swap(self, dst, dst_offset, src):
"""
        Performs a byte swap of a 32-bit word to change its endianness
"""
for index in range(0, len(src), 4):
dst[dst_offset + index + 0] = src[index + 3]
dst[dst_offset + index + 1] = src[index + 2]
dst[dst_offset + index + 2] = src[index + 1]
dst[dst_offset + index + 3] = src[index + 0]
def print_cmd_message(self):
"""
Prints the contents of the command message buffer
"""
for index in range(0, self.tx_index, 4):
offset = index * 8
word = bitstruct.unpack_from('u32', self.tx_data, offset)
print('Index: %2d Content: 0x%08x' %(index, word[0]))
def print_response(self, msg, verbose = False):
"""
Parses and prints the contents of the response message
"""
unpacked = bitstruct.unpack_from_dict(self.cmd_word_fmt,
self.cmd_keys, msg)
msg_id = unpacked['msg_id']
rsp = unpacked['cmd_rsp']
if msg_id == 0 and rsp == 0:
print('RSP <<< NULL.')
else:
print('RSP <<< %s.' % self.cmd_rsp[rsp])
if verbose == True:
count = bitstruct.unpack_from('u32', msg, 4 * 8)[0]
count &= 0x1ff
for index in range(0, 8 + (count * 4), 4):
offset = index * 8
word = bitstruct.unpack_from('u32', msg, offset)
print('Index: %2d Content: 0x%08x' %(index, word[0]))
def get_cmd_code(self, cmd):
"""
Looks up the command and returns the numeric code
"""
index = list(self.cmd_rsp.values()).index(cmd)
return list(self.cmd_rsp.keys())[index]
def print_cmd_code(self, cmd):
"""
Prints the numeric code for the given command
"""
key = self.get_cmd_code(cmd)
print('CMD >>> %s. Command Code: 0x%02x' % (cmd, key))
def create_null_cmd(self):
"""
Creates a NULL command
"""
print('CMD >>> NULL.')
for index in range(len(self.tx_data)):
self.tx_data[index] = 0
self.tx_index = len(self.tx_data)
return self.tx_data
def create_memwrite_cmd(self, tuple):
"""
Creates a memory write command with memory address and value pairs
"""
cmd = 'Memory Write'
print('CMD >>> %s.' % cmd)
code = self.get_cmd_code(cmd)
self.init_tx_data()
index = list(self.message_id.values()).index('Control')
msg_id = list(self.message_id.keys())[index]
bitstruct.pack_into_dict(self.cmd_word_fmt, self.cmd_keys,
self.tx_data, 0, {'cmd': 1, 'rsvd1': 0, 'rsp': 0,
'msg_id': msg_id, 'rsvd2': 0, 'cmd_rsp': code})
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8,
len(tuple))
self.tx_index += 4
for index in range(len(tuple)):
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8,
tuple[index])
self.tx_index += 4
return self.tx_data
def create_memread_cmd(self, tuple):
"""
Creates a memory read command with memory addresses
"""
cmd = 'Memory Read'
print('CMD >>> %s.' % cmd)
code = self.get_cmd_code(cmd)
self.init_tx_data()
index = list(self.message_id.values()).index('Control')
msg_id = list(self.message_id.keys())[index]
bitstruct.pack_into_dict(self.cmd_word_fmt, self.cmd_keys,
self.tx_data, 0, {'cmd': 1, 'rsvd1': 0, 'rsp': 0,
'msg_id': msg_id, 'rsvd2': 0, 'cmd_rsp': code})
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8,
len(tuple))
self.tx_index += 4
for index in range(len(tuple)):
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8,
tuple[index])
self.tx_index += 4
return self.tx_data
def create_loadfw_cmd(self, size, sha):
"""
Creates a command to load firmware with associated parameters
"""
cmd = 'Load Firmware'
print('CMD >>> %s.' % cmd)
code = self.get_cmd_code(cmd)
FW_NO_EXEC_FLAG = (1 << 26)
SEL_HP_CLK = (1 << 21)
LD_FW_HEADER_LEN = 3
count_flags = FW_NO_EXEC_FLAG | SEL_HP_CLK
count_flags |= (LD_FW_HEADER_LEN + int(len(sha) / 4))
self.init_tx_data()
index = list(self.message_id.values()).index('Control')
msg_id = list(self.message_id.keys())[index]
bitstruct.pack_into_dict(self.cmd_word_fmt, self.cmd_keys,
self.tx_data, 0, {'cmd': 1, 'rsvd1': 0, 'rsp': 0,
'msg_id': msg_id, 'rsvd2': 0, 'cmd_rsp': code})
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, count_flags)
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, 0xbe000000)
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, 0)
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, size)
self.tx_index += 4
self.endian_swap(self.tx_data, self.tx_index, sha)
self.tx_index += len(sha)
return self.tx_data
def create_execfw_cmd(self):
"""
        Creates a command to execute firmware
"""
cmd = 'Execute'
print('CMD >>> %s.' % cmd)
code = self.get_cmd_code(cmd)
EXE_FW_HEADER_LEN = 1
count = EXE_FW_HEADER_LEN
self.init_tx_data()
index = list(self.message_id.values()).index('Control')
msg_id = list(self.message_id.keys())[index]
bitstruct.pack_into_dict(self.cmd_word_fmt, self.cmd_keys,
self.tx_data, 0, {'cmd': 1, 'rsvd1': 0, 'rsp': 0,
'msg_id': msg_id, 'rsvd2': 0, 'cmd_rsp': code})
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, count)
self.tx_index += 4
bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, 0xbe000000)
self.tx_index += 4
return self.tx_data
def create_bulk_message(self, data):
"""
Copies the input byte stream to the bulk message buffer
"""
self.endian_swap(self.tx_bulk_data, 0, data)
return self.tx_bulk_data[:len(data)]
def get_bulk_message_size(self):
"""
Returns the size of the bulk message buffer
"""
return len(self.tx_bulk_data)
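# A small usage sketch (not part of the original module); it assumes the
# `bitstruct` package is installed. The address/value words below are
# hypothetical placeholders.
if __name__ == '__main__':
    msg = Message()
    # create_memwrite_cmd takes a flat tuple of 32-bit words (address/value pairs).
    msg.create_memwrite_cmd((0x71d00000, 0x1, 0x71d00004, 0x0))
    msg.print_cmd_message()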
|
apache-2.0
| 9,050,911,253,903,432,000
| 33.948498
| 80
| 0.534078
| false
| 3.384456
| false
| false
| false
|
cwaldbieser/txsshadmin
|
txsshadmin/proto_dispatcher.py
|
1
|
3715
|
from twisted.conch.recvline import HistoricRecvLine
from twisted.python import log
from textwrap import dedent
def makeSSHDispatcherProtocolFactory(handlerFactory, *args, **kwds):
def makeDispatcherProtocol(avatar, *a, **k):
proto = SSHDispatcherProtocol()
proto.handler = handlerFactory(avatar, *args, **kwds)
return proto
return makeDispatcherProtocol
class SSHDispatcherProtocol(HistoricRecvLine):
prompt = "$"
CTRL_D = '\x04'
def connectionMade(self):
HistoricRecvLine.connectionMade(self)
self.keyHandlers.update({
self.CTRL_D: lambda: self.handler.onEOF(self)})
try:
self.handler.onConnect(self)
except AttributeError:
pass
self.showPrompt()
def showPrompt(self):
self.terminal.write("{0} ".format(self.prompt))
def getCommandFunc(self, cmd):
return getattr(self.handler, 'handle_{0}'.format(cmd), None)
def lineReceived(self, line):
line = line.strip()
if line:
argv = line.split()
cmd = argv[0]
args = argv[1:]
func = self.getCommandFunc(cmd)
if func:
try:
func(self, *args)
except Exception as ex:
self.terminal.write("Errors occured.")
self.terminal.nextLine()
log.msg(str(ex))
else:
self.terminal.write("Unknown command, '{0}'.".format(cmd))
self.terminal.nextLine()
self.showPrompt()
class BaseHandler(object):
def __init__(self, avatar):
self.avatar = avatar
commands = [attr[7:] for attr in dir(self)
if attr.startswith('handle_') and attr.lower() == attr]
commands.sort()
self.commandHelp = "Commands: {0}".format(' '.join(commands))
def onConnect(self, dispatcher):
pass
def handle_help(self, dispatcher, *args):
"""
Get help on a command.
help [COMMAND]
"""
terminal = dispatcher.terminal
if len(args) == 0:
terminal.write(self.commandHelp)
terminal.nextLine()
terminal.write("Use `help <command>` for help on a particular command.")
terminal.nextLine()
else:
cmd = args[0]
handler = "handle_{0}".format(cmd)
if hasattr(self, handler):
func = getattr(self, handler)
doc = dedent(func.__doc__)
lines = doc.split('\n')
for line in lines:
terminal.write(line)
terminal.nextLine()
else:
terminal.write("Unknown command, '{0}'.".format(cmd))
                terminal.nextLine()
def handle_whoami(self, dispatcher):
"""
Show who you are logged in as.
"""
terminal = dispatcher.terminal
terminal.write("You are '{0}'.".format(self.avatar.avatarId))
terminal.nextLine()
def handle_clear(self, dispatcher):
"""
Clear the terminal.
"""
terminal = dispatcher.terminal
terminal.reset()
def handle_quit(self, dispatcher):
"""
Exit this admin shell.
"""
terminal = dispatcher.terminal
terminal.write("Goodbye.")
terminal.nextLine()
terminal.loseConnection()
def onEOF(self, dispatcher):
terminal = dispatcher.terminal
lineBuffer = dispatcher.lineBuffer
if lineBuffer:
terminal.write('\a')
else:
self.handle_quit(dispatcher)
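# A minimal handler sketch (not part of the original module) showing how the
# factory is intended to be used; the EchoHandler name and `echo` command are
# hypothetical.
class EchoHandler(BaseHandler):
    def handle_echo(self, dispatcher, *args):
        """
        Echo the arguments back to the terminal.
        echo [WORD ...]
        """
        terminal = dispatcher.terminal
        terminal.write(' '.join(args))
        terminal.nextLine()
# The resulting protocol factory would typically be wired into a conch SSH avatar/session.
makeEchoProtocol = makeSSHDispatcherProtocolFactory(EchoHandler)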
|
gpl-3.0
| -4,181,060,626,740,396,500
| 28.484127
| 84
| 0.545626
| false
| 4.454436
| false
| false
| false
|
nicfit/Clique
|
clique/app/identity.py
|
1
|
1960
|
# -*- coding: utf-8 -*-
import sys
import json
import argparse
import nicfit
from .. import Identity, IdentityChain
from .utils import prompt
from ..common import thumbprint, newJwk, jwkIsPrivate
@nicfit.command.register
class identity(nicfit.Command):
HELP = "Identity and stuffs"
def _initArgParser(self, parser):
parser.add_argument("-i", "--identity", default=None,
type=argparse.FileType('r'),
help="File containing an Identity in JSON format.")
parser.add_argument("-k", "--keyfile", default=None,
type=argparse.FileType('r'),
help="File containing a private JWK.")
parser.add_argument("--iss", default=None,
help="Identity issuer.")
def _run(self):
if self.args.identity:
ident = Identity.fromJson(json.loads(self.args.identity.read()))
else:
if self.args.keyfile:
try:
jwk = json.loads(self.args.keyfile.read())
key = newJwk(**jwk)
if not jwkIsPrivate(key):
raise ValueError(
"Key file does not contain a private key")
except Exception as ex:
print("Error loading key: " + str(ex), file=sys.stderr)
return 1
key._params["kid"] = thumbprint(key)
else:
key = Identity.generateKey()
iss = self.args.iss or prompt("iss? ")
ident = Identity(iss, key)
ident.idchain = IdentityChain.fromIdentity(ident,
ident.acct).serialize()
print(json.dumps(ident.toJson(private=True), indent=2, sort_keys=True))
idchain = IdentityChain.deserialize(ident.idchain)
print("\n## IdentityChain ##:\n" + str(idchain))
|
lgpl-3.0
| 3,020,030,026,125,875,700
| 35.981132
| 79
| 0.529082
| false
| 4.474886
| false
| false
| false
|
rgcarrasqueira/python-pagseguro
|
pagseguro/configs.py
|
1
|
6137
|
# coding: utf-8
import abc
class AbstractConfig(object): # pragma: no cover
__metaclass__ = abc.ABCMeta
def __init__(self, sandbox=False):
self.sandbox = sandbox
    @classmethod
    def get(cls, key, default=None):
        return getattr(cls, key, default)
@abc.abstractproperty
def BASE_URL(self):
return self._BASE_URL
@BASE_URL.setter
def BASE_URL(self, value):
self._BASE_URL = value
@abc.abstractproperty
def VERSION(self):
return self._VERSION
@VERSION.setter
def VERSION(self, value):
self._VERSION = value
@abc.abstractproperty
def CHECKOUT_SUFFIX(self):
return self._CHECKOUT_SUFFIX
@CHECKOUT_SUFFIX.setter
def CHECKOUT_SUFFIX(self, value):
self._CHECKOUT_SUFFIX = value
@abc.abstractproperty
def CHARSET(self):
return self._CHARSET
@CHARSET.setter
def CHARSET(self, value):
self._CHARSET = value
@abc.abstractproperty
def NOTIFICATION_SUFFIX(self):
return self._NOTIFICATION_SUFFIX
@NOTIFICATION_SUFFIX.setter
def NOTIFICATION_SUFFIX(self, value):
self._NOTIFICATION_SUFFIX = value
@abc.abstractproperty
def TRANSACTION_SUFFIX(self):
return self._TRANSACTION_SUFFIX
@TRANSACTION_SUFFIX.setter
def TRANSACTION_SUFFIX(self, value):
self._TRANSACTION_SUFFIX = value
@abc.abstractproperty
def QUERY_TRANSACTION_SUFFIX(self):
return self._QUERY_TRANSACTION_SUFFIX
@QUERY_TRANSACTION_SUFFIX.setter
def QUERY_TRANSACTION_SUFFIX(self, value):
self._QUERY_TRANSACTION_SUFFIX = value
@abc.abstractproperty
def CHECKOUT_URL(self):
return self._CHECKOUT_URL
@CHECKOUT_URL.setter
def CHECKOUT_URL(self, value):
self._CHECKOUT_URL = value
@abc.abstractproperty
def NOTIFICATION_URL(self):
return self._NOTIFICATION_URL
@NOTIFICATION_URL.setter
def NOTIFICATION_URL(self, value):
self._NOTIFICATION_URL = value
@abc.abstractproperty
def TRANSACTION_URL(self):
return self._TRANSACTION_URL
@TRANSACTION_URL.setter
def TRANSACTION_URL(self, value):
self._TRANSACTION_URL = value
@abc.abstractproperty
def QUERY_TRANSACTION_URL(self):
return self._QUERY_TRANSACTION_URL
@QUERY_TRANSACTION_URL.setter
def QUERY_TRANSACTION_URL(self, value):
self._QUERY_TRANSACTION_URL = value
@abc.abstractproperty
def CURRENCY(self):
return self._CURRENCY
@CURRENCY.setter
def CURRENCY(self, value):
self._CURRENCY = value
@abc.abstractproperty
def CTYPE(self):
return self._CTYPE
@CTYPE.setter
def CTYPE(self, value):
self._CTYPE = value
@abc.abstractproperty
def HEADERS(self):
return self._HEADERS
@HEADERS.setter
def HEADERS(self, value):
self._HEADERS = value
@abc.abstractproperty
def REFERENCE_PREFIX(self):
return self._REFERENCE_PREFIX
@REFERENCE_PREFIX.setter
def REFERENCE_PREFIX(self, value):
self._REFERENCE_PREFIX = value
@abc.abstractproperty
def PAYMENT_HOST(self):
return self._PAYMENT_HOST
@PAYMENT_HOST.setter
def PAYMENT_HOST(self, value):
self._PAYMENT_HOST = value
@abc.abstractproperty
def PAYMENT_URL(self):
return self._PAYMENT_URL
@PAYMENT_URL.setter
def PAYMENT_URL(self, value):
self._PAYMENT_URL = value
@abc.abstractproperty
def DATETIME_FORMAT(self):
return self._DATETIME_FORMAT
@DATETIME_FORMAT.setter
def DATETIME_FORMAT(self, value):
self._DATETIME_FORMAT = value
class Config(AbstractConfig):
BASE_URL = "https://ws.pagseguro.uol.com.br"
VERSION = "/v2/"
CHECKOUT_SUFFIX = VERSION + "checkout"
CHARSET = "UTF-8" # ISO-8859-1
NOTIFICATION_SUFFIX = VERSION + "transactions/notifications/%s"
PRE_APPROVAL_NOTIFICATION_SUFFIX = (
VERSION + "pre-approvals/notifications/%s"
)
PRE_APPROVAL_PAYMENT_URL = BASE_URL + VERSION + "pre-approvals/payment"
PRE_APPROVAL_CANCEL_URL = BASE_URL + VERSION + "pre-approvals/cancel/%s"
TRANSACTION_SUFFIX = VERSION + "transactions/%s"
QUERY_TRANSACTION_SUFFIX = VERSION + "transactions"
SESSION_CHECKOUT_SUFFIX = VERSION + "sessions/"
SESSION_CHECKOUT_URL = BASE_URL + SESSION_CHECKOUT_SUFFIX
TRANSPARENT_CHECKOUT_URL = BASE_URL + QUERY_TRANSACTION_SUFFIX + '/'
CHECKOUT_URL = BASE_URL + CHECKOUT_SUFFIX
NOTIFICATION_URL = BASE_URL + NOTIFICATION_SUFFIX
PRE_APPROVAL_NOTIFICATION_URL = BASE_URL + PRE_APPROVAL_NOTIFICATION_SUFFIX
TRANSACTION_URL = BASE_URL + TRANSACTION_SUFFIX
QUERY_TRANSACTION_URL = BASE_URL + QUERY_TRANSACTION_SUFFIX
QUERY_PRE_APPROVAL_URL = BASE_URL + VERSION + "pre-approvals"
CURRENCY = "BRL"
CTYPE = "application/x-www-form-urlencoded; charset={0}".format(CHARSET)
HEADERS = {"Content-Type": CTYPE}
REFERENCE_PREFIX = "REF%s"
PAYMENT_HOST = "https://pagseguro.uol.com.br"
PAYMENT_URL = PAYMENT_HOST + CHECKOUT_SUFFIX + "/payment.html?code=%s"
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
class ConfigSandbox(AbstractConfig):
BASE_URL = "https://ws.sandbox.pagseguro.uol.com.br"
VERSION = "/v2/"
CHECKOUT_SUFFIX = VERSION + "checkout"
CHARSET = "UTF-8" # ISO-8859-1
NOTIFICATION_SUFFIX = VERSION + "transactions/notifications/%s"
TRANSACTION_SUFFIX = VERSION + "transactions/%s"
QUERY_TRANSACTION_SUFFIX = VERSION + "transactions"
CHECKOUT_URL = BASE_URL + CHECKOUT_SUFFIX
NOTIFICATION_URL = BASE_URL + NOTIFICATION_SUFFIX
TRANSACTION_URL = BASE_URL + TRANSACTION_SUFFIX
QUERY_TRANSACTION_URL = BASE_URL + QUERY_TRANSACTION_SUFFIX
CURRENCY = "BRL"
CTYPE = "application/x-www-form-urlencoded; charset={0}".format(CHARSET)
HEADERS = {"Content-Type": CTYPE}
REFERENCE_PREFIX = "REF%s"
PAYMENT_HOST = "https://sandbox.pagseguro.uol.com.br"
PAYMENT_URL = PAYMENT_HOST + CHECKOUT_SUFFIX + "/payment.html?code=%s"
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
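# A small selection sketch (not part of the original module): picking the
# sandbox or production endpoints at runtime; the `use_sandbox` flag is a
# hypothetical application setting.
def get_config(use_sandbox=False):
    config = ConfigSandbox() if use_sandbox else Config()
    # All URL constants resolve against the chosen BASE_URL, e.g.
    # config.get('CHECKOUT_URL').
    return config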
|
mit
| -8,740,387,266,421,151,000
| 27.948113
| 79
| 0.663842
| false
| 3.53107
| false
| false
| false
|
sguazt/dcsxx-testbed
|
tools/giws/classRepresentation/parameterGiws.py
|
1
|
2638
|
#!/usr/bin/python -u
# Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU
#
# Sylvestre LEDRU - <sylvestre.ledru@inria.fr> <sylvestre@ledru.info>
#
# This software is a computer program whose purpose is to generate C++ wrapper
# for Java objects/methods.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
# For more information, see the file COPYING
from datatypes.dataGiws import dataGiws
from datatypes.dataBufferGiws import dataBufferGiws
from datatypes.dataFactoryGiws import dataFactoryGiws
class parameterGiws():
__name=""
__type=""
def __init__(self, name, type):
myDataFactory=dataFactoryGiws()
self.__type=myDataFactory.create(type)
self.__name=name
def getName(self):
return self.__name
def getType(self):
return self.__type
def __str__(self):
return """%s %s, """ % (self.getType().getNativeType(), self.getName())
def generateCXXHeader(self):
""" Generate the profil of the parameter """
str="""%s %s""" % (self.getType().getNativeTypeWithConst(), self.getName())
if self.getType().isArray():
if self.getType().getDimensionArray() == 1:
str+=", int %sSize"%self.getName()
else:
str+=", int %sSize, int %sSizeCol"%(self.getName(),self.getName())
return str
|
apache-2.0
| 8,050,276,257,811,369,000
| 37.794118
| 79
| 0.735027
| false
| 3.425974
| false
| false
| false
|
jansky/JanskyBlog
|
misc.py
|
1
|
1343
|
"""
The MIT License (MIT)
Copyright (c) 2014 Janský Důnska
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from string import Template
def PrintHeaders(httpcode):
print "HTTP/1.0 " + httpcode
print "Content-type: text/html; charset=utf-8"
print
# Uncomment below to enable the simple WriteDocument function
#def WriteDocument(content,title=""):
# print "<!DOCTYPE html>"
# print "<html>"
# print "<head>"
# if title == "":
# print "<title>Blog</title>"
# else:
# print "<title>" + title + " | Blog</title>"
# print "<meta charset='utf-8'/>"
# print "<meta name='author' content='PyBloggingSystem v0.1'/>"
# print "<body>"
# print content
# print "</body>"
# print "</html>"
def DoDocumentTemplating(data, templateFile):
templateFileString = ""
with open(templateFile, 'r') as template_file:
templateFileString = template_file.read()
print Template(templateFileString).safe_substitute(data)
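# A minimal usage sketch (not part of the original module), kept commented out
# like the WriteDocument example above. 'template.html' is a hypothetical file
# containing $title and $content placeholders for string.Template.
#PrintHeaders("200 OK")
#DoDocumentTemplating({'title': 'Home', 'content': '<p>Hello</p>'}, 'template.html')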
|
mit
| -4,770,807,393,456,153,000
| 25.294118
| 77
| 0.727069
| false
| 3.420918
| false
| false
| false
|
Luciden/easl
|
easl/log.py
|
1
|
5275
|
__author__ = 'Dennis'
import csv
class Log(object):
"""
Simple log that contains all experiment information (actions, observations).
Time based. Logs for every time step what happened.
Can (not yet) be read from/to files etc.
"""
def __init__(self):
"""
Attributes
----------
log : [{}]
All entries.
An entry describes time, type of entry and its type-related data.
verbose : bool
If set to True, logging attempts are printed to stdout.
"""
self.log = []
self.verbose = False
self.time = 0
self.length = 0
def read_file(self, file_name):
"""
Parameters
----------
file_name : string
Name of the file to read the log from.
"""
self.__from_file(file_name)
def set_verbose(self, verbose=True):
self.verbose = verbose
def get_length(self):
return self.length
def get_at_time(self, time):
entries = []
for e in self.log:
if e["_time"] == time:
entries.append(e)
return entries
def time_tick(self, time=None):
if time is None:
self.time += 1
else:
self.time = time
if self.verbose:
print "t {0}".format(self.time)
def do_log(self, kind, data):
entry = {"_time": self.time, "_type": kind}
entry.update(data)
self.log.append(entry)
self.length = self.time
if self.verbose:
print entry
def write_file(self, name):
"""
Writes all entries to a file.
Parameters
----------
name : string
Name of the file to write to.
"""
f = open(name, 'wt')
        try:
            # DictWriter needs explicit field names; collect them from all log
            # entries so rows with differing keys are written consistently.
            fieldnames = []
            for entry in self.log:
                for key in entry:
                    if key not in fieldnames:
                        fieldnames.append(key)
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for entry in self.log:
                writer.writerow(entry)
finally:
f.close()
def __from_file(self, name):
"""
Reads all entries from a file.
Parameters
----------
name : string
Name of the file to read from.
"""
f = open(name, 'rt')
try:
reader = csv.DictReader(f)
for row in reader:
self.log.append(row)
finally:
f.close()
def make_data(self, file_name, attribute_labels, number=None):
"""
Parameters
----------
        file_name : string
            Base file name to write to; a numeric suffix and ".csv" are appended.
        attribute_labels : dict
            Maps observation attribute names to the column labels written to the file.
        number : int, optional
            Numeric suffix appended to the file name.
"""
suffix = "" if number is None else "_{0}".format(str(number))
f = open(file_name + suffix + ".csv", "wt")
try:
writer = csv.writer(f, delimiter=' ')
# Calculate changes in position for every limb.
attributes = attribute_labels.keys()
labels = attribute_labels.values()
data = []
for entry in self.log:
if "observation" in entry and entry["observation"] in attributes:
t = entry["_time"]
if len(data) - 1 < t:
data.append({})
data[t][entry["observation"]] = entry["value"]
writer.writerow(["t"] + labels)
for i in range(len(data) - 1):
k = [0] * len(attributes)
for p in range(len(attributes)):
if data[i][attributes[p]] != data[i + 1][attributes[p]]:
k[p] = 1
writer.writerow([i] + k)
finally:
f.close()
@staticmethod
def make_bins(name, c, n, number=None):
"""
Parameters
----------
        name : string
            Base file name (without the ".csv" extension) to read from; the
            binned output is written to the same name with a "_bins" suffix.
        c : list of string
            Labels of the columns next to the time column.
        n : int
            Number of time steps per bin.
"""
suffix = "" if number is None else "_{0}".format(str(number))
f = open(name + suffix + ".csv", "rt")
o = open(name + suffix + "_bins.csv", "wt")
try:
# Skip header
f.readline()
reader = csv.reader(f, delimiter=' ')
bins = []
i_bin = 1
current = [0] * len(c)
for row in reader:
if int(row[0]) >= i_bin * n:
bins.append(current)
i_bin += 1
current = [0] * len(c)
current = [x + y for (x, y) in zip(current, [int(z) for z in row[1:]])]
bins.append(current)
writer = csv.writer(o, delimiter=' ')
writer.writerow(["block"] + c)
for i in range(len(bins)):
writer.writerow([str(i)] + [str(x) for x in bins[i]])
finally:
f.close()
o.close()
@staticmethod
def write_data(name, c, data):
o = open(name + ".csv", "wt")
try:
writer = csv.writer(o, delimiter=' ')
writer.writerow(["block"] + c)
for i in range(len(data)):
writer.writerow([str(i)] + [str(x) for x in data[i]])
finally:
o.close()
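# A small usage sketch (not part of the original module); the observation
# names and values below are hypothetical.
def _example_usage():
    log = Log()
    log.set_verbose()
    log.do_log("observation", {"observation": "left_arm", "value": 1})
    log.time_tick()
    log.do_log("observation", {"observation": "left_arm", "value": 0})
    log.write_file("experiment_log.csv")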
|
mit
| 3,446,437,785,109,815,300
| 26.208556
| 87
| 0.442085
| false
| 4.381229
| false
| false
| false
|
schristakidis/p2ner
|
p2ner/components/overlay/completeclient/completeclient/messages/submessages.py
|
1
|
5521
|
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.base.ControlMessage import ControlMessage, trap_sent,probe_all,BaseControlMessage
from p2ner.base.Consts import MessageCodes as MSG
from construct import Container
class AskInitNeighs(BaseControlMessage):
type = "subsidmessage"
code = MSG.ASK_INIT_NEIGHS_SUB
ack = True
@classmethod
def send(cls, sid, superOverlay,interOverlay, peer, out):
d=out.send(cls, Container(streamid = sid, superOverlay=superOverlay, interOverlay=interOverlay), peer)
d.addErrback(trap_sent)
return d
class PeerListMessage(ControlMessage):
type = "subpeerlistmessage"
code = MSG.SEND_IP_LIST_SUB
ack = True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerInitiatorsMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self, message, peer):
self.log.debug('received peerList message from %s for %s',peer,str(message.peer))
if message.peer:
for p in message.peer:
self.subOverlay.checkSendAddNeighbour(p,peer)
else:
self.subOverlay.checkSendAddNeighbour(None,peer)
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class AddNeighbourMessage(ControlMessage):
type = "suboverlaymessage"
code = MSG.ADD_NEIGH_SUB
ack = True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self, message, peer):
peer.dataPort=message.port
peer.reportedBW=message.bw
if message.peer:
peer.lip=message.peer.ip
peer.lport=message.peer.port
peer.ldataPort=message.peer.dataPort
peer.hpunch=message.peer.hpunch
self.log.debug('received add neigh message from %s',peer)
print 'received neigh message from ',peer
self.subOverlay.checkAcceptNeighbour(peer)
@classmethod
def send(cls, id,sOver,iOver,port,bw, inpeer, peer, out):
msg = Container(streamid=id,superOverlay=sOver,interOverlay=iOver,port=int(port), bw=bw,peer=inpeer)
d=out.send(cls, msg, peer)
d.addErrback(trap_sent)
return d
class ConfirmNeighbourMessage(ControlMessage):
type = "subsidmessage"
code = MSG.CONFIRM_NEIGH_SUB
ack = True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self, message, peer):
self.subOverlay.addNeighbour(peer)
@classmethod
def send(cls, sid, sOver,iOver,peer, out):
d=out.send(cls, Container(streamid = sid, superOverlay=sOver, interOverlay=iOver), peer)
d.addErrback(trap_sent)
return d
class SuggestNewPeerMessage(ControlMessage):
type = "subpeerlistmessage"
code = MSG.SUGGEST_NEW_PEER_SUB
ack = True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self, message, peer):
self.log.debug('received suggest new peer message from %s',peer)
self.subOverlay.suggestNewPeer(peer,message.peer)
@classmethod
def send(cls, sid,sover,iover, peerlist, peer, out, suc_func=None,err_func=None):
return out.send(cls, Container(streamid=sid, superOverlay=sover, interOverlay=iover, peer=peerlist), peer).addErrback(probe_all,err_func=err_func,suc_func=suc_func)
class SuggestMessage(ControlMessage):
type = "subpeerlistmessage"
code = MSG.SUGGEST_SUB
ack = True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self, message, peer):
self.log.debug('received suggest message from %s',peer)
self.subOverlay.availableNewPeers(peer,message.peer)
@classmethod
def send(cls, sid,sover,iover, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, superOverlay=sover,interOverlay=iover, peer=peerlist), peer).addErrback(trap_sent)
class PingMessage(ControlMessage):
type='basemessage'
code=MSG.ADDNEIGH_RTT
ack=True
def trigger(self,message):
return True
def action(self,message,peer):
return
@classmethod
def send(cls, peer, out):
out.send(cls,Container(message=None),peer).addErrback(trap_sent)
|
apache-2.0
| -2,815,734,315,043,387,400
| 33.080247
| 172
| 0.690998
| false
| 3.688043
| false
| false
| false
|
GeosoftInc/gxpy
|
geosoft/gxapi/GXEDOC.py
|
1
|
18354
|
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXEDOC(gxapi_cy.WrapEDOC):
"""
GXEDOC class.
The `GXEDOC <geosoft.gxapi.GXEDOC>` class provides access to a generic documents views as loaded within
Oasis montaj.
"""
def __init__(self, handle=0):
super(GXEDOC, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXEDOC <geosoft.gxapi.GXEDOC>`
:returns: A null `GXEDOC <geosoft.gxapi.GXEDOC>`
:rtype: GXEDOC
"""
return GXEDOC()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# GMSYS 3D Models
@classmethod
def create_new_gms_3d(cls, name, nx, ny, type):
"""
Creates a new `GXGMSYS <geosoft.gxapi.GXGMSYS>` 3D Model into the workspace, flags as new.
:param name: Document to load.
:param nx: X Size
:param ny: Y Size
:param type: :ref:`GMS3D_MODELTYPE`
:type name: str
:type nx: int
:type ny: int
:type type: int
:returns: Handle to the newly created edited model.
:rtype: GXEDOC
.. versionadded:: 5.0
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** See `load <geosoft.gxapi.GXEDOC.load>`. This is used for brand new documents, it also sets
an internal flag such that if on closing the user chooses
not to save changes, the document is deleted thus keeping the
project folders clean.
"""
ret_val = gxapi_cy.WrapEDOC._create_new_gms_3d(GXContext._get_tls_geo(), name.encode(), nx, ny, type)
return GXEDOC(ret_val)
# Miscellaneous
@classmethod
def current(cls, type):
"""
This method returns the Current Edited Document.
:param type: :ref:`EDOC_TYPE`
:type type: int
:returns: `GXEDOC <geosoft.gxapi.GXEDOC>` Object
:rtype: GXEDOC
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = gxapi_cy.WrapEDOC._current(GXContext._get_tls_geo(), type)
return GXEDOC(ret_val)
@classmethod
def current_no_activate(cls, type):
"""
This method returns the Current Edited Document.
:param type: :ref:`EDOC_TYPE`
:type type: int
:returns: `GXEDOC <geosoft.gxapi.GXEDOC>` Object
:rtype: GXEDOC
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** This function acts just like `current <geosoft.gxapi.GXEDOC.current>` except that the document is not activated (brought to foreground) and no
guarantee is given about which document is currently active.
"""
ret_val = gxapi_cy.WrapEDOC._current_no_activate(GXContext._get_tls_geo(), type)
return GXEDOC(ret_val)
@classmethod
def current_if_exists(cls, type):
"""
This method returns the Current Edited Document.
:param type: :ref:`EDOC_TYPE`
:type type: int
:returns: `GXEDOC <geosoft.gxapi.GXEDOC>` Object to current edited document. If there is no current document,
the user is not prompted for a document, and 0 is returned.
:rtype: GXEDOC
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = gxapi_cy.WrapEDOC._current_if_exists(GXContext._get_tls_geo(), type)
return GXEDOC(ret_val)
@classmethod
def get_documents_lst(cls, lst, path, type):
"""
Load the file names of open documents into a `GXLST <geosoft.gxapi.GXLST>`.
:param lst: `GXLST <geosoft.gxapi.GXLST>` to load
:param path: :ref:`EDOC_PATH`
:param type: :ref:`EDOC_TYPE`
:type lst: GXLST
:type path: int
:type type: int
:returns: The number of documents loaded into the `GXLST <geosoft.gxapi.GXLST>`.
The `GXLST <geosoft.gxapi.GXLST>` is cleared first.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = gxapi_cy.WrapEDOC._get_documents_lst(GXContext._get_tls_geo(), lst, path, type)
return ret_val
def get_name(self, name):
"""
Get the name of the document object of this `GXEDOC <geosoft.gxapi.GXEDOC>`.
:param name: Name returned
:type name: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
name.value = self._get_name(name.value.encode())
def get_window_state(self):
"""
Retrieve the current state of the document window
:returns: :ref:`EDOC_WINDOW_STATE`
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = self._get_window_state()
return ret_val
@classmethod
def have_current(cls, type):
"""
Returns true if a document is loaded
:param type: :ref:`EDOC_TYPE`
:type type: int
:rtype: bool
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = gxapi_cy.WrapEDOC._have_current(GXContext._get_tls_geo(), type)
return ret_val
@classmethod
def loaded(cls, name, type):
"""
        Returns 1 if a document is loaded.
:param name: document name
:param type: :ref:`EDOC_TYPE`
:type name: str
:type type: int
:returns: 1 if document is loaded, 0 otherwise.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = gxapi_cy.WrapEDOC._loaded(GXContext._get_tls_geo(), name.encode(), type)
return ret_val
def get_window_position(self, left, top, right, bottom, state, is_floating):
"""
Get the map window's position and dock state
:param left: Window left position
:param top: Window top position
:param right: Window right position
:param bottom: Window bottom position
:param state: Window state :ref:`EDOC_WINDOW_STATE`
:param is_floating: Docked or floating :ref:`EDOC_WINDOW_POSITION`
:type left: int_ref
:type top: int_ref
:type right: int_ref
:type bottom: int_ref
:type state: int_ref
:type is_floating: int_ref
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
left.value, top.value, right.value, bottom.value, state.value, is_floating.value = self._get_window_position(left.value, top.value, right.value, bottom.value, state.value, is_floating.value)
def set_window_position(self, left, top, right, bottom, state, is_floating):
"""
Get the map window's position and dock state
:param left: Window left position
:param top: Window top position
:param right: Window right position
:param bottom: Window bottom position
:param state: Window state :ref:`EDOC_WINDOW_STATE`
:param is_floating: Docked or floating :ref:`EDOC_WINDOW_POSITION`
:type left: int
:type top: int
:type right: int
:type bottom: int
:type state: int
:type is_floating: int
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
self._set_window_position(left, top, right, bottom, state, is_floating)
def read_only(self):
"""
Checks if a document is currently opened in a read-only mode.
:rtype: bool
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val = self._read_only()
return ret_val
@classmethod
def load(cls, name, type):
"""
Loads a list of documents into the workspace
:param name: list of documents (';' or '|' delimited) to load.
:param type: :ref:`EDOC_TYPE`
:type name: str
:type type: int
:returns: Handle to current edited document, which will be the last
document in the list.
:rtype: GXEDOC
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** The last listed document will become the current document.
Only the first file in the list may have a directory path.
All other files in the list are assumed to be in the same
directory as the first file.
"""
ret_val = gxapi_cy.WrapEDOC._load(GXContext._get_tls_geo(), name.encode(), type)
return GXEDOC(ret_val)
@classmethod
def load_no_activate(cls, name, type):
"""
Loads a list of documents into the workspace
:param name: list of documents (';' or '|' delimited) to load.
:param type: :ref:`EDOC_TYPE`
:type name: str
:type type: int
:returns: Handle to current edited document, which will be the last
document in the list.
:rtype: GXEDOC
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** This function acts just like `load <geosoft.gxapi.GXEDOC.load>` except that the document(s) is not activated (brought to foreground) and no
guarantee is given about which document is currently active.
"""
ret_val = gxapi_cy.WrapEDOC._load_no_activate(GXContext._get_tls_geo(), name.encode(), type)
return GXEDOC(ret_val)
def make_current(self):
"""
Makes this `GXEDOC <geosoft.gxapi.GXEDOC>` object the current active object to the user.
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
self._make_current()
def set_window_state(self, state):
"""
Changes the state of the document window
:param state: :ref:`EDOC_WINDOW_STATE`
:type state: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
self._set_window_state(state)
@classmethod
def sync(cls, file, type):
"""
        Synchronize the Metadata of a document that is not currently open
:param file: Document file name
:param type: :ref:`EDOC_TYPE`
:type file: str
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
gxapi_cy.WrapEDOC._sync(GXContext._get_tls_geo(), file.encode(), type)
def sync_open(self):
"""
        Synchronize the Metadata of a document
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
self._sync_open()
@classmethod
def un_load(cls, name, type):
"""
Unloads an edited document.
:param name: Name of document to unload
:param type: :ref:`EDOC_TYPE`
:type name: str
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** If the document is not loaded, nothing happens.
Same as `un_load_verify <geosoft.gxapi.GXEDOC.un_load_verify>` with FALSE to prompt save.
"""
gxapi_cy.WrapEDOC._un_load(GXContext._get_tls_geo(), name.encode(), type)
@classmethod
def un_load_all(cls, type):
"""
Unloads all opened documents
:param type: :ref:`EDOC_TYPE`
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
gxapi_cy.WrapEDOC._un_load_all(GXContext._get_tls_geo(), type)
@classmethod
def un_load_discard(cls, name, type):
"""
Unloads a document in the workspace, discards changes.
:param name: Name of document to unload
:param type: :ref:`EDOC_TYPE`
:type name: str
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** If the document is not loaded, nothing happens.
"""
gxapi_cy.WrapEDOC._un_load_discard(GXContext._get_tls_geo(), name.encode(), type)
@classmethod
def un_load_verify(cls, name, verify, type):
"""
Unloads an edited document, optional prompt to save.
:param name: Name of document to unload
:param verify: :ref:`EDOC_UNLOAD`
:param type: :ref:`EDOC_TYPE`
:type name: str
:type verify: int
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** If the document is not loaded, nothing happens.
The user can be prompted to save before unloading.
If `EDOC_UNLOAD_NO_PROMPT <geosoft.gxapi.EDOC_UNLOAD_NO_PROMPT>`, data is always saved.
"""
gxapi_cy.WrapEDOC._un_load_verify(GXContext._get_tls_geo(), name.encode(), verify, type)
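    # Illustrative usage (added sketch, not part of the generated wrapper; the
    # document name is hypothetical and ``doc_type`` stands for one of the
    # EDOC_TYPE constants referenced in the docstrings above):
    #
    #     import geosoft.gxapi as gxapi
    #     gxapi.GXEDOC.un_load_verify("example_document", gxapi.EDOC_UNLOAD_NO_PROMPT, doc_type)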
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
|
bsd-2-clause
| -78,147,204,572,312,750
| 30.537801
| 198
| 0.600959
| false
| 3.74495
| false
| false
| false
|
2013Commons/hue
|
apps/beeswax/src/beeswax/views.py
|
1
|
35107
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import sys
import time
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.appmanager import get_apps_dict
from desktop.context_processors import get_app_name
from desktop.lib.paginator import Paginator
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.models import Document
from jobsub.parameterization import find_variables
import beeswax.forms
import beeswax.design
import beeswax.management.commands.beeswax_install_examples
from beeswax import common, data_export, models
from beeswax.models import SavedQuery, QueryHistory
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
LOG = logging.getLogger(__name__)
def index(request):
return execute_query(request)
"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
"""
save_design(request, form, type_, design, explicit_save) -> SavedQuery
A helper method to save the design:
* If ``explicit_save``, then we save the data in the current design.
* If the user clicked the submit button, we do NOT overwrite the current
design. Instead, we create a new "auto" design (iff the user modified
the data). This new design is named after the current design, with the
AUTO_DESIGN_SUFFIX to signify that it's different.
Need to return a SavedQuery because we may end up with a different one.
Assumes that form.saveform is the SaveForm, and that it is valid.
"""
authorized_get_design(request, design.id)
assert form.saveform.is_valid()
sub_design_form = form # Beeswax/Impala case
if type_ == models.HQL:
design_cls = beeswax.design.HQLdesign
elif type_ == models.IMPALA:
design_cls = beeswax.design.HQLdesign
elif type_ == models.SPARK:
from spark.design import SparkDesign
design_cls = SparkDesign
sub_design_form = form.query
else:
raise ValueError(_('Invalid design type %(type)s') % {'type': type_})
design_obj = design_cls(sub_design_form, query_type=type_)
name = form.saveform.cleaned_data['name']
desc = form.saveform.cleaned_data['desc']
return _save_design(request.user, design, type_, design_obj, explicit_save, name, desc)
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
# Design here means SavedQuery
old_design = design
new_data = design_obj.dumps()
# Auto save if (1) the user didn't click "save", and (2) the data is different.
# Create an history design if the user is executing a shared design.
# Don't generate an auto-saved design if the user didn't change anything.
if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
design.name = name
design.desc = desc
design.is_auto = False
elif design_obj != old_design.get_design():
# Auto save iff the data is different
if old_design.id is not None:
# Clone iff the parent design isn't a new unsaved model
design = old_design.clone(new_owner=user)
if not old_design.is_auto:
design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
else:
design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
design.is_auto = True
design.type = type_
design.data = new_data
design.save()
LOG.info('Saved %s design "%s" (id %s) for %s' % (explicit_save and '' or 'auto ', design.name, design.id, design.owner))
if design.doc.exists():
design.doc.update(name=design.name, description=design.desc)
else:
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
if design.is_auto:
design.doc.get().add_to_history()
return design
def delete_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id, owner_only=True)) for design_id in ids])
if None in designs.values():
LOG.error('Cannot delete non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
return list_designs(request)
for design in designs.values():
if request.POST.get('skipTrash', 'false') == 'false':
design.doc.get().send_to_trash()
else:
design.doc.all().delete()
design.delete()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})
def restore_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id)) for design_id in ids])
if None in designs.values():
LOG.error('Cannot restore non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
return list_designs(request)
for design in designs.values():
design.doc.get().restore_from_trash()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})
def clone_design(request, design_id):
"""Clone a design belonging to any user"""
design = authorized_get_design(request, design_id)
if design is None:
LOG.error('Cannot clone non-existent design %s' % (design_id,))
return list_designs(request)
copy = design.clone(request.user)
copy.save()
copy_doc = design.doc.get().copy(owner=request.user)
copy.doc.all().delete()
copy.doc.add(copy_doc)
messages.info(request, _('Copied design: %(name)s') % {'name': design.name})
return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': copy.id}))
def list_designs(request):
"""
View function for show all saved queries.
We get here from /beeswax/list_designs?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show design items belonging to a user. Default to all users.
type=<type> - <type> is "hql", for saved query type. Default to show all.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "name", "desc", and "type" (design type)
Accepts the form "-date", which sort in descending order.
Default to "-date".
text=<frag> - Search for fragment "frag" in names and descriptions.
"""
DEFAULT_PAGE_SIZE = 20
app_name = get_app_name(request)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
# Manually limit up the user filter.
querydict_query[ prefix + 'type' ] = app_name
page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
return render('list_designs.mako', request, {
'page': page,
'filter_params': filter_params,
'user': request.user,
'designs_json': json.dumps([query.id for query in page.object_list])
})
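# Illustrative sketch (not part of the original module): the GET parameters
# documented above can be exercised directly against the helper that backs this
# view; the filter values are hypothetical and running it requires a configured
# Hue database.
def _example_list_designs_filters(user):
  params = QueryDict('q-user=admin&q-sort=-date&q-text=sample')
  return _list_designs(user, params, 20, prefix='q-')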
def list_trashed_designs(request):
DEFAULT_PAGE_SIZE = 20
app_name= get_app_name(request)
user = request.user
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
# Manually limit up the user filter.
querydict_query[ prefix + 'type' ] = app_name
page, filter_params = _list_designs(user, querydict_query, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)
return render('list_trashed_designs.mako', request, {
'page': page,
'filter_params': filter_params,
'user': request.user,
'designs_json': json.dumps([query.id for query in page.object_list])
})
def my_queries(request):
"""
View a mix of history and saved queries.
It understands all the GET params in ``list_query_history`` (with a ``h-`` prefix)
and those in ``list_designs`` (with a ``q-`` prefix). The only thing it disallows
is the ``user`` filter, since this view only shows what belongs to the user.
"""
DEFAULT_PAGE_SIZE = 30
app_name= get_app_name(request)
# Extract the history list.
prefix = 'h-'
querydict_history = _copy_prefix(prefix, request.GET)
# Manually limit up the user filter.
querydict_history[ prefix + 'user' ] = request.user
querydict_history[ prefix + 'type' ] = app_name
hist_page, hist_filter = _list_query_history(request.user,
querydict_history,
DEFAULT_PAGE_SIZE,
prefix)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
# Manually limit up the user filter.
querydict_query[ prefix + 'user' ] = request.user
querydict_query[ prefix + 'type' ] = app_name
query_page, query_filter = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
filter_params = hist_filter
filter_params.update(query_filter)
return render('my_queries.mako', request, {
'request': request,
'h_page': hist_page,
'q_page': query_page,
'filter_params': filter_params,
'designs_json': json.dumps([query.id for query in query_page.object_list])
})
def list_query_history(request):
"""
View the history of query (for the current user).
We get here from /beeswax/query_history?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show history items from a user. Default to current user only.
Also accepts ':all' to show all history items.
type=<type> - <type> is "beeswax|impala", for design type. Default to show all.
design_id=<id> - Show history for this particular design id.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "state", "name" (design name), and "type" (design type)
Accepts the form "-date", which sort in descending order.
Default to "-date".
auto_query=<bool> - Show auto generated actions (drop table, read data, etc). Default True
"""
DEFAULT_PAGE_SIZE = 30
prefix = 'q-'
share_queries = request.user.is_superuser
querydict_query = request.GET.copy()
if not share_queries:
querydict_query[prefix + 'user'] = request.user.username
app_name = get_app_name(request)
querydict_query[prefix + 'type'] = app_name
page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
filter = request.GET.get(prefix + 'search') and request.GET.get(prefix + 'search') or ''
if request.GET.get('format') == 'json':
resp = {
'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
}
return HttpResponse(json.dumps(resp), mimetype="application/json")
return render('list_history.mako', request, {
'request': request,
'page': page,
'filter_params': filter_params,
'share_queries': share_queries,
'prefix': prefix,
'filter': filter,
})
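# Illustrative sketch (not part of the original module): hypothetical filter
# values for the GET parameters documented above, fed straight to the helper
# that backs this view (requires a configured Hue database).
def _example_query_history_filters(user):
  params = QueryDict('q-user=:all&q-sort=-date&q-auto_query=off')
  return _list_query_history(user, params, 30, prefix='q-')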
def massage_query_history_for_json(app_name, query_history):
return {
'query': query_history.query,
'timeInMs': time.mktime(query_history.submission_date.timetuple()),
'timeFormatted': query_history.submission_date.strftime("%x %X"),
'designUrl': reverse(app_name + ':execute_design', kwargs={'design_id': query_history.design.id}),
'resultsUrl': not query_history.is_failure() and reverse(app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id}) or ""
}
def download(request, id, format):
try:
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))
return data_export.download(query_history.get_handle(), format, db)
except Exception, e:
if not hasattr(e, 'message') or not e.message:
message = e
else:
message = e.message
raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
"""
View function for executing an arbitrary query.
"""
action = 'query'
if query_history_id:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
design = query_history.design
try:
if query_history.server_id and query_history.server_guid:
handle, state = _get_query_handle_and_state(query_history)
if 'on_success_url' in request.GET:
if request.GET.get('on_success_url'):
action = 'watch-redirect'
else:
action = 'watch-results'
else:
action = 'editor-results'
except QueryServerException, e:
if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
query_history.save_state(QueryHistory.STATE.expired)
LOG.warn("Invalid query handle", exc_info=sys.exc_info())
action = 'editor-expired-results'
else:
raise e
else:
# Check perms.
authorized_get_design(request, design_id)
app_name = get_app_name(request)
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
query_history = None
context = {
'design': design,
'query': query_history, # Backward
'query_history': query_history,
'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
'can_edit_name': design and design.id and not design.is_auto,
'can_edit': design and design.id and design.doc.get().can_write(request.user),
'action': action,
'on_success_url': request.GET.get('on_success_url'),
'has_metastore': 'metastore' in get_apps_dict(request.user)
}
return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
"""
Returns the view for the results of the QueryHistory with the given id.
The query results MUST be ready.
To display query results, one should always go through the execute_query view.
If the result set has has_result_set=False, display an empty result.
If ``first_row`` is 0, restarts (if necessary) the query read. Otherwise, just
  spits out a warning if first_row doesn't match the server's conception.
Multiple readers will produce a confusing interaction here, and that's known.
It understands the ``context`` GET parameter. (See execute_query().)
"""
first_row = long(first_row)
start_over = (first_row == 0)
results = type('Result', (object,), {
'rows': 0,
'columns': [],
'has_more': False,
'start_row': 0,
})
data = []
fetch_error = False
error_message = ''
log = ''
columns = []
app_name = get_app_name(request)
query_history = authorized_get_query_history(request, id, must_exist=True)
query_server = query_history.get_query_server_config()
db = dbms.get(request.user, query_server)
handle, state = _get_query_handle_and_state(query_history)
context_param = request.GET.get('context', '')
query_context = parse_query_context(context_param)
# Update the status as expired should not be accessible
# Impala does not support startover for now
expired = state == models.QueryHistory.STATE.expired
# Retrieve query results or use empty result if no result set
try:
if query_server['server_name'] == 'impala' and not handle.has_result_set:
downloadable = False
else:
results = db.fetch(handle, start_over, 100)
data = []
# Materialize and HTML escape results
# TODO: use Number + list comprehension
for row in results.rows():
escaped_row = []
for field in row:
if isinstance(field, (int, long, float, complex, bool)):
escaped_field = field
elif field is None:
escaped_field = 'NULL'
else:
field = smart_unicode(field, errors='replace') # Prevent error when getting back non utf8 like charset=iso-8859-1
escaped_field = escape(field).replace(' ', ' ')
escaped_row.append(escaped_field)
data.append(escaped_row)
# We display the "Download" button only when we know that there are results:
downloadable = first_row > 0 or data
log = db.get_log(handle)
columns = results.data_table.cols()
except Exception, ex:
fetch_error = True
error_message, log = expand_exception(ex, db, handle)
# Handle errors
error = fetch_error or results is None or expired
context = {
'error': error,
'message': error_message,
'query': query_history,
'results': data,
'columns': columns,
'expected_first_row': first_row,
'log': log,
'hadoop_jobs': app_name != 'impala' and _parse_out_hadoop_jobs(log),
'query_context': query_context,
'can_save': False,
'context_param': context_param,
'expired': expired,
'app_name': app_name,
'next_json_set': None,
'is_finished': query_history.is_finished()
}
if not error:
download_urls = {}
if downloadable:
for format in common.DL_FORMATS:
download_urls[format] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=format))
results.start_row = first_row
context.update({
'id': id,
'results': data,
'has_more': results.has_more,
'next_row': results.start_row + len(data),
'start_row': results.start_row,
'expected_first_row': first_row,
'columns': columns,
'download_urls': download_urls,
'can_save': query_history.owner == request.user,
'next_json_set':
reverse(get_app_name(request) + ':view_results', kwargs={
'id': str(id),
'first_row': results.start_row + len(data)
}
)
+ ('?context=' + context_param or '') + '&format=json'
})
context['columns'] = massage_columns_for_json(columns)
if 'save_form' in context:
del context['save_form']
if 'query' in context:
del context['query']
return HttpResponse(json.dumps(context), mimetype="application/json")
def configuration(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
config_values = dbms.get(request.user, query_server).get_default_configuration(
bool(request.REQUEST.get("include_hadoop", False)))
for value in config_values:
if 'password' in value.key.lower():
value.value = "*" * 10
return render("configuration.mako", request, {'config_values': config_values})
"""
Other views
"""
def install_examples(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
app_name = get_app_name(request)
beeswax.management.commands.beeswax_install_examples.Command().handle_noargs(app_name=app_name, user=request.user)
response['status'] = 0
except Exception, err:
LOG.exception(err)
response['message'] = str(err)
else:
response['message'] = _('A POST request is required.')
return HttpResponse(json.dumps(response), mimetype="application/json")
@login_notrequired
def query_done_cb(request, server_id):
"""
A callback for query completion notification. When the query is done,
BeeswaxServer notifies us by sending a GET request to this view.
"""
message_template = '<html><head></head>%(message)s<body></body></html>'
message = {'message': 'error'}
try:
query_history = QueryHistory.objects.get(server_id=server_id + '\n')
# Update the query status
query_history.set_to_available()
# Find out details about the query
if not query_history.notify:
message['message'] = 'email_notify is false'
return HttpResponse(message_template % message)
design = query_history.design
user = query_history.owner
subject = _("Beeswax query completed.")
if design:
subject += ": %s" % (design.name,)
link = "%s%s" % \
(get_desktop_uri_prefix(),
reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id}))
body = _("%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
'subject': subject, 'link': link, 'query': query_history.query
}
user.email_user(subject, body)
message['message'] = 'sent'
except Exception, ex:
msg = "Failed to send query completion notification via e-mail: %s" % (ex)
LOG.error(msg)
message['message'] = msg
return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
massaged_cols = []
for column in cols:
massaged_cols.append({
'name': column.name,
'type': column.type,
'comment': column.comment
})
return massaged_cols
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
if design_id is None and not must_exist:
return None
try:
design = SavedQuery.objects.get(id=design_id)
except SavedQuery.DoesNotExist:
if must_exist:
raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})
else:
return None
if owner_only:
design.doc.get().can_write_or_exception(request.user)
else:
design.doc.get().can_read_or_exception(request.user)
return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
if query_history_id is None and not must_exist:
return None
try:
query_history = QueryHistory.get(id=query_history_id)
except QueryHistory.DoesNotExist:
if must_exist:
raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})
else:
return None
# Some queries don't have a design so are not linked to Document Model permission
if query_history.design is None or not query_history.design.doc.exists():
if not request.user.is_superuser and request.user != query_history.owner:
raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})
else:
query_history.design.doc.get().can_read_or_exception(request.user)
return query_history
def safe_get_design(request, design_type, design_id=None):
"""
  Return a new design if design_id is None. Otherwise, return the design with
  the given id and type. If the design is not found, display a notification
  and return a new design.
"""
design = None
if design_id is not None:
design = authorized_get_design(request, design_id)
if design is None:
design = SavedQuery(owner=request.user, type=design_type)
return design
def make_parameterization_form(query_str):
"""
Creates a django form on the fly with arguments from the
query.
"""
variables = find_variables(query_str)
if len(variables) > 0:
class Form(forms.Form):
for name in sorted(variables):
locals()[name] = forms.CharField(required=True)
return Form
else:
return None
def execute_directly(request, query, query_server=None,
design=None, on_success_url=None, on_success_params=None,
**kwargs):
"""
execute_directly(request, query_msg, tablename, design) -> HTTP response for execution
This method wraps around dbms.execute_query() to take care of the HTTP response
after the execution.
query
The HQL model Query object.
query_server
To which Query Server to submit the query.
Dictionary with keys: ['server_name', 'server_host', 'server_port'].
design
The design associated with the query.
on_success_url
Where to go after the query is done. The URL handler may expect an option "context" GET
param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
which case the on complete URL is the return of:
on_success_url(history_obj) -> URL string
Defaults to the view results page.
on_success_params
Optional params to pass to the on_success_url (in additional to "context").
Note that this may throw a Beeswax exception.
"""
if design is not None:
authorized_get_design(request, design.id)
db = dbms.get(request.user, query_server)
database = query.query.get('database', 'default')
db.use(database)
query_history = db.execute_query(query, design)
watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})
# Prepare the GET params for the watch_url
get_dict = QueryDict(None, mutable=True)
# (1) on_success_url
if on_success_url:
if callable(on_success_url):
on_success_url = on_success_url(query_history)
get_dict['on_success_url'] = on_success_url
# (2) misc
if on_success_params:
get_dict.update(on_success_params)
return format_preserving_redirect(request, watch_url, get_dict)
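# Illustrative sketch (not part of the original module): the callable form of
# ``on_success_url`` documented above receives the saved QueryHistory and
# returns the redirect target; the 'beeswax' namespace here is an assumption.
def _example_on_success_url(query_history):
  return reverse('beeswax:watch_query_history',
                 kwargs={'query_history_id': query_history.id})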
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
"""
_list_designs(user, querydict, page_size, prefix, is_trashed) -> (page, filter_param)
A helper to gather the designs page. It understands all the GET params in
``list_designs``, by reading keys from the ``querydict`` with the given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='last_modified',
name='name',
desc='description',
type='extra',
)
# Trash and security
if is_trashed:
db_queryset = Document.objects.trashed_docs(SavedQuery, user)
else:
db_queryset = Document.objects.available_docs(SavedQuery, user)
# Filter by user
filter_username = querydict.get(prefix + 'user')
if filter_username:
try:
db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
except User.DoesNotExist:
# Don't care if a bad filter term is provided
pass
# Design type
d_type = querydict.get(prefix + 'type')
if d_type and d_type in SavedQuery.TYPES_MAPPING.keys():
db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))
# Text search
frag = querydict.get(prefix + 'text')
if frag:
db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
else:
sort_dir, sort_attr = '', sort_key
if not SORT_ATTR_TRANSLATION.has_key(sort_attr):
LOG.warn('Bad parameter to list_designs: sort=%s' % (sort_key,))
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])
designs = [job.content_object for job in db_queryset.all() if job.content_object and job.content_object.is_auto == False]
pagenum = int(querydict.get(prefix + 'page', 1))
paginator = Paginator(designs, page_size)
page = paginator.page(pagenum)
# We need to pass the parameters back to the template to generate links
keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort') ]
filter_params = copy_query_dict(querydict, keys_to_copy)
return page, filter_params
def _get_query_handle_and_state(query_history):
"""
Front-end wrapper to handle exceptions. Expects the query to be submitted.
"""
handle = query_history.get_handle()
if handle is None:
raise PopupException(_("Failed to retrieve query state from the Query Server."))
state = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(handle)
if state is None:
raise PopupException(_("Failed to contact Server to check query status."))
return (handle, state)
def parse_query_context(context):
"""
parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)
"""
if not context:
return None
pair = context.split(':', 1)
if len(pair) != 2 or pair[0] not in ('table', 'design'):
LOG.error("Invalid query context data: %s" % (context,))
return None
if pair[0] == 'design': # Translate design id to design obj
pair[1] = models.SavedQuery.get(int(pair[1]))
return pair
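# Illustrative sketch (not part of the original module): the "table" form of
# the context parameter parses without touching the database; 'sample_07' is a
# hypothetical table name.
def _example_parse_query_context():
  return parse_query_context('table:sample_07')  # -> ['table', 'sample_07']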
HADOOP_JOBS_RE = re.compile("(http[^\s]*/jobdetails.jsp\?jobid=([a-z0-9_]*))")
HADOOP_YARN_JOBS_RE = re.compile("(http[^\s]*/proxy/([a-z0-9_]+?)/)")
def _parse_out_hadoop_jobs(log):
"""
Ideally, Hive would tell us what jobs it has run directly
from the Thrift interface. For now, we parse the logs
to look for URLs to those jobs.
"""
ret = []
for match in HADOOP_JOBS_RE.finditer(log):
full_job_url, job_id = match.groups()
# We ignore full_job_url for now, but it may
# come in handy if we support multiple MR clusters
# correctly.
# Ignore duplicates
if job_id not in ret:
ret.append(job_id)
for match in HADOOP_YARN_JOBS_RE.finditer(log):
full_job_url, job_id = match.groups()
if job_id not in ret:
ret.append(job_id)
return ret
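# Illustrative sketch (not part of the original module): a hypothetical log
# fragment containing a JobTracker URL, from which the job id is recovered.
def _example_parse_out_hadoop_jobs():
  log = "Tracking URL = http://jt:50030/jobdetails.jsp?jobid=job_201310010000_0001"
  return _parse_out_hadoop_jobs(log)  # -> ['job_201310010000_0001']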
def _copy_prefix(prefix, base_dict):
"""Copy keys starting with ``prefix``"""
querydict = QueryDict(None, mutable=True)
for key, val in base_dict.iteritems():
if key.startswith(prefix):
querydict[key] = val
return querydict
def _list_query_history(user, querydict, page_size, prefix=""):
"""
_list_query_history(user, querydict, page_size, prefix) -> (page, filter_param)
A helper to gather the history page. It understands all the GET params in
``list_query_history``, by reading keys from the ``querydict`` with the
given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='submission_date',
state='last_state',
name='design__name',
type='design__type',
)
db_queryset = models.QueryHistory.objects.select_related()
# Filtering
#
# Queries without designs are the ones we submitted on behalf of the user,
# (e.g. view table data). Exclude those when returning query history.
if querydict.get(prefix + 'auto_query', 'on') != 'on':
db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)
user_filter = querydict.get(prefix + 'user', user.username)
if user_filter != ':all':
db_queryset = db_queryset.filter(owner__username=user_filter)
# Design id
design_id = querydict.get(prefix + 'design_id')
if design_id:
db_queryset = db_queryset.filter(design__id=int(design_id))
# Search
search_filter = querydict.get(prefix + 'search')
if search_filter:
db_queryset = db_queryset.filter(Q(design__name__icontains=search_filter) | Q(query__icontains=search_filter) | Q(owner__username__icontains=search_filter))
# Design type
d_type = querydict.get(prefix + 'type')
if d_type:
if d_type not in SavedQuery.TYPES_MAPPING.keys():
LOG.warn('Bad parameter to list_query_history: type=%s' % (d_type,))
else:
db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
sort_dir, sort_attr = '', sort_key
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
if not SORT_ATTR_TRANSLATION.has_key(sort_attr):
LOG.warn('Bad parameter to list_query_history: sort=%s' % (sort_key,))
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])
# Get the total return count before slicing
total_count = db_queryset.count()
# Slicing (must be the last filter applied)
pagenum = int(querydict.get(prefix + 'page', 1))
if pagenum < 1:
pagenum = 1
db_queryset = db_queryset[ page_size * (pagenum - 1) : page_size * pagenum ]
paginator = Paginator(db_queryset, page_size, total=total_count)
page = paginator.page(pagenum)
# We do slicing ourselves, rather than letting the Paginator handle it, in order to
# update the last_state on the running queries
for history in page.object_list:
_update_query_state(history.get_full_object())
# We need to pass the parameters back to the template to generate links
keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search') ]
filter_params = copy_query_dict(querydict, keys_to_copy)
return page, filter_params
def _update_query_state(query_history):
"""
Update the last_state for a QueryHistory object. Returns success as True/False.
This only occurs iff the current last_state is submitted or running, since the other
states are stable, more-or-less.
Note that there is a transition from available/failed to expired. That occurs lazily
when the user attempts to view results that have expired.
"""
if query_history.last_state <= models.QueryHistory.STATE.running.index:
try:
state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
if state_enum is None:
# Error was logged at the source
return False
except Exception, e:
LOG.error(e)
state_enum = models.QueryHistory.STATE.failed
query_history.save_state(state_enum)
return True
def get_db_choices(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
db = dbms.get(request.user, query_server)
dbs = db.get_databases()
return [(db, db) for db in dbs]
WHITESPACE = re.compile("\s+", re.MULTILINE)
def collapse_whitespace(s):
return WHITESPACE.sub(" ", s).strip()
|
apache-2.0
| 5,357,420,010,559,240,000
| 33.384917
| 160
| 0.670806
| false
| 3.539012
| false
| false
| false
|
osspeak/osspeak
|
osspeak/recognition/actions/library/vocola/dragonkeys.py
|
1
|
12786
|
###
### Code for parsing extended SendDragonKeys syntax into a series of
### Input events suitable for calling SendInput with.
###
### Uses ctypes (requires Python 2.5+).
###
### Assumes input is 8-bit Windows-1252 encoding.
###
###
### Author: Mark Lillibridge
### Version: 0.7
###
import re
from ctypes import *
from recognition.actions.library.vocola.sendinput import *
debug = False
###
### Break SendDragonKeys input into the chords that make it up. Each
### chord is represented in terms of its three parts: modifiers, base,
### and effect.
###
### E.g., "a{shift+left_10} " -> [[None, "a", None], ["shift", "left",
### "10"], [None, "space", None]]
###
### Update: The chord's text is also stored for unparsing without information loss.
### E.g., "{{}" -> [None, "{", None, "{{}"]
###
def parse_into_chords(specification):
chords = []
while len(specification) > 0:
m = chord_pattern.match(specification)
if not m:
raise ValueError(f'Cannot parse chords from specification {specification}')
modifiers = m.group(1)
if modifiers: modifiers = modifiers[:-1] # remove final "+"
chords += [[modifiers, m.group(2), m.group(3), m.group(0)]]
specification = specification[m.end():]
return chords
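# Illustrative usage (added sketch, not part of the original module; the
# specification string below is hypothetical):
def _example_parse_into_chords():
    return parse_into_chords("{shift+left_10}")
    # -> [["shift", "left", "10", "{shift+left_10}"]]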
# Because we can't be sure of the current code page, treat all non-ASCII
# characters as potential accented letters for now.
chord_pattern = re.compile(r"""\{ ( (?: [a-zA-Z0-9\x80-\xff]+ \+ )* )
( . | [-a-zA-Z0-9/*+.\x80-\xff]+ )
(?: [ _] (\d+|hold|release) )?
\}""", re.VERBOSE|re.IGNORECASE)
###
###
###
def chord_to_events(chord):
modifiers, base, effect, text = chord
if base == " ":
base = "space"
if modifiers:
modifiers = modifiers.split("+")
else:
modifiers = []
hold_count = release_count = 1
if effect:
effect = effect.lower()
if effect == "hold": release_count = 0
elif effect == "release": hold_count = 0
else:
hold_count = int(effect)
if hold_count == 0:
# check for bad names even when no events:
for modifier in modifiers:
single(modifier, False)
single(base, False)
return []
if len(base) == 1:
try:
m, f = how_type_character(base)
if debug and (len(m)>0 or describe_key(f)!=base):
mm = ""
if m: mm = '+'.join(m) + "+"
bb = "<" + base + ">"
if ord(base[0])<32: bb = hex(ord(base[0]))
print("typing " + bb + " by {" + mm + describe_key(f) + "}")
modifiers += m
base = "VK" + hex(f)
except:
if debug and ord(base[0])<128:
bb = "<" + base + ">"
if ord(base[0])<32: bb = hex(ord(base[0]))
print("can't type " + bb + " on current keyboard layout")
pass
events = []
modifiers_down = []
modifiers_up = []
for modifier in modifiers:
modifiers_down += single(modifier, False)
modifiers_up = modifiers_up + single(modifier, True)
try:
# down down up (hardware auto-repeat style) fails so use down,up pairs:
if hold_count > 1:
return modifiers_down \
+ (single(base,False)+single(base, True))*hold_count \
+ modifiers_up
if hold_count > 0:
events += modifiers_down + single(base,False)*hold_count
if release_count > 0:
events += single(base, True) + modifiers_up
return events
except:
if len(base) != 1:
raise
if len(modifiers) != 0:
print("Warning: unable to use modifiers with character: " + base)
# Unicode?
if release_count==0:
print("Warning: unable to independently hold character: " + base)
if hold_count==0:
print("Warning: unable to independently release character: " + base)
return []
if debug:
print("using numpad entry for: " + base)
return windows1252_to_events(ord(base[0])) * hold_count
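# Illustrative usage (added sketch, not part of the original module): a chord
# as produced by parse_into_chords(), expanded into press/release events; this
# assumes the sendinput helpers imported above.
def _example_chord_to_events():
    return chord_to_events(["ctrl", "left", "2", "{ctrl+left_2}"])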
###
### Pressing/releasing a single generalized virtual key or mouse button
###
##
## Keyboard key names:
##
Key_name = {
#
# SendDragonKeys virtual key names:
#
"alt" : VK_MENU,
"back" : VK_BACK,
"backspace" : VK_BACK,
"break" : VK_CANCEL,
"capslock" : VK_CAPITAL,
"center" : VK_CLEAR,
"ctrl" : VK_CONTROL,
"del" : VK_DELETE,
"down" : VK_DOWN,
"end" : VK_END,
"enter" : VK_RETURN,
"esc" : VK_ESCAPE,
"escape" : VK_ESCAPE,
"home" : VK_HOME,
"ins" : VK_INSERT,
"left" : VK_LEFT,
"numlock" : VK_NUMLOCK,
"pagedown" : VK_NEXT,
"pageup" : VK_PRIOR,
"pgdn" : VK_NEXT,
"pgup" : VK_PRIOR,
"pause" : VK_PAUSE,
"prtsc" : VK_SNAPSHOT,
"right" : VK_RIGHT,
"scrolllock" : VK_SCROLL,
"shift" : VK_SHIFT,
"space" : VK_SPACE,
#"sysreq" : VK_SYSREQ,# <<<>>>
"tab" : VK_TAB,
"up" : VK_UP,
"f1" : VK_F1,
"f2" : VK_F2,
"f3" : VK_F3,
"f4" : VK_F4,
"f5" : VK_F5,
"f6" : VK_F6,
"f7" : VK_F7,
"f8" : VK_F8,
"f9" : VK_F9,
"f10" : VK_F10,
"f11" : VK_F11,
"f12" : VK_F12,
"f13" : VK_F13,
"f14" : VK_F14,
"f15" : VK_F15,
"f16" : VK_F16,
"numkey/" : VK_DIVIDE,
"numkey*" : VK_MULTIPLY,
"numkey-" : VK_SUBTRACT,
"numkey+" : VK_ADD,
"numkey0" : VK_NUMPAD0,
"numkey1" : VK_NUMPAD1,
"numkey2" : VK_NUMPAD2,
"numkey3" : VK_NUMPAD3,
"numkey4" : VK_NUMPAD4,
"numkey5" : VK_NUMPAD5,
"numkey6" : VK_NUMPAD6,
"numkey7" : VK_NUMPAD7,
"numkey8" : VK_NUMPAD8,
"numkey9" : VK_NUMPAD9,
"numkey." : VK_DECIMAL,
"numkeyenter" : GK_NUM_RETURN,
"extdel" : GK_NUM_DELETE,
"extdown" : GK_NUM_DOWN,
"extend" : GK_NUM_END,
"exthome" : GK_NUM_HOME,
"extins" : GK_NUM_INSERT,
"extleft" : GK_NUM_LEFT,
"extpgdn" : GK_NUM_NEXT,
"extpgup" : GK_NUM_PRIOR,
"extright" : GK_NUM_RIGHT,
"extup" : GK_NUM_UP,
"leftalt" : VK_LMENU,
"rightalt" : VK_RMENU,
"leftctrl" : VK_LCONTROL,
"rightctrl" : VK_RCONTROL,
"leftshift" : VK_LSHIFT,
"rightshift" : VK_RSHIFT,
"0" : VK_0,
"1" : VK_1,
"2" : VK_2,
"3" : VK_3,
"4" : VK_4,
"5" : VK_5,
"6" : VK_6,
"7" : VK_7,
"8" : VK_8,
"9" : VK_9,
"a" : VK_A,
"b" : VK_B,
"c" : VK_C,
"d" : VK_D,
"e" : VK_E,
"f" : VK_F,
"g" : VK_G,
"h" : VK_H,
"i" : VK_I,
"j" : VK_J,
"k" : VK_K,
"l" : VK_L,
"m" : VK_M,
"n" : VK_N,
"o" : VK_O,
"p" : VK_P,
"q" : VK_Q,
"r" : VK_R,
"s" : VK_S,
"t" : VK_T,
"u" : VK_U,
"v" : VK_V,
"w" : VK_W,
"x" : VK_X,
"y" : VK_Y,
"z" : VK_Z,
#
# New names for virtual keys:
#
"win" : VK_LWIN,
"leftwin" : VK_LWIN,
"rightwin" : VK_RWIN,
"apps" : VK_APPS, # name may change...
"f17" : VK_F17,
"f18" : VK_F18,
"f19" : VK_F19,
"f20" : VK_F20,
"f21" : VK_F21,
"f22" : VK_F22,
"f23" : VK_F23,
"f24" : VK_F24,
"browserback" : VK_BROWSER_BACK,
"browserfavorites" : VK_BROWSER_FAVORITES,
"browserforward" : VK_BROWSER_FORWARD,
"browserhome" : VK_BROWSER_HOME,
"browserrefresh" : VK_BROWSER_REFRESH,
"browsersearch" : VK_BROWSER_SEARCH,
"browserstop" : VK_BROWSER_STOP,
# these names may change in the future...
"launchapp1" : VK_LAUNCH_APP1,
"launchapp2" : VK_LAUNCH_APP2,
"launchmail" : VK_LAUNCH_MAIL,
"launchmediaselect" : VK_LAUNCH_MEDIA_SELECT,
"medianexttrack" : VK_MEDIA_NEXT_TRACK,
"mediaplaypause" : VK_MEDIA_PLAY_PAUSE,
"mediaprevioustrack" : VK_MEDIA_PREV_TRACK,
"mediastop" : VK_MEDIA_STOP,
"volumedown" : VK_VOLUME_DOWN,
"volumemute" : VK_VOLUME_MUTE,
"volumeup" : VK_VOLUME_UP,
# possibly more names to come...
"oem1" : VK_OEM_1,
"oem2" : VK_OEM_2,
"oem3" : VK_OEM_3,
"oem4" : VK_OEM_4,
"oem5" : VK_OEM_5,
"oem6" : VK_OEM_6,
"oem7" : VK_OEM_7,
"oem8" : VK_OEM_8,
"oem102" : VK_OEM_102,
"oemcomma" : VK_OEM_COMMA,
"oemminus" : VK_OEM_MINUS,
"oemperiod" : VK_OEM_PERIOD,
"oemplus" : VK_OEM_PLUS,
}
Code_to_name = {}
for name in Key_name.keys():
Code_to_name[Key_name[name]] = name
def describe_key(code):
try:
return Code_to_name[code]
except:
return "VK" + hex(code)
##
## Mouse button names:
##
Button_name = {
"leftbutton" : "left", # really primary button
"middlebutton" : "middle",
"rightbutton" : "right", # really secondary button
"xbutton1" : "X1",
"xbutton2" : "X2",
}
GetSystemMetrics = windll.user32.GetSystemMetrics
GetSystemMetrics.argtypes = [c_int]
GetSystemMetrics.restype = c_int
# Convert ExtendSendDragonKeys mouse button names to those required
# by SendInput.py, swapping left & right buttons if user has "Switch
# primary and secondary buttons" selected:
def get_mouse_button(button_name):
try:
button = Button_name[button_name.lower()]
if button=="left" or button=="right":
if GetSystemMetrics(win32con.SM_SWAPBUTTON):
if button=="left":
button = "right"
else:
button = "left"
return button
except:
        raise KeyError("unknown mouse button: " + button_name)
##
## Create a single virtual event to press or release a keyboard key or
## mouse button:
##
def single(key, releasing):
# universal syntax is VK0xhh for virtual key with code 0xhh:
if key[0:4] == "VK0x":
return [virtual_key_event(int(key[4:],16), releasing)]
lower_key = key.lower()
try:
return [virtual_key_event(Key_name[lower_key], releasing)]
except:
try:
return [mouse_button_event(get_mouse_button(lower_key), releasing)]
except:
raise KeyError("unknown key/button: " + key)
###
###
###
DWORD = c_ulong # 32 bits
SHORT = c_short # 16 bits
#TCHAR = c_char # if not using Unicode
TCHAR = c_wchar # if using Unicode
HKL = HANDLE = PVOID = c_void_p
GetKeyboardLayout = windll.user32.GetKeyboardLayout
GetKeyboardLayout.argtypes = [DWORD]
GetKeyboardLayout.restype = HKL
VkKeyScan = windll.user32.VkKeyScanW
VkKeyScan.argtypes = [TCHAR]
VkKeyScan.restype = SHORT
VkKeyScanEx = windll.user32.VkKeyScanExW
VkKeyScanEx.argtypes = [TCHAR, HKL]
VkKeyScanEx.restype = SHORT
def how_type_character(char):
how_type = VkKeyScan(char)
virtual_key = how_type & 0xff
if virtual_key == 0xff:
raise ValueError("unable to type character with current keyboard layout: "
+ char)
modifiers = []
if how_type&0x400: modifiers += ["alt"]
if how_type&0x200: modifiers += ["ctrl"]
if how_type&0x100: modifiers += ["shift"]
if how_type&0xf800:
raise ValueError("unknown modifiers required, tell MDL: " + hex(how_type))
return modifiers, virtual_key
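# Illustrative usage (added sketch, not part of the original module; only
# meaningful on Windows, and the result depends on the active keyboard layout):
def _example_how_type_character():
    return how_type_character("A")   # e.g. (["shift"], VK_A) on a US layout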
###
###
###
def windows1252_to_events(code):
events = []
events += single("alt", False)
events += numpad(0)
    events += numpad(code // 100 % 10)
    events += numpad(code // 10 % 10)
    events += numpad(code // 1 % 10)
events += single("alt", True)
return events
def numpad(i):
return chord_to_events([None, "numkey"+str(i), None, "{numkey"+str(i)+"}"])
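# Illustrative usage (added sketch, not part of the original module): typing a
# Windows-1252 character via Alt+numpad entry; 0xE9 is "é".
def _example_windows1252_to_events():
    return windows1252_to_events(0xE9)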
|
mit
| 4,115,163,699,120,067,600
| 27.289823
| 87
| 0.492805
| false
| 2.976257
| false
| false
| false
|
peterrenshaw/zerotasks
|
machine.py
|
1
|
1465
|
#!/usr/bin/env python
# ~*~ encoding: utf-8 ~*~
#=======
# _____ ______ __
# /__ / ___ _________ /_ __/___ ______/ /_______
# / / / _ \/ ___/ __ \ / / / __ `/ ___/ //_/ ___/
# / /__/ __/ / / /_/ / / / / /_/ (__ ) ,< (__ )
# /____/\___/_/ \____/ /_/ \__,_/____/_/|_/____/
#
# This file is part of Zero Tasks.
#
# Zero Tasks is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zero Tasks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zero Tasks. If not, see <http://www.gnu.org/licenses/gpl-3.0.txt>.
#
# name: machine.py
# date: 2016NOV10
# prog: pr
# desc: machine dependent Zero Tasks: read docs/ABOUT.txt
#======
#------
# DIR structure is as follows
#
# /HOME/REL_PATH/APP_DIR
#
HOME = "/path/to/user"
REL_PATH = "relative/path"
APP_DIR = "zerotasks"
#------
def main():
"""main entry point for cli"""
pass
# main entry point for cli
if __name__ == "__main__":
main()
# vim: ff=unix:ts=4:sw=4:tw=78:noai:expandtab
|
gpl-3.0
| -7,992,657,440,703,624,000
| 25.636364
| 80
| 0.54744
| false
| 2.977642
| false
| false
| false
|
lukesneeringer/fauxquests
|
fauxquests/session.py
|
1
|
4388
|
from __future__ import unicode_literals
from collections import namedtuple
from fauxquests.adapter import FauxAdapter
from fauxquests.compat import mock
from requests.compat import OrderedDict
from requests.sessions import Session
from sdict import AlphaSortedDict
class FauxServer(Session):
"""A class that can register certain endpoints to have false
responses returned.
"""
def __init__(self, adapter_class=FauxAdapter, url_pattern='%s'):
"""Create a new Fauxquests instance, which knows how to
mock out requests.session.Session and insert itself.
If a `url_pattern` is provided, then all URLs registered
are interpolated through the `url_pattern`.
"""
# Initialize this object.
super(FauxServer, self).__init__()
self.patcher = mock.patch('requests.sessions.Session',
return_value=self)
self.adapters = OrderedDict()
# Write settings to this object.
self.adapter_class = adapter_class
self.url_pattern = url_pattern
# Save a list of registrations to apply to any FauxAdapter
# that this FauxServer creates.
self.registrations = {}
def __enter__(self):
"""Mock out `requests.session.Session`, replacing it with this
object.
"""
return self.start()
def __exit__(self, type, value, traceback):
return self.stop()
def register(self, url, response, status_code=200, method='GET',
headers=None, **kwargs):
"""Register a given URL and response with this FauxServer.
Internally, this object's context manager creates and returns a
        FauxAdapter, so registrations within a context manager go away
when the context manager is exited.
This method, however, is run before the context manager is applied,
and applies universally to all adapters this object creates.
"""
self.registrations[url] = Registration('', response, status_code,
method, headers, kwargs)
def register_json(self, url, response, status_code=200,
method='GET', headers=None, **kwargs):
"""Register a given URL and response with this FauxServer.
Internally, this object's context manager creates and returns a
        FauxAdapter, so registrations within a context manager go away
when the context manager is exited.
This method, however, is run before the context manager is applied,
and applies universally to all adapters this object creates.
"""
self.registrations[url] = Registration('json', response, status_code,
method, headers, kwargs)
    def start(self):
        """Institute the patching process, meaning requests sent to
requests (how meta) are caught and handled by our adapter instead.
"""
# Mount the Fauxquests adapter, which handles delivery of
# responses based on the provided URL.
adapter = self.adapter_class(url_pattern=self.url_pattern)
self.mount('https://', adapter)
self.mount('http://', adapter)
# Iterate over any registrations that are saved as part of this
# FauxServer object and register them to the Adapter.
for url, reg in self.registrations.items():
# Is this a plain registration or a JSON registration?
method_name = 'register'
if reg.type:
method_name += '_' + reg.type
# Forward the registration to the adapter.
getattr(adapter, method_name)(url, reg.response, reg.status_code,
reg.method, reg.headers, **reg.kwargs)
# Start the patcher.
self.patcher.start()
# Return the adapter object, which can accept registred
# URLs with responses
return adapter
def stop(self):
"""Undo the patching process set up in `self.start`, and also
set this object back to having no adapters.
"""
self.patcher.stop()
self.adapters = OrderedDict()
Registration = namedtuple('Registration', ['type', 'response', 'status_code',
'method', 'headers', 'kwargs'])
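# Illustrative sketch (not part of the original module; the URL and payload are
# hypothetical): register a canned JSON response up front, then exercise it
# through the patched session inside the context manager.
def _example_faux_server():
    faux = FauxServer()
    faux.register_json('https://api.example.com/widgets/', {'widgets': []})
    with faux:
        return faux.get('https://api.example.com/widgets/')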
|
bsd-3-clause
| -9,205,908,773,695,624,000
| 38.890909
| 80
| 0.616226
| false
| 4.748918
| false
| false
| false
|
ntoll/yotta
|
yotta/lib/registry_access.py
|
1
|
22721
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import re
import logging
from collections import OrderedDict
import uuid
import functools
import json
import binascii
import calendar
import datetime
import hashlib
import itertools
import base64
import webbrowser
import os
try:
from urllib import quote as quoteURL
except ImportError:
from urllib.parse import quote as quoteURL
# requests, apache2
import requests
# PyJWT, MIT, Jason Web Tokens, pip install PyJWT
import jwt
# cryptography, Apache License, Python Cryptography library,
import cryptography
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
# settings, , load and save settings, internal
import settings
# access_common, , things shared between different component access modules, internal
import access_common
# version, , represent versions and specifications, internal
import version
# Ordered JSON, , read & write json, internal
import ordered_json
# Github Access, , access repositories on github, internal
import github_access
# export key, , export pycrypto keys, internal
import exportkey
Registry_Base_URL = 'https://registry.yottabuild.org'
Registry_Auth_Audience = 'http://registry.yottabuild.org'
Website_Base_URL = 'http://yottabuild.org'
_OpenSSH_Keyfile_Strip = re.compile(b"^(ssh-[a-z0-9]*\s+)|(\s+.+\@.+)|\n", re.MULTILINE)
logger = logging.getLogger('access')
# suppress logging from the requests library
logging.getLogger("requests").setLevel(logging.WARNING)
class AuthError(RuntimeError):
pass
# Internal functions
def generate_jwt_token(private_key, registry=None):
registry = registry or Registry_Base_URL
expires = calendar.timegm((datetime.datetime.utcnow() + datetime.timedelta(hours=2)).timetuple())
prn = _fingerprint(private_key.public_key())
logger.debug('fingerprint: %s' % prn)
token_fields = {
"iss": 'yotta',
"aud": registry,
"prn": prn,
"exp": str(expires)
}
logger.debug('token fields: %s' % token_fields)
private_key_pem = private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
token = jwt.encode(token_fields, private_key_pem.decode('ascii'), 'RS256').decode('ascii')
logger.debug('encoded token: %s' % token)
return token
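# Illustrative sketch (not part of the original module): signing a short-lived
# registry token with a throwaway key built from the same `cryptography`
# primitives used above.
def _example_generate_token():
    key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend()
    )
    return generate_jwt_token(key, registry=Registry_Base_URL)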
def _pubkeyWireFormat(pubkey):
pubk_numbers = pubkey.public_numbers()
logger.debug('openssh format publickey:\n%s' % exportkey.openSSH(pubk_numbers))
return quoteURL(_OpenSSH_Keyfile_Strip.sub(b'', exportkey.openSSH(pubk_numbers)))
def _fingerprint(pubkey):
stripped = _OpenSSH_Keyfile_Strip.sub(b'', exportkey.openSSH(pubkey.public_numbers()))
decoded = base64.b64decode(stripped)
khash = hashlib.md5(decoded).hexdigest()
return ':'.join([khash[i:i+2] for i in range(0, len(khash), 2)])
def _returnRequestError(fn):
''' Decorator that captures requests.exceptions.RequestException errors
        and returns them as an error message. If no error occurs the return
value of the wrapped function is returned (normally None). '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
return "server returned status %s: %s" % (e.response.status_code, e.message)
return wrapped
def _handleAuth(fn):
''' Decorator to re-try API calls after asking the user for authentication. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized:
logger.debug('%s unauthorised', fn)
# any provider is sufficient for registry auth
github_access.authorizeUser(provider=None)
logger.debug('retrying after authentication...')
return fn(*args, **kwargs)
else:
raise
return wrapped
def _friendlyAuthError(fn):
''' Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized:
logger.error('insufficient permission')
return None
else:
raise
return wrapped
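# Illustrative sketch (not part of the original module): the decorator order
# documented above, with the friendly-error wrapper outside the re-auth
# wrapper, applied to a hypothetical authenticated request against the
# registry root.
@_friendlyAuthError
@_handleAuth
def _example_authenticated_request(registry=None):
    registry = registry or Registry_Base_URL
    response = requests.get(registry, headers=_headersForRegistry(registry))
    response.raise_for_status()
    return response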
def _getPrivateRegistryKey():
if 'YOTTA_PRIVATE_REGISTRY_API_KEY' in os.environ:
return os.environ['YOTTA_PRIVATE_REGISTRY_API_KEY']
return None
def _listVersions(namespace, name):
sources = _getSources()
registry_urls = [s['url'] for s in sources if 'type' in s and s['type'] == 'registry']
# look in the public registry last
registry_urls.append(Registry_Base_URL)
versions = []
for registry in registry_urls:
# list versions of the package:
url = '%s/%s/%s/versions' % (
registry,
namespace,
name
)
request_headers = _headersForRegistry(registry)
logger.debug("GET %s, %s", url, request_headers)
response = requests.get(url, headers=request_headers)
if response.status_code == 404:
continue
# raise any other HTTP errors
response.raise_for_status()
for x in ordered_json.loads(response.text):
rtv = RegistryThingVersion(x, namespace, name, registry=registry)
if not rtv in versions:
versions.append(rtv)
if not len(versions):
raise access_common.Unavailable(
('%s does not exist in the %s registry. '+
'Check that the name is correct, and that it has been published.') % (name, namespace)
)
return versions
def _tarballURL(namespace, name, version, registry=None):
registry = registry or Registry_Base_URL
return '%s/%s/%s/versions/%s/tarball' % (
registry, namespace, name, version
)
def _getTarball(url, directory, sha256):
logger.debug('registry: get: %s' % url)
if not sha256:
logger.warn('tarball %s has no hash to check' % url)
# figure out which registry we're fetching this tarball from (if any) and
# add appropriate headers
registry = Registry_Base_URL
for source in _getSources():
if ('type' in source and source['type'] == 'registry' and
'url' in source and url.startswith(source['url'])):
registry = source['url']
break
request_headers = _headersForRegistry(registry)
logger.debug('GET %s, %s', url, request_headers)
response = requests.get(url, headers=request_headers, allow_redirects=True, stream=True)
response.raise_for_status()
return access_common.unpackTarballStream(response, directory, ('sha256', sha256))
def _getSources():
sources = settings.get('sources')
if sources is None:
sources = []
return sources
def _isPublicRegistry(registry):
return (registry is None) or (registry == Registry_Base_URL)
def _friendlyRegistryName(registry):
return registry
def _getPrivateKey(registry):
if _isPublicRegistry(registry):
return settings.getProperty('keys', 'private')
else:
for s in _getSources():
if _sourceMatches(s, registry):
if 'keys' in s and s['keys'] and 'private' in s['keys']:
return s['keys']['private']
return None
def _sourceMatches(source, registry):
return ('type' in source and source['type'] == 'registry' and
'url' in source and source['url'] == registry)
def _generateAndSaveKeys(registry=None):
registry = registry or Registry_Base_URL
k = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
privatekey_pem = k.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
pubkey_pem = k.public_key().public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
if _isPublicRegistry(registry):
settings.setProperty('keys', 'private', privatekey_pem.decode('ascii'))
settings.setProperty('keys', 'public', pubkey_pem.decode('ascii'))
else:
sources = _getSources()
keys = None
for s in sources:
if _sourceMatches(s, registry):
if not 'keys' in s:
s['keys'] = dict()
keys = s['keys']
break
if keys is None:
keys = dict()
sources.append({
'type':'registry',
'url':registry,
'keys':keys
})
keys['private'] = privatekey_pem.decode('ascii')
keys['public'] = pubkey_pem.decode('ascii')
settings.set('sources', sources)
return pubkey_pem, privatekey_pem
def _getPrivateKeyObject(registry=None):
registry = registry or Registry_Base_URL
privatekey_pem = _getPrivateKey(registry)
if not privatekey_pem:
pubkey_pem, privatekey_pem = _generateAndSaveKeys(registry)
else:
# settings are unicode, we should be able to safely decode to ascii for
# the key though, as it will either be hex or PEM encoded:
privatekey_pem = privatekey_pem.encode('ascii')
    # if the key doesn't look like PEM, it might be hex-encoded DER (which we
# used historically), so try loading that:
if b'-----BEGIN PRIVATE KEY-----' in privatekey_pem:
return serialization.load_pem_private_key(
privatekey_pem, None, default_backend()
)
else:
privatekey_der = binascii.unhexlify(privatekey_pem)
return serialization.load_der_private_key(
privatekey_der, None, default_backend()
)
def _headersForRegistry(registry):
registry = registry or Registry_Base_URL
auth_token = generate_jwt_token(_getPrivateKeyObject(registry), registry)
r = {
'Authorization': 'Bearer %s' % auth_token
}
if registry == Registry_Base_URL:
return r
for s in _getSources():
if _sourceMatches(s, registry):
if 'apikey' in s:
r['X-Api-Key'] = s['apikey']
break
return r
# API
class RegistryThingVersion(access_common.RemoteVersion):
def __init__(self, data, namespace, name, registry=None):
logger.debug('RegistryThingVersion %s/%s data: %s' % (namespace, name, data))
version = data['version']
self.namespace = namespace
self.name = name
self.version = version
if 'hash' in data and 'sha256' in data['hash']:
self.sha256 = data['hash']['sha256']
else:
self.sha256 = None
url = _tarballURL(self.namespace, self.name, version, registry)
super(RegistryThingVersion, self).__init__(
version, url, name=name, friendly_source=_friendlyRegistryName(registry)
)
def unpackInto(self, directory):
assert(self.url)
_getTarball(self.url, directory, self.sha256)
class RegistryThing(access_common.RemoteComponent):
def __init__(self, name, version_spec, namespace):
self.name = name
self.spec = version_spec
self.namespace = namespace
@classmethod
def createFromSource(cls, vs, name, registry):
''' returns a registry component for anything that's a valid package
name (this does not guarantee that the component actually exists in
the registry: use availableVersions() for that).
'''
# we deliberately allow only lowercase, hyphen, and (unfortunately)
# numbers in package names, to reduce the possibility of confusingly
# similar names: if the name doesn't match this then escalate to make
# the user fix it
name_match = re.match('^([a-z0-9-]+)$', name)
if not name_match:
raise ValueError('Dependency name "%s" is not valid (must contain only lowercase letters, hyphen, and numbers)' % name)
assert(vs.semantic_spec)
return RegistryThing(name, vs.semantic_spec, registry)
def versionSpec(self):
return self.spec
def availableVersions(self):
''' return a list of Version objects, each able to retrieve a tarball '''
return _listVersions(self.namespace, self.name)
def tipVersion(self):
raise NotImplementedError()
@classmethod
def remoteType(cls):
return 'registry'
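# Hedged usage sketch (added for illustration, not part of the original module).
# It assumes `vs` is a version-spec object exposing `.semantic_spec`, as expected
# by createFromSource, and that the base RemoteVersion exposes `.version` and
# `.url`; the module name and the 'modules' namespace are made up for the example.
def _example_registry_thing_usage(vs):
    thing = RegistryThing.createFromSource(vs, 'example-module', 'modules')
    for remote_version in thing.availableVersions():
        logger.debug('available: %s from %s', remote_version.version, remote_version.url)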
@_handleAuth
def publish(namespace, name, version, description_file, tar_file, readme_file,
readme_file_ext, registry=None):
    ''' Publish a tarblob to the registry. If the request fails, an exception
        is raised, which either triggers re-authentication or is turned into a
        return value by the decorators. (If successful, the decorated function
        returns None.)
    '''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
if readme_file_ext == '.md':
readme_section_name = 'readme.md'
elif readme_file_ext == '':
readme_section_name = 'readme'
else:
        raise ValueError('unsupported readme type: "%s"' % readme_file_ext)
# description file is in place as text (so read it), tar file is a file
body = OrderedDict([('metadata', (None, description_file.read(),'application/json')),
('tarball',('tarball', tar_file)),
(readme_section_name, (readme_section_name, readme_file))])
headers = _headersForRegistry(registry)
response = requests.put(url, headers=headers, files=body)
if not response.ok:
return "server returned status %s: %s" % (response.status_code, response.text)
return None
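# Hedged usage sketch (illustrative only): one way calling code might drive
# publish(). The file names, namespace, module name and version are assumptions;
# only the argument order matches the signature above.
def _example_publish_usage():
    with open('module.json') as description_file, \
         open('example-module-0.0.1.tar.gz', 'rb') as tar_file, \
         open('readme.md') as readme_file:
        error = publish('modules', 'example-module', '0.0.1',
                        description_file, tar_file, readme_file, '.md')
        if error:
            logger.error(error)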
@_handleAuth
def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
headers = _headersForRegistry(registry)
response = requests.delete(url, headers=headers)
if not response.ok:
return "server returned status %s: %s" % (response.status_code, response.text)
return None
@_friendlyAuthError
@_handleAuth
def listOwners(namespace, name, registry=None):
''' List the owners of a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners' % (
registry,
namespace,
name
)
request_headers = _headersForRegistry(registry)
response = requests.get(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return []
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
return ordered_json.loads(response.text)
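# Hedged usage sketch (illustrative only): list current owners of a module and
# add a new one. The namespace, module name and email address are assumptions,
# as is the shape of the objects returned by listOwners.
def _example_owner_management():
    for owner in listOwners('modules', 'example-module'):
        logger.info('owner: %s', owner)
    addOwner('modules', 'example-module', 'someone@example.com')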
@_friendlyAuthError
@_handleAuth
def addOwner(namespace, name, owner, registry=None):
''' Add an owner for a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners/%s' % (
registry,
namespace,
name,
owner
)
request_headers = _headersForRegistry(registry)
response = requests.put(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
@_friendlyAuthError
@_handleAuth
def removeOwner(namespace, name, owner, registry=None):
''' Remove an owner for a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners/%s' % (
registry,
namespace,
name,
owner
)
request_headers = _headersForRegistry(registry)
response = requests.delete(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
def search(query='', keywords=[], registry=None):
''' generator of objects returned by the search endpoint (both modules and
targets).
Query is a full-text search (description, name, keywords), keywords
search only the module/target description keywords lists.
If both parameters are specified the search is the intersection of the
two queries.
'''
registry = registry or Registry_Base_URL
url = '%s/search' % registry
headers = _headersForRegistry(registry)
params = {
'skip': 0,
'limit': 50
}
if len(query):
params['query'] = query
if len(keywords):
params['keywords[]'] = keywords
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
objects = ordered_json.loads(response.text)
if len(objects):
for o in objects:
yield o
params['skip'] += params['limit']
else:
break
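# Hedged usage sketch (illustrative only): iterate the search generator and stop
# after a handful of results. The query, keyword and result keys ('name',
# 'description') are assumptions about a typical registry response.
def _example_search_usage():
    for i, result in enumerate(search(query='gpio', keywords=['driver'])):
        logger.info('%s: %s', result.get('name'), result.get('description'))
        if i >= 9:
            break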
def deauthorize(registry=None):
registry = registry or Registry_Base_URL
if _isPublicRegistry(registry):
if settings.get('keys'):
settings.set('keys', dict())
else:
sources = [s for s in _getSources() if not _sourceMatches(s, registry)]
settings.set('sources', sources)
def setAPIKey(registry, api_key):
''' Set the api key for accessing a registry. This is only necessary for
development/test registries.
'''
if (registry is None) or (registry == Registry_Base_URL):
return
sources = _getSources()
source = None
for s in sources:
if _sourceMatches(s, registry):
source = s
if source is None:
source = {
'type':'registry',
'url':registry,
}
sources.append(source)
source['apikey'] = api_key
settings.set('sources', sources)
def getPublicKey(registry=None):
''' Return the user's public key (generating and saving a new key pair if necessary) '''
registry = registry or Registry_Base_URL
pubkey_pem = None
if _isPublicRegistry(registry):
pubkey_pem = settings.getProperty('keys', 'public')
else:
for s in _getSources():
if _sourceMatches(s, registry):
if 'keys' in s and s['keys'] and 'public' in s['keys']:
pubkey_pem = s['keys']['public']
break
if not pubkey_pem:
pubkey_pem, privatekey_pem = _generateAndSaveKeys()
else:
        # settings are unicode; we should be able to safely encode to ascii for
        # the key though, as it will either be hex or PEM encoded:
pubkey_pem = pubkey_pem.encode('ascii')
    # if the key doesn't look like PEM, it might be hex-encoded DER (which we
# used historically), so try loading that:
if b'-----BEGIN PUBLIC KEY-----' in pubkey_pem:
pubkey = serialization.load_pem_public_key(pubkey_pem, default_backend())
else:
pubkey_der = binascii.unhexlify(pubkey_pem)
pubkey = serialization.load_der_public_key(pubkey_der, default_backend())
return _pubkeyWireFormat(pubkey)
def testLogin(registry=None):
registry = registry or Registry_Base_URL
url = '%s/users/me' % (
registry
)
request_headers = _headersForRegistry(registry)
logger.debug('test login...')
response = requests.get(url, headers=request_headers)
response.raise_for_status()
def getAuthData(registry=None):
''' Poll the registry to get the result of a completed authentication
(which, depending on the authentication the user chose or was directed
to, will include a github or other access token)
'''
registry = registry or Registry_Base_URL
url = '%s/tokens' % (
registry
)
request_headers = _headersForRegistry(registry)
logger.debug('poll for tokens... %s', request_headers)
try:
response = requests.get(url, headers=request_headers)
except requests.RequestException as e:
logger.debug(str(e))
return None
if response.status_code == requests.codes.unauthorized:
logger.debug('Unauthorised')
return None
elif response.status_code == requests.codes.not_found:
logger.debug('Not Found')
return None
body = response.text
    logger.debug('auth data response: %s' % body)
r = {}
parsed_response = ordered_json.loads(body)
if 'error' in parsed_response:
raise AuthError(parsed_response['error'])
for token in parsed_response:
if token['provider'] == 'github':
r['github'] = token['accessToken']
break
    logger.debug('parsed auth tokens %s' % r)
return r
def getLoginURL(provider=None, registry=None):
registry = registry or Registry_Base_URL
if provider:
query = ('?provider=%s' % provider)
else:
query = ''
if not _isPublicRegistry(registry):
if not len(query):
query = '?'
query += '&private=1'
return Website_Base_URL + '/' + query + '#login/' + getPublicKey(registry)
def openBrowserLogin(provider=None, registry=None):
registry = registry or Registry_Base_URL
webbrowser.open(getLoginURL(provider=provider, registry=registry))
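# Hedged sketch (illustrative only): a possible interactive login flow built from
# the helpers above - open the browser, then poll until tokens arrive. The retry
# count and sleep interval are assumptions, not values taken from this module.
def _example_interactive_login(registry=None):
    import time
    openBrowserLogin(registry=registry)
    for _ in range(30):
        tokens = getAuthData(registry=registry)
        if tokens:
            return tokens
        time.sleep(5)
    return None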
|
apache-2.0
| -412,501,898,421,974,500
| 31.928986
| 131
| 0.632763
| false
| 4.100523
| false
| false
| false
|
myboycrais99/Rule1
|
GetGrowthRateExceptions.py
|
1
|
4224
|
'''
This file provides the maximum allowed number of years that a stock may
fail to meet the desired growth rates. The default is 0 years.
This value is used in PassGrowthRate.py.
'''
# -------------------------- Required Files --------------------------------
#
#
# --------------------------------------------------------------------
# --------------------------Variable Declaration ---------------------
# years: global variable
# years_exceptions: This is the maximum allowed number of years that
# the stock is allowed to fail to meet the desired
# growth rates.
# loop_counter: used for the while loop.
# invalid_answer: boolean expression... 0 is valid. 1 is invalid.
#
# --------------------------------------------------------------------
#default value
years_exceptions = 0
print()
#print filename
print('Filename: GetGrowthRateExceptions')
#This function requests the user to input a value. If an invalid answer is supplied, it
#allows the operator three attempts before it sets the default value. (The interactive
#logic below is currently commented out, and a fixed value is returned instead.)
def get_years_exceptions ():
# years_exceptions = input('Maximum allowed number of years that a stock is allowed '
# 'to fail to meet the desired growth rates: ')
# print()
#
#
# #Set to default values
# #invalid_answer = 1: it's invalid
# #invalid_answer =0: valid
# invalid_answer = 1
#
# # max number of loops = 3. Starting counter at 0.
# loop_counter = 0
#
# #Check if the value is a number below 5. If it's not, ask for a new value.
# #Allow for three attempts before setting default value to 0.
# while loop_counter <= 3 and invalid_answer == 1:
#
# if years_exceptions == '0':
# invalid_answer = 0 #0 means it's valid
# elif years_exceptions == '1':
# invalid_answer = 0
# elif years_exceptions == '2':
# invalid_answer = 0
# elif years_exceptions == '3':
# invalid_answer = 0
# elif years_exceptions == '4':
# invalid_answer = 0
# else:
# years_exceptions = input('You entered an invalid answer. Please try again: ')
#
# loop_counter = loop_counter + 1
#
# #end while loop
#
# #Check the final looped value was valid or not
# if loop_counter == 4 and invalid_answer == 1:
# if years_exceptions == '0':
# invalid_answer = 0 #0 means it's valid
# elif years_exceptions == '1':
# invalid_answer = 0
# elif years_exceptions == '2':
# invalid_answer = 0
# elif years_exceptions == '3':
# invalid_answer = 0
# elif years_exceptions == '4':
# invalid_answer = 0
# #end if
#
# # Check if loop_counter = 4. If it does, set the years_exception to default value 0
# if loop_counter == 4 and invalid_answer == 1:
# years_exceptions = 0
# print()
# print()
# print('you suck as you apparently can not follow the simplest of instructions.')
# print('I am overriding your answer to 0')
# print()
# print('years exceptions: ', years_exceptions)
#
# #since inputs are always as a string, this function converts it to an integer
# years_exceptions = int(years_exceptions)
#
# print()
# print('years exceptions: ', years_exceptions)
#temporarily overriding years exceptions to a default value for troubleshooting
years_exceptions = 1
    print('years exempt:', years_exceptions)
return years_exceptions
# END FUNCTION
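#Hedged sketch (not part of the original flow): a compact interactive version of
#the commented-out validation above. It accepts 0-4, allows three attempts, and
#falls back to a default of 0; the prompt wording is an assumption.
def get_years_exceptions_interactive(max_attempts=3, default=0):
    for _ in range(max_attempts):
        answer = input('Maximum allowed number of years a stock may miss the growth rates (0-4): ')
        if answer in ('0', '1', '2', '3', '4'):
            return int(answer)
        print('Invalid answer. Please enter a whole number between 0 and 4.')
    print('No valid answer given; using the default of', default)
    return default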
#call the function
get_years_exceptions()
#outside_funcion_exceptions = get_years_exceptions()
#print()
#print('final answer \n years exceptions =', outside_funcion_exceptions)
#todo: instead of printing a nice message, the values that pass the criteria need to
#todo: then move on to another list or be exported etc.
|
mit
| 3,795,689,030,596,564,000
| 33.813559
| 94
| 0.54285
| false
| 4.258065
| false
| false
| false
|
firmadyne/scraper
|
firmware/spiders/centurylink.py
|
1
|
1956
|
from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
import urllib.request, urllib.parse, urllib.error
# http://home.centurytel.net/ihd/
class CenturyLinkSpider(Spider):
name = "centurylink"
allowed_domains = ["centurylink.com"]
start_urls = ["http://internethelp.centurylink.com/internethelp/downloads-auto-firmware-q.html"]
def parse(self, response):
product = None
for section in response.xpath("//div[@class='product-content']/div[@class='product-box2']/div"):
text = section.xpath(".//text()").extract()
if not section.xpath(".//a"):
product = text[0].strip()
else:
for link in section.xpath(".//a/@href").extract():
if link.endswith(".html"):
yield Request(
url=urllib.parse.urljoin(response.url, link),
meta={"product": product,
"version": FirmwareLoader.find_version(text)},
headers={"Referer": response.url},
callback=self.parse_download)
def parse_download(self, response):
for link in response.xpath("//div[@id='auto']//a"):
href = link.xpath("./@href").extract()[0]
text = link.xpath(".//text()").extract()[0]
if ("downloads" in href or "firmware" in href) and \
not href.endswith(".html"):
item = FirmwareLoader(item=FirmwareImage(), response=response)
item.add_value("version", response.meta["version"])
item.add_value("url", href)
item.add_value("description", text)
item.add_value("product", response.meta["product"])
item.add_value("vendor", self.name)
yield item.load_item()
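# Hedged usage note (the output file name is an assumption): from the scraper
# project root this spider would normally be run with scrapy's CLI, e.g.
#   scrapy crawl centurylink -o centurylink_firmware.json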
|
mit
| 4,258,128,315,239,091,000
| 41.521739
| 104
| 0.555215
| false
| 4.327434
| false
| false
| false
|
botswana-harvard/edc-label
|
edc_label/views/change_printer_view.py
|
1
|
1732
|
from django.views.generic.edit import ProcessFormView
from django.urls.base import reverse
from django.http.response import HttpResponseRedirect
from edc_base.models import UserProfile
from django.contrib.auth.mixins import LoginRequiredMixin
class ChangePrinterView(LoginRequiredMixin, ProcessFormView):
success_url = 'edc_label:home_url'
empty_selection = '--'
def post(self, request, *args, **kwargs):
user_profile = UserProfile.objects.get(user=self.request.user)
print_server_name = request.POST.get('print_server_name')
if print_server_name:
if print_server_name == self.empty_selection:
print_server_name = None
request.session['print_server_name'] = print_server_name
user_profile.print_server = print_server_name
clinic_label_printer_name = request.POST.get(
'clinic_label_printer_name')
if clinic_label_printer_name:
if clinic_label_printer_name == self.empty_selection:
clinic_label_printer_name = None
request.session['clinic_label_printer_name'] = clinic_label_printer_name
user_profile.clinic_label_printer = clinic_label_printer_name
lab_label_printer_name = request.POST.get('lab_label_printer_name')
if lab_label_printer_name:
if lab_label_printer_name == self.empty_selection:
lab_label_printer_name = None
request.session['lab_label_printer_name'] = lab_label_printer_name
user_profile.lab_label_printer = lab_label_printer_name
user_profile.save()
success_url = reverse(self.success_url)
return HttpResponseRedirect(redirect_to=success_url)
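# Hedged wiring sketch (assumption, not taken from this code base): one way the
# view could be exposed in the app's urls.py; the route and name are illustrative.
#
#   from django.urls import path
#   from .views import ChangePrinterView
#
#   urlpatterns = [
#       path('change-printer/', ChangePrinterView.as_view(), name='change_printer_url'),
#   ]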
|
gpl-3.0
| 5,477,501,294,457,762,000
| 39.27907
| 84
| 0.671478
| false
| 3.848889
| false
| false
| false
|
OSGeoLabBp/tutorials
|
english/img_processing/code/circles.py
|
1
|
1095
|
import cv2
import numpy as np
import os.path
from sys import argv
if len(argv) < 2:
print("Usage: {} img_file [img_file ...]".format(argv[0]))
exit()
# process images
for fn in argv[1:]:
    src_img = cv2.imread(fn)  # load image; imread returns None on failure instead of raising
    if src_img is None:
        print("Failed to read image {}".format(fn))
        continue
# convert image to gray scale
gray_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
# noise reduction
img = cv2.medianBlur(gray_img, 5)
#find circles
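    # (added note) HoughCircles arguments: dp=1 keeps the accumulator at the input
    # resolution, minDist=400 px is the minimum spacing between detected centres,
    # param1 is the upper Canny edge threshold, param2 is the accumulator threshold
    # (smaller values yield more, possibly spurious, circles), and min/maxRadius
    # bound the radii considered.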
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 400,
param1=100, param2=30, minRadius=10, maxRadius=1000)
    if circles is None:
        print("No circles found in {}".format(fn))
        continue
    print(circles)
    circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(src_img, (i[0], i[1]), i[2], (0, 255, 0), 10)
# draw the center of the circle
cv2.circle(src_img, (i[0], i[1]), 2, (0,0,255), 10)
fn1 = os.path.split(fn)
fn2 = os.path.join(fn1[0], "c_" + fn1[1])
cv2.imwrite(fn2, src_img)
cv2.imshow('circles', src_img)
cv2.waitKey(0)
|
cc0-1.0
| -2,507,481,706,028,149,000
| 30.285714
| 80
| 0.585388
| false
| 2.793367
| false
| false
| false
|
SNeuhausen/training_management
|
utils/field_utils.py
|
1
|
2138
|
class FieldUtils(object):
""" A utility class, which provides helper functions for managing fields of BaseModels. """
@staticmethod
def get_field_description(model_object, field_name):
result = model_object.fields_get([field_name])
field_description = result.get(field_name)
return field_description
@classmethod
def get_selection_label(cls, model_object, field_name, selection_value):
""" Returns the label for a given selection value of field ``field_name`` from model ``model_object``. """
field_description = cls.get_field_description(model_object, field_name)
selection_pairs = field_description.get('selection')
for pair in selection_pairs:
value = pair[0]
if value == selection_value:
label = pair[1]
return label
@classmethod
def is_valid_selection_value(cls, model_object, field_name, selection_value):
""" Checks, whether the given selection field ``field_name`` has a selection value ``selection_value``. """
field_description = cls.get_field_description(model_object, field_name)
selection_pairs = field_description.get("selection")
for pair in selection_pairs:
value = pair[0]
if value == selection_value:
return True
return False
@classmethod
def assert_selection_value(cls, model_object, field_name, selection_value):
""" Checks, if the given selection value is contained in the selection field or raises an exception. """
assert cls.is_valid_selection_value(model_object, field_name, selection_value), \
u"The value '{0}' is not contained in selection field '{1}'".format(selection_value, field_name)
@classmethod
def assert_and_get_selection_value(cls, model_object, field_name, selection_value):
""" Assert that ``selection_value`` is a valid value in selection field ``field_name`` and return
``selection_value``.
"""
cls.assert_selection_value(model_object, field_name, selection_value)
return selection_value
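# Hedged usage sketch (illustrative, not part of the original module). The record,
# field name and selection values below are assumptions about the calling model.
def _example_field_utils_usage(record):
    # Resolve the human-readable label shown for a selection value.
    label = FieldUtils.get_selection_label(record, 'state', 'draft')
    # Validate a value before writing it to the selection field.
    value = FieldUtils.assert_and_get_selection_value(record, 'state', 'done')
    return label, value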
|
gpl-3.0
| 4,628,970,539,293,727,000
| 47.613636
| 115
| 0.65435
| false
| 4.354379
| false
| false
| false
|