| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| digitalocean/netbox | netbox/users/migrations/0004_standardize_description.py | Python | apache-2.0 | 400 | 0 |
# Generated by Django 3.0.3 on 2020-03-13 20:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_token_permissions'),
]
operations = [
migrations.AlterField(
model_name='token',
name='description',
field=models.Cha
|
rField(blank=True, max_length=200),
|
),
]
|
| takkaneko/netengtools | fwiloprov.py | Python | mit | 6,508 | 0.006454 |
#!/usr/bin/env python3
# fwiloprov.py
import re
import getpass
import pexpect
from pexpect import EOF
from pexpect import TIMEOUT
from pexpect import ExceptionPexpect
from locationcode import Loccode
from networksid import NWdevice
from ipaddress import ip_address
from ipaddress import ip_network
from ipaddress import ip_interface
from resources import getUPA
from resources import getDevices
from resources import getVLAN
from resources import getDepth
from resources import devicePorts
def main():
print('WARNING: THIS OPTION SHOULD BE USED TO PROVISION A *SECNET* ILO SEGMENT')
print('(NOT COMMON - ONLY SOME ACCENTURE ACCOUNTS HAVE THIS)\n')
##### Prompts to enter username, password, and allocation code:
[username,password,alloccode] = getUPA()
##### Prompts to enter SIDs/speed/locations
[mfw,speed,mfwloc,ips,ipsloc] = getDevices('firewall')
print('OK\nNow let\'s define a SECNET iLO segment.\n')
###### STANDARD BACK SEGMENT
# 1. VLAN:
while True:
try:
iloVlan = getVLAN('SecNet iLO',[])
if 900 <= iloVlan <= 949:
break
else:
print('ERRO
|
R: ILO VLAN MUST BE BETWEEN 900 AND 949.\n')
except AttributeError:
print('ERROR: AttributeError.\n')
# 2. DEPTHCODE:
iloDepth = getDepth('firewall','9901',[])
# 3. ILO SUBNET:
if mfwloc.site == 'iad':
ilonet = ip_network('10.176.0.0/16')
else:
ilonet = ip_network('10.177.0.0/16')
# 4. FIREWALL INTERFACE:
while True:
try:
|
fw_interface = input('Enter the firewall interface for the iLO segment (e.g., eth1, s1p1, gi0/4, etc.): ').strip()
if fw_interface in devicePorts(mfw):
break
else:
print('ERROR: INVALID INTERFACE\n')
except AttributeError:
print('ERROR: INVALID INTERFACE\n')
# 5. SWITCHPORT NUMBER:
while True:
try:
swpt_number = int(input('Pick the first available port on mod 4 of '+mfwloc.findsecsw()+': '))
if 1 <= swpt_number <= 48:
break
else:
print('ERROR: NUMBER OUT OF RANGE\n')
except (AttributeError,ValueError):
print('ERROR: INVALID DATA\n')
# 6. VRRP IP:
while True:
try:
vrrp_ip = ip_address(input('Enter the IP address of the iLO gateway/VRRP interface (e.g., 10.176.128.9): '))
if vrrp_ip in ilonet:
break
else:
print('ERROR: '+str(vrrp_ip)+' does not belong to '+str(ilonet)+'.\n')
except ValueError:
print('ERROR: INVALID ADDRESS/NETMASK\n')
#############################################################################
print('\nThe rest will generate port configs, custom cabling info, allocation form, etc.\n')
# back up port configs
print('***************************************')
print('Collecting switchport backup configs...')
print('***************************************\n')
try:
child = pexpect.spawnu('telnet '+mfwloc.findsecsw()+'.dn.net')
child.expect('Username: ',timeout=3)
child.sendline(username)
child.expect('Password: ',timeout=3)
child.sendline(password)
child.expect('6513-'+mfwloc.findsecsw()+'-sec-c\d{1,2}#',timeout=3)
print(mfwloc.findsecsw()+':\n')
child.sendline('sh run int gi4/'+str(swpt_number))
child.expect('6513-'+mfwloc.findsecsw()+'-sec-c\d{1,2}#')
print(child.before)
child.sendline('exit')
except (EOF,TIMEOUT,ExceptionPexpect):
print('ERROR: Unable to collect switchport configs from '+mfwloc.findsecsw())
print('Try collecting configs manually instead:')
print()
print(' '+mfwloc.findsecsw()+':')
print(' sh run int gi4/'+str(swpt_number))
print()
input('Hit Enter to view the new switchport configs.')
print()
# new port configs
print('*************************************************')
print('Use the following to apply new switchport configs')
print('*************************************************\n')
swconf = 'telnet '+mfwloc.findsecsw()+'\n'
swconf += username+'\n'
swconf += password+'\n'
swconf += 'conf t\n'
swconf += 'int gi4/'+str(swpt_number)+'\n'
swconf += ' description '+alloccode+'-'+iloDepth+' '+mfw+' back\n'
swconf += ' switchport\n'
swconf += ' switchport access vlan '+str(iloVlan)+'\n'
swconf += ' switchport private-vlan host-association 11 '+str(iloVlan)+'\n'
swconf += ' switchport mode private-vlan host\n'
swconf += ' speed '+speed+'\n'
swconf += ' duplex full\n'
swconf += ' spanning-tree portfast edge\n'
swconf += ' no shut\n'
swconf += ' end\n'
print(swconf)
input('Hit Enter to view the custom cabling information')
print()
# cabling instructions
print('CUSTOM CABLING INFORMATION:')
print('---------------------------\n')
print()
print('iLO:')
print(' '+mfw+' '+fw_interface+' -> GREEN STRAIGHT -> '+mfwloc.findsecsw()+' gi4/'+str(swpt_number)+' (Direct run no patch)')
print()
input('Hit Enter to view the firewall allocation form')
print()
# Firewall allocation form
print('**Add the following to the firewall allocation section:\n\n')
HAdeviceForm = 'FIREWALL NETWORK INFORMATION:\n'
HAdeviceForm += '------------------------------\n\n'
HAdeviceForm += '**iLO (Network '+iloDepth+'):\n\n'
HAdeviceForm += ' Physical Interface: '+fw_interface+'\n\n'
HAdeviceForm += ' Back Interface: '+str(vrrp_ip)+' (gateway for servers)\n'
HAdeviceForm += ' Back Network: '+str(ilonet)+'\n'
HAdeviceForm += ' Back Netmask: '+str(ilonet.netmask)+'\n\n'
HAdeviceForm += ' Connection To: '+mfwloc.findsecsw()+'\n'
HAdeviceForm += ' Connection Port: gi4/'+str(swpt_number)+'\n\n'
HAdeviceForm += ' SwitchPort Speed/Duplex set to: '+speed+'M/Full\n'
HAdeviceForm += ' (Firewalls should be set to the same speed)\n'
HAdeviceForm += ' SecNet Community VLAN (Num/Label): '+str(iloVlan)+'/ilonet_'+alloccode+'\n\n'
print(HAdeviceForm)
if __name__ == '__main__':
main()
|
| tymofij/adofex | transifex/releases/migrations/0001_initial.py | Python | gpl-3.0 | 323 | 0.009288 |
fro
|
m south.db import db
from django.db import models
from transifex.releases.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
}
complete_apps = ['
|
releases']
|
| openatx/uiautomator2 | examples/runyaml/run.py | Python | mit | 3,918 | 0.002061 |
#!/usr/bin/env python3
# coding: utf-8
#
import re
import os
import time
import argparse
import yaml
import bunch
import uiautomator2 as u2
from logzero import logger
CLICK = "click"
# swipe
SWIPE_UP =
|
"swipe_up"
SWIPE_RIGHT = "swipe_right"
SWIPE_LEFT = "swipe_left"
SWIPE_DOWN = "swipe_down"
SCREENSHOT = "screenshot"
EXIST = "assert_exist"
W
|
AIT = "wait"
def split_step(text: str):
__alias = {
"点击": CLICK,
"上滑": SWIPE_UP,
"右滑": SWIPE_RIGHT,
"左滑": SWIPE_LEFT,
"下滑": SWIPE_DOWN,
"截图": SCREENSHOT,
"存在": EXIST,
"等待": WAIT,
}
for keyword in __alias.keys():
if text.startswith(keyword):
body = text[len(keyword):].strip()
return __alias.get(keyword, keyword), body
else:
raise RuntimeError("Step unable to parse", text)
def read_file_content(path: str, mode:str = "r") -> str:
with open(path, mode) as f:
return f.read()
def run_step(cf: bunch.Bunch, app: u2.Session, step: str):
logger.info("Step: %s", step)
oper, body = split_step(step)
logger.debug("parse as: %s %s", oper, body)
if oper == CLICK:
app.xpath(body).click()
elif oper == SWIPE_RIGHT:
app.xpath(body).swipe("right")
elif oper == SWIPE_UP:
app.xpath(body).swipe("up")
elif oper == SWIPE_LEFT:
app.xpath(body).swipe("left")
elif oper == SWIPE_DOWN:
app.xpath(body).swipe("down")
elif oper == SCREENSHOT:
output_dir = "./output"
filename = "screen-%d.jpg" % int(time.time()*1000)
if body:
filename = body
name_noext, ext = os.path.splitext(filename)
if ext.lower() not in ['.jpg', '.jpeg', '.png']:
ext = ".jpg"
os.makedirs(cf.output_directory, exist_ok=True)
filename = os.path.join(cf.output_directory, name_noext + ext)
logger.debug("Save screenshot: %s", filename)
app.screenshot().save(filename)
elif oper == EXIST:
assert app.xpath(body).wait(), body
elif oper == WAIT:
#if re.match("^[\d\.]+$")
if body.isdigit():
seconds = int(body)
logger.info("Sleep %d seconds", seconds)
time.sleep(seconds)
else:
app.xpath(body).wait()
else:
raise RuntimeError("Unhandled operation", oper)
def run_conf(d, conf_filename: str):
d.healthcheck()
d.xpath.when("允许").click()
d.xpath.watch_background(2.0)
cf = yaml.load(read_file_content(conf_filename), Loader=yaml.SafeLoader)
default = {
"output_directory": "output",
"action_before_delay": 0,
"action_after_delay": 0,
"skip_cleanup": False,
}
for k, v in default.items():
cf.setdefault(k, v)
cf = bunch.Bunch(cf)
print("Author:", cf.author)
print("Description:", cf.description)
print("Package:", cf.package)
logger.debug("action_delay: %.1f / %.1f", cf.action_before_delay, cf.action_after_delay)
app = d.session(cf.package)
for step in cf.steps:
time.sleep(cf.action_before_delay)
run_step(cf, app, step)
time.sleep(cf.action_after_delay)
if not cf.skip_cleanup:
app.close()
device = None
conf_filename = None
def test_entry():
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--command", help="run single step command")
parser.add_argument("-s", "--serial", help="run single step command")
parser.add_argument("conf_filename", default="test.yml", nargs="?", help="config filename")
args = parser.parse_args()
d = u2.connect(args.serial)
if args.command:
cf = bunch.Bunch({"output_directory": "output"})
app = d.session()
run_step(cf, app, args.command)
else:
run_conf(d, args.conf_filename)
|
| anhstudios/swganh | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_dress_formal_26.py | Python | mit | 462 | 0.047619 |
#
|
### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_dress_formal_26.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| |
| krisss2121/bpmn-python | bpmn_python/graph/classes/events/intermediate_catch_event_type.py | Python | gpl-3.0 | 478 | 0 |
# coding=utf-8
"""
Class used for representing tIntermediateCatchEvent of BPMN 2.0 graph
"""
import graph.classes.events.catch_event_type as catch_event
class IntermediateCatchEvent(catch_event.CatchEvent):
"""
Class used for representing tIntermedia
|
teCatchEvent of BPMN 2.0 graph
"""
def __init__(self):
"""
Default constructor, initializes
|
object fields with new instances.
"""
super(IntermediateCatchEvent, self).__init__()
|
| hfp/tensorflow-xsmm | tensorflow/contrib/optimizer_v2/rmsprop_test.py | Python | apache-2.0 | 19,033 | 0.010561 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.optimizer_v2 import rmsprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_DATA_TYPES = [dtypes.half, dtypes.float32]
_TEST_PARAM_VALUES = [
# learning_rate, decay, momentum, epsilon, centered, use_resource
[0.5, 0.9, 0.0, 1.0, True, False],
[0.5, 0.9, 0.0, 1.0, False, False],
[0.5, 0.9, 0.0, 1.0, True, True],
[0.5, 0.9, 0.0, 1.0, False, True],
[0.1, 0.9, 0.0, 1.0, True, False],
[0.5, 0.95, 0.0, 1.0, False, False],
[0.5, 0.8, 0.0, 1e-3, True, False],
[0.5, 0.8, 0.9, 1e-3, True, False],
]
class RMSPropOptimizerTest(test.TestCase, parameterized.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum,
centered):
rms_t = rms * decay + (1 - decay) * g * g
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, decay, momentum, centered):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * decay + (1 - decay) * gvalue * gvalue
denom_t = rms_t[gindex]
if centered:
mg_t[gindex] = mg_t[gindex] * decay + (1 - decay) * gvalue
denom_t -= mg_t[gindex] * mg_t[gindex]
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
var_t[gindex] = var[gindex] - mom_t[gindex]
return var_t, mg_t, rms_t, mom_t
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
dtype=_DATA_TYPES, param_value=_TEST_PARAM_VALUES))
def testDense(self, dtype, param_value):
(learning_rate, decay, momentum, epsilon, centered, use_resource) = tuple(
param_value)
with self.session(use_gpu=True):
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = rmsprop.RMSPropOptimizer(
learning_rate=learning_rate,
decay=decay,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
mom0 = opt.get_slot(var0, "momentum")
self.assertIsNotNone(mom0)
mom1 = opt.get_slot(var1, "momentum")
self.assertIsNotNone(mom1)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 4 steps of RMSProp
for _ in range(4):
update.run()
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
decay, momentum, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
decay, momentum, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
# TODO(b/117393988): Reduce tolerances for float16.
self.assertAllCloseAccordingToType(
var0_np, var0.eval(), half_rtol=3e-3, half_atol=3e-3)
self.assertAllCloseAccordingToType(
var1_np, var1.eval(), half_rtol=3e-3, half_atol=3e-3)
@parameterized.parameters([dtypes.float32, dtypes.float64])
def testMinimizeSparseResourceVariable(self, dtype):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.0,
momentum=0.0,
epsilon=0.0,
centered=False).minimize(loss
|
)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.a
|
ssertAllCloseAccordingToType(
[[0., 1.]], var0.eval(), atol=0.01)
@parameterized.parameters([dtypes.float32, dtypes.float64])
def testMinimizeSparseResourceVariableCentered(self, dtype):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.1,
momentum=0.0,
epsilon=1.0,
centered=True).minimize(loss)
variables.global_variables_initializer().run()
# Fe
|
| hall1467/wikidata_usage_tracking | python_analysis_scripts/longitudinal_misalignment/calculate_intermediate_sums.py | Python | mit | 2,652 | 0.006033 |
"""
Post processing (subset of columns) to calculate intermediate sum edit counts
and other variables. Date sorted.
Usage:
calculate_intermediate_sums (-h|--help)
calculate_intermediate_sums <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
|
<input> Path to file to process.
<output> Wh
|
ere revisions results
will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import sys
import logging
import operator
from collections import defaultdict
import mysqltsv
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_file = mysqltsv.Reader(open(args['<input>'], "r"), headers=True,
types=[int, float, int, int, int, int])
output_file = mysqltsv.Writer(open(args['<output>'], "w"), headers=[
'yyyymm', 'aligned_entities', 'difference_in_alignment_with_previous',
'bot_edits', 'semi_automated_edits', 'non_bot_edits', 'anon_edits',
'current_bot_edits_count', 'current_semi_automated_edits_count',
'current_non_bot_edits_count', 'current_anon_edits_count'])
verbose = args['--verbose']
run(input_file, output_file, verbose)
def run(input_file, output_file, verbose):
current_bot_edits_count = 0
semi_automated_edits_count = 0
non_bot_edits_count = 0
anon_edits_count = 0
all_edits_count = 0
previous_alignment = 0
for i, line in enumerate(input_file):
current_bot_edits_count += line['bot_edits']
semi_automated_edits_count += line['semi_automated_edits']
non_bot_edits_count += line['non_bot_edits']
anon_edits_count += line['anon_edits']
output_file.write([line['yyyymm'],
line['aligned_entities'],
line['aligned_entities'] - previous_alignment,
line['bot_edits'],
line['semi_automated_edits'],
line['non_bot_edits'],
line['anon_edits'],
current_bot_edits_count,
semi_automated_edits_count,
non_bot_edits_count,
anon_edits_count])
previous_alignment = line['aligned_entities']
main()
|
| sandialabs/BioCompoundML | bcml/Parser/build_training.py | Python | bsd-3-clause | 6,250 | 0.00048 |
"""
This process takes in the training dataset and outputs
a data structure that includes the name of the molecule,
the predictor, and the CAS number
Attributes:
input_file (str): This is the training file that
is read by the output
Instance (class): This is a private class which
structures each instance
Model (class): This is a public class with the
total structure of the set
"""
from __future__ import print_function
import numpy as np
import warnings
import sys
from sklearn.preprocessing import Imputer
from Boruta import boruta_py
from sklearn.ensemble import RandomForestClassifier
from KNNImpute.knnimpute import (
knn_impute_optimistic,
)
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
def dictitems(dict):
if sys.version_info[0] >= 3:
return dict.items()
else:
return dict.iteritems()
def verbose_print(verbose, line):
if verbose:
print(line)
_possible_features = ('experimentalhash', 'binhash', 'padelhash', 'userhash')
def _convert_predictor(predictors, split_value):
""""This function discretizes the predictors. This is currently
set to handle a binary class, future versions will handle multi-class
predictors"""
preds = np.zeros(len(predictors), dtype=int)
for i, value in np.ndenumerate(predictors):
if value >= split_value:
preds[i] = 1
return preds
def _get_feature_names(compounds):
"""This function handles collecting the feature names"""
feature_names = {}
for compound in compounds:
for feature in sorted(_possible_features):
if feature in compound.keys():
keys = sorted(compound[feature].keys())
for feat in keys:
feature_names[feat] = 1
compound[feat] = compound[feature][feat]
return (compounds, sorted(feature_names.keys()))
class Process(object):
"""This file reads a training file"""
def _load_training_set(self):
"""This function takes the features and
compounds and loads them into a numpy array
"""
for index, value in np.ndenumerate(self.train):
compound = self.compounds[index[0]]
feature = list(self.feature_names)[index[1]]
if (feature in compound.keys()) and (compound[feature] != "")\
and (compound[feature] != "NULL")\
and (compound[feature] != "False"):
self.train[index] = float(compound[feature])
else:
self.train[index] = np.nan
def feature_selection(self, verbose, seed=False):
"""This function runs Boruta feature selection to remove
unimportant features from the data"""
if verbose:
verbose = 2
'''Boruta cannot handle missing values. Either run impute_values
before feature_selection, or the following function runs mean
imputation prior to running Boruta'''
if np.any(np.isnan(self.train)):
warnings.warn('RandomF
|
orestClassifier requires no missing data,\
features being imputed by mean')
X = self.train
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(X)
|
self.train = imp.transform(X)
rf = RandomForestClassifier(n_jobs=-1, oob_score=True, max_depth=5)
feat_selector = boruta_py.BorutaPy(rf, n_estimators='auto',
verbose=verbose, seed=seed)
feat_selector.fit(self.train, self.predictors)
self.feature_support = feat_selector.support_
filtered_names = [i for indx, i in enumerate(self.feature_names) if self.feature_support[indx]]
self.feature_names = filtered_names
self.train = feat_selector.transform(self.train)
def impute_values(self, distance, verbose, k=5):
"""This function handles the missing values from
the training set and estimates their value, based on
the mean and reloads them into the training set"""
verbose_print(verbose, 'Imputing using KNN strategy')
X = self.train
missing_mask = np.isnan(X)
'''First impute using knn optimistic'''
impute = knn_impute_optimistic(X, missing_mask=missing_mask,
distance=distance, k=k)
X = impute.astype(float)
'''For features with a small number of features, use mean
imputation to remove NaN values'''
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(X)
self.train = imp.transform(X)
def __init__(self, model_input, split_value=False, verbose=False):
"""This initialization function handles the heavy
work of loading the features and processing the
compounds"""
self.input = model_input
compounds = []
predictors = []
weights = []
self.input.compound = OrderedDict(sorted(self.input.compound.items(), key=lambda t: t[0]))
for id, compound in dictitems(self.input.compound):
compounds.append(self.input.compound[id])
predictors.append(self.input.compound[id]['predictor'])
weights.append(self.input.compound[id]['weight'])
predictor_values = np.array(predictors, '|S4').astype(np.float)
weight_values = np.array(weights, '|S4').astype(np.float)
self.weights = weight_values
if split_value:
self.predictors = _convert_predictor(predictor_values, split_value)
else:
print_line = "Splitting at " + str(np.median(predictor_values))
verbose_print(verbose, print_line)
self.predictors = _convert_predictor(predictor_values,
np.median(predictor_values))
self.rows = len(self.predictors)
self.compounds, self.feature_names = _get_feature_names(compounds)
self.columns = len(self.feature_names)
'''Initialize the training array'''
self.train = np.zeros((self.rows, self.columns,), dtype=np.float64)
'''Load the training array'''
self._load_training_set()
|
| UdK-VPT/Open_eQuarter | mole3/stat_corr/window_wall_west_ratio_MFH_by_building_age_correlation.py | Python | gpl-2.0 | 570 | 0.036842 |
# OeQ autogenerated correlation for 'Window/Wall Ratio West in Correlation to
|
the Building Age'
import math
import numpy as np
from . import oeqCorrelation as oeq
def window_wall_west_ratio_MFH_by_building_age_correlation(*xin):
# OeQ autogenerated correlation for 'Window to Wall Ratio in Western Direction'
A_WIN_W_BY_AW= oeq.correlation(
const= -37846.911859,
a= 7
|
7.3456608212,
b= -0.0592490161432,
c= 2.01631207341e-05,
d= -2.57207834473e-09,
mode= "lin")
return dict(A_WIN_W_BY_AW=A_WIN_W_BY_AW.lookup(*xin))
|
| samuel/kokki | kokki/cookbooks/nginx/libraries/sites.py | Python | bsd-3-clause | 535 | 0.013084 |
from os.path import exists
from k
|
okki import Environment, Execute
def site(name, enable=True):
env = Environment.get_instance()
if enable:
cmd = 'nxensite'
else:
cmd = 'nxdissite'
def _not_if():
e = exists("%s/sites-enabled/%s" % (env.config.nginx.dir, name))
return e if enable else not e
Execute("%s %s" % (cmd, name),
command = "/usr/sbin/%s %s" % (cmd, name),
n
|
otifies = [("reload", env.resources["Service"]["nginx"])],
not_if = _not_if)
|
| govarguz/espressopp | src/esutil/RNG.py | Python | gpl-3.0 | 1,767 | 0.006791 |
# Copyright (C) 2012,2013,2019
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redis
|
tribute it and/or modify
# it under the terms of the GNU G
|
eneral Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************
espressopp.esutil.RNG
*********************
"""
from espressopp import pmi
from _espressopp import esutil_RNG
class RNGLocal(esutil_RNG):
pass
# def gamma(self, a=None):
# if pmi._PMIComm and pmi._PMIComm.isActive():
# if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
# if a==None:
# return self.cxxclass.gammaArg(self, 1)
# else:
# return self.cxxclass.gammaArg(self, a)
# else :
# pass
if pmi.isController:
class RNG(object):
__metaclass__ = pmi.Proxy
'Random number generator.'
pmiproxydefs = dict(
cls = 'espressopp.esutil.RNGLocal',
localcall = [ '__call__', 'normal', 'gamma', 'uniformOnSphere' ],
pmicall = [ 'seed', 'get_seed', 'saveState', 'loadState' ]
)
|
| Zlash65/erpnext | erpnext/accounts/report/profitability_analysis/profitability_analysis.py | Python | gpl-3.0 | 5,970 | 0.031658 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, getdate, formatdate, cstr
from erpnext.accounts.report.financial_statements import filter_accounts, filter_out_zero_value_rows
from erpnext.accounts.report.trial_balance.trial_balance import validate_filters
value_fields = ("income", "expense", "gross_profit_loss")
def execute(filters=None):
if not filters.get('based_on'): filters["based_on"] = 'Cost Center'
based_on = filters.based_on.replace(' ', '_').lower()
validate_filters(filters)
accounts = get_accounts_data(based_on, filters.get("company"))
data = get_data(accounts, filters, based_on)
columns = get_columns(filters)
return columns, data
def get_accounts_data(based_on, company):
if based_on == 'cost_center':
return frappe.db.sql("""select name, parent_cost_center as parent_account, cost_center_name as account_name, lft, rgt
from `tabCost Center` where company=%s order by name""", company, as_dict=True)
elif based_on == 'project':
return frappe.get_all('Project', fields = ["name"], filters = {'company': company}, order_by = 'name')
else:
filters = {}
doctype = frappe.unscrub(based_on)
has_company = frappe.db.has_column(doctype, 'company')
if has_company:
filters.update({'company': company})
return frappe.get_all(doctype, fields = ["name"], filters = filters, order_by = 'name')
def get_data(accounts, filters, based_on):
if not accounts:
return []
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
gl_entries_by_account = {}
set_gl_entries_by_account(filters.get("company"), filters.get("from_date"),
filters.get("to_date"), based_on, gl_entries_by_account, ignore_closing_entries=not flt(filters.get("with_period_closing_entry"
|
)))
total_row = calculate_values(accounts, gl_entries_by_account, filters)
accumulate_values_into_parents(accounts, accounts_by_name)
data = prepare_data(accounts, filters, total_row, parent_children_map, based_on)
data = filter_out_zero_value_rows(data, parent_children_map,
show_zero_values=filters.get("show_zero_values"))
|
return data
def calculate_values(accounts, gl_entries_by_account, filters):
init = {
"income": 0.0,
"expense": 0.0,
"gross_profit_loss": 0.0
}
total_row = {
"cost_center": None,
"account_name": "'" + _("Total") + "'",
"warn_if_negative": True,
"income": 0.0,
"expense": 0.0,
"gross_profit_loss": 0.0,
"account": "'" + _("Total") + "'",
"parent_account": None,
"indent": 0,
"has_value": True
}
for d in accounts:
d.update(init.copy())
# add opening
for entry in gl_entries_by_account.get(d.name, []):
if cstr(entry.is_opening) != "Yes":
if entry.type == 'Income':
d["income"] += flt(entry.credit) - flt(entry.debit)
if entry.type == 'Expense':
d["expense"] += flt(entry.debit) - flt(entry.credit)
d["gross_profit_loss"] = d.get("income") - d.get("expense")
total_row["income"] += d["income"]
total_row["expense"] += d["expense"]
total_row["gross_profit_loss"] = total_row.get("income") - total_row.get("expense")
return total_row
def accumulate_values_into_parents(accounts, accounts_by_name):
for d in reversed(accounts):
if d.parent_account:
for key in value_fields:
accounts_by_name[d.parent_account][key] += d[key]
def prepare_data(accounts, filters, total_row, parent_children_map, based_on):
data = []
company_currency = frappe.get_cached_value('Company', filters.get("company"), "default_currency")
for d in accounts:
has_value = False
row = {
"account_name": d.account_name or d.name,
"account": d.name,
"parent_account": d.parent_account,
"indent": d.indent,
"fiscal_year": filters.get("fiscal_year"),
"currency": company_currency,
"based_on": based_on
}
for key in value_fields:
row[key] = flt(d.get(key, 0.0), 3)
if abs(row[key]) >= 0.005:
# ignore zero values
has_value = True
row["has_value"] = has_value
data.append(row)
data.extend([{},total_row])
return data
def get_columns(filters):
return [
{
"fieldname": "account",
"label": _(filters.get("based_on")),
"fieldtype": "Link",
"options": filters.get("based_on"),
"width": 300
},
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
},
{
"fieldname": "income",
"label": _("Income"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "expense",
"label": _("Expense"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "gross_profit_loss",
"label": _("Gross Profit / Loss"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
}
]
def set_gl_entries_by_account(company, from_date, to_date, based_on, gl_entries_by_account,
ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("and posting_date >= %(from_date)s")
gl_entries = frappe.db.sql("""select posting_date, {based_on} as based_on, debit, credit,
is_opening, (select root_type from `tabAccount` where name = account) as type
from `tabGL Entry` where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and {based_on} is not null
order by {based_on}, posting_date""".format(additional_conditions="\n".join(additional_conditions), based_on= based_on),
{
"company": company,
"from_date": from_date,
"to_date": to_date
},
as_dict=True)
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.based_on, []).append(entry)
return gl_entries_by_account
|
| adregner/HAppy | happy/shell.py | Python | mit | 1,151 | 0.013901 |
# HAppy
import sys
import logging
from optparse import OptionParser
logger = logging.getLogger(__name__)
SUB_COMMANDS = [
'daemon',
'takeover',
'release',
'status',
]
def parse_args(argv):
if len(argv) > 0 and argv[0] in SUB_COMMANDS:
subcommand = argv.pop(0)
else:
subcommand = 'daemon'
parser = OptionParser()
parser.add_option('-f', '--foreground', dest='foreground', default=False, action='store_true',
help = "Don't daemonize by forking into the background.")
parser.a
|
dd_option('-l', '--level', dest='log_level', default='warn',
help = "Set logging level (debug, info, warn, error) Default: warn")
parser.add_option('-c', '--config', dest='config', default='/etc/happy.conf'
|
,
help = "Path to HAppy configuration file. Default: /etc/happy.conf")
options, args = parser.parse_args()
options.subcommand = subcommand
return options
def main():
options = parse_args(sys.argv[1:])
import happy
prog = happy.HAppy(options)
getattr(prog, options.subcommand)()
if __name__ == '__main__':
main()
|
| andresriancho/dotcloud-cli | dotcloud/ui/colors.py | Python | mit | 2,257 | 0 |
"""
dotcloud.ui.colors - Pythonic wrapper around colorama
Usage:
colors = Colors()
# Format string inlining
print '{c.green}->{c.reset} Hello world!'.format(c=colors)
# Call
print colors.blue('Hello world!')
# Wrapper
with colors.red:
print 'Hello world'
"""
import sys
import colorama
colorama.init()
class Colors(object):
def __init__(self, disable_colors=None):
""" Initialize Colors
disable_colors can be either:
* True: Disable colors. Useful to disable colors dynamically
* None: Automatic colors. Colors will be enabled unless stdin is
not a tty (for instance if piped to another program).
* False: Force enable colors, even if not running on a pty.
"""
self.disable_colors = disable_colors
if self.disable_colors is None:
self.disable_colors = False if sys.stdout.isatty() else True
def __getattr__(self, color):
if self.disable_colors:
return Color(None)
color = color.upper()
if color in ['DIM', 'BRIGHT']:
return getattr(colorama.Style, color.upper())
if color == 'RESET':
return colorama.Style.RESET_ALL
return Color(color)
class Color(object):
def __init__(self, color):
self.color = self._lookup_color(color)
def _lookup_color(self, color):
""" Lookup color by na
|
me """
if color is None:
return None
if not hasattr(colorama.Fore, color.upper()):
raise KeyError('Unknown color "{0}"'.format(color))
return getattr(colorama.Fore, color.upper())
def __enter__(self):
if self.color is not None:
|
sys.stdout.write(self.color)
def __exit__(self, type, value, traceback):
if self.color is not None:
sys.stdout.write(colorama.Style.RESET_ALL)
def __str__(self):
if self.color is None:
return ''
return self.color
def __call__(self, text):
if self.color is None:
return text
return '{color}{text}{reset}'.format(
color=self.color,
text=text,
reset=colorama.Style.RESET_ALL
)
|
| nathanielvarona/airflow | airflow/contrib/operators/qubole_operator.py | Python | apache-2.0 | 1,158 | 0.001727 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitat
|
ions
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.qubole.operators.qubole`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.qubole.operators.qubole import QuboleOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airf
|
low.providers.qubole.operators.qubole`.",
DeprecationWarning,
stacklevel=2,
)
|
| pencilcheck/pttbbs-py | src/pttbbs/screenlets.py | Python | mit | 33,791 | 0.004458 |
# -*- encoding: UTF-8 -*-
import struct
from time import localtime
import itertools
import codecs
import gevent
from copy import copy
from copy import deepcopy
import screen
from screen import ForegroundColors, BackgroundColors
from screen import Align
from utility import Dimension
from string import lowercase
return_key = ['\r', '\r\x00']
backspace_key = ['\x7f']
arrow_up_key = ['\x1bOA', '\x1b[A']
arrow_down_key = ['\x1bOB', '\x1b[B']
arrow_right_key = ['\x1bOC', '\x1b[C']
arrow_left_key = ['\x1bOD', '\x1b[D']
tab_key = ['\t']
ctrl_x = ['\x18']
shift_key = '' # it doesn't send
delete_key = ['\x1b[3~']
clr_scr = 1
normal = 0
yes = 'Y'
no = 'N'
def isKey(input, key):
for k in key:
if k == input:
return True
return False
def isSingleByte(data):
if '\x1f' < data < '\x7f':
return True
else:
return False
def isDoubleByte(data):
if '\x4e\x00' < data < '\x9f\xff' or '\x81\x40' < data < '\xfe\xff' or '\xa1\x40' < data < '\xf9\xff':
return True
else:
return False
class screenlet(object):
def __init__(self, routine, dimension):
self.routine = routine
self.dimension = dimension
self.controls = []
self.focusIndex = None
def handleData(self, data):
return normal
def add(self, control):
self.controls.append(control)
def focusedControl(self):
return self.cont
|
rols[self.focusIndex] if self.focusIndex != None else None
def setFocusedControl(self, control):
for i, item in enumerate(self.controls):
item.focused = False
if item == control:
self.focusIndex = i
item.focused = True
return
def update(self, data=''):
for i, item in enumerate(self.controls):
item.update(data if i == self.focusIndex else '')
if not self.focusIndex:
|
for item in self.controls:
item.update(data)
return self.handleData(data)
def draw(self, force=False):
self.buffer = ""
for item in self.controls:
if item.redrawn and not item.visible:
self.buffer += screen.fillBlank(item.dimension)
item.redrawn = False
for item in self.controls:
if (force or item.redrawn) and item.visible and item.minACL <= self.routine.acl:
self.buffer += item.draw()
item.redrawn = False
focus = ''
if self.focusedControl():
focus = screen.move_cursor(self.focusedControl().focusLine, self.focusedControl().focusColn)
else:
focus = screen.move_cursor(self.routine.height+1, self.routine.width+1)
return self.buffer + focus
class control(object):
def __init__(self, routine, dimension, **kwargs):
self.routine = routine
self.dimension = dimension
self.focusLine = kwargs['focusLine'] if 'focusLine' in kwargs else self.dimension.line
self.focusColn = kwargs['focusColn'] if 'focusColn' in kwargs else self.dimension.coln
self.visible = kwargs['visible'] if 'visible' in kwargs else True
self.minACL = kwargs['minACL'] if 'minACL' in kwargs else 0
self.redrawn = False
self.focused = False
self.buffer = ""
def setVisibility(self, flag):
self.redrawn = True if not flag == self.visible else self.redrawn
self.visible = flag
def update(self, data=''):
pass # set redrawn flag if needed
def draw(self):
return self.buffer
class selectionMenu(control):
def __init__(self, routine, dimension, menu, **kwargs):
super(selectionMenu, self).__init__(routine, dimension, **kwargs)
# menu is a list of tuples with the form (id, title) id = -1 if it is not relevant
self.menu = menu
self.change = [1]*len(self.menu)
self.cursor = 0
self.offset = 0
def index(self):
return self.cursor + self.offset
def redraw(self):
self.change = [1]*len(self.menu)
def update(self, data=''):
if len(self.menu) == 0: # don't need to update if the list is empty
return
print "selectionMenu update"
ind = self.index()
self.change[self.index()] = 1
# self.cursor can only go from 0 to self.height
if isKey(data, arrow_up_key):
if self.cursor == 0:
if self.offset > 0:
self.offset -= 1
self.redraw()
else:
self.cursor -= 1
self.redrawn = True
elif isKey(data, arrow_down_key):
if self.cursor == self.dimension.height-1:
if self.offset + self.dimension.height < len(self.menu)-1:
self.offset += 1
self.redraw()
else:
if self.offset + self.cursor < len(self.menu)-1:
self.cursor += 1
self.redrawn = True
if self.index() == ind:
self.change[self.index()] = 0
else:
self.change[self.index()] = 1
def draw(self):
ind = 0
self.buffer = ""
if any(self.change[self.offset:self.offset+self.dimension.height]):
for i, item in enumerate(self.menu):
line = item[1]
if i >= self.offset and ind < self.dimension.height:
#print i, self.offset, ind, self.dimension.height
if self.change[i]:
if self.cursor == ind:
self.buffer = self.buffer + screen.puts(self.dimension.line + ind, self.dimension.coln, line, self.dimension.width, Align.Left, fg=ForegroundColors.White, bg=BackgroundColors.Yellow)
else:
self.buffer = self.buffer + screen.puts(self.dimension.line + ind, self.dimension.coln, line, self.dimension.width, Align.Left)
self.change[i] = 0
ind = ind + 1
return self.buffer
class scrollableMenu(control):
def __init__(self, routine, dimension, menu, **kwargs):
super(scrollableMenu, self).__init__(routine, dimension, **kwargs)
self.menu = menu
self.offset = 0
self.change = [1]*len(self.menu)
def index(self):
return self.offset
def update(self, data=''):
if isKey(data, arrow_up_key):
if self.offset > 0:
self.offset -= 1
self.change = self.change[:self.offset] + [1]*self.dimension.height + self.change[self.offset+self.dimension.height+1:]
self.redrawn = True
elif isKey(data, arrow_down_key):
if self.offset + self.dimension.height < len(self.menu)-1:
self.offset += 1
self.change = self.change[:self.offset] + [1]*self.dimension.height + self.change[self.offset+self.dimension.height+1:]
self.redrawn = True
def draw(self, force=False):
ind = 0
for i, line in enumerate(self.menu):
if i >= self.offset and ind < self.dimension.height:
if self.change[i]:
self.buffer = self.buffer + screen.puts(self.dimension.line + ind, self.dimension.coln, line.strip())
self.change[i] = 0
ind = ind + 1
return self.buffer
class art(control):
def __init__(self, routine, dimension, file, **kwargs):
super(art, self).__init__(routine, dimension, **kwargs)
self.buffer = screen.move_cursor(self.dimension.line, self.dimension.coln)
self.file = file
for i, line in enumerate(open(self.file)):
if i < self.dimension.height:
self.buffer = self.buffer + line[:self.dimension.width] + screen.move_cursor_down(1) + screen.move_cursor_left(self.dimension.width)
class label(control):
def __init__(self, routine, dimension, msg, length=None, align=Align.Left, **kwargs):
super(label, self).__init__(routine, dimension, **kwargs)
"display a label"
self.data = msg
self.buffer = scr
|
| OSUmageed/1DSweptCUDA | ResultPlots/ConferencePaper/Parsed/plotit.py | Python | mit | 4,498 | 0.028902 |
import numpy as np
import os
import sys
import os.path as op
import matplotlib as mpl
import matplotlib.pyplot as plt
import palettable.colorbrewer as pal
from datetime import datetime
from cycler import cycler
#plt.rc('axes', prop_cycle=cycler('color', pal.qualitative.Dark2_8.mpl_colors)+
# cycler('marker',['D','o','v','*','^','x','h','8']))
mpl.rcParams['lines.markersize'] = 10
mpl.rcParams['lines.linewidth'] = 3
thispath = op.abspath(op.dirname(__file__))
mpi = np.genfromtxt('MPICompare.txt')
heat = np.genfromtxt('HeatComplete.txt')
KSdiv = np.genfromtxt('Divides.txt')
KSall = np.genfromtxt('KSComplete.txt')
ylbl = "Time per timestep (us)"
xlbl = "Number of spatial points"
#mpi
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8))
plt.suptitle("MPI and GPU performance",fontsize='large', fontweight="bold")
mpiLabels = ['MPIClassic', 'MPISwept', 'GPUClassic', 'GPUShared']
for i,mp in enumerate(mpiLabels):
ax1.loglog(mpi[:,0],mpi[:,i+1])
ax1.hold(True)
ax2.semilogx(mpi[:,0],mpi[:,-2],mpi[:,0],mpi[:,-1])
ax1.hold(True)
ax1.legend(mpiLabels, loc='upper left', fontsize='medium')
ax2.legend(["Classic", "Shared"], loc='upper left', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
ax1.set_ylabel(ylbl)
ax2.set_ylabel("Speedup vs MPI")
ax1.set_xlabel(xlbl)
ax2.set_xlabel(xlbl)
plotfile = op.join(thispath,"mpiPlot.pdf")
ax1.set_xlim([heat[0,0],heat[-1,0]])
ax2.set_xlim([heat[0,0],heat[-1,0]])
fig.subplots_adjust(bottom=0.08, right=0.92, top=0.92)
plt.savefig(plotfile, bbox_inches='tight')
#KSdiv
divs = ["Divide","Multiply"]
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8), sharey=True)
plt.suptitle("Improvement to KS from division avoidance",fontsize='large', fontweight="bold")
ax1.loglog(KSdiv[:,0],KSdiv[:,1], KSdiv[:,0], KSdiv[:,2])
ax1.set_title("Double Precision")
ax2.loglog(KSdiv[:,0],KSdiv[:,3], KSdiv[:,0], KSdiv[:,4])
ax2.set_title("Single Precision")
ax1.set_ylabel(ylbl)
ax1.set_xlabel(xlbl)
ax2.set_xlabel(xlbl)
ax1.set_xlim([heat[0,0],heat[-1,0]])
plt.legend(divs, loc='upper left', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
plotfile = op.join(thispath,"divisionPlot.pdf")
ax2.set_xlim([heat[0,0],heat[-1,0]])
plt.savefig(plotfile, bbox_inches='tight')
#hand, lbl = ax.get_legend_handles_labels()
#Heat complete
prec = ["Double", "Single"]
ksorder = mpiLabels[2:]
heatorder = ['Classic', 'GPUShared', 'Hybrid']
ho=[prec[0]+" "+rd for rd in heatorder]+[prec[1]+" "+rd for rd in heatorder]
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8))
plt.suptitle("Heat",fontsize='large', fontweight="bold")
ax1.loglog(heat[:,0],heat[:,1], heat[:,0], heat[:,2], heat[:,0], heat[:,3])
ax1.hold(True)
ax1.loglog(heat[:,0],heat[:,6], heat[:,0], heat[:,7], heat[:,0], heat[:,8])
ax1.leg
|
end(ho, loc='up
|
per left', fontsize='medium')
ax1.set_ylabel(ylbl)
ax1.set_xlabel(xlbl)
ax1.set_xlim([heat[0,0],heat[-1,0]])
ho.pop(3)
ho.pop(0)
ax2.semilogx(heat[:,0],heat[:,4], heat[:,0], heat[:,5])
ax2.hold(True)
ax2.semilogx(heat[:,0],heat[:,9], heat[:,0], heat[:,10])
ax2.legend(ho, loc='upper right', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
ax2.set_xlabel(xlbl)
ax2.set_ylabel("Speedup vs Classic")
fig.tight_layout(pad=0.2, w_pad=0.75, h_pad=1.5)
fig.subplots_adjust(bottom=0.08, right=0.92, top=0.92)
plotfile = op.join(thispath,"heatComplete.pdf")
ax2.set_xlim([heat[0,0],heat[-1,0]])
plt.savefig(plotfile, bbox_inches='tight')
reg = ["Register"]
ksorder += reg
#KS complete
ko=[prec[0]+" "+ rd for rd in ksorder]+[prec[1]+" "+ rd for rd in ksorder]
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8))
plt.suptitle("KS",fontsize='large', fontweight="bold")
ax1.loglog(KSall[:,0],KSall[:,1], KSall[:,0], KSall[:,2], KSall[:,0], KSall[:,3])
ax1.hold(True)
ax1.loglog(KSall[:,0],KSall[:,6], KSall[:,0], KSall[:,7], KSall[:,0], KSall[:,8])
ax1.legend(ko, loc='upper left', fontsize='medium')
ax1.set_ylabel(ylbl)
ax1.set_xlabel(xlbl)
ax1.set_xlim([heat[0,0],heat[-1,0]])
ko.pop(3)
ko.pop(0)
ax2.semilogx(KSall[:,0],KSall[:,4], KSall[:,0], KSall[:,5])
ax2.hold(True)
ax2.semilogx(KSall[:,0],KSall[:,9], KSall[:,0], KSall[:,10])
ax2.legend(ko, loc='upper right', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
ax2.set_xlabel(xlbl)
ax2.set_ylabel("Speedup vs Classic")
fig.tight_layout(pad=0.2, w_pad=0.75, h_pad=1.0)
fig.subplots_adjust(bottom=0.08, right=0.92, top=0.92)
plotfile = op.join(thispath,"KSallComplete.pdf")
ax2.set_xlim([heat[0,0],heat[-1,0]])
plt.savefig(plotfile, bbox_inches='tight')
|
| nathanhi/deepserve | deepserve/fileupload/views.py | Python | mit | 231 | 0 |
# -*- coding: utf-8 -*-
from rest_framework import viewsets
from . import serializers, models
class FileViewSet(viewsets.ModelViewSe
|
t):
queryset = models.File.objects.all()
serialize
|
r_class = serializers.FileSerializer
|
| UK992/servo | tests/wpt/web-platform-tests/webdriver/tests/support/authentication.py | Python | mpl-2.0 | 800 | 0 |
import urllib
def basic_authentication(username=None, password=None, protocol="http"):
from .fixtures
|
import server_config, url
build_url = url(server_config())
query = {}
|
return build_url("/webdriver/tests/support/authentication.py",
query=urllib.urlencode(query),
protocol=protocol)
def main(request, response):
user = request.auth.username
password = request.auth.password
if user == "user" and password == "password":
return "Authentication done"
realm = "test"
if "realm" in request.GET:
realm = request.GET.first("realm")
return ((401, "Unauthorized"),
[("WWW-Authenticate", 'Basic realm="' + realm + '"')],
"Please login with credentials 'user' and 'password'")
|
| dims/cinder | cinder/image/glance.py | Python | apache-2.0 | 23,309 | 0 |
# Copyright 2010 OpenStack Foundation
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
import copy
import itertools
import random
import shutil
import sys
import time
import glanceclient.exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from six.moves import range
from six.moves import urllib
from cinder import exception
from cinder.i18n import _LE, _LW
glance_opts = [
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
]
glance_core_properties_opts = [
cfg.ListOpt('glance_core_properties',
default=['checksum', 'container_format',
'disk_format', 'image_name', 'image_id',
'min_disk', 'min_ram', 'name', 'size'],
help='Default core properties of image')
]
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.register_opts(glance_core_properties_opts)
CONF.import_opt('glance_api_version', 'cinder.common.config')
LOG = logging.getLogger(__name__)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, netloc, use_ssl)
:raises ValueError
"""
url = urllib.parse.urlparse(image_href)
netloc = url.netloc
image_id = url.path.split('/')[-1]
use_ssl = (url.scheme == 'https')
return (image_id, netloc, use_ssl)
def _create_glance_client(context, netloc, use_ssl, version=None):
|
"""Instantiate a new glanceclient.Client object."""
if version is None:
version = CONF.glance_api_version
params = {}
if use_ssl:
scheme = 'https'
# https specific params
params['insecure'] = CONF.glance_api_insecure
params['ssl_com
|
pression'] = CONF.glance_api_ssl_compression
params['cacert'] = CONF.glance_ca_certificates_file
else:
scheme = 'http'
if CONF.auth_strategy == 'keystone':
params['token'] = context.auth_token
if CONF.glance_request_timeout is not None:
params['timeout'] = CONF.glance_request_timeout
endpoint = '%s://%s' % (scheme, netloc)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
"""Return Iterable over shuffled api servers.
Shuffle a list of CONF.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
for api_server in CONF.glance_api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
url = urllib.parse.urlparse(api_server)
netloc = url.netloc
use_ssl = (url.scheme == 'https')
api_servers.append((netloc, use_ssl))
random.shuffle(api_servers)
return itertools.cycle(api_servers)
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, netloc=None, use_ssl=False,
version=None):
if netloc is not None:
self.client = self._create_static_client(context,
netloc,
use_ssl, version)
else:
self.client = None
self.api_servers = None
self.version = version
if CONF.glance_num_retries < 0:
LOG.warning(_LW(
"glance_num_retries shouldn't be a negative value. "
"The number of retries will be set to 0 until this is"
"corrected in the cinder.conf."))
CONF.set_override('glance_num_retries', 0)
def _create_static_client(self, context, netloc, use_ssl, version):
"""Create a client that we'll use for every call."""
self.netloc = netloc
self.use_ssl = use_ssl
self.version = version
return _create_glance_client(context,
self.netloc,
self.use_ssl, self.version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers()
self.netloc, self.use_ssl = next(self.api_servers)
return _create_glance_client(context,
self.netloc,
self.use_ssl, version)
def call(self, context, method, *args, **kwargs):
"""Call a glance client method.
If we get a connection error,
retry the request according to CONF.glance_num_retries.
"""
version = kwargs.pop('version', self.version)
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance_num_retries
for attempt in range(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
controller = getattr(client,
kwargs.pop('controller', 'images'))
return getattr(controller, method)(*args, **kwargs)
except retry_excs as e:
netloc = self.netloc
extra = "retrying"
error_msg = _LE("Error contacting glance server "
"'%(netloc)s' for '%(method)s', "
"%(extra)s.")
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg, {'netloc': netloc,
'method': method,
'extra': extra})
raise exception.GlanceConnectionFailed(reason=e)
LOG.exception(error_msg, {'netloc': netloc,
'method': method,
'extra': extra})
time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
self._image_schema = None
self.temp_images = None
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = self._extract_query_params(kwargs)
try:
images = self._client.call(context, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if self._is_image_available(context, image):
_images.append(self._translate_from_glance(context, image))
return _images
def _extract_query_params(self, params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'sort_key', 'sort_dir')
for param in accepted_params:
|
AGMMGA/EM_scripts
|
EM_scripts/tests/mrc2mrc_tests.py
|
Python
|
gpl-2.0
| 8,856
| 0.004855
|
import sys
import multiprocessing
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
from scripts_EM import mrc2mrc as m
# class test_files_operations(unittest.TestCase):
#
# def setUp(self):
# self.files = ['1.mrc', '2.mrc', '3.mrc']
# self.tempdir = tempfile.mkdtemp()
# for f in self.files:
# with open(os.path.join(self.tempdir,f),'w') as f:
# pass
#
# def tearDown(self):
# shutil.rmtree(self.tempdir, ignore_errors=True)
#
# def test_get_mrc_files(self):
# #checking that the file is returned with the whole path
# testargs = 'foo.py -i {}'.format(self.tempdir)
# with patch('sys.argv', testargs.split()):
# a = m.imageConverter()
# res = a.get_mrc_files()
# exp = [os.path.join(self.tempdir, f) for f in self.files]
# self.assertEqual(exp.sort(), res.sort())
#
# def test_no_mrc_files(self):
# for f in glob.glob(os.path.join(self.tempdir, '*.mrc')):
# os.remove(f)
# testargs = 'foo.py -i {}'.format(self.tempdir)
#
|
with patch('sys.argv', testargs.split()):
# with self .assertRaises(SystemExit):
# a = m.image
|
Converter()
# a.get_mrc_files()
#
# def test_normal_operation(self):
# testargs = 'foo.py -i {}'.format(self.tempdir)
# with patch('sys.argv', testargs.split()):
# with patch('subprocess.Popen') as mock_popen:
# #mocking subprocess.communicate to avoid exception in main()
# mock_popen.return_value.communicate.return_value = [0,1]
# a = m.imageConverter()
# with patch('sys.stdout'): #silent please
# a.main()
# self.assertEqual(mock_popen.call_count, 3)
# #extract the calls to the mock, in sensible format
# call_args = []
# for i in range(3):
# call_args.append(mock_popen.call_args_list[i][0][0])
# call_args.sort()
# for i, value in enumerate(self.calls):
# self.assertEqual(value, call_args[i])
#
# def test_EMAN_fails(self):
# testargs = 'foo.py -i {}'.format(self.tempdir)
# with patch('sys.argv', testargs.split()):
# with patch('subprocess.Popen') as mock_popen:
# #mocking subprocess.communicate to avoid exception in main()
# mock_popen.return_value.communicate.return_value = [0,'Traceback']
# a = m.imageConverter()
# with self.assertRaises(SystemExit):
# a.main()
#
# def test_jpg_exists_no_force(self):
# os.mkdir(os.path.join(self.tempdir, 'jpgs'))
# os.remove(os.path.join(self.tempdir, '2.mrc'))
# os.remove(os.path.join(self.tempdir, '3.mrc'))
# with open(os.path.join(self.tempdir, 'jpgs', '1.jpg'),'w'):
# pass
# testargs = 'foo.py -i {}'.format(self.tempdir)
# with patch('sys.argv', testargs.split()):
# with patch('sys.stdout'):
# with patch('builtins.print') as mock_print:
# a = m.imageConverter()
# a.main()
# #the IOError exception is caught and handled. Here I am testing that
# #the error message is actually printed as a proxy for the handling
# msg = '{}/1.jpg exists. Skipped. Use -f to force overwrite'.format(
# self.tempdir)
# mock_print.assert_called_with(msg)
#
# def test_jpg_exists_and_force(self):
# os.mkdir(os.path.join(self.tempdir, 'jpgs'))
# with open(os.path.join(self.tempdir, 'jpgs', '1.jpg'),'w'):
# pass
# testargs = 'foo.py -i {} -f'.format(self.tempdir)
# with patch('sys.argv', testargs.split()):
# with patch('os.remove') as mock_remove:
# a = m.imageConverter()
# with patch('sys.stdout'):
# a.main()
# mock_remove.assert_called_with(self.outfiles[0])
class test_args_check(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
files = ['1.mrc', '2.mrc', '3.mrc']
self.files = ['{}/{}'.format(self.tempdir,i)
for i in files]
for f in self.files:
open(f,'w').close()
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def test_i_single_file(self):
testargs = 'foo.py -i {} -o @.mrc'.format(self.files[0])
        with patch('sys.argv', testargs.split()):
print(sys.argv)
a = m.imageConverter()
self.assertEqual(self.files[0], a.i)
def test_i_pattern(self):
self.tempdir = tempfile.mkdtemp()
testargs = 'foo.py -i *.mrc -o @.mrc'
with patch('sys.argv', testargs.split()):
a = m.imageConverter()
self.assertEqual(a.i, '*.mrc')
self.assertEqual(self.files, a.files)
def test_o_single_file(self):
testargs = 'foo.py -i {} -o testout.mrc'.format(self.files[0])
with patch('sys.argv', testargs.split()):
a = m.imageConverter()
exp = os.path.join(self.tempdir, 'testout.mrc')
self.assertEqual(exp, a.o)
def test_o_pattern(self):
testargs = 'foo.py -i {} -o @.mrc'.format(self.files[0])
with patch('sys.argv', testargs.split()):
a = m.imageConverter()
exp = os.path.join('@.mrc')
self.assertEqual(exp, a.o)
def test_o_given_and_same_as_i(self):
testargs = 'foo.py -i {0} -o {0} -'.format(self.files[0])
with patch('sys.argv', testargs.split()):
with self.assertRaises(SystemExit):
m.imageConverter()
def test_o_exists_and_no_force(self):
testargs = 'foo.py -i {0} -o @_hig -f'.format(self.tempdir)
with patch('sys.argv', testargs.split()):
with patch ('os.makedirs') as mock:
m.imageConverter()
self.assertTrue(mock.called)
def test_o_given_nonexisting_no_force(self):
testargs = 'foo.py -o /nonexisting/path'
with patch('sys.argv', testargs.split()):
with patch('os.makedirs'):
with self.assertRaises(SystemExit):
m.imageConverter()
def test_process_default(self):
with patch('sys.argv', ['foo.py']):
a = m.imageConverter()
self.assertEqual(a.lowpass, '')
def test_process_given(self):
testargs = 'foo.py --lowpass 40'
with patch('sys.argv', testargs.split()):
a = m.imageConverter()
i = 1/40
exp = '--process filter.lowpass.gauss:cutoff_freq={}'.format(i)
self.assertAlmostEqual(a.lowpass, exp, 3, 0.001)
def test_process_given_with_A(self):
testargs = 'foo.py --lowpass 40A'
with patch('sys.argv', testargs.split()):
a = m.imageConverter()
i = 1/40
exp = '--process filter.lowpass.gauss:cutoff_freq={}'.format(i)
self.assertAlmostEqual(a.lowpass, exp, 7, 0.001)
def test_process_given_malformed(self):
testargs = 'foo.py --lowpass forty'
with patch('sys.argv', testargs.split()):
with self.assertRaises(SystemExit):
m.imageConverter()
def test_scale_default(self):
with patch('sys.argv', ['foo.py']):
a = m.imageConverter()
self.assertEqual(a.scale, '')
def test_scale_given(self):
testargs = 'foo.py --scale 4'
with patch('sys.argv', testargs.split()):
a = m.imageConverter()
self.assertEqual(a.scale, '--meanshrink 4')
def test_ncpus_default(self):
testargs = 'foo.py'
cpus = multiprocessing.cpu_count()
|
point97/hapifis
|
server/apps/survey/migrations/0036_auto__add_questionreport__del_field_question_filterBy__del_field_quest.py
|
Python
|
gpl-3.0
| 7,859
| 0.007126
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'QuestionReport'
db.create_table(u'survey_questionreport', (
(u'question_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['survey.Question'], unique=True, primary_key=True)),
('filterBy', self.gf('django.db.models.fields.BooleanField')(default=False)),
('visualize', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'survey', ['QuestionReport'])
# Deleting field 'Question.filterBy'
db.delete_column(u'survey_question', 'filterBy')
# Deleting field 'Question.visualize'
db.delete_column(u'survey_question', 'visualize')
def backwards(self, orm):
# Deleting model 'QuestionReport'
db.delete_table(u'survey_questionreport')
# Adding field 'Question.filterBy'
db.add_column(u'survey_question', 'filterBy',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Question.visualize'
db.add_column(u'survey_question', 'visualize',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'})
},
u'survey.page': {
'Meta': {'ordering': "['survey', 'question__order']", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.question': {
'Meta': {'ordering': "['order']", 'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hoist_answers': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hoisted'", 'null': 'True', 'to': u"orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'integer_max': ('django.db.models.fields.IntegerField', [], {'default': '365', 'null': 'True', 'blank': 'True'}),
'integer_min': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'lng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'min_zoom': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modal_question'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'term_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.questionreport': {
'Meta': {'ordering': "['order']", 'object_name': 'QuestionReport', '_ormbases': [u'survey.Question']},
'filterBy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'question_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Question']", 'unique': 'True', 'primary_key': 'True'}),
'visualize': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '254', 'null': 'True', 'blank': 'True'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'survey': ('django.db.models.field
|
s.rela
|
ted.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 12, 0, 0)'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'a9c61729-3f02-4bcc-84d1-c76a9c59a771'", 'max_length': '36', 'primary_key': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {}),
'answer_raw': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 12, 0, 0)'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}),
'states': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey']
|
masfaraud/volmdlr
|
tests/core.py
|
Python
|
gpl-3.0
| 1,065
| 0.002817
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Testing core module functions
"""
import math
import numpy as npy
import volmdlr as vm
v2D_1 = vm.Vector2D(3*npy.random.random(2)-1.5)
|
v2D_2 = vm.Vector2D(3*npy.random.random(2)-1.5)
p2D_1 = vm.Point2D(3*npy.random.random(2)-1.5)
p2D_2 = vm.Point2D(3*npy.random.random(2)-1.5)
v3D_1 = vm.Vector3D(3*npy.random.random(3)-1.5)
v3D_2 = vm.Vector3D(3*npy.random.random(3)-1.5)
p3D_1 = vm.Point3D(3*npy.random.random(3)-1.5)
p3D_2 = vm.Point3D(3*npy.random.random(3)-1.5)
# Testing if normalized vector has norm ==1 and is still colinear to original vector
v2D_1_normalized = v2D_1.copy()
v2D_1_normalized.Normalize()
assert math.isclose(
|
v2D_1_normalized.Norm(), 1, abs_tol=1e-9)
assert math.isclose(v2D_1_normalized.Dot(v2D_1),v2D_1.Norm(), abs_tol=1e-9)
# Testing normal vector
normal_v2D_1 = v2D_1.NormalVector()
assert math.isclose(normal_v2D_1.Dot(v2D_1), 0, abs_tol=1e-9)
normal_unit_v2D_1 = v2D_1.NormalVector(unit=True)
assert math.isclose(normal_unit_v2D_1.Norm(), 1., abs_tol=1e-9)
# Testing Cross
|
GaretJax/csat
|
csat/acquisition/migrations/0013_auto__add_field_acquisitionsessionconfig_dark_thumbnail.py
|
Python
|
mit
| 3,962
| 0.008329
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AcquisitionSessionConfig.dark_thumbnail'
db.add_column(u'acquisition_acquisitionsessionconfig', 'dark_thumbnail',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AcquisitionSessionConfig.dark_thumbnail'
db.delete_column(u'acquisition_acquisitionsessionconfig', 'dark_thumbnail')
models = {
u'acquisition.acquisitionsessionconfig': {
'Meta': {'ordering': "['created']", 'ob
|
ject_name': 'AcquisitionSessionConfig'},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dark_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fi
|
elds.TextField', [], {'blank': 'True'}),
'graph': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'temporary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'acquisition.datacollectorconfig': {
'Meta': {'object_name': 'DataCollectorConfig'},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'configurator': ('django.db.models.fields.CharField', [], {'max_length': '44'}),
'graph': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_acquisition.datacollectorconfig_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'result_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'running_instance_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'session_config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'collectors'", 'to': u"orm['acquisition.AcquisitionSessionConfig']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['acquisition']
|
GabrielNicolasAvellaneda/dd-agent
|
tests/checks/mock/test_cassandra.py
|
Python
|
bsd-3-clause
| 5,234
| 0.004776
|
# stdlib
import logging
import unittest
# project
from dogstream.cassandra import parse_cassandra
logger = logging.getLogger(__name__)
class TestCassandraDogstream(unittest.TestCase):
def testStart(self):
events = parse_cassandra(logger, " INFO [main] 2012-12-11 21:46:26,995 StorageService.java (line 687) Bootstrap/Replace/Move completed! Now serving reads.")
self.assertTrue(events is None)
def testInfo(self):
events = parse_cassandra(logger, " INFO [CompactionExecutor:35] 2012-12-02 21:15:03,738 AutoSavingCache
|
.java (line 268) Saved KeyCache (5 items) in 3 ms")
self.assertTrue(events is None)
def testWarn(self):
events = parse_cassandra(logger, " WARN [MemoryMeter:1] 2012-12-03 20:07:47,158 Memtable.java (line 197) setting live ratio to minimum of 1.0 instead of 0.9416553595658074")
self.assertTrue(events is None)
def testError(self):
for line in """\
ERROR [CompactionExecutor:518] 2012-12-11 21:35:29,686 AbstractCassandraDaemon.jav
|
a (line 135) Exception in thread Thread[CompactionExecutor:518,1,RMI Runtime]
java.util.concurrent.RejectedExecutionException
at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:1768)
at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:767)
at java.util.concurrent.ScheduledThreadPoolExecutor.delayedExecute(ScheduledThreadPoolExecutor.java:215)
at java.util.concurrent.ScheduledThreadPoolExecutor.schedule(ScheduledThreadPoolExecutor.java:397)
at java.util.concurrent.ScheduledThreadPoolExecutor.submit(ScheduledThreadPoolExecutor.java:470)
at org.apache.cassandra.io.sstable.SSTableDeletingTask.schedule(SSTableDeletingTask.java:67)
at org.apache.cassandra.io.sstable.SSTableReader.releaseReference(SSTableReader.java:806)
at org.apache.cassandra.db.DataTracker.removeOldSSTablesSize(DataTracker.java:358)
at org.apache.cassandra.db.DataTracker.postReplace(DataTracker.java:330)
at org.apache.cassandra.db.DataTracker.replace(DataTracker.java:324)
at org.apache.cassandra.db.DataTracker.replaceCompactedSSTables(DataTracker.java:253)
at org.apache.cassandra.db.ColumnFamilyStore.replaceCompactedSSTables(ColumnFamilyStore.java:992)
at org.apache.cassandra.db.compaction.CompactionTask.execute(CompactionTask.java:200)
at org.apache.cassandra.db.compaction.CompactionManager$1.runMayThrow(CompactionManager.java:154)
at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:30)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
at java.util.concurrent.FutureTask.run(FutureTask.java:138)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:662)""".splitlines():
events = parse_cassandra(logger, line)
self.assertTrue(events is None)
def testCompactionStart(self):
events = parse_cassandra(logger, " INFO [CompactionExecutor:2] 2012-12-11 21:46:27,012 CompactionTask.java (line 109) Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-1", 'msg_text': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]", 'auto_priority': 0}])
def testCompactionEnd(self):
events = parse_cassandra(logger, "INFO [CompactionExecutor:2] 2012-12-11 21:46:27,095 CompactionTask.java (line 221) Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 ', 'msg_text': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.', 'auto_priority': 0}])
|
okfde/ris-web
|
scripts/remove_thumbs.py
|
Python
|
bsd-3-clause
| 2,543
| 0.005899
|
# encoding: utf-8
"""
Copyright (c) 2012 - 2015, Marian Steinbach, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
sys.path.append('./')
from pymongo import MongoClient
import config
import os
import inspect
import argparse
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.jo
|
in(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../city")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
des
|
cription='Generate Fulltext for given City Conf File')
parser.add_argument(dest='city', help=("e.g. bochum"))
options = parser.parse_args()
city = options.city
cityconfig = __import__(city)
connection = MongoClient(config.DB_HOST, config.DB_PORT)
db = connection[config.DB_NAME]
query = {'thumbnails': {'$exists': True}, "rs" : cityconfig.RS}
modification = {
'$unset': {
'thumbnails': 1,
'thumbnails_created': 1
}
}
for doc in db.attachments.find(query):
db.attachments.update({'_id': doc['_id']}, modification)
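# Example invocation (illustrative, based on the argparse definition above):
#
#     python scripts/remove_thumbs.py bochum
#
# which unsets 'thumbnails' and 'thumbnails_created' on every attachment
# whose 'rs' matches the given city's config.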
|
tux-00/ansible
|
lib/ansible/module_utils/facts/hardware/freebsd.py
|
Python
|
gpl-3.0
| 6,910
| 0.001881
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.timeout import TimeoutError, timeout
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu")
cpu_facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
for line in dmesg_boot.splitlines():
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
cpu_facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
cpu_facts['processor_cores'] = line.split()[4]
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
rc, out, err = self.module.run_command("/sbin/sysctl vm.stats")
for line in out.splitlines():
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = int(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = int(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = int(data[1])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k")
lines = out.splitlines()
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
memory_facts['swaptotal_mb'] = int(data[1]) // 1024
memory_facts['swapfree_mb'] = int(data[3]) // 1024
return memory_facts
@timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+', ' ', line).split()
size_total, size_available = get_mount_size(fields[1])
mount_facts['mounts'].append({
'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3],
'size_total': size_total,
'size_available': size_available
})
return mount_facts
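    # Worked example (illustrative): an /etc/fstab line such as
    #     /dev/ada0p2  /  ufs  rw  1  1
    # splits into fields = ['/dev/ada0p2', '/', 'ufs', 'rw', '1', '1'], so the
    # resulting entry has device='/dev/ada0p2', mount='/', fstype='ufs',
    # options='rw', plus the sizes reported by get_mount_size('/').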
def get_device_facts(self):
device_facts = {}
sysdir = '/dev'
device_facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d:
device_facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
device_facts['devices'][d.group(1)].append(s.group(1))
return device_facts
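    # Worked example (illustrative): with sorted /dev entries 'ada0' and
    # 'ada0s1a', the drives pattern matches 'ada0' (creating devices['ada0']),
    # then the slices pattern matches 'ada0s1a', which is appended, giving
    # devices == {'ada0': ['ada0s1a']}.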
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
dmi_facts = {}
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-man
|
ufacturer'
)
for (k, v) in DMI_DICT.items():
|
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
# FIXME: why add the fact and then test if it is json?
dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
try:
json.dumps(dmi_facts[k])
except UnicodeDecodeError:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
return dmi_facts
class FreeBSDHardwareCollector(HardwareCollector):
_fact_class = FreeBSDHardware
_platform = 'FreeBSD'
|
convexengineering/gplibrary
|
gpkitmodels/GP/aircraft/wing/gustloading.py
|
Python
|
mit
| 2,004
| 0.001497
|
" spar loading for gust case "
import os
from numpy import pi, hstack, array
from ad.admath import cos
import pandas as pd
from gpkit import parse_variables
from gpfit.fit_constraintset import FitCS
from .sparloading import SparLoading
#pylint: disable=invalid-name, no-member, arguments-differ, exec-used
#pylint: disable=attribute-defined-outside-init, undefined-variable
class GustL(SparLoading):
""" Gust Loading Model
Variables
---------
vg
|
ust 10 [m/s] gust velocity
Ww [lbf] wing weight
v [m/s] vehicle speed
cl [-] wing lift coefficient
Variables of length wing.N
--------------------------
agust [-]
|
gust angle of attack
cosminus1 self.return_cosm1 [-] 1 minus cosine factor
LaTex Strings
-------------
vgust V_{\\mathrm{gust}}
Ww W_{\\mathrm{w}}
cl c_l
agust \\alpha_{\\mathrm{gust}}
cosminus1 (cos(x)-1)
"""
new_qbarFun = None
new_SbarFun = None
def return_cosm1(self, c):
eta = c(self.wing.planform.eta).to("dimensionless").magnitude
return hstack([1e-10, 1-array(cos(eta[1:]*pi/2))])
@parse_variables(__doc__, globals())
def setup(self, wing, state, out=False):
self.load = SparLoading.setup(self, wing, state, out=out)
cbar = self.wing.planform.cbar
W = self.W # from SparLoading
q = self.q
N = self.N
b = self.b
path = os.path.dirname(os.path.abspath(__file__))
df = pd.read_csv(path + os.sep + "arctan_fit.csv").to_dict(
orient="records")[0]
constraints = [
# fit for arctan from 0 to 1, RMS = 0.044
FitCS(df, agust, [cosminus1*vgust/v]),
q >= W*N/b*cbar*(1 + 2*pi*agust/cl*(1+Ww/W)),
]
return self.load, constraints
|
hakkeroid/lcconcept
|
src/layeredconfig/sources/jsonfile.py
|
Python
|
bsd-3-clause
| 457
| 0
|
# -*- coding: utf-8 -*-
import json
from layeredconfig import so
|
urce
class JsonFile(source.Source):
"""Source for json files"""
def __init__(self, source, **kwargs):
super(JsonFile, self).__init__(**kwargs)
self._source = source
def _read(self):
with open(self._source) as fh:
return json.load(fh)
def _write(self,
|
data):
with open(self._source, 'w') as fh:
json.dump(data, fh)
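# Minimal usage sketch (assumption: the file name is hypothetical, and the
# private hooks are normally driven by the layeredconfig machinery):
#
#     cfg = JsonFile('settings.json')
#     data = cfg._read()            # parsed JSON content
#     cfg._write({'key': 'value'})  # rewrites settings.json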
|
okwow123/djangol2
|
example/env/bin/gifmaker.py
|
Python
|
mit
| 689
| 0
|
#!/home/ilovejsp/project/ad3/django-allauth/example/env/bin/python
#
# The Python Imaging Library
# $Id$
#
# convert sequence format to GIF animation
#
# history:
# 97
|
-01-03 fl created
#
# Copyright (c) Secret Labs AB 1997. All rights reserved.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("GIFMAKER -- create
|
GIF animations")
print("Usage: gifmaker infile outfile")
sys.exit(1)
im = Image.open(sys.argv[1])
im.save(sys.argv[2], save_all=True)
|
kadhikari/navitia
|
source/jormungandr/jormungandr/georef.py
|
Python
|
agpl-3.0
| 5,482
| 0.001642
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with t
|
his program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from navitiacommon import r
|
equest_pb2, response_pb2, type_pb2
import logging
from jormungandr import utils
class Kraken(object):
def __init__(self, instance):
self.instance = instance
def place(self, place):
req = request_pb2.Request()
req.requested_api = type_pb2.place_uri
req.place_uri.uri = place
req.place_uri.depth = 2
response = self.instance.send_and_receive(req)
if response.places:
return response.places[0]
if utils.is_coord(place):
# In some cases, the given "from" is not always a findable address by kraken
# we just forward the given coord and return a pt object
lon, lat = utils.get_lon_lat(place)
p = type_pb2.PtObject()
p.uri = place
p.embedded_type = type_pb2.ADDRESS
p.address.coord.lon = lon
p.address.coord.lat = lat
p.address.uri = place
return p
return None
def get_car_co2_emission_on_crow_fly(self, origin, destination):
logger = logging.getLogger(__name__)
req = request_pb2.Request()
req.requested_api = type_pb2.car_co2_emission
req.car_co2_emission.origin.place = origin
req.car_co2_emission.origin.access_duration = 0
req.car_co2_emission.destination.place = destination
req.car_co2_emission.destination.access_duration = 0
response = self.instance.send_and_receive(req)
if response.error and response.error.id == \
response_pb2.Error.error_id.Value('no_solution'):
logger.error("Cannot compute car co2 emission from {} to {}"
.format(origin, destination))
return None
return response.car_co2_emission
def get_crow_fly(self, origin, streetnetwork_mode, max_duration, max_nb_crowfly, **kwargs):
# Getting stop_points or stop_areas using crow fly
# the distance of crow fly is defined by the mode speed and max_duration
req = request_pb2.Request()
req.requested_api = type_pb2.places_nearby
req.places_nearby.uri = origin
req.places_nearby.distance = kwargs.get(streetnetwork_mode,
kwargs.get("walking")) * max_duration
req.places_nearby.depth = 0
req.places_nearby.count = max_nb_crowfly
req.places_nearby.start_page = 0
req.disable_feedpublisher = True
# we are only interested in public transports
req.places_nearby.types.append(type_pb2.STOP_POINT)
res = self.instance.send_and_receive(req)
if len(res.feed_publishers) != 0:
logger = logging.getLogger(__name__)
logger.error("feed publisher not empty: expect performance regression!")
return res.places_nearby
def get_stop_points_for_stop_area(self, uri):
req = request_pb2.Request()
req.requested_api = type_pb2.PTREFERENTIAL
req.ptref.requested_type = type_pb2.STOP_POINT
req.ptref.count = 100
req.ptref.start_page = 0
req.ptref.depth = 0
req.ptref.filter = 'stop_area.uri = {uri}'.format(uri=uri)
result = self.instance.send_and_receive(req)
if not result.stop_points:
logging.getLogger(__name__).info('PtRef, Unable to find stop_point with filter {}'.
format(req.ptref.filter))
return result.stop_points
def get_stop_points_from_uri(self, uri):
req = request_pb2.Request()
req.requested_api = type_pb2.PTREFERENTIAL
req.ptref.requested_type = type_pb2.STOP_POINT
req.ptref.count = 100
req.ptref.start_page = 0
req.ptref.depth = 0
req.ptref.filter = 'stop_point.uri = {uri}'.format(uri=uri)
result = self.instance.send_and_receive(req)
return result.stop_points
def get_odt_stop_points(self, coord):
req = request_pb2.Request()
req.requested_api = type_pb2.odt_stop_points
req.coord.lon = coord.lon
req.coord.lat = coord.lat
return self.instance.send_and_receive(req).stop_points
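# Usage sketch (illustrative; 'instance' is the jormungandr instance wrapper
# that provides send_and_receive, and the stop_area URI is hypothetical):
#
#     kraken = Kraken(instance)
#     stop_points = kraken.get_stop_points_for_stop_area('stop_area:example')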
|
mdpiper/topoflow
|
topoflow/utils/midpoints.py
|
Python
|
mit
| 10,558
| 0.014302
|
## Copyright (c) 2001-2009, Scott D. Peckham
## November 2009 (converted from IDL)
#-----------------------------------------------------------------------
# Notes: Use the random midpoint displacement method to create
# a fractal surface/landscape (due to Mandelbrot).
# This can be used as initial surface for landscape
# evolution models and is used by Alan Howard's
# MARSSIM model.
#-----------------------------------------------------------------------
#
# unit_test()
# make_fractal_surface()
#
#-----------------------------------------------------------------------
from numpy import *
import numpy
import rtg_files
import rti_files
#-----------------------------------------------------------------------
def unit_test():
z = make_fractal_surface(n_levels=8, H=1.5)
print 'min(z), max(z) =', z.min(), z.max()
print 'Finished with unit_test().'
print ' '
# unit_test()
#-----------------------------------------------------------------------
def make_fractal_surface(n_levels, H=1.5, RTG_file=None,
sigma=float64(1),
scale=float64(1),
seed=168993,
X_WRAP=False, Y_WRAP=False,
SILENT=False):
#---------------------------------------------------------
# Notes: Can apply SCALE at very end. A value of about
# 0.01 should give results similar to Howard's
# MATRIX_2D with ERROR=0.02.
# H is a fractal exponent of some kind.
# Set the X_WRAP or Y_WRAP keywords in order to
# impose periodic boundaries on the left & right
# or top & bottom, respectively.
# If n_levels = 7, nx = 129
# If n_levels = 8, nx = 257
# If n_levels = 9, nx = 513
# If n_levels = 10, nx = 1025
# If n_levels = 11, nx = 2049
#----------------------------------------------------------
if (n_levels > 11):
print '********************************************'
print ' ERROR: Max number of levels is 11,'
print ' which gives a grid size of 2049 x 2049.'
print '********************************************'
print ' '
return
if not(SILENT):
print 'Creating a fractal surface...'
#------------------
# Initialize vars
#------------------
factor = float64(1) / sqrt(float64(2) ** H) #############
nx = (int32(2) ** n_levels) + 1
ny = nx
step = nx - 1
if not(SILENT):
print 'nx, ny =', nx, ',', ny
#----------------------------------------------
x_vec = numpy.arange(nx, dtype='Int16')
y_vec = numpy.arange(ny, dtype='Int16')
cols, rows = numpy.meshgrid( x_vec, y_vec )
## rows = reshape(repeat(y_vec, nx), (ny, nx))
## cols = rot90(rows) # (will work if nx=ny)
sum_grid = (cols + rows)
#----------------------------------------------
DONE = zeros([ny, nx], dtype='UInt8')
DONE[0,0]
|
= 1
DONE[0,nx - 1] = 1
DONE[ny - 1,0] = 1
DONE[ny - 1,nx - 1] = 1
#----------------------------------------------
EDGE = zeros([ny, nx], dtype='UInt8')
|
EDGE[:,0] = 1
EDGE[:,nx - 1] = 1
EDGE[0,:] = 1
EDGE[ny - 1,:] = 1
#------------------------------
# Initialize grid of z-values
#------------------------------
numpy.random.seed(seed)
v = random.normal(loc=0.0, scale=1.0, size=(2, 2))
z = zeros([ny, nx], dtype='Float64')
z[0,0] = v[0,0]
z[0,nx - 1] = v[0,1]
z[ny - 1,0] = v[1,0]
z[ny - 1,nx - 1] = v[1,1]
#------------------------------------
if (X_WRAP):
z[0,nx - 1] = z[0,0]
z[ny - 1,nx - 1] = z[ny - 1,0]
if (Y_WRAP):
z[ny - 1,0] = z[0,0]
z[ny - 1,nx - 1] = z[0,nx - 1]
#------------------------------------
zF = z.flat ## (an iterator to allow 1D indexing) ##########
for k in xrange( 1, (n_levels + 1) ):
if not(SILENT):
print 'Working on level', k
step = (step / 2)
#---------------------------------------
# Get midpoint locations of this level
#---------------------------------------
w = where(logical_and(logical_and(logical_and(((cols.flat % step) == 0), \
((rows.flat % step) == 0)),
logical_not(DONE.flat)), logical_not(EDGE.flat)))
n_mid = size(w[0])
#########################
# Need this !!
#########################
w = w[0]
#-----------------------------------------
# Break these into two groups, w1 and w2
#-----------------------------------------
a1 = where((sum_grid.flat[w] % (2 * step)) == 0) # (1D array)
n1 = size(a1[0])
a2 = where((sum_grid.flat[w] % (2 * step)) != 0) # (1D array)
n2 = size(a2[0])
if (n1 != 0):
w1 = w[a1[0]]
if (n2 != 0):
w2 = w[a2[0]]
#---------------------------------------------
# Compute midpoint elevations as the average
# of the diagonal neighbor elevations plus
# a rescaled Gaussian random variable
#---------------------------------------------
UL = w1 - step * (nx + 1)
UR = w1 - step * (nx - 1)
LL = w1 + step * (nx - 1)
LR = w1 + step * (nx + 1)
#---------------------------
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=n1)
zF[w1] = ((zF[UL] + zF[UR] + zF[LL] + zF[LR]) / float64(4)) + ran
DONE.flat[w1] = 1
#----------------------------------------------
# Compute midpoint elevations of remaining
# pixels at this scale as the average of the
# nearest neighbor elevations plus a rescaled
# Gaussian random variable. n2=0 at start.
#----------------------------------------------
if (n2 != 0):
T = w2 - (step * nx)
B = w2 + (step * nx)
R = w2 + step
L = w2 - step
#----------------------------
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=n2)
zF[w2] = ((zF[T] + zF[B] + zF[L] + zF[R]) / float64(4)) + ran
DONE.flat[w2] = 1
#--------------------------------------------
# Compute elevations of edge pixels at this
# scale as average of 3 nearest neighbors
# plus a rescaled Gaussian random variable.
#--------------------------------------------
jump = (step * nx)
#----------------------------
L = where(logical_and(logical_and((cols.flat == 0), \
((rows.flat % step) == 0)), \
logical_not(DONE.flat)))
nL = size(L[0])
T = L - jump
B = L + jump
R = L + step
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=nL)
zF[L] = ((zF[T] + zF[B] + zF[R]) / float64(3)) + ran
DONE.flat[L] = 1
#-----------------------------------------------------------------------------
R = where(logical_and(logical_and((cols.flat == (nx - 1)), \
((rows.flat % step) == 0)), \
logical_not(DONE.flat)))
nR = size(R[0])
if not(X_WRAP):
L = R - step
T = R - jump
B = R + jump
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=nR)
zF[R] = ((zF[L] + zF[T] + zF[B]) / float64(3)) + ran
else:
zF[R] = zF[L]
DONE.flat[R] = 1
#-----------------------------------------------------------------------------
T = where(l
|
vmware/nsxramlclient
|
tests/clusterprep.py
|
Python
|
mit
| 2,874
| 0.007658
|
# coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
__author__ = 'yfauser'
from tests.config import *
from nsxramlclient.client import NsxClient
import time
def cluster_prep(session, cluster_moid):
cluster_prep_body = session.extract_resource_body_example('nwfabricConfig', 'create')
cluster_prep_body['nwFabricFeatureConfig']['resourceConfig']['resourceId'] = cluster_moid
return session.create('nwfabricConfig', request_body_dict=cluster_prep_body)
def cluster_unprep(session, cluster_moid):
cluster_prep_body = session.extract_resource_body_example('nwfabricConfig', 'delete')
cluster_prep_body['nwFabricFeatureConfig']['resourceConfig']['resourceId'] = cluster_moid
return session.delete('nwfabricConfig', request_body_dict=cluster_prep_body)
def wait_for_job_completion(session, job_id, completion_status):
status_poll_count = 0
while status_poll_count <
|
20:
response = session.read('taskFrameworkJobs', uri_parameters={'jobId': job_id})
|
session.view_response(response)
status = response['body']['jobInstances']['jobInstance']['status']
if status == completion_status:
return True
else:
time.sleep(30)
status_poll_count += 1
raise Exception('Timeout waiting for Job to complete')
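# Note (derived from the loop above): 20 polls with a 30 second sleep between
# them means wait_for_job_completion gives up after roughly 10 minutes.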
def main():
s = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=False)
prep_response = cluster_prep(s, 'domain-c26')
s.view_response(prep_response)
wait_for_job_completion(s, prep_response['objectId'], completion_status='COMPLETED')
#unprep_response = cluster_unprep(s, 'domain-c26')
#s.view_response(unprep_response)
#wait_for_job_completion(s, unprep_response['objectId'], completion_status='COMPLETED')
if __name__ == "__main__":
main()
|
yast/yast-python-bindings
|
examples/RichText2.py
|
Python
|
gpl-2.0
| 673
| 0.011887
|
# encoding: utf-8
# Example for a RichText widget
from yast import import_module
import_module('UI')
from yast import *
class RichText2Client:
def main(self):
UI.OpenDialog(
|
Opt("defaultsize"),
VBox(
RichText(
Opt("plainText"),
"This is a RichText in plainText mode.\n" +
"No HTML \t\ttags\tare\tsupported\
|
there, tabs\tand\tline\tbreaks\n" +
"are output literally \n" +
"as are HTML tags like <b> or <i> or &product;."
),
PushButton(Opt("default"), "&OK")
)
)
UI.UserInput()
UI.CloseDialog()
RichText2Client().main()
|
abesto/fig
|
tests/unit/cli_test.py
|
Python
|
apache-2.0
| 4,148
| 0.000482
|
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import os
import tempfile
import shutil
from .. import unittest
import mock
from compose.cli import main
from compose.cli.main import TopLevelCommand
from compose.cli.errors import ComposeFileNotFound
from six import StringIO
class CLITestCase(unittest.TestCase):
def test_default_project_name(self):
cwd = os.getcwd()
try:
os.chdir('tests/fixtures/simple-composefile')
command = TopLevelCommand()
project_name = command.get_project_name(command.get_config_path())
self.assertEquals('simplecomposefile', project_name)
finally:
os.chdir(cwd)
def test_project_name_with_explicit_base_dir(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/simple-composefile'
project_name = command.get_project_name(command.get_config_path())
self.assertEquals('simplecomposefile', project_name)
def test_project_name_with_explicit_uppercase_base_dir(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/UpperCaseDir'
project_name = command.get_project_name(command.get_config_path())
self.assertEquals('uppercasedir', project_name)
def test_project_name_with_explicit_project_name(self):
command = TopLevelCommand()
name = 'explicit-project-name'
project_name = command.get_project_name(None, project_name=name)
self.assertEquals('explicitprojectname', project_name)
def test_project_name_from_environment_old_var(self):
command = TopLevelCommand()
name = 'namefromenv'
with mock.patch.dict(os.environ):
os.environ['FIG_PROJECT_NAME'] = name
project_name = command.get_project_name(None)
self.assertEquals(project_name, name)
def test_project_name_from_environment_new_var(self):
command = TopLevelCommand()
name = 'namefromenv'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = command.get_project_name(None)
self.assertEquals(project_name, name)
def test_filename_check(self):
self.assertEqual('docker-compose.yml', get_config_filename_for_files([
'docker-compose.yml',
'docker-compose.yaml',
'fig.yml',
'fig.yaml',
]))
self.assertEqual('docker-compose.yaml', get_config_filename_for_files([
'docker-compose.yaml',
'fig.yml',
'fig.yaml',
]))
self.assertEqual('fig.yml', get_config_filename_for_files([
'fig.yml',
'fig.yaml',
]))
self.assertEqual('fig.yaml', get_config_filename_for_files([
'fig.yaml',
]))
self.assertRaises(ComposeFileNotFound, lambda: get_config_filename_for_files([]))
def test_get_project(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/longer-filename-composefile'
project = command.get_project(command.get_config_path())
self.assertEqual(project.name, 'longerfilenamecomposefile')
self.assertTrue(project.client)
self.assertTrue(project.services)
def test_help(self):
command = TopLevelCommand()
with self.assertRaises(SystemExit):
command.dispatch(['-h'], None)
def test_setup_logging(self):
main.setup_logging()
self.assertEqual(logging.getLogger().level, logging.DEBUG)
self.assertEqual(logging.getLogger('requests').propagate, False)
def get_config_filename_for_files(filenames):
|
project_dir = tempfile.mkdtemp()
try:
make_files(project_dir, filenames)
command = TopLevelCommand()
command.base_dir = project_dir
return os.path.basename(command.get_config_path())
finally:
shutil.rmtree(project_dir)
def make_files(dirname, filenames):
for fname in filenames:
with ope
|
n(os.path.join(dirname, fname), 'w') as f:
f.write('')
|
Makeystreet/makeystreet
|
woot/apps/catalog/migrations/0137_auto__add_upfile.py
|
Python
|
apache-2.0
| 70,587
| 0.007749
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UpFile'
db.create_table(u'catalog_upfile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added_time', self.gf('django.db.models.fields.DateTimeField')()),
('is_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('filename', self.gf('django.db.models.fields.CharField')(max_length=200)),
('filetype', self.gf('django.db.models.fields.CharField')(max_length=50)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('description', self.gf('django.db.models.fields.TextField')(max_length=1000, null=True, blank=True)),
('makey', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='files', null=True, to=orm['catalog.Makey'])),
))
db.send_create_signal('catalog', ['UpFile'])
def backwards(self, orm):
# Deleting model 'UpFile'
db.delete_table(u'catalog_upfile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.article': {
'Meta': {'object_name': 'Article'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'new_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.NewUser']", 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'recommendation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.ArticleTag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articleemail': {
'Meta': {'object_name': 'ArticleEmail'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'email_subscriptions'", 'null': 'True', 'to': "orm['catalog.ArticleTag']"}),
            'temp_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articletag': {
            'Meta': {'object_name': 'ArticleTag'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url_snippet': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
|
liumengjun/django-static-precompiler
|
static_precompiler/compilers/babel.py
|
Python
|
mit
| 2,171
| 0.001382
|
import os
import warnings
from static_precompiler import exceptions, utils
from . import base
__all__ = (
"Babel",
)
class Babel(base.BaseCompiler):
name = "babel"
input_extension = "es6"
output_extension = "js"
def __init__(self, executable="babel", sourcemap_enabled=False, modules=None, plugins=None, presets=None):
self.executable = executable
self.is_sourcemap_enabled = sourcemap_enabled
if modules:
warnings.warn("'modules' option is removed in Babel 6.0. Use `plugins` instead.", DeprecationWarning)
self.modules = modules
self.plugins = plugins
self.presets = presets
super(Babel, self).__init__()
def get_extra_args(self):
args = []
if self.modules is not None:
args += ["--modules", self.modules]
if self.plugins is not None:
args += ["--plugins", self.plugins]
if self.presets is not None:
args += ["--presets", self.presets]
return args
def compile_file(self, source_path):
args = [
self.executable,
] + self.get_extra_args()
if self.is_sourcemap_enabled:
args.append("-s")
full_output_path = self.get_full_output_path(source_path)
full_output_dirname = os.path.dirname(full_output_path)
if not os.path.exists(full_output_dirname):
os.makedirs(full_output_dirname)
args.extend(["-o", full_output_path])
args.append(self.get_full_source_path(source_path))
return_code, out, errors = utils.run_command(args)
if return_code:
raise exceptions.StaticCompilationError(errors)
if self.is_sourcemap_enabled:
utils.fix_sourcemap(full_output_path + ".map", source_path, full_output_path)
return self.get_output_path(source_path)
def compile_source(self, source):
args = [
self.executable,
] + self.get_extra_args()
        return_code, out, errors = utils.run_command(args, source)
if return_code:
raise exceptions.StaticCompilationError(errors)
return out
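A minimal usage sketch for the compiler above (assuming the static_precompiler package is importable and a `babel` CLI is on PATH; the preset name and source string are illustrative):
from static_precompiler.compilers.babel import Babel

# get_extra_args() turns presets="es2015" into "--presets es2015" on the babel command line.
compiler = Babel(presets="es2015")
# compile_source() pipes the string through the babel executable and returns the emitted JS.
js = compiler.compile_source("const add = (a, b) => a + b;")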
|
Guake/guake
|
guake/globals.py
|
Python
|
gpl-2.0
| 3,863
| 0.001812
|
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2013 Guake authors
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
import inspect
import logging
import os
__all__ = [
"ALIGN_BOTTOM",
"ALIGN_CENTER",
"ALIGN_LEFT",
"ALIGN_RIGHT",
"ALIGN_TOP",
"ALWAYS_ON_PRIMARY",
"NAME",
]
log = logging.getLogger(__name__)
def bindtextdomain(app_name, locale_dir=None):
"""
Bind the domain represented by app_name to the locale directory locale_dir.
It has the effect of loading translations, enabling applications for different
languages.
app_name:
a domain to look for translations, typically the name of an application.
locale_dir:
a directory with locales like locale_dir/lang_isocode/LC_MESSAGES/app_name.mo
If omitted or None, then the current binding for app_name is used.
"""
# pylint: disable=import-outside-toplevel
import locale
# pylint: enable=import-outside-toplevel
log.info("Local binding for app '%s', local dir: %s", app_name, locale_dir)
locale.bindtextdomain(app_name, locale_dir)
locale.textdomain(app_name)
def is_run_from_git_workdir():
self_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
return os.path.exists(f"{self_path}.in")
NAME = "guake"
ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT = range(3)
ALIGN_TOP, ALIGN_BOTTOM = range(2)
ALWAYS_ON_PRIMARY = -1
# TODO this is not as fancy as it could be
# pylint: disable=anomalous-backslash-in-string
TERMINAL_MATCH_TAGS = ("schema", "http", "https", "email", "ftp")
# Beware this is a PRCE (Perl) regular expression, not a Python one!
# Edit: use regex101.com with PCRE syntax
TERMINAL_MATCH_EXPRS = [
r"(news:|telnet:|nntp:|file:\/|https?:|ftps?:|webcal:)\/\/([-[:alnum:]]+"
r"(:[-[:alnum:],?;.:\/!%$^\*&~\"#']+)?\@)?[-[:alnum:]]+(\.[-[:alnum:]]+)*"
r"(:[0-9]{1,5})?(\/[-[:alnum:]_$.+!*(),;:@&=?\/~#%]*[^]'.>) \t\r\n,\\\"])?",
r"(www|ftp)[-[:alnum:]]*\.[-[:alnum:]]+(\.[-[:alnum:]]+)*(:[0-9]{1,5})?"
r"(\/[-[:alnum:]_$.+!*(),;:@&=?\/~#%]*[^]'.>) \t\r\n,\\\"])?",
r"(mailto:)?[-[:alnum:]][-[:alnum:].]*@[-[:alnum:]]+\.[-[:alnum:]]+(\\.[-[:alnum:]]+)*",
]
# tuple (title/quick matcher/filename and line number extractor)
QUICK_OPEN_MATCHERS = [
(
"Python traceback",
r"^\s*File\s\".*\",\sline\s[0-9]+",
r"^\s*File\s\"(.*)\",\sline\s([0-9]+)",
),
(
"Python
|
pyt
|
est report",
r"^\s.*\:\:[a-zA-Z0-9\_]+\s",
r"^\s*(.*\:\:[a-zA-Z0-9\_]+)\s",
),
(
"line starts by 'ERROR in Filename:line' pattern (GCC/make). File path should exists.",
r"\s.\S[^\s\s].[a-zA-Z0-9\/\_\-\.\ ]+\.?[a-zA-Z0-9]+\:[0-9]+",
r"\s.\S[^\s\s].(.*)\:([0-9]+)",
),
(
"line starts by 'Filename:line' pattern (GCC/make). File path should exists.",
r"^\s*[a-zA-Z0-9\/\_\-\.\ ]+\.?[a-zA-Z0-9]+\:[0-9]+",
r"^\s*(.*)\:([0-9]+)",
),
]
# Transparency max level (should be always 100)
MAX_TRANSPARENCY = 100
# Tabs session schema version
TABS_SESSION_SCHEMA_VERSION = 2
# Constants for vte regex matching are documented in the pcre2 api:
# https://www.pcre.org/current/doc/html/pcre2api.html
PCRE2_MULTILINE = 0x00000400
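Illustrative sketch only: Guake hands these patterns to VTE (as PCRE), but the QUICK_OPEN_MATCHERS extractor groups are also valid Python regexes, so they can be sanity-checked with the re module:
import re

_title, _quick_matcher, extractor = QUICK_OPEN_MATCHERS[0]  # "Python traceback"
sample = '  File "guake/globals.py", line 42'
match = re.match(extractor, sample)
if match:
    filename, lineno = match.group(1), int(match.group(2))  # ('guake/globals.py', 42)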
|
aabed/mhn
|
server/mhn/common/templatetags.py
|
Python
|
lgpl-2.1
| 65
| 0
|
def format_date(dt):
    return dt.strftime('%Y-%m-%d %H:%M:%S')
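A quick illustrative call (the datetime value is arbitrary):
import datetime
print(format_date(datetime.datetime(2015, 6, 1, 12, 30)))  # -> 2015-06-01 12:30:00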
|
FND/gabbi
|
gabbi/simple_wsgi.py
|
Python
|
apache-2.0
| 3,301
| 0
|
#
# Copyright 2014, 2015 Red Hat. All Rights Reserved.
#
# Author: Chris Dent <chdent@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SimpleWsgi provides a WSGI callable that can be used in tests to
reflect posted data and otherwise confirm headers and queries.
"""
import json
from six.moves.urllib import parse as urlparse
METHODS = ['GET', 'PUT', 'POST', 'DELETE', 'PATCH']
class SimpleWsgi(object):
"""A simple wsgi application to use in tests."""
def __call__(self, environ, start_response):
request_method = environ['REQUEST_METHOD'].upper()
query_data = urlparse.parse_qs(environ.get('QUERY_STRING', ''))
request_url = environ.get('REQUEST_URI',
environ.get('RAW_URI', 'unknown'))
accept_header = environ.get('HTTP_ACCEPT')
content_type_header = environ.get('CONTENT_TYPE', '')
request_url = self._fully_qualify(environ, request_url)
if accept_header:
response_content_type = accept_header
else:
response_content_type = 'application/json'
headers = [
('X-Gabbi-method', request_method),
('Content-Type', response_content_type),
('X-Gabbi-url', request_url),
]
if request_method not in METHODS:
headers.append(
('Allow', ', '.join(METHODS)))
start_response('405 Method Not Allowed', headers)
return []
if request_method.startswith('P'):
body = environ['wsgi.input'].read()
if body:
if not content_type_header:
start_response('400 Bad request', headers)
return []
if content_type_header == 'application/json':
body_data = json.loads(body.decode('utf-8'))
if query_data:
query_data.update(body_data)
else:
query_data = body_data
headers.append(('Location', request_url))
start_response('200 OK', headers)
query_output = json.dumps(query_data)
return [query_output.encode('utf-8')]
@staticmethod
def _fully_qualify(environ, url):
"""Turn a URL path into a fully qualified URL."""
path, query, fragment = urlparse.urlsplit(url)[2:]
server_name = environ.get('SERVER_NAME')
server_port = environ.get('SERVER_PORT')
server_scheme = environ.get('wsgi.url_scheme')
if server_port not in ['80', '443']:
netloc = '%s:%s' % (server_name, server_port)
else:
netloc = server_name
return urlparse.urlunsplit((server_scheme, netloc, path,
query, fragment))
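A minimal sketch of exercising SimpleWsgi outside of gabbi, using wsgiref to stub out the environ (all values are illustrative):
from wsgiref.util import setup_testing_defaults

environ = {'QUERY_STRING': 'name=gabbi'}
setup_testing_defaults(environ)  # supplies REQUEST_METHOD=GET, SERVER_NAME, wsgi.input, ...

def fake_start_response(status, headers):
    print(status, headers)

app = SimpleWsgi()
body = app(environ, fake_start_response)  # echoes the query data back as JSON
print(body[0])  # b'{"name": ["gabbi"]}'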
|
gkonstantyno/construct
|
construct/protocols/layer2/arp.py
|
Python
|
mit
| 2,137
| 0.04773
|
"""
Ethernet (TCP/IP protocol stack)
"""
from binascii import unhexlify
from construct import *
from construct.protocols.layer3.ipv4 import IpAddressAdapter
from ethernet import MacAddressAdapter
def HwAddress(name):
return IfThenElse(name, lambda ctx: ctx.hardware_type == "ETHERNET",
MacAddressAdapter(Field("data", lambda ctx: ctx.hwaddr_length)),
Field("data", lambda ctx: ctx.hwaddr_length)
)
def ProtoAddress(name):
return IfThenElse(name, lambda ctx: ctx.protocol_type == "IP",
IpAddressAdapter(Field("data", lambda ctx: ctx.protoaddr_length)),
Field("data", lambda ctx: ctx.protoaddr_length)
)
arp_header = Struct("arp_header",
Enum(UBInt16("hardware_type"),
ETHERNET = 1,
EXPERIMENTAL_ETHERNET = 2,
ProNET_TOKEN_RING = 4,
CHAOS = 5,
IEEE802 = 6,
ARCNET = 7,
HYPERCHANNEL = 8,
ULTRALINK = 13,
FRAME_RELAY = 15,
        FIBRE_CHANNEL = 18,
IEEE1394 = 24,
HIPARP = 28,
ISO7816_3 = 29,
ARPSEC = 30,
IPSEC_TUNNEL = 31,
INFINIBAND = 32,
),
Enum(UBInt16("protocol_type"),
IP = 0x0800,
),
UBInt8("hwaddr_length"),
UBInt8("protoaddr_length"),
Enum(UBInt16("opcode"),
REQUEST = 1,
REPLY = 2,
REQUEST_REVERSE = 3,
REPLY_REVERSE = 4,
DRARP_REQUEST = 5,
DRARP_REPLY = 6,
DRARP_ERROR = 7,
InARP_REQUEST = 8,
InARP_REPLY = 9,
ARP_NAK = 10
),
HwAddress("source_hwaddr"),
ProtoAddress("source_protoaddr"),
HwAddress("dest_hwaddr"),
ProtoAddress("dest_protoaddr"),
)
rarp_header = Rename("rarp_header", arp_header)
if __name__ == "__main__":
cap1 = unhexlify(b"00010800060400010002e3426009c0a80204000000000000c0a80201")
obj = arp_header.parse(cap1)
print (obj)
print (repr(arp_header.build(obj)))
print ("-" * 80)
cap2 = unhexlify(b"00010800060400020011508c283cc0a802010002e3426009c0a80204")
obj = arp_header.parse(cap2)
print (obj)
print (repr(arp_header.build(obj)))
|
tboyce021/home-assistant
|
homeassistant/components/rfxtrx/__init__.py
|
Python
|
apache-2.0
| 16,421
| 0.00067
|
"""Support for RFXtrx devices."""
import asyncio
import binascii
from collections import OrderedDict
import copy
import logging
import RFXtrx as rfxtrxmod
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_DEVICE_ID,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
DEGREE,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_STOP,
LENGTH_MILLIMETERS,
PERCENTAGE,
POWER_WATT,
PRECIPITATION_MILLIMETERS_PER_HOUR,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
UV_INDEX,
VOLT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_EVENT,
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_DEBUG,
CONF_FIRE_EVENT,
CONF_OFF_DELAY,
CONF_REMOVE_DEVICE,
CONF_SIGNAL_REPETITIONS,
DATA_CLEANUP_CALLBACKS,
DATA_LISTENER,
DATA_RFXOBJECT,
DEVICE_PACKET_TYPE_LIGHTING4,
EVENT_RFXTRX_EVENT,
SERVICE_SEND,
)
DOMAIN = "rfxtrx"
DEFAULT_SIGNAL_REPETITIONS = 1
SIGNAL_EVENT = f"{DOMAIN}_event"
DATA_TYPES = OrderedDict(
[
("Temperature", TEMP_CELSIUS),
("Temperature2", TEMP_CELSIUS),
("Humidity", PERCENTAGE),
("Barometer", PRESSURE_HPA),
("Wind direction", DEGREE),
("Rain rate", PRECIPITATION_MILLIMETERS_PER_HOUR),
("Energy usage", POWER_WATT),
("Total usage", ENERGY_KILO_WATT_HOUR),
("Sound", None),
("Sensor Status", None),
("Counter value", "count"),
("UV", UV_INDEX),
("Humidity status", None),
("Forecast", None),
("Forecast numeric", None),
("Rain total", LENGTH_MILLIMETERS),
("Wind average speed", SPEED_METERS_PER_SECOND),
("Wind gust", SPEED_METERS_PER_SECOND),
("Chill", TEMP_CELSIUS),
("Count", "count"),
("Current Ch. 1", ELECTRICAL_CURRENT_AMPERE),
("Current Ch. 2", ELECTRICAL_CURRENT_AMPERE),
("Current Ch. 3", ELECTRICAL_CURRENT_AMPERE),
("Voltage", VOLT),
("Current", ELECTRICAL_CURRENT_AMPERE),
("Battery numeric", PERCENTAGE),
("Rssi numeric", SIGNAL_STRENGTH_DECIBELS_MILLIWATT),
]
)
_LOGGER = logging.getLogger(__name__)
def _bytearray_string(data):
val = cv.string(data)
try:
return bytearray.fromhex(val)
except ValueError as err:
raise vol.Invalid(
"Data must be a hex string with multiple of two characters"
) from err
def _ensure_device(value):
if value is None:
return DEVICE_DATA_SCHEMA({})
return DEVICE_DATA_SCHEMA(value)
SERVICE_SEND_SCHEMA = vol.Schema({ATTR_EVENT: _bytearray_string})
DEVICE_DATA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_OFF_DELAY): vol.All(
cv.time_period, cv.positive_timedelta, lambda value: value.total_seconds()
),
vol.Optional(CONF_DATA_BITS): cv.positive_int,
vol.Optional(CONF_COMMAND_ON): cv.byte,
vol.Optional(CONF_COMMAND_OFF): cv.byte,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=1): cv.positive_int,
}
)
BASE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEBUG): cv.boolean,
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {cv.string: _ensure_device},
},
)
DEVICE_SCHEMA = BASE_SCHEMA.extend({vol.Required(CONF_DEVICE): cv.string})
PORT_SCHEMA = BASE_SCHEMA.extend(
{vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_HOST): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.deprecated(CONF_DEBUG), vol.Any(DEVICE_SCHEMA, PORT_SCHEMA))},
extra=vol.ALLOW_EXTRA,
)
DOMAINS = ["switch", "sensor", "light", "binary_sensor", "cover"]
async def async_setup(hass, config):
"""Set up the RFXtrx component."""
if DOMAIN not in config:
return True
data = {
CONF_HOST: config[DOMAIN].get(CONF_HOST),
CONF_PORT: config[DOMAIN].get(CONF_PORT),
CONF_DEVICE: config[DOMAIN].get(CONF_DEVICE),
CONF_AUTOMATIC_ADD: config[DOMAIN].get(CONF_AUTOMATIC_ADD),
CONF_DEVICES: config[DOMAIN][CONF_DEVICES],
}
    # Read the device_id from the event code and add it to the data that will end up in the ConfigEntry
for event_code, event_config in data[CONF_DEVICES].items():
event = get_rfx_object(event_code)
if event is None:
continue
device_id = get_device_id(
event.device, data_bits=event_config.get(CONF_DATA_BITS)
)
        event_config[CONF_DEVICE_ID] = device_id
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=data,
)
)
return True
async def async_setup_entry(hass, entry: config_entries.ConfigEntry):
"""Set up the RFXtrx component."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][DATA_CLEANUP_CALLBACKS] = []
try:
await async_setup_internal(hass, entry)
except asyncio.TimeoutError:
# Library currently doesn't support reload
_LOGGER.error(
"Connection timeout: failed to receive response from RFXtrx device"
)
return False
for domain in DOMAINS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, domain)
)
return True
async def async_unload_entry(hass, entry: config_entries.ConfigEntry):
"""Unload RFXtrx component."""
if not all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in DOMAINS
]
)
):
return False
hass.services.async_remove(DOMAIN, SERVICE_SEND)
for cleanup_callback in hass.data[DOMAIN][DATA_CLEANUP_CALLBACKS]:
cleanup_callback()
listener = hass.data[DOMAIN][DATA_LISTENER]
listener()
rfx_object = hass.data[DOMAIN][DATA_RFXOBJECT]
await hass.async_add_executor_job(rfx_object.close_connection)
hass.data.pop(DOMAIN)
return True
def _create_rfx(config):
"""Construct a rfx object based on config."""
if config[CONF_PORT] is not None:
# If port is set then we create a TCP connection
rfx = rfxtrxmod.Connect(
(config[CONF_HOST], config[CONF_PORT]),
None,
transport_protocol=rfxtrxmod.PyNetworkTransport,
)
else:
rfx = rfxtrxmod.Connect(config[CONF_DEVICE], None)
return rfx
def _get_device_lookup(devices):
"""Get a lookup structure for devices."""
lookup = {}
for event_code, event_config in devices.items():
event = get_rfx_object(event_code)
if event is None:
continue
device_id = get_device_id(
event.device, data_bits=event_config.get(CONF_DATA_BITS)
)
lookup[device_id] = event_config
return lookup
async def async_setup_internal(hass, entry: config_entries.ConfigEntry):
"""Set up the RFXtrx component."""
config = entry.data
# Initialize library
async with async_timeout.timeout(30):
rfx_object = await hass.async_add_executor_job(_create_rfx, config)
# Setup some per device config
devices = _get_device_lookup(config[CONF_DEVICES])
    # Declare the event handler
@callback
def async_handle_receive(event):
"""Handle received messages from RFXtrx gateway."""
# Log RFXCOM event
if not event.device.id_string:
return
event_data = {
"packet_type": event.device.packettype,
"sub_type": event.device.subt
|
cyclus/cyc3d
|
data_to_json.py
|
Python
|
bsd-3-clause
| 912
| 0.006579
|
#!/usr/bin/env python
import os
import json
from argparse import ArgumentParser
from datetime import datetime
import time
import numpy as np
from tools import diff_last, cost_val, load_kind
def year_to_ms(y):
"""converts years to milliseconds."""
x = datetime(y, 1, 1)
x = time.mktime(x.timetuple()) * 1000
    return int(x)
def main():
parser = ArgumentParser()
    parser.add_argument('filename')
parser.add_argument('-k', dest="kind", help="waste or cost or newcost", default="waste")
ns = parser.parse_args()
data = load_kind(ns.filename, ns.kind)
dates = map(year_to_ms, data['year'])
j = [{"key": k, "values": zip(dates, np.asarray(data[k], 'i8'))} \
for k in data.dtype.names[1:]]
jfname = "{0}-{1}.json".format(os.path.splitext(ns.filename)[0], ns.kind)
with open(jfname, 'w') as f:
json.dump(j, f)
if __name__ == "__main__":
main()
|
patrickod/fixture
|
fixture/test/test_loadable/test_django/util.py
|
Python
|
lgpl-2.1
| 579
| 0.008636
|
"""This is mostly a copy of methods and internal classes from loadable"""
from django.db.models.loading import get_models
from fixture.loadable.loadable import DeferredStoredObject
def assert_empty(mod):
for model in get_models(mod):
        assert model.objects.count() == 0
def resolve_stored_object(column_val):
if type(column_val)==DeferredStoredObject:
return column_val.get_stored_object_from_loader(self)
else:
return column_val
def get_column_vals(row):
for c in row.columns():
        yield (c, resolve_stored_object(getattr(row, c)))
|
Oinweb/py-fly
|
api/management/commands/evaluate_quiz.py
|
Python
|
bsd-2-clause
| 3,663
| 0.001092
|
import os
import sys
import json
from datetime import datetime
from django.db import connection, transaction
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from fly_project import constants
from api.models import QuizSubmission
from api.models import QuestionSubmission
class Command(BaseCommand):
"""
Run in your console:
$ python manage.py evaluate_quiz {{ quiz_submission_id }}
"""
help = _('Command will mark and score the User\'s submitted quiz.')
def add_arguments(self, parser):
parser.add_argument('id', nargs='+')
def handle(self, *args, **options):
# Process all the Quizzes that are inputted into this Command.
for id in options['id']:
try:
submission = QuizSubmission.objects.get(id=id)
self.begin_processing(submission)
except QuizSubmission.DoesNotExist:
pass
def begin_processing(self, submission):
"""
Function will load up the Quiz Submission and iterate through all
Question Submissions and evaluate them for being either correct
or wrong and then assign a mark to them.
"""
# Step 1: Fetch all the submitted Questions for this Quiz.
try:
question_submissions = QuestionSubmission.objects.filter(
quiz=submission.quiz,
user=submission.user,
)
except QuestionSubmission.DoesNotExist:
question_submissions = None
# Step 2: Iterate through all the submitted Questions and mark them
# either right or wrong depending on the correct answer.
for question_submission in question_submissions.all():
is_right = True
question_answer = question_submission.question
# Step 3: If the question is 'Open-ended' then simply give the
            #         student the mark and finish this function, otherwise
# evaluate the quiz question.
if question_submission.type == 1:
question_submission.mark = 1
else:
if question_submission.a is not question_answer.a_is_correct:
is_right = False
if question_submission.b is not question_answer.b_is_correct:
is_right = False
if question_submission.c is not question_answer.c_is_correct:
is_right = False
if question_submission.d is not question_answer.d_is_correct:
is_right = False
if question_submission.e is not question_answer.e_is_correct:
is_right = False
                if question_submission.f is not question_answer.f_is_correct:
is_right = False
if is_right:
question_submission.mark = 1
else:
question_submission.mark = 0
question_submission.save()
# Step 4: Iterate through all the submitted Questions and perform a
# total mark tally of the Quiz and then mark the Quiz either a success
# or a failure.
total_mark = 0
actual_mark = 0
for question_submission in question_submissions.all():
total_mark += 1
actual_mark += question_submission.mark
final_mark = (actual_mark / total_mark) * 100
submission.final_mark = final_mark
submission.save()
|
google-research/language
|
language/bert_extraction/steal_bert_qa/utils/filter_queries_victim_agreement.py
|
Python
|
apache-2.0
| 5,977
| 0.009035
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Filter pool of queries based on their predictions on various victim model.
Used in Section 5.1 of the academic paper.
"""
import json
import random
from bert_extraction.steal_bert_qa.utils import evaluate_squad
import numpy as np
import tensorflow.compat.v1 as tf
import tqdm
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("pool_dataset", None,
"Pool of queries to sort and filter, such as 10x datasets")
flags.DEFINE_string(
"prediction_files", None,
"Comma-separated list of predictions from different victim seeds")
flags.DEFINE_enum("scheme", "top_f1", ["top_f1", "bottom_f1", "random"],
"Scheme to carry out sorting and filtering of pool dataset")
flags.DEFINE_string("output_dir", None,
"Output directory to store filtered dataset")
flags.DEFINE_integer("train_set_size", 87599, "Target size of filtered dataset")
FLAGS = flags.FLAGS
def main(_):
with gfile.Open(FLAGS.pool_dataset, "r") as f:
pool_data = json.loads(f.read())["data"]
preds_data = []
for pred_file in FLAGS.prediction_files.split(","):
if pred_file.strip():
with gfile.Open(pred_file, "r") as f:
preds_data.append(json.loads(f.read()))
# Calculate the pairwise F1 scores of the predicted answers across files.
# Store the average score for sorting the list.
qa_f1 = []
for inst in tqdm.tqdm(pool_data):
for para in inst["paragraphs"]:
for qa in para["qas"]:
f1_scores = evaluate_squad.f1_score_multiple(
[x[qa["id"]] for x in preds_data])
qa_f1.append([
qa["id"], f1_scores,
np.mean(f1_scores), qa["answers"][0],
[x[qa["id"]] for x in preds_data]
])
# Sort the pool dataset based on average pairwise F1 score.
qa_f1.sort(key=lambda x: x[2], reverse=True)
  # Filter the dataset based on the filtering scheme.
if FLAGS.scheme == "random":
random.shuffle(qa_f1)
qa_f1 = {x[0]: x[2] for x in qa_f1[:FLAGS.train_set_size]}
elif FLAGS.scheme == "top_f1":
qa_f1 = {x[0]: x[2] for x in qa_f1[:FLAGS.train_set_size]}
elif FLAGS.scheme == "bottom_f1":
qa_f1 = {x[0]: x[2] for x in qa_f1[-1 * FLAGS.train_set_size:]}
else:
logging.error("error")
    return
output_data_orig = {"data": [], "version": FLAGS.version}
# A total of len(preds_data) + 1 datasets are constructed, each with all
  # possible answers for the filtered questions.
# First, make a dataset with the original pool_dataset's answers.
# Run through the pool dataset and add all those questions which survived
# the filtering scheme.
for inst in tqdm.tqdm(pool_data):
inst1 = {"title": "original ans", "paragraphs": []}
for para in inst["paragraphs"]:
para_text = para["context"]
para1 = {"context": para_text, "qas": []}
for qa in para["qas"]:
if qa["id"] not in qa_f1:
continue
para1["qas"].append(qa)
# only add paragraphs with non-zero QAs.
if para1["qas"]:
inst1["paragraphs"].append(para1)
# only add instances with non-zero paragraphs.
if inst1["paragraphs"]:
output_data_orig["data"].append(inst1)
total_questions = 0
for instance in output_data_orig["data"]:
for para in instance["paragraphs"]:
for qa in para["qas"]:
total_questions += 1
logging.info("Orig answers dataset size = %d", total_questions)
gfile.MakeDirs(FLAGS.output_dir + "/orig_answers")
with gfile.Open(FLAGS.output_dir + "/orig_answers/train-v1.1.json", "w") as f:
f.write(json.dumps(output_data_orig))
# Next, make datasets with each of the predicted file's answers.
# These datasets have been used in the academic publication. For schemes like
# top_f1 there will be a lot of redundancy (and hence low variance in plots).
for pp, pred_data1 in enumerate(preds_data):
output_data_preds = {"data": [], "version": FLAGS.version}
for inst in tqdm.tqdm(pool_data):
inst1 = {"title": "pred answer %d" % pp, "paragraphs": []}
for para in inst["paragraphs"]:
para_text = para["context"]
para1 = {"context": para_text, "qas": []}
for qa in para["qas"]:
if qa["id"] not in qa_f1:
continue
if pred_data1[qa["id"]] not in para_text:
continue
para1["qas"].append({
"question": qa["question"],
"id": qa["id"],
"answers": [{
"answer_start": para_text.index(pred_data1[qa["id"]]),
"text": pred_data1[qa["id"]]
}],
"is_impossible": False
})
if para1["qas"]:
inst1["paragraphs"].append(para1)
if inst1["paragraphs"]:
output_data_preds["data"].append(inst1)
total_questions = 0
for instance in output_data_preds["data"]:
for para in instance["paragraphs"]:
for qa in para["qas"]:
total_questions += 1
logging.info("Final prediction #%d dataset size = %d", pp, total_questions)
gfile.MakeDirs(FLAGS.output_dir + "/pred_answer%d" % pp)
with gfile.Open(FLAGS.output_dir + "/pred_answer%d/train-v1.1.json" % pp,
"w") as f:
f.write(json.dumps(output_data_preds))
return
if __name__ == "__main__":
app.run(main)
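A typical invocation, sketched as comments; every path and the scheme value are placeholders:
# python filter_queries_victim_agreement.py \
#   --pool_dataset=/path/to/pool.json \
#   --prediction_files=/path/to/preds_seed1.json,/path/to/preds_seed2.json \
#   --scheme=top_f1 \
#   --output_dir=/path/to/filtered \
#   --train_set_size=87599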
|
martomo/SublimeTextXdebug
|
xdebug/helper/helper.py
|
Python
|
mit
| 1,497
| 0.000668
|
"""
Helper module for Python version 3.0 and above
- Ordered dictionaries
- Encoding/decoding urls
- Unicode/Bytes (for sending/receiving data from/to socket, base64)
- Exception handling (except Exception as e)
"""
import base64
from urllib.parse import unquote, quote
from collections import OrderedDict
def modulename():
return 'Helper module for Python version 3.0 and above'
def url_decode(uri):
return unquote(uri)
def url_encode(uri):
return quote(uri)
def new_dictionary():
return OrderedDict()
def dictionary_keys(dictionary):
return list(dictionary.keys())
def dictionary_values(dictionary):
return list(dictionary.values())
def data_read(data):
# Convert bytes to string
return data.decode('utf8')
def data_write(data):
# Convert string to bytes
return bytes(data, 'utf8')
def base64_decode(data):
# Base64 returns decoded byte string, decode to convert to UTF8 string
return base64.b64decode(data).decode('utf8')
def base64_encode(data):
    # Base64 needs ascii input to encode, which returns Base64 byte string, decode to convert to UTF8 string
return base64.b64encode(data.encode('ascii')).decode('utf8')
def unicode_chr(code):
return chr(code)
def unicode_string(string):
# Python 3.* uses unicode by default
return string
def is_digit(string):
# Check if string is digit
return isinstance(string, str) and string.isdigit()
def is_number(value):
    return isinstance(value, int)
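Round-trip sketch of the byte/string helpers above (the payload strings are arbitrary DBGp-style examples):
payload = data_write('property_get -n $foo')        # str -> bytes for the socket
assert data_read(payload) == 'property_get -n $foo'
assert base64_decode(base64_encode('breakpoint_set')) == 'breakpoint_set'
assert is_digit('42') and not is_digit('4.2')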
|
MaxMorgenstern/EmeraldAI
|
EmeraldAI/Logic/ROS/Serial/SerialFinder.py
|
Python
|
apache-2.0
| 888
| 0.005631
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import itertools
class SerialFinder():
_command = """ls -al /sys/class/tty/ttyUSB* | grep -o "/sys/class/tty/ttyUSB.*"| sed 's/ -> .*//'"""
_replace = "/sys/class/tty"
_replaceWith = "/dev"
def Find(self):
proc = subprocess.Popen([self._command], stdout=subprocess.PIPE, shell=True)
(out, _) = proc.communicate()
if len(out) < 2:
return []
return self.__split(out)
def __groupSeparator(self, line):
return line=='\n'
def __split(self, data):
lines = []
for _, group in itertools.groupby(data, self.__groupSeparator):
line = ''.join(str(e) for e in group)
line = line.strip()
if (len(line) > 1):
lines.append(line.replace(self._replace, self._replaceWith))
return lines
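Usage sketch (Linux only, written for Python 2 as in the original shebang; Find() returns an empty list when no ttyUSB adapters are attached):
finder = SerialFinder()
for port in finder.Find():
    print(port)  # e.g. /dev/ttyUSB0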
|
VirusTotal/misp-modules
|
misp_modules/modules/expansion/sophoslabs_intelix.py
|
Python
|
agpl-3.0
| 6,396
| 0.004378
|
import json
import requests
import base64
from . import check_input_attribute, checking_error, standard_error_message
from pymisp import MISPEvent, MISPObject
from urllib.parse import quote
moduleinfo = {'version': '1.0',
'author': 'Ben Verschaeren',
'description': 'SOPHOSLabs Intelix Integration',
'module-type': ['expansion']}
moduleconfig = ['client_id', 'client_secret']
misperrors = {'error': 'Error'}
misp_types_in = ['sha256', 'ip', 'ip-src', 'ip-dst', 'uri', 'url', 'domain', 'hostname']
mispattributes = {'input': misp_types_in,
'format': 'misp_standard'}
class SophosLabsApi():
def __init__(self, client_id, client_secret):
self.misp_event = MISPEvent()
self.client_id = client_id
self.client_secret = client_secret
self.authToken = f"{self.client_id}:{self.client_secret}"
self.baseurl = 'de.api.labs.sophos.com'
d = {'grant_type': 'client_credentials'}
h = {'Authorization': f"Basic {base64.b64encode(self.authToken.encode('UTF-8')).decode('ascii')}",
'Content-Type': 'application/x-www-form-urlencoded'}
r = requests.post('https://api.labs.sophos.com/oauth2/token', headers=h, data=d)
if r.status_code == 200:
j = json.loads(r.text)
self.accessToken = j['access_token']
def get_result(self):
event = json.loads(self.misp_event.to_json())
results = {key: event[key] for key in ('Attribute', 'Object') if (key in event and event[key])}
return {'results': results}
def hash_lookup(self, filehash):
sophos_object = MISPObject('SOPHOSLabs Intelix SHA256 Report')
h = {"Authorization": f"{self.accessToken}"}
r = requests.get(f"https://{self.baseurl}/lookup/files/v1/{filehash}", headers=h)
if r.status_code == 200:
j = json.loads(r.text)
if 'reputationScore' in j:
sophos_object.add_attribute('Reputation Score', type='text', value=j['reputationScore'])
if 0 <= j['reputationScore'] <= 19:
sophos_object.add_attribute('Decision', type='text', value='This file is malicious')
if 20 <= j['reputationScore'] <= 29:
sophos_object.add_attribute('Decision', type='text', value='This file is potentially unwanted')
if 30 <= j['reputationScore'] <= 69:
sophos_object.add_attribute('Decision', type='text', value='This file is unknown and suspicious')
if 70 <= j['reputationScore'] <= 100:
sophos_object.add_attribute('Decision', type='text', value='This file is known good')
if 'detectionName' in j:
sophos_object.add_attribute('Detection Name', type='text', value=j['detectionName'])
else:
sophos_object.add_attribute('Detection Name', type='text', value='No name associated with this IoC')
self.misp_event.add_object(**sophos_object)
def ip_lookup(self, ip):
sophos_object = MISPObject('SOPHOSLabs Intelix IP Category Lookup')
h = {"Authorization": f"{self.accessToken}"}
r = requests.get(f"https://{self.baseurl}/lookup/ips/v1/{ip}", headers=h)
if r.status_code == 200:
j = json.loads(r.text)
if 'category' in j:
for c in j['category']:
sophos_object.add_attribute('IP Address Categorisation', type='text', value=c)
else:
                sophos_object.add_attribute('IP Address Categorisation', type='text', value='No category associated with IoC')
self.misp_event.add_object(**sophos_object)
def url_lookup(self, url):
sophos_object = MISPObject('SOPHOSLabs Intelix URL Lookup')
h = {"Authorization": f"{self.accessToken}"}
r = requests.get(f"https://{self.baseurl}/lookup/urls/v1/{quote(url, safe='')}", headers=h)
if r.status_code == 200:
j = json.loads(r.text)
if 'productivityCategory' in j:
sophos_object.add_attribute('URL Categorisation', type='text', value=j['productivityCategory'])
else:
                sophos_object.add_attribute('URL Categorisation', type='text', value='No category associated with IoC')
if 'riskLevel' in j:
sophos_object.add_attribute('URL Risk Level', type='text', value=j['riskLevel'])
else:
sophos_object.add_attribute('URL Risk Level', type='text', value='No risk level associated with IoC')
if 'securityCategory' in j:
sophos_object.add_attribute('URL Security Category', type='text', value=j['securityCategory'])
else:
sophos_object.add_attribute('URL Security Category', type='text', value='No Security Category associated with IoC')
self.misp_event.add_object(**sophos_object)
def handler(q=False):
if q is False:
return False
j = json.loads(q)
if not j.get('config') or not j['config'].get('client_id') or not j['config'].get('client_secret'):
misperrors['error'] = "Missing client_id or client_secret value for SOPHOSLabs Intelix. \
It's free to sign up here https://aws.amazon.com/marketplace/pp/B07SLZPMCS."
return misperrors
to_check = (('type', 'value'), ('type', 'value1'))
if not j.get('attribute') or not any(check_input_attribute(j['attribute'], requirements=check) for check in to_check):
return {'error': f'{standard_error_message}, {checking_error}.'}
attribute = j['attribute']
if attribute['type'] not in misp_types_in:
return {'error': 'Unsupported attribute type.'}
client = SophosLabsApi(j['config']['client_id'], j['config']['client_secret'])
mapping = {
'sha256': 'hash_lookup',
'ip-dst': 'ip_lookup',
'ip-src': 'ip_lookup',
'ip': 'ip_lookup',
'uri': 'url_lookup',
'url': 'url_lookup',
'domain': 'url_lookup',
'hostname': 'url_lookup'
}
    attribute_value = attribute['value'] if 'value' in attribute else attribute['value1']
getattr(client, mapping[attribute['type']])(attribute_value)
return client.get_result()
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
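Illustrative handler() call; the client_id/client_secret values are placeholders and a live SOPHOSLabs Intelix subscription plus network access would be needed for a real lookup:
sample_query = json.dumps({
    'config': {'client_id': '<client-id>', 'client_secret': '<client-secret>'},
    'attribute': {'type': 'domain', 'value': 'example.com'}
})
# result = handler(sample_query)  # returns {'results': {...}} on success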
|
RafaelPAndrade/LEIC-A-IST
|
RC/Proj/CS.py
|
Python
|
mit
| 16,058
| 0.002678
|
#!/usr/bin/env python3
import socket, sys, getopt, os
from signal import signal, pause, SIGINT, SIGTERM, SIG_IGN
from pickle import load, dump
from multiprocessing import Process
from multiprocessing.managers import SyncManager
from lib.server import tcp_server, udp_server, udp_client
from lib.utils import (read_bytes_until, DEFAULT_CS_PORT, CS_KNOWN_BS_SAVEFILE,
CS_VALID_USERS_SAVEFILE, CS_DIRS_LOCATION_SAVEFILE,
backup_dict_to_file, restore_dict_from_file,
ignore_sigint, get_best_ip)
# Function to deal with any unexpected protocol error
def unexpected_command(my_socket):
""" Informs that there was a error. TCP and UDP compatible. """
my_socket.sendall("ERR\n".encode())
# Code to deal with queries from BS (UDP server)
def deal_with_udp(udp_socket, known_bs):
def signal_handler(_signum, _frame):
udp_socket.close()
exit(0)
# ignore CTRL-C; handle .terminate() from parent
signal(SIGINT, SIG_IGN)
signal(SIGTERM, signal_handler)
while True:
response, address = udp_socket.recvfrom(32)
args = response.decode().split(" ")
command = args[0]
args = args[1:]
if command == "REG":
add_bs(known_bs, args, udp_socket, address)
elif command == "UNR":
remove_bs(known_bs, args, udp_socket, address)
else:
unexpected_command(udp_socket)
def add_bs(known_bs, args, udp_socket, address):
status = "ERR"
ip_bs = args[0]
port_bs = args[1].split("\n")[0]
if len(args) != 2 or port_bs.isdigit() is False:
print("Error in arguments received from BS server: {} {}".format(ip_bs, port_bs))
elif (ip_bs, port_bs) in known_bs:
print("Error: Already added BS {}".format(ip_bs))
status = "NOK"
else:
known_bs[(ip_bs, port_bs)] = 0
backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
status = "OK"
print("-> BS added:\n - ip: {}\n - port: {}\n".format(ip_bs, port_bs))
udp_socket.sendto("RGR {}\n".format(status).encode(), address)
def remove_bs(known_bs, args, udp_socket, address):
status = "ERR\n"
ip_bs = args[0]
port_bs = args[1].split("\n")[0]
if len(args) != 2 or port_bs.isdigit() is False:
print("Error in arguments received from BS server: {} {}".format(ip_bs, port_bs))
elif (ip_bs, port_bs) not in known_bs:
print("Error: User {} does not exist".format(ip_bs))
status = "NOK\n"
else:
del known_bs[(ip_bs, port_bs)]
backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
status = "OK\n"
print("-> BS removed:\n - ip: {}\n - port: {}\n".format(ip_bs, port_bs))
udp_socket.sendto("UAR {}\n".format(status).encode(), address)
def deal_with_tcp(tcp_socket, valid_users, dirs_location, known_bs):
def signal_handler(_signum, _frame):
tcp_socket.close()
exit(0)
def deal_with_client(client, valid_users, dirs_location, known_bs):
""" Code / function for forked worker """
conn = client[0]
logged_in = False # this var is False or contains the user id
while True:
try:
command = read_bytes_until(conn, " \n")
if command == "AUT":
logged_in, password = authenticate_user(valid_users, conn)
elif command == "DLU" and logged_in:
delete_user(logged_in, conn, dirs_location, valid_users)
break
elif command == "BCK" and logged_in:
backup_dir(logged_in, conn, known_bs, password, dirs_location)
break
elif command == "RST" and logged_in:
restore_dir(logged_in, conn, dirs_location)
break
elif command == "LSD" and logged_in:
list_user_dirs(logged_in, conn, dirs_location)
break
elif command == "LSF" and logged_in:
list_files_in_dir(logged_in, conn, dirs_location)
break
elif command == "DEL" and logged_in:
delete_dir(logged_in, conn, dirs_location)
break
else:
unexpected_command(conn)
except (BrokenPipeError, ConnectionResetError):
print("{}: connection closed\n".format(client[1]))
exit(0)
conn.close() # end of code
# Mask CTRL-C, handle SIGTERM (terminate, from father)
signal(SIGINT, SIG_IGN)
signal(SIGTERM, signal_handler)
while True:
client = tcp_socket.accept()
p_client = Process(target=deal_with_client, args=(client, valid_users, dirs_location, known_bs), daemon=True)
        p_client.start()
def authenticate_user(valid_users, conn):
""" Authenticates user, returns (user,pass) (AUT/AUR) """
username = read_bytes_until(conn, " ")
password = read_bytes_until(conn, "\n")
print("-> AUT {} {}".format(username, password))
res = (False, False)
status = "NOK"
if username not in valid_users:
valid_users[username] = password
backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
res = (username, password)
status = "NEW"
print("New user: {}".format(username))
elif valid_users[username] != password:
print("Password received does not match")
else:
res = (username, password)
status = "OK"
print("User {} logged in sucessfully".format(username))
response = "AUR {}\n".format(status)
conn.sendall(response.encode())
return res
def delete_user(username, conn, dirs_location, valid_users):
print(">> DLU")
status = "NOK\n"
if username in [f[0] for f in dict(dirs_location)]:
print("There is still information stored for user\n")
else:
del valid_users[username]
backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
status = "OK\n"
print("User {} deleted sucessfully\n".format(username))
response = "DLR " + status
conn.sendall(response.encode())
def backup_dir(username, conn, known_bs, password, dirs_location):
flag = 0
folder = read_bytes_until(conn, " ")
nr_user_files = int(read_bytes_until(conn, " "))
print(">> BCK {} {}".format(folder, str(nr_user_files)))
user_dict = {} # {"filename": [date, time, size]}
bs_dict = {} # {"filename": [date, time, size]}
string_of_files = ""
registered_in_bs = 0
files_user = read_bytes_until(conn, "\n").split()
for i in range(nr_user_files):
filename = files_user[4*i]
date = files_user[4*i+1]
time = files_user[4*i+2]
size = files_user[4*i+3]
user_dict[filename] = [date, time, size]
string_of_files += " {} {} {} {}".format(filename, date, time, size)
if (username, folder) in dirs_location:
flag = 1
ip_bs = dirs_location[(username, folder)][0]
port_bs = dirs_location[(username, folder)][1]
print("BCK {} {} {} {}".format(username, folder, ip_bs, port_bs))
bs_socket = udp_client(ip_bs, int(port_bs))
bs_socket.sendall("LSF {} {}\n".format(username, folder).encode())
response = bs_socket.recv(2048).decode().split()
bs_socket.close()
command = response[0]
if command != "LFD":
print("Error in command")
exit(0)
nr_bs_files = int(response[1])
for i in range(nr_bs_files):
filename = response[2 + 4*i]
date = response[2 + 4*i + 1]
time = response[2 + 4*i + 2]
size = response[2 + 4*i + 3]
bs_dict[filename] = [date, time, size]
final_string_of_files = ""
nr_files_final = 0
for user_file in user_dict:
for bs_file in bs_dict:
if user_file == bs_file and user_dict[user_file] != bs_dict[bs_file]:
final_string_of_files += " {} {} {} {}".format(user_file, bs_dict[user_file][0], bs_dict[user
|
uber/tchannel-python
|
tests/tornado/test_peer.py
|
Python
|
mit
| 12,004
| 0.000083
|
# encoding=utf8
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import mock
import pytest
from tornado import gen
from tchannel import TChannel
from tchannel.errors import NoAvailablePeerError
from tchannel.tornado import peer as tpeer
from tchannel.tornado.connection import TornadoConnection
from tchannel.tornado.peer import Peer
from tchannel.tornado.stream import InMemStream
from tchannel.tornado.stream import read_full
from six.moves import range
def closed_stream(body):
"""Builds an in-memory stream whose entire request body is the given string.
:param body:
Request body for the returned Stream
"""
stream = InMemStream(body)
stream.close()
return stream
def mocked_stream():
# An object that conforms to the stream interface but isn't an instance of
# Stream.
def reader():
if stream.read.call_count == 3:
return gen.maybe_future('')
else:
return gen.maybe_future('foo')
stream = mock.Mock()
stream.read.side_effect = reader
return stream
def test_basic_peer_management_operations():
peer_group = tpeer.PeerGroup(mock.MagicMock())
assert not peer_group.hosts
assert not peer_group.peers
assert not peer_group.lookup('localhost:4040')
p = peer_group.get('localhost:4040')
assert p
assert peer_group.lookup('localhost:4040') is p
assert peer_group.get('localhost:4040') is p
assert peer_group.remove('localhost:4040') is p
assert not peer_group.lookup('localhost:4040')
@pytest.mark.parametrize('s, expected', [
(None, b''),
('foo', b'foo'),
(u'☃', b'\xe2\x98\x83'),
(bytearray([0x12, 0x34]), b'\x12\x34'),
(closed_stream('foo'), b'foo'),
(mocked_stream(), b'foofoo')
])
@pytest.mark.gen_test
def test_maybe_stream(s, expected):
got = yield read_full(tpeer.maybe_stream(s))
assert expected == got
@pytest.mark.gen_test
def test_peer_connection_failure():
# Test connecting a peer when the first connection attempt fails.
MockConnection = mock.MagicMock()
connection = mock.MagicMock()
called = [False]
with mock.patch.object(tpeer.Peer, 'connection_class', MockConnection):
@gen.coroutine
def try_connect(*args, **kwargs):
if not called[0]:
# If this is the first call, fail.
called[0] = True
raise ZeroDivisionError('great sadness')
else:
raise gen.Return(connection)
MockConnection.outgoing.side_effect = try_connect
peer = tpeer.Peer(mock.MagicMock(), 'localhost:4040')
future = peer.connect()
with pytest.raises(ZeroDivisionError) as excinfo:
yield future
assert 'great sadness' in str(excinfo)
got = yield peer.connect()
assert got is connection
        assert MockConnection.outgoing.call_count == 2
@pytest.mark.gen_test
def test_peer_connection_network_failure():
# Network errors in connecting to a peer must be retried with a different
# peer.
healthy = TChannel('healthy-server')
healthy.listen()
    unhealthy = TChannel('unhealthy-server')
unhealthy.listen()
# register the endpoint on the healthy host only to ensure that the
# request wasn't made to the unhealthy one.
@healthy.raw.register('hello')
def endpoint(request):
return 'world'
known_peers = [healthy.hostport, unhealthy.hostport]
client = TChannel('client', known_peers=known_peers)
with mock.patch.object(tpeer.PeerGroup, 'choose') as mock_choose:
def fake_choose(*args, **kwargs):
if mock_choose.call_count == 1:
# First choose the unhealthy host.
hostport = unhealthy.hostport
else:
hostport = healthy.hostport
# TODO need access to peers in new TChannel
return client._dep_tchannel.peers.get(hostport)
mock_choose.side_effect = fake_choose
# TODO New TChannel doesn't have close() and old one doesn't call
# stop() on server.
unhealthy._dep_tchannel._server.stop()
resp = yield client.raw('server', 'hello', 'foo')
assert resp.body == b'world'
@pytest.mark.gen_test
def test_peer_connection_failure_exhausted_peers():
# If we run out of healthy peers while trying to connect, raise
# NoAvailablePeerError.
servers = [TChannel('server-%d' % n) for n in range(10)]
for server in servers:
server.listen()
known_peers = [server.hostport for server in servers]
client = TChannel('client', known_peers=known_peers)
for server in servers:
# TODO New TChannel doesn't have close() and old one doesn't call
# stop() on server.
server._dep_tchannel._server.stop()
with pytest.raises(NoAvailablePeerError):
yield client.raw('server', 'hello', 'foo')
@pytest.mark.gen_test
def test_peer_incoming_connections_are_preferred(request):
incoming = mock.MagicMock()
incoming.closed = False
outgoing = mock.MagicMock()
outgoing.closed = False
peer = tpeer.Peer(mock.MagicMock(), 'localhost:4040')
with mock.patch(
'tchannel.tornado.connection.StreamConnection.outgoing'
) as mock_outgoing:
mock_outgoing.return_value = gen.maybe_future(outgoing)
peer.connect()
assert (yield peer.connect()) is outgoing
peer.register_incoming_conn(incoming)
assert (yield peer.connect()) is incoming
@pytest.fixture
def peer():
return Peer(
tchannel=TChannel('peer'),
hostport='127.0.0.1:21300',
)
def test_on_conn_change(peer, connection):
c = [0]
def conn_change_db(peer):
c[0] += 1
peer.set_on_conn_change_callback(conn_change_db)
peer.register_incoming_conn(connection)
assert c[0] == 1
peer.register_outgoing_conn(connection)
assert c[0] == 2
@pytest.mark.gen_test
def test_outbound_pending_change():
server = TChannel('server')
server.listen()
connection = yield TornadoConnection.outgoing(server.hostport)
c = [0]
def outbound_pending_change_callback():
c[0] += 1
connection.set_outbound_pending_change_callback(
outbound_pending_change_callback
)
connection.add_pending_outbound()
assert c[0] == 1
connection.add_pending_outbound()
assert c[0] == 2
connection.remove_pending_outbound()
assert c[0] == 3
connection.remove_pending_outbound()
assert c[0] == 4
@pytest.mark.gen_test
def test_outbound_pending_change_propagate(peer):
server = TChannel('server')
server.listen()
connection = yield TornadoConnection.outgoing(server.hostport)
peer.register_incoming_conn(connection)
b = [0]
def conn_change_db(peer):
b[0] += 1
peer.set_on_conn_change_callback(conn_change_db)
connection.add_pending_outbound()
assert b[0] == 1
connection.add_pending_outbound()
assert b[0] == 2
connection.remove_pending_outbound()
assert b[0] == 3
connection.remove_pendi
|
ncbray/pystream
|
sandbox/learning/__init__.py
|
Python
|
apache-2.0
| 578
| 0.00173
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
bearpaw/pytorch-pose
|
pose/models/__init__.py
|
Python
|
gpl-3.0
| 80
| 0
|
from .hourglass import *
from .hourglass_gn import *
from .pose_resnet import *
|
VROOM-Project/vroom-scripts
|
benchmarks/compare_to_BKS.py
|
Python
|
bsd-2-clause
| 5,946
| 0.000841
|
# -*- coding: utf-8 -*-
import json
import numpy as np
import sys
# Compare a set of computed solutions to best known solutions on the
# same problems.
# See src/vrptw_to_json.py, src/pdptw_to_json.py and
# src/hvrp_to_json.py.
CUSTOM_PRECISION = 1000
BENCH_DOUBLE_PRECISION = 100
CUSTOM_PRECISION_CLASSES = [
"solomon",
"homberger",
"li_lim",
"VFMP_V",
"HFVRP",
"cordeau",
]
def uses_custom_precision(bench):
custom = False
for current_class in CUSTOM_PRECISION_CLASSES:
if current_class in bench:
custom = True
break
return custom
def s_round(v, d):
if d == 0:
return str(int(v))
else:
return str(round(v, d))
JOB_TYPES = ["job", "pickup", "delivery"]
def nb_jobs(solution):
jobs = 0
for r in solution["routes"]:
for s in r["steps"]:
if s["type"] in JOB_TYPES:
jobs += 1
return jobs
def log_comparisons(BKS, files):
print(
",".join(
[
"Instance",
"Jobs",
"Vehicles",
"tightness",
"Best known cost",
"Assigned jobs",
"Used vehicles",
"Solution cost",
"Unassigned jobs",
"Gap (%)",
"Computing time (ms)",
]
)
)
jobs = []
vehicles = []
assigned = []
unassigned = []
tightnesses = []
gaps = []
computing_times = []
assigned_jobs = 0
total_files = len(files)
job_ok_files = 0
optimal_sols = 0
for f in files:
instance = f[0 : f.rfind("_sol.json")]
instance = instance[instance.rfind("/") + 1 :]
if instance not in BKS and instance + "_distance" not in BKS:
total_files -= 1
continue
if instance + "_distance" in BKS:
# Specific entry for approach targeting distance as optimization
# objective.
indicators = BKS[instance + "_distance"]
else:
indicators = BKS[instance]
BK_cost = indicators["best_known_cost"]
bench = indicators["class"]
if uses_custom_precision(bench):
BK_cost = int(BENCH_DOUBLE_PRECISION * BK_cost)
nb_job = indicators["jobs"]
jobs.append(nb_job)
nb_vehicle = indicators["vehicles"]
vehicles.append(nb_vehicle)
if "capacity" in indicators:
total_capacity = nb_vehicle * indicators["capacity"]
else:
total_capacity = indicators["total_capacity"]
tightness = round(float(indicators["total_amount"]) / total_capacity, 3)
tightnesses.append(tightness)
line = [instance, nb_job, nb_vehicle, tightness, BK_cost]
with open(f, "r") as sol_file:
solution = json.load(sol_file)
if solution["code"] != 0:
continue
sol_jobs = nb_jobs(solution)
assigned.append(sol_jobs)
line.append(sol_jobs)
line.append(len(solution["routes"]))
cost = solution["summary"]["cost"]
if uses_custom_precision(bench):
cost = int(round(BENCH_DOUBLE_PRECISION * float(cost) / CUSTOM_PRECISION))
line.append(cost)
line.append(nb_job - sol_jobs)
unassigned.append(nb_job - sol_jobs)
if sol_jobs == nb_job:
job_ok_files += 1
gap = 100 * (float(cost) / BK_cost - 1)
line.append(round(gap, 2))
gaps.append(gap)
if cost <= BK_cost:
optimal_sols += 1
else:
line.append("")
computing_time = (
solution["summary"]["computing_times"]["loading"]
+ solution["summary"]["computing_times"]["solving"]
)
line.append(computing_time)
computing_times.append(computing_time)
print(",".join(map(lambda x: str(x), line)))
print(
"Average,"
+ s_round(np.mean(jobs), 1)
+ ","
+ s_round(np.mean(vehicles), 1)
+ ","
+ s_round(np.mean(tightnesses), 2)
+ ","
+ s_round(np.mean(assigned), 1)
+ ",,,,"
+ s_round(np.mean(unassigned), 1)
+ ","
+ s_round(np.mean(gaps), 2)
+ ","
+ s_round(np.mean(computing_times), 0)
)
total_jobs = np.sum(jobs)
assigned_jobs = np.sum(assigned)
print(",")
print("Total jobs," + s_round(total_jobs, 0))
print(
"Total jobs assigned,"
+ s_round(assigned_jobs, 0)
+ ","
+ s_round(100 * float(assigned_jobs) / total_jobs, 2)
+ "%"
)
print(",")
print("Instances," + s_round(total_files, 0))
print(
"All jobs solutions,"
+ s_round(job_ok_files, 0)
+ ","
+ s_round(100 * float(job_ok_files) / total_files, 2)
+ "%"
)
print(
"Optimal solutions,"
+ s_round(optimal_sols, 0)
+ ","
+ s_round(100 * float(optimal_sols) / total_files, 2)
+ "%"
)
# Percentiles
print(",")
gaps_percentiles = np.percentile(gaps, [0, 10, 25, 50, 75, 90, 100])
ct_percentiles = np.percentile(computing_times, [0, 10, 25, 50, 75, 90, 100])
print(",Gaps,Computing times")
titles = [
"Min",
"First decile",
"Lower quartile",
"Median",
"Upper quartile",
"Ninth decile",
"Max",
]
for i in range(len(titles)):
print(
titles[i]
+ ","
+ s_round(gaps_percentiles[i], 2)
+ ","
+ s_round(ct_percentiles[i], 0)
)
if __name__ == "__main__":
# First argument if the best known solution file.
with open(sys.argv[1], "r") as sol_file:
bks = json.load(sol_file)
# Remaining arguments are computed solution files to use.
log_comparisons(bks, sys.argv[2:])
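# Illustrative invocation (the script and file names here are placeholders):
#     python compare.py best_known_solutions.json results/*_sol.json
# The first argument is the best-known-solutions JSON; the rest are solution files.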
|
czpython/django-cms
|
cms/test_utils/project/pluginapp/plugins/revdesc/models.py
|
Python
|
bsd-3-clause
| 1,417
| 0.003529
|
# -*- coding: utf-8 -*-
from django.db import models
from cms.models import CMSPlugin
# sorry for the cryptic names. But we were hitting max lengths on Django 1.6
# and 1.7 with the too long names otherwise.
class UnalteredPM(CMSPlugin):
title = models.CharField(max_length=50)
search_fields = ['title']
class NoRelNmePM(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_name='+', parent_link=True)
title = models.CharField(max_length=50)
search_fields = ['title']
class NoRelQNmePM(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_query_name='+', parent_link=True)
title = models.CharField(max_length=50)
search_fields = ['title']
class CustomRelQNmePM(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_query_name='reldesc_custom_relqn', parent_link=True)
title = models.CharField(max_length=50)
search_fields = ['title']
class CustomRelNmePM(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_name='reldesc_custom_reln', parent_link=True)
title = models.CharField(max_length=50)
search_fields = ['title']
class CustomRelNmeAndRelQNmePM(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_name='reldesc_custom_reln2', related_query_name='reldesc_custom_relqn2', parent_link=True)
title = models.CharField(max_length=50)
search_fields = ['title']
|
cxxgtxy/tensorflow
|
tensorflow/python/lib/core/bfloat16_test.py
|
Python
|
apache-2.0
| 17,140
| 0.004959
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for the bfloat16 Python type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import math
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.core import _pywrap_bfloat16
from tensorflow.python.platform import test
bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()
def numpy_assert_allclose(a, b, **kwargs):
a = a.astype(np.float32) if a.dtype == bfloat16 else a
b = b.astype(np.float32) if b.dtype == bfloat16 else b
return np.testing.assert_allclose(a, b, **kwargs)
epsilon = float.fromhex("1.0p-7")
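# 2**-7 is the spacing between adjacent bfloat16 values just above 1.0 (the format
# stores 7 explicit mantissa bits), so 1.0 +/- epsilon round-trip exactly.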
# Values that should round trip exactly to float and back.
FLOAT_VALUES = [
0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon,
-1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0,
float("inf"),
float("-inf"),
float("nan")
]
class Bfloat16Test(parameterized.TestCase):
"""Tests the non-numpy Python methods of the bfloat16 type."""
def testRoundTripToFloat(self):
for v in FLOAT_VALUES:
np.testing.assert_equal(v, float(bfloat16(v)))
def testRoundTripNumpyTypes(self):
for dtype in [np.float16, np.float32, np.float64]:
np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75))))
np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5))))
np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype))))
np.testing.assert_equal(
np.array([2, 5, -1], bfloat16), bfloat16(np.array([2, 5, -1], dtype)))
def testRoundTripToInt(self):
for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]:
self.assertEqual(v, int(bfloat16(v)))
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(({
"testcase_name": "_" + dtype.__name__,
"dtype": dtype
} for dtype in [bfloat16, np.float16, np.float32, np.float64]))
def testRoundTripToNumpy(self, dtype):
for v in FLOAT_VALUES:
np.testing.assert_equal(v, bfloat16(dtype(v)))
np.testing.assert_equal(v, dtype(bfloat16(dtype(v))))
np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype))))
if dtype != bfloat16:
np.testing.assert_equal(
np.array(FLOAT_VALUES, dtype),
bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype))
def testStr(self):
self.assertEqual("0", str(bfloat16(0.0)))
self.assertEqual("1", str(bfloat16(1.0)))
self.assertEqual("-3.5", str(bfloat16(-3.5)))
self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", str(bfloat16(float("inf"))))
self.assertEqual("-inf", str(bfloat16(float("-inf"))))
self.assertEqual("nan", str(bfloat16(float("nan"))))
def testRepr(self):
self.assertEqual("0", repr(bfloat16(0)))
self.assertEqual("1", repr(bfloat16(1)))
self.assertEqual("-3.5", repr(bfloat16(-3.5)))
self.assertEqual("0.0078125", repr(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", repr(bfloat16(float("inf"))))
self.assertEqual("-inf", repr(bfloat16(float("-inf"))))
self.assertEqual("nan", repr(bfloat16(float("nan"))))
def testHash(self):
self.assertEqual(0, hash(bfloat16(0.0)))
self.assertEqual(0x3f80, hash(bfloat16(1.0)))
self.assertEqual(0x7fc0, hash(bfloat16(float("nan"))))
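    # These hash values are the raw bfloat16 bit patterns: 1.0 encodes as 0x3f80
    # and the canonical quiet NaN as 0x7fc0.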
# Tests for Python operations
def testNegate(self):
for v in FLOAT_VALUES:
np.testing.assert_equal(-v, float(-bfloat16(v)))
def testAdd(self):
np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0)))
np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0)))
np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1)))
np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5)))
np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))
# Test type promotion against Numpy scalar values.
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float16(2.25)))
self.assertEqual(np.float32, type(np.float16(3.5) + bfloat16(2.25)))
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float32(2.25)))
self.assertEqual(np.float32, type(np.float32(3.5) + bfloat16(2.25)))
self.assertEqual(np.float64, type(bfloat16(3.5) + np.float64(2.25)))
self.assertEqual(np.float64, type(np.float64(3.5) + bfloat16(2.25)))
self.assertEqual(np.float64, type(bfloat16(3.5) + float(2.25)))
self.assertEqual(np.float64, type(float(3.5) + bfloat16(2.25)))
self.assertEqual(np.float32,
type(bfloat16(3.5) + np.array(2.25, np.float32)))
self.assertEqual(np.float32,
type(np.array(3.5, np.float32) + bfloat16(2.25)))
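    # Summary of the promotions asserted above: bfloat16 combined with float16 or
    # float32 promotes to float32, while float64 and Python floats promote to float64.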
def testSub(self):
np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0)))
np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0)))
np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1)))
np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5)))
np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf"))))
np.testing.assert_equal(
float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf"))))
self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))
def testMul(self):
np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0)))
np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0)))
np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1)))
np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))
def testDiv(self):
self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0)))
np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1)))
np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))
def testLess(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v < w, bfloat16(v) < bfloat16(w))
def testLessEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))
def testGreater(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v > w, bfloat16(v) > bfloat16(w))
def testGreaterEqual(self):
    for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))
def
|
batpad/osmcha-django
|
osmchadjango/changeset/migrations/0002_auto_20150804_0119.py
|
Python
|
gpl-3.0
| 448
| 0.002232
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('changeset', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='changeset',
name='reasons',
            field=models.ManyToManyField(related_name='changesets', to='changeset.SuspicionReasons'),
),
]
|
28ideas/quant-econ
|
quantecon/robustlq.py
|
Python
|
bsd-3-clause
| 8,434
| 0.002846
|
"""
Filename: robustlq.py
Authors: Chase Coleman, Spencer Lyon, Thomas Sargent, John Stachurski
Solves robust LQ control problems.
"""
from __future__ import division  # Remove for Python 3.x
import numpy as np
from lqcontrol import LQ
from quadsums import var_quadratic_sum
from numpy import dot, log, sqrt, identity, hstack, vstack, trace
from scipy.linalg import solve, inv, det, solve_discrete_lyapunov
class RBLQ:
"""
Provides methods for analysing infinite horizon robust LQ control
problems of the form
min_{u_t} sum_t beta^t {x_t' R x_t + u'_t Q u_t }
subject to
x_{t+1} = A x_t + B u_t + C w_{t+1}
and with model misspecification parameter theta.
"""
def __init__(self, Q, R, A, B, C, beta, theta):
"""
Sets up the robust control problem.
Parameters
==========
Q, R : array_like, dtype = float
The matrices R and Q from the objective function
A, B, C : array_like, dtype = float
The matrices A, B, and C from the state space system
beta, theta : scalar, float
The discount and robustness factors in the robust control problem
We assume that
* R is n x n, symmetric and nonnegative definite
* Q is k x k, symmetric and positive definite
* A is n x n
* B is n x k
* C is n x j
"""
# == Make sure all matrices can be treated as 2D arrays == #
A, B, C, Q, R = map(np.atleast_2d, (A, B, C, Q, R))
self.A, self.B, self.C, self.Q, self.R = A, B, C, Q, R
# == Record dimensions == #
self.k = self.Q.shape[0]
self.n = self.R.shape[0]
self.j = self.C.shape[1]
# == Remaining parameters == #
self.beta, self.theta = beta, theta
def d_operator(self, P):
"""
The D operator, mapping P into
D(P) := P + PC(theta I - C'PC)^{-1} C'P.
Parameters
==========
P : array_like
A self.n x self.n array
"""
C, theta = self.C, self.theta
I = np.identity(self.j)
S1 = dot(P, C)
S2 = dot(C.T, S1)
return P + dot(S1, solve(theta * I - S2, S1.T))
def b_operator(self, P):
"""
The B operator, mapping P into
B(P) := R - beta^2 A'PB (Q + beta B'PB)^{-1} B'PA + beta A'PA
and also returning
F := (Q + beta B'PB)^{-1} beta B'PA
Parameters
==========
P : array_like
An self.n x self.n array
"""
A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
S1 = Q + beta * dot(B.T, dot(P, B))
S2 = beta * dot(B.T, dot(P, A))
S3 = beta * dot(A.T, dot(P, A))
F = solve(S1, S2)
new_P = R - dot(S2.T, solve(S1, S2)) + S3
return F, new_P
def robust_rule(self):
"""
This method solves the robust control problem by tricking it into a
stacked LQ problem, as described in chapter 2 of Hansen-Sargent's
text "Robustness." The optimal control with observed state is
u_t = - F x_t
And the value function is -x'Px
Returns
=======
F : array_like, dtype = float
            The optimal control matrix from above
P : array_like, dtype = float
            The positive semi-definite matrix defining the value function
K : array_like, dtype = float
the worst-case shock matrix K, where :math:`w_{t+1} = K x_t` is
the worst case shock
"""
# == Simplify names == #
A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R
beta, theta = self.beta, self.theta
k, j = self.k, self.j
# == Set up LQ version == #
I = identity(j)
Z = np.zeros((k, j))
Ba = hstack([B, C])
Qa = vstack([hstack([Q, Z]), hstack([Z.T, -beta*I*theta])])
lq = LQ(Qa, R, A, Ba, beta=beta)
# == Solve and convert back to robust problem == #
P, f, d = lq.stationary_values()
F = f[:k, :]
K = -f[k:f.shape[0], :]
return F, K, P
def robust_rule_simple(self, P_init=None, max_iter=80, tol=1e-8):
"""
A simple algorithm for computing the robust policy F and the
corresponding value function P, based around straightforward
iteration with the robust Bellman operator. This function is easier
to understand but one or two orders of magnitude slower than
self.robust_rule(). For more information see the docstring of that
method.
"""
# == Simplify names == #
A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R
beta, theta = self.beta, self.theta
# == Set up loop == #
        P = np.zeros((self.n, self.n)) if P_init is None else P_init
iterate, e = 0, tol + 1
while iterate < max_iter and e > tol:
F, new_P = self.b_operator(self.d_operator(P))
e = np.sqrt(np.sum((new_P - P)**2))
iterate += 1
P = new_P
I = np.identity(self.j)
S1 = P.dot(C)
S2 = C.T.dot(S1)
K = inv(theta * I - S2).dot(S1.T).dot(A - B.dot(F))
return F, K, P
def F_to_K(self, F):
"""
Compute agent 2's best cost-minimizing response K, given F.
Parameters
==========
        F : array_like
            A self.k x self.n array
        Returns
=======
K : array_like, dtype = float
P : array_like, dtype = float
"""
Q2 = self.beta * self.theta
R2 = - self.R - dot(F.T, dot(self.Q, F))
A2 = self.A - dot(self.B, F)
B2 = self.C
lq = LQ(Q2, R2, A2, B2, beta=self.beta)
P, neg_K, d = lq.stationary_values()
return - neg_K, P
def K_to_F(self, K):
"""
Compute agent 1's best value-maximizing response F, given K.
Parameters
==========
K : array_like
A self.j x self.n array
Returns
=======
F : array_like, dtype = float
P : array_like, dtype = float
"""
A1 = self.A + dot(self.C, K)
B1 = self.B
Q1 = self.Q
R1 = self.R - self.beta * self.theta * dot(K.T, K)
lq = LQ(Q1, R1, A1, B1, beta=self.beta)
P, F, d = lq.stationary_values()
return F, P
def compute_deterministic_entropy(self, F, K, x0):
"""
Given K and F, compute the value of deterministic entropy, which is
sum_t beta^t x_t' K'K x_t with x_{t+1} = (A - BF + CK) x_t.
"""
H0 = dot(K.T, K)
C0 = np.zeros((self.n, 1))
A0 = self.A - dot(self.B, F) + dot(self.C, K)
e = var_quadratic_sum(A0, C0, H0, self.beta, x0)
return e
def evaluate_F(self, F):
"""
Given a fixed policy F, with the interpretation u = -F x, this
function computes the matrix P_F and constant d_F associated with
discounted cost J_F(x) = x' P_F x + d_F.
Parameters
==========
F : array_like
A self.k x self.n array
Returns
=======
P_F : array_like, dtype = float
Matrix for discounted cost
d_F : scalar
Constant for discounted cost
K_F : array_like, dtype = float
Worst case policy
O_F : array_like, dtype = float
Matrix for discounted entropy
o_F : scalar
Constant for discounted entropy
"""
# == Simplify names == #
Q, R, A, B, C = self.Q, self.R, self.A, self.B, self.C
beta, theta = self.beta, self.theta
# == Solve for policies and costs using agent 2's problem == #
K_F, neg_P_F = self.F_to_K(F)
P_F = - neg_P_F
I = np.identity(self.j)
H = inv(I - C.T.dot(P_F.dot(C)) / theta)
d_F = log(det(H))
# == Compute O_F and o_F == #
sig = -1.0 / theta
AO = sqrt(beta) * (A - dot(B, F) +
|
xiandiancloud/edxplaltfom-xusong
|
lms/djangoapps/instructor/views/legacy.py
|
Python
|
agpl-3.0
| 82,769
| 0.003383
|
"""
Instructor Views
"""
## NOTE: This is the code for the legacy instructor dashboard
## We are no longer supporting this file or accepting changes into it.
from contextlib import contextmanager
import csv
import json
import logging
import os
import re
import requests
from collections import defaultdict, OrderedDict
from markupsafe import escape
from requests.status_codes import codes
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.utils import timezone
from xmodule_modifiers import wrap_xblock, request_token
import xmodule.graders as xmgraders
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.html_module import HtmlDescriptor
from opaque_keys import InvalidKeyError
from lms.lib.xblock.runtime import quote_slashes
from submissions import api as sub_api # installed from the edx-submissions repository
from bulk_email.models import CourseEmail, CourseAuthorization
from courseware import grades
from courseware.access import has_access
from courseware.courses import get_course_with_access, get_cms_course_link
from student.roles import (
CourseStaffRole, CourseInstructorRole, CourseBetaTesterRole, GlobalStaff
)
from courseware.models import StudentModule
from django_comment_common.models import (
Role, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA
)
from django_comment_client.utils import has_forum_access
from instructor.offline_gradecalc import student_grades, offline_grades_available
from instructor.views.tools import strip_if_string, bulk_email_is_enabled_for_course
from instructor_task.api import (
get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_bulk_course_email
)
from instructor_task.views import get_task_completion_info
from edxmako.shortcuts import render_to_response, render_to_string
from class_dashboard import dashboard_data
from psychometrics import psychoanalyze
from student.models import (
CourseEnrollment,
CourseEnrollmentAllowed,
unique_id_for_user,
anonymous_id_for_user
)
import track.views
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from django.utils.translation import ugettext as _
from microsite_configuration import microsite
from opaque_keys.edx.locations import i4xEncoder
log = logging.getLogger(__name__)
# internal commands for managing forum roles:
FORUM_ROLE_ADD = 'add'
FORUM_ROLE_REMOVE = 'remove'
# For determining if a shibboleth course
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
def split_by_comma_and_whitespace(a_str):
"""
Return string a_str, split by , or whitespace
"""
return re.split(r'[\s,]', a_str)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard(request, course_id):
"""Display the instructor dashboard for a course."""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key, depth=None)
instructor_access = has_access(request.user, 'instructor', course) # an instructor can manage staff lists
forum_admin_access = has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR)
msg = ''
email_msg = ''
email_to_option = None
email_subject = None
html_message = ''
show_email_tab = False
problems = []
plots = []
datatable = {}
# the instructor dashboard page is modal: grades, psychometrics, admin
# keep that state in request.session (defaults to grades mode)
idash_mode = request.POST.get('idash_mode', '')
idash_mode_key = u'idash_mode:{0}'.format(course_id)
if idash_mode:
request.session[idash_mode_key] = idash_mode
else:
idash_mode = request.session.get(idash_mode_key, 'Grades')
enrollment_number = CourseEnrollment.num_enrolled_in(course_key)
# assemble some course statistics for output to instructor
def get_course_stats_table():
datatable = {
'header': ['Statistic', 'Value'],
'title': _('Course Statistics At A Glance'),
}
data = [['# Enrolled', enrollment_number]]
data += [['Date', timezone.now().isoformat()]]
data += compute_course_stats(course).items()
if request.user.is_staff:
for field in course.fields.values():
if getattr(field.scope, 'user', False):
continue
data.append([
field.name,
json.dumps(field.read_json(course), cls=i4xEncoder)
])
datatable['data'] = data
return datatable
def return_csv(func, datatable, file_pointer=None):
"""Outputs a CSV file from the contents of a datatable."""
if file_pointer is None:
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = (u'attachment; filename={0}'.format(func)).encode('utf-8')
else:
response = file_pointer
writer = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
encoded_row = [unicode(s).encode('utf-8') for s in datatable['header']]
writer.writerow(encoded_row)
for datarow in datatable['data']:
# 's' here may be an integer, float (eg score) or string (eg student name)
encoded_row = [
# If s is already a UTF-8 string, trying to make a unicode
# object out of it will fail unless we pass in an encoding to
# the constructor. But we can't do that across the board,
# because s is often a numeric type. So just do this.
s if isinstance(s, str) else unicode(s).encode('utf-8')
for s in datarow
]
writer.writerow(encoded_row)
return response
def get_student_from_identifier(unique_student_identifier):
"""Gets a student object using either an email address or username"""
unique_student_identifier = strip_if_string(unique_student_identifier)
msg = ""
try:
if "@" in unique_student_identifier:
student = User.objects.get(email=unique_student_identifier)
else:
student = User.objects.get(username=unique_student_identifier)
msg += _("Found a single student. ")
except User.DoesNotExist:
student = None
msg += "<font color='red'>{text}</font>".format(
text=_("Couldn't find student with that email or username.")
)
return msg, student
# process actions from form POST
action = request.POST.get('action', '')
use_offline = request.POST.get('use_offline_grades', False)
if settings.FEATURES['ENABLE_MANUAL_GIT_RELOAD']:
if 'GIT pull' in action:
data_dir = course.data_dir
log.debug('git pull {0}'.format(data_dir))
gdir = settings.DATA_DIR / data_dir
if not os.path.exists(gdir):
msg += "====> ERROR in gitreload - no such directory {0}".format(gdir)
else:
cmd = "cd {0}; git reset --hard HEAD; git clean -f -d; git pull origin; chmod g+w course.xml".format(gdir)
msg += "git pull on {0}:<p>".format(data_dir)
msg += "<pre>{0}</pre></p>".format(escape(os.popen(cmd).read()))
track.views.server_track(request, "git-pull", {"directory": data_dir}, page="idashboard")
if 'Reload course' in action:
log.debug('reloading {0} ({1})'.format(c
|
jorsea/vertical-ngo
|
logistic_order_requisition_donation/__openerp__.py
|
Python
|
agpl-3.0
| 1,324
| 0
|
# -*- coding: utf-8 -*-
#
#
# Copyright 2014 Camptocamp SA
#    Author: Yannick Vaucher
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Link 'Logistics Order - Donation' and 'Logistics Requisition'",
"summary": "Adapt views and fields",
"version": "0.1",
"author": "Camptocamp,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Purchase Management",
'complexity': "normal",
"images": [],
"website": "http://www.camptocamp.com",
"depends": ["logistic_order_donation",
"logistic_requisition",
],
"demo": [],
"data": ['view/sale_order.xml',
],
"test": [],
'installable': True,
"auto_install": True,
}
|
dios-game/dios-cocos
|
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_update.py
|
Python
|
mit
| 934
| 0.001071
|
import cocos
from MultiLanguage import MultiLanguage
from package.helper import ProjectHelper
class FrameworkUpdate(cocos.CCPlugin):
@staticmethod
def plugin_name():
return "update-framework"
@staticmethod
    def brief_description():
return MultiLanguage.get_string('FRAMEWORK_UPDATE_BRIEF')
# parse arguments
def parse_args(self, argv):
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("name", metavar="NAME", help=MultiLanguage.get_string('FRAMEWORK_UPDATE_ARG_NAME'))
return parser.parse_args(argv)
def run(self, argv):
args = self.parse_args(argv)
name = args.name
project = ProjectHelper.get_current_project()
ProjectHelper.update_framework(project, name)
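# Typical command-line use of this plugin (the framework name is illustrative):
#     cocos update-framework <framework-name>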
|
mclois/iteexe
|
nevow/appserver.py
|
Python
|
gpl-2.0
| 13,064
| 0.002526
|
# -*- test-case-name: nevow.test.test_appserver -*-
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
"""A web application server built using twisted.web
"""
import cgi
from copy import copy
from urllib import unquote
from types import StringType
import warnings
from twisted.web import server
from twisted.web import resource
from twisted.web import util as webutil
from twisted.protocols import http
from twisted.python import log
from twisted.python import failure
from twisted.internet import defer
from twisted.application import service
from nevow import compy
from nevow import context
from nevow import inevow
from nevow import events
from nevow import url
from nevow import util
from nevow import flat
from nevow import stan
class UninformativeExceptionHandler:
__implements__ = inevow.ICanHandleException
def renderHTTP_exception(self, ctx, reason):
request = inevow.IRequest(ctx)
log.err(reason)
request.write("<html><head><title>Internal Server Error</title></head>")
request.write("<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. To see a more detailed error message, enable tracebacks in the configuration.</body></html>")
server.Request.finish(request)
def renderInlineException(self, request, reason):
log.err(reason)
return """<div style="border: 1px dashed red; color: red; clear: both">[[ERROR]]</div>"""
class DefaultExceptionHandler:
__implements__ = inevow.ICanHandleException
def renderHTTP_exception(self, ctx, reason):
request = inevow.IRequest(ctx)
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
request.write("<html><head><title>Exception</title></head><body>")
from nevow import failure
result = failure.formatFailure(reason)
request.write(''.join(flat.flatten(result)))
request.write("</body></html>")
server.Request.finish(request)
def renderInlineException(self, context, reason):
from nevow import failure
formatted = failure.formatFailure(reason)
        desc = str(reason)
return flat.serialize([
stan.xml("""<div style="border: 1px dashed red; color: red; clear: both" onclick="this.childNodes[1].style.display = this.childNodes[1].style.display == 'none' ? 'block': 'none'">"""),
desc,
stan.xml('<div style="display: none">'),
formatted,
stan.xml('</div></div>')
], context)
errorMarker = object()
def processingFailed(reason, request, ctx):
try:
handler = inevow.ICanHandleException(ctx)
handler.renderHTTP_exception(ctx, reason)
except Exception, e:
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
log.msg("Exception rendering error page:", isErr=1)
log.err(e)
log.err("Original exception:", isErr=1)
log.err(reason)
request.write("<html><head><title>Internal Server Error</title></head>")
request.write("<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. Additionally, an error occured rendering the error page.</body></html>")
server.Request.finish(request)
return errorMarker
def defaultExceptionHandlerFactory(ctx):
return DefaultExceptionHandler()
class NevowRequest(compy.Componentized, server.Request):
"""A Request subclass which does additional
processing if a form was POSTed. When a form is POSTed,
we create a cgi.FieldStorage instance using the data posted,
and set it as the request.fields attribute. This way, we can
get at information about filenames and mime-types of
files that were posted.
TODO: cgi.FieldStorage blocks while decoding the MIME.
Rewrite it to do the work in chunks, yielding from time to
time.
"""
__implements__ = inevow.IRequest,
def __init__(self, *args, **kw):
server.Request.__init__(self, *args, **kw)
compy.Componentized.__init__(self)
def process(self):
# extra request parsing
if self.method == 'POST':
t = self.content.tell()
self.content.seek(0)
self.fields = cgi.FieldStorage(self.content, self.received_headers,
environ={'REQUEST_METHOD': 'POST'})
self.content.seek(t)
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader('server', server.version)
self.setHeader('date', server.http.datetimeToString())
self.setHeader('content-type', "text/html; charset=UTF-8")
# Resource Identification
self.prepath = []
self.postpath = map(unquote, self.path[1:].split('/'))
self.sitepath = []
self.deferred = defer.Deferred()
requestContext = context.RequestContext(parent=self.site.context, tag=self)
requestContext.remember( (), inevow.ICurrentSegments)
requestContext.remember(tuple(self.postpath), inevow.IRemainingSegments)
return self.site.getPageContextForRequestContext(
requestContext
).addErrback(
processingFailed, self, requestContext
).addCallback(
self.gotPageContext
)
def gotPageContext(self, pageContext):
if pageContext is not errorMarker:
return defer.maybeDeferred(
pageContext.tag.renderHTTP, pageContext
).addErrback(
processingFailed, self, pageContext
).addCallback(
self._cbFinishRender, pageContext
)
def finish(self):
self.deferred.callback("")
def _cbFinishRender(self, html, ctx):
if isinstance(html, str):
self.write(html)
server.Request.finish(self)
elif html is errorMarker:
## Error webpage has already been rendered and finish called
pass
else:
res = inevow.IResource(html, None)
if res is not None:
pageContext = context.PageContext(tag=res, parent=ctx)
return self.gotPageContext(pageContext)
else:
print "html is not a string: %s on %s" % (str(html), ctx.tag)
server.Request.finish(self)
return html
session = None
def getSession(self, sessionInterface=None):
if self.session is not None:
self.session.touch()
if sessionInterface:
return sessionInterface(self.session)
return self.session
        ## temporary until things settle down with the new sessions
return server.Request.getSession(self, sessionInterface)
def URLPath(self):
return url.URL.fromContext(self)
def rememberRootURL(self, url=None):
"""
Remember the currently-processed part of the URL for later
recalling.
"""
if url is None:
return server.Request.rememberRootURL(self)
else:
self.appRootURL = url
def sessionFactory(ctx):
"""Given a RequestContext instance with a Request as .tag, return a session
"""
return ctx.tag.getSession()
requestFactory = lambda ctx: ctx.tag
class NevowSite(server.Site):
requestFactory = NevowRequest
def __init__(self, *args, **kwargs):
server.Site.__init__(self, *args, **kwargs)
self.context = context.SiteContext()
def remember(self, obj, inter=None):
"""Remember the given object for the given interfaces (or all interfaces
obj implements) in the site's context.
The site context is the parent of all other contexts. Anything
remembered here will be available throughout the site.
"""
self.context.remember(obj, inter)
def getPageContextForRequestContext(self, ctx):
"""Retrieve a resource from this site for a particular request. The
resource will be wrapped in a PageContext which keeps track
of how the resource was located.
"""
path = inevow.IRemainingSegments(ctx)
|
BjoernSch/WLANThermo_v2
|
software/usr/sbin/wlt_2_nextion.py
|
Python
|
gpl-3.0
| 53,246
| 0.006524
|
#!/usr/bin/python
# coding=utf-8
# Copyright (c) 2015, 2016 Björn Schrader
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import ConfigParser
import os
import time
import logging
import pyinotify
import serial
import subprocess
import threading
import re
import string
import signal
import Queue
from struct import *
NX_lf = '\xff\xff\xff'
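# A Nextion message is terminated by three 0xFF bytes; NX_reader below scans for
# this terminator when assembling incoming messages.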
NX_channel = 0
NX_page = 0
version = '0.18'
temps = dict()
channels = dict()
pitmaster = dict()
pitconf = dict()
# Events are sent asynchronously by the display
NX_eventq = Queue.Queue()
# Return values are delivered on request
NX_returnq = Queue.Queue()
# Lock for writing to the config file
configfile_lock = threading.Lock()
# New temperature readings
temps_event = threading.Event()
# New channel configuration (= changed config file)
channels_event = threading.Event()
# New pitmaster events
pitmaster_event = threading.Event()
# New pitmaster configuration (= changed config file)
pitconf_event = threading.Event()
# Event for waking up from sleep mode (= changed config file)
NX_wake_event = threading.Event()
# The process has been asked to stop
stop_event = threading.Event()
# Read the configuration file
configdefaults = {'dim' : '90',
'timeout': '30',
'serialdevice': '/dev/ttyAMA0',
'serialspeed': '115200'}
configfile = '/var/www/conf/WLANThermo.conf'
Config = ConfigParser.SafeConfigParser(configdefaults)
# We run as root, but others must be able to write the config as well!
os.umask (0)
for i in range(0,5):
while True:
try:
Config.read(configfile)
except IndexError:
            # We cannot wait on an event here yet, because we still need the other paths from the config
            # Logging is not available yet either (we need the log file first), so write to stderr instead
sys.stderr.write('Warte auf Konfigurationsdatei')
time.sleep(1)
continue
break
# Initialize logging
LOGFILE = Config.get('daemon_logging', 'log_file')
logger = logging.getLogger('WLANthermoNEXTION')
#Define Logging Level by changing >logger.setLevel(logging.LEVEL_YOU_WANT)< available: DEBUG, INFO, WARNING, ERROR, CRITICAL
log_level = Config.get('daemon_logging', 'level_DISPLAY')
if log_level == 'DEBUG':
logger.setLevel(logging.DEBUG)
if log_level == 'INFO':
logger.setLevel(logging.INFO)
if log_level == 'ERROR':
logger.setLevel(logging.ERROR)
if log_level == 'WARNING':
logger.setLevel(logging.WARNING)
if log_level == 'CRITICAL':
logger.setLevel(logging.CRITICAL)
handler = logging.FileHandler(LOGFILE)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.captureWarnings(True)
# Read the paths of the handover files
curPath, curFile = os.path.split(Config.get('filepath','current_temp'))
pitPath, pitFile = os.path.split(Config.get('filepath','pitmaster'))
confPath, confFile = os.path.split(configfile)
# If the display directory on the RAM drive does not exist, create it
if not os.path.exists(curPath):
os.makedirs(curPath)
class FileEvent(pyinotify.ProcessEvent):
def process_IN_CLOSE_WRITE(self, event):
global temps, channels, pitmaster, pitconf, Config, configfile
global temps_event, channels_event, pitmaster_event, pitconf_event, logger
logger.debug("IN_CLOSE_WRITE: %s " % os.path.join(event.path, event.name))
if event.path == curPath and event.name == curFile:
logger.debug('Neue Temperaturwerte vorhanden')
temps_event.set()
elif event.path == confPath and event.name == confFile:
logger.debug('Neue Konfiguration vorhanden')
channels_event.set()
pitconf_event.set()
elif event.path == pitPath and event.name == pitFile:
logger.debug('Neue Pitmasterdaten vorhanden')
pitmaster_event.set()
def process_IN_MOVED_TO(self, event):
global temps, channels, pitmaster, pitconf, Config, configfile
global temps_event, channels_event, pitmaster_event, pitconf_event, logger
logger.debug("IN_MOVED_TO: %s " % os.path.join(event.path, event.name))
if event.path == curPath and event.name == curFile:
logger.debug('Neue Temperaturwerte vorhanden')
temps_event.set()
elif event.path == confPath and event.name == confFile:
logger.debug('Neue Konfiguration vorhanden')
channels_event.set()
pitconf_event.set()
elif event.path == pitPath and event.name == pitFile:
logger.debug('Neue Pitmasterdaten vorhanden')
pitmaster_event.set()
def NX_reader():
global logger, ser, NX_returns, NX_events, stop_event, NX_wake_event
logger.info('Reader-Thread gestartet')
    # Set a timeout so that the thread can be stopped
    ser.timeout = 0.1
    # Endless loop; exits when a stop_event is set
while not stop_event.is_set():
is_return = False
endcount = 0
bytecount = 0
message = {'raw' : '', 'iserr' : False, 'errmsg' : '', 'data' : {}, 'type': ''}
while (endcount != 3):
byte = ser.read()
if byte != '':
                # No timeout
bytecount += 1
message['raw'] += byte[0]
if (byte[0] == '\xff'):
endcount += 1
else:
endcount = 0
else:
                # Timeout - should we stop?
if stop_event.is_set():
break
if stop_event.is_set():
break
        elif (message['raw'][0] == '\x00'):
message['type'] = 'inv_instr'
message['iserr'] = True
message['errmsg'] = 'Invalid instruction'
is_return = True
elif (message['raw'][0] == '\x01'):
message['type'] = 'ok'
message['errmsg'] = 'Successful execution of instruction'
is_return = True
        elif (message['raw'][0] == '\x03'):
message['type'] = 'inv_pageid'
message['iserr'] = True
message['errmsg'] = 'Page ID invalid'
is_return = True
elif (message['raw'][0] == '\x04'):
message['type'] = 'inv_pictid'
message['iserr'] = True
message['errmsg'] = 'Picture ID invalid'
is_return = True
elif (message['raw'][0] == '\x05'):
message['type'] = 'inv_fontid'
message['iserr'] = True
message['errmsg'] = 'Font ID invalid'
is_return = True
elif (message['raw'][0] == '\x11'):
message['type'] = 'inv_baudrate'
message['iserr'] = True
message['errmsg'] = 'Baud rate setting invalid'
is_return = True
elif (message['raw'][0] == '\x12'):
message['type'] = 'inv_curve'
message['iserr'] = True
message['errmsg'] = 'Curve control ID number or channel number is invalid'
is_return = True
elif (message['raw'][0] == '\x1a'):
message['type'] = 'inv_varname'
message['iserr'] = True
message['errmsg'] = 'Variable name invalid '
is_return = True
elif (message['raw'][0] == '\x1B'):
message['type'] = 'inv_varop'
message['iserr'] = True
message['errmsg'] = 'Variable operation invalid'
is_return = True
elif (message['raw'][0] == '\x
|
reddymeghraj/showroom
|
erpnext/buying/doctype/item_info/test_item_info.py
|
Python
|
agpl-3.0
| 300
| 0.01
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Item Info')
class TestItemInfo(unittest.TestCase):
pass
|
GPContributors/PyGP
|
pygp/loadfile.py
|
Python
|
lgpl-3.0
| 19,068
| 0.007971
|
import zipfile
import os
import array
import collections
from struct import unpack
from ctypes import c_ubyte
import pygp.utils as utils
import pygp.constants as constants
import pygp.crypto as crypto
class Loadfile(object):
'''
    This class builds a LoadFile object from a disk-based file
    and provides basic getters.
'''
def __init__(self, filename):
'''
Builds a Loadfile Object with the filename as parameter
:param str filename : An absolute or relative path to a load File
'''
self.is_capfile = False
# dict of all components of a cap file
self.components = collections.OrderedDict()
# check if the filename is a relative path
if os.path.isabs(filename) == False:
# make it absolute starting from the current working directory
retval = os.path.join(os.curdir,filename)
self.loadfile_path = os.path.abspath(retval)
else:
# already absolute path, do nothing
self.loadfile_path = filename
# # analyze the loadfile
if zipfile.is_zipfile(self.loadfile_path):
# it is a CAP file
self.is_capfile = self.read_cap_format()
else:
# it is a ijc file
pass
self.is_capfile = self.read_ijc_format()
# check if it is a cap file
if self.is_capfile == False:
raise BaseException("The file %s is not a valid CAP File" %self.loadfile_path)
def get_load_blocks(self, blockSize, addHeader = True):
''' return a list of blockSize long data blocks '''
allBlocks = []
# build the complete datablock string
if addHeader== True:
completeCode = self.__createHeaderSize__() + self.get_raw_code()
else:
completeCode = self.get_raw_code()
# cut the string into blocksize long blocks
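        # (the code is kept as a hex string, two characters per byte, hence the
        # blockSize * 2 step below)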
for index in range(0, len(completeCode), blockSize*2):
allBlocks.append(completeCode[index:index + blockSize*2] )
return allBlocks
def __createHeaderSize__(self):
''' Returns the BERTLV string representing the CAP File length'''
headerSize = 'C4'
length = int(float(self.get_code_size()))
if length < 128:
return headerSize + utils.intToHexString(length)
elif (length < 256 ):
return headerSize + '81' + utils.intToHexString(length)
else:
return headerSize + '82' + utils.intToHexString(length, 2)
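    # The length above is encoded as a BER-TLV length field: one byte for values
    # below 128, '81' plus one byte up to 255, and '82' plus two bytes beyond that.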
def get_raw_code(self):
''' Returns the raw code of the load file as string (ie. All components excluding Descriptor and Debug Component)'''
rawCodeAsString = ''
#for component_name in self.components:
# if component_name != 'Descriptor' or component_name != 'Debug':
# rawCodeAsString = rawCodeAsString + self.components[component_name]
        # Sometimes a suncap file does not store its components in the expected order, which makes the
        # card return 6985 on load, so build the raw string component by component to prevent it.
if "Header" in self.components.keys():
rawCodeAsString += self.components["Header"]
if "Directory" in self.components.keys():
rawCodeAsString += self.components["Directory"]
if "Import" in self.components.keys():
rawCodeAsString += self.components["Import"]
if "Applet" in self.components.keys():
rawCodeAsString += self.components["Applet"]
if "Class" in self.components.keys():
rawCodeAsString += self.components["Class"]
if "Method" in self.components.keys():
rawCodeAsString += self.components["Method"]
if "Staticfield" in self.components.keys():
            rawCodeAsString += self.components["Staticfield"]
if "Export" in self.components.keys():
rawCodeAsString += self.components["Export"]
if "ConstantPool" in self.components.keys():
rawCodeAsString += self.components["ConstantPool"]
if "RefLocation" in self.components.keys():
rawCodeAsString += self.components["RefLocation"]
# Add this even optional component
if "Descriptor" in self.components.keys():
rawCodeAsString += self.components["Descriptor"]
return rawCodeAsString
def get_code_size(self):
''' returns the code size of the package '''
size = int(len(self.get_raw_code())/2)
return size
def get_estimate_size(self):
''' returns the estimate size of the package on card '''
size = 0x00
if "Applet" in self.components.keys():
applet_component_str = self.components["Applet"]
applet_comp_size = utils.getBytes(applet_component_str,2, 2)
size = size + int(applet_comp_size, 16)
size = size + int(len(self.get_aid())/2)
# class component
class_comp_size = utils.getBytes(self.components["Class"],2, 2)
size = size + int(class_comp_size, 16)
# method component
method_comp_size = utils.getBytes(self.components["Method"],2, 2)
size = size + int(method_comp_size, 16)
if "Export" in self.components.keys():
export_component_str = self.components["Applet"]
export_comp_size = utils.getBytes(export_component_str,2, 2)
size = size + int(export_comp_size, 16)
# static component
static_comp_size = utils.getBytes(self.components["Staticfield"],2, 2)
size = size + int(static_comp_size, 16)
return size
def get_name(self):
''' returns the load file name '''
return os.path.basename(self.loadfile_path)
def get_int_support(self):
''' returns True if the load file use integer '''
# look into the header component
header_component_str = self.components["Header"]
flag = utils.getBytes(header_component_str,10)
return (int(flag, 16) & 0x01) == 0x01
def get_jc_version(self):
''' returns the load file javacard version '''
# look into the header component
header_component_str = self.components["Header"]
# the minor and major version are byte 8 and 9 of the header component
minor_version = utils.getBytes(header_component_str,8)
major_version = utils.getBytes(header_component_str,9)
return major_version + "." + minor_version
def get_aid(self):
''' returns the load file AID '''
# look into the header component
header_component_str = self.components["Header"]
# the package AIDlength is byte 13 of the header component
aid_len = utils.getBytes(header_component_str,13)
aid = utils.getBytes(header_component_str,14, int(aid_len, 16))
return aid
def isAppletPresent(self):
''' Returns True if an application is present into load file '''
return "Applet" in self.components.keys()
def get_applet_aid(self):
''' returns the aid of tha application if any '''
# look into the applet component
applets_aid = []
if "Applet" in self.components.keys():
applet_component_str = self.components["Applet"]
applet_count = utils.getBytes(applet_component_str,4)
offset = 5
for i in range(0, int(applet_count, 16)):
aid_len = utils.getBytes(applet_component_str,offset)
offset = offset + 1
aid = utils.getBytes(applet_component_str,offset, int(aid_len, 16))
offset = offset + int(aid_len, 16)
applets_aid.append(aid)
# by pass install method offset
offset = offset + 2
return applets_aid
def get_version(self):
''' returns the load file version '''
# look into the header component
header_component_str = self.components["Header"]
# the minor and major version are byte 11 and 12 of the header component
minor_version = utils.getBytes(header_compon
|
ericmjl/reassortment-simulation-and-reconstruction
|
sequence.py
|
Python
|
mit
| 1,435
| 0.036934
|
"""
Author: Eric J. Ma
Affiliation: Massachusetts Institute of Technology
"""
from random import choice
from generate_id import generate_id
class Sequence(object):
"""
The Sequence object is the lowest level object in the pathogen simulator.
It provides a container for storing seed sequences for the pathogens present
in the environment.
This can be subclassed to store seed sequences for other pathogens, rather
than using a generated sequence.
Note that when a virus replicates, the full sequence object is not copied
for each of its segments; rather, each segment only keeps track of the
mutations that have happened.
"""
def __init__(self, length=1000, sequence=None, id=None):
"""
Initialize the sequence with a random sequence of length 1000 if
sequence is not specified.
Otherwise, initialize sequence with a sequence that is specified.
"""
        super(Sequence, self).__init__()
self.sequence = None
if sequence == None:
self.sequence = self.generate_sequence(length)
else:
self.sequence = sequence
if id == None:
self.id = generate_id()
else:
self.id = id
def __repr__(self):
return self.id
def generate_sequence(self, length):
"""
This method will generate a sequence, and set the Sequence object's
        sequence to that sequence.
"""
sequence = ''
for i in range(length):
letter = choice(['A', 'T', 'G', 'C'])
sequence += letter
return sequence
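# Minimal usage sketch (values are illustrative):
#     seq = Sequence(length=10)
#     print(seq.id, seq.sequence)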
|
sunilsm7/django_resto
|
profiles/migrations/0003_remove_profile_following.py
|
Python
|
mit
| 398
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-21 06:05
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('profiles', '0002_auto_20170721_1129'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='following',
),
]
|
jawilson/home-assistant
|
homeassistant/components/roomba/roomba.py
|
Python
|
apache-2.0
| 3,241
| 0
|
"""Class for Roomba devices."""
import logging
from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
_LOGGER = logging.getLogger(__name__)
ATTR_BIN_FULL = "bin_full"
ATTR_BIN_PRESENT = "bin_present"
FAN_SPEED_AUTOMATIC = "Automatic"
FAN_SPEED_ECO = "Eco"
FAN_SPEED_PERFORMANCE = "Performance"
FAN_SPEEDS = [FAN_SPEED_AUTOMATIC, FAN_SPEED_ECO, FAN_SPEED_PERFORMANCE]
# Only Roombas with Carpet Boost can set their fan speed
SUPPORT_ROOMBA_CARPET_BOOST = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
class RoombaVacuum(IRobotVacuum):
"""Basic Roomba robot (without carpet boost)."""
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
state_attrs = super().extra_state_attributes
# Get bin state
bin_raw_state = self.vacuum_state.get("bin", {})
bin_state = {}
if bin_raw_state.get("present") is not None:
bin_state[ATTR_BIN_PRESENT] = bin_raw_state.get("present")
if bin_raw_state.get("full") is not None:
bin_state[ATTR_BIN_FULL] = bin_raw_state.get("full")
state_attrs.update(bin_state)
return state_attrs
class RoombaVacuumCarpetBoost(RoombaVacuum):
"""Roomba robot with carpet boost."""
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_ROOMBA_CARPET_BOOST
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
fan_speed = None
carpet_boost = self.vacuum_state.get("carpetBoost")
high_perf = self.vacuum_state.get("vacHigh")
if carpet_boost is not None and high_perf is not None:
if carpet_boost:
fan_speed = FAN_SPEED_AUTOMATIC
elif high_perf:
fan_speed = FAN_SPEED_PERFORMANCE
else: # carpet_boost and high_perf are False
fan_speed = FAN_SPEED_ECO
        return fan_speed
@property
    def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return FAN_SPEEDS
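    # Mapping used below: Automatic -> carpetBoost on, Eco -> both flags off,
    # Performance -> vacHigh on; both flags are sent to the robot as string
    # preferences.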
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed.capitalize() in FAN_SPEEDS:
fan_speed = fan_speed.capitalize()
_LOGGER.debug("Set fan speed to: %s", fan_speed)
high_perf = None
carpet_boost = None
if fan_speed == FAN_SPEED_AUTOMATIC:
high_perf = False
carpet_boost = True
elif fan_speed == FAN_SPEED_ECO:
high_perf = False
carpet_boost = False
elif fan_speed == FAN_SPEED_PERFORMANCE:
high_perf = True
carpet_boost = False
else:
_LOGGER.error("No such fan speed available: %s", fan_speed)
return
# The set_preference method does only accept string values
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "carpetBoost", str(carpet_boost)
)
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "vacHigh", str(high_perf)
)
|
rwightman/pytorch-image-models
|
timm/models/densenet.py
|
Python
|
apache-2.0
| 15,611
| 0.003139
|
"""Pytorch Densenet implementation w/ tweaks
This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with
fixed kwargs passthrough and addition of dynamic global avg/max pool.
"""
import re
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch.jit.annotations import List
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import BatchNormAct2d, create_norm_act, BlurPool2d, create_classifier
from .registry import register_model
__all__ = ['DenseNet']
def _cfg(url=''):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'features.conv0', 'classifier': 'classifier',
}
default_cfgs = {
'densenet121': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'),
'densenet121d': _cfg(url=''),
'densenetblur121d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'),
'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'),
'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'),
'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'),
'densenet264': _cfg(url=''),
'densenet264d_iabn': _cfg(url=''),
'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'),
}
class DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d,
drop_rate=0., memory_efficient=False):
super(DenseLayer, self).__init__()
self.add_module('norm1', norm_layer(num_input_features)),
self.add_module('conv1', nn.Conv2d(
num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', norm_layer(bn_size * growth_rate)),
self.add_module('conv2', nn.Conv2d(
bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
def bottleneck_fn(self, xs):
# type: (List[torch.Tensor]) -> torch.Tensor
concated_features = torch.cat(xs, 1)
bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, x):
# type: (List[torch.Tensor]) -> bool
for tensor in x:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, x):
# type: (List[torch.Tensor]) -> torch.Tensor
def closure(*xs):
return self.bottleneck_fn(xs)
return cp.checkpoint(closure, *x)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (torch.Tensor)
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, x): # noqa: F811
if isinstance(x, torch.Tensor):
prev_features = [x]
else:
prev_features = x
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bottleneck_fn(prev_features)
new_features = self.conv2(self.norm2(bottleneck_output))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class DenseBlock(nn.ModuleDict):
_version = 2
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU,
drop_rate=0., memory_efficient=False):
super(DenseBlock, self).__init__()
for i in range(num_layers):
layer = DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
norm_layer=norm_layer,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class DenseTransition(nn.Sequential):
def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None):
super(DenseTransition, self).__init__()
self.add_module('norm', norm_layer(num_input_features))
self.add_module('conv', nn.Conv2d(
num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
if aa_layer is not None:
self.add_module('pool', aa_layer(num_output_features, stride=2))
else:
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='',
num_classes=1000, in_chans=3, global_pool='avg',
norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False,
                 aa_stem_only=True):
self.num_classes = num_classes
self.drop_rate = drop_rate
super(DenseNet, self).__init__()
# Stem
deep_stem = 'deep' in stem_type # 3x3 deep stem
num_init_features = growth_rate * 2
if aa_layer is None:
            stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
stem_pool = nn.Sequential(*[
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
aa_layer(channels=num_init_features, stride=2)])
if deep_stem:
stem_chs_1 = stem_chs_2 = growth_rate
if 'tiered' in stem_type:
stem_chs_1 = 3 * (growth_rate // 4)
stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4)
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)),
('norm0', norm_layer(stem_chs_1)),
('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)),
('norm1', norm_layer(stem_chs_2)),
('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stri
|
axmachado/simplepos
|
simplepos/codegen/boolean.py
|
Python
|
gpl-3.0
| 15,374
| 0.00013
|
# -*- coding: utf-8 -*-
"""
Copyright © 2017 - Alexandre Machado <axmachado@gmail.com>
This file is part of Simple POS Compiler.
Simnple POS Compiler is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.
Simple POS Compiler is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Simple POS Compiler. If not, see <http://www.gnu.org/licenses/>.
Boolean and logical expressions and conditions
@author: Alexandre Machado <axmachado@gmail.com>
"""
import logging
from .base import CodeGenerationError, quotedValue
from .variables import Assignment
from ..objfile import typedefs
# pylint: disable=C0103
# it's not a constant
logger = logging.getLogger("link")
# noinspection PyMethodMayBeStatic
class Conditional(object):
"""
    If and While - statements that require a conditional
evaluation before the main block
"""
def __init__(self):
super(Conditional, self).__init__()
def preConditionalCode(self, codeBlock, controlVariable=None):
# pylint: disable=W0613
"""
generate into the codeBlock the code to compute the conditional
before emitting the comparison
"""
pass
def emit(self):
# pylint: disable=R0201
" emit the comparison attributes to the conditional instruction "
return ""
def negate(self):
" Negate the expression, inverting it's logical value "
pass
def usesVariable(self, name):
"check if condition uses a variable"
return False
def assignVariable(self, name):
"check if condition assigns a value to a variable"
return False
class ConstantConditional(Conditional):
"""
    The use of an Integer or Logical constant as a condition.
Zero is considered false, any other value is true
"""
def __init__(self, value):
super(ConstantConditional, self).__init__()
self.value = value
def negate(self):
self.value = not self.value
class SimpleComparison(Conditional):
"""
Compare the value of a variable to any value
"""
# POSXML conditional operators constants
# pylint: disable=C0103
# those are really constants, but pylint thinks they are class attributes
LT = 'lessthan'
GT = 'greaterthan'
EQ = 'equalto'
NE = 'notequalto'
GE = 'greaterthanorequalto'
LE = 'lessthanorequalto'
def __init__(self, left, op, right):
super(SimpleComparison, self).__init__()
if op == '<':
self.operator = self.LT
elif op == '>':
self.operator = self.GT
elif op == '==':
self.operator = self.EQ
elif op == '!=':
self.operator = self.NE
elif op == '>=':
self.operator = self.GE
elif op == '<=':
self.operator = self.LE
else:
raise CodeGenerationError('Invalid operator "%s"', op)
self.originalOperator = self.operator
self._updateValues(left, right)
def _updateValues(self, vLeft, vRight):
"""
Update the values of the expression.
Used by subclasses and when the simple comparison is used inside
complex logical expression, when the values of the expression must
be evaluated before the comparison
"""
# in POSXML, the left side of a comparison must always be
# a variable. So, if the left side of our expression is not a
# variable, we must "invert" the expression
invert = False
if vLeft.startswith('$'):
self.variable = vLeft
self.value = vRight
else:
self.variable = vRight
self.value = vLeft
invert = True
self.operator = self.originalOperator
if invert:
if self.operator == self.LT:
self.operator = self.GT
elif self.operator == self.GT:
self.operator = self.LT
elif self.operator == self.GE:
self.operator = self.LE
elif self.operator == self.LE:
self.operator = self.GE
def negate(self):
"""
        Negate the result of the comparison
"""
if self.operator == self.LT:
self.operator = self.GE
elif self.operator == self.GT:
self.operator = self.LE
elif self.operator == self.EQ:
self.operator = self.NE
elif self.operator == self.NE:
self.operator = self.EQ
elif self.operator == self.GE:
self.operator = self.LT
elif self.operator == self.LE:
self.operator = self.GT
self.originalOperator = self.operator
def usesVariable(self, name):
varInExpression = '$(' + name + ')'
return (self.variable == varInExpression) \
or (varInExpression in self.value)
def emit(self):
"emits the attributes for the POSXML tag"
return 'variable="%s" operator="%s" value=%s' % \
(self.variable, self.operator, quotedValue(self.value))
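
# --- Editor's note: illustrative sketch, not part of the original module. ---
# SimpleComparison always keeps the variable on the left-hand side (POSXML requires
# it), flipping the relational operator when the source expression had the variable on
# the right, and negate() inverts the operator for else-branch generation. A small
# demonstration of both behaviours:
def _editor_demo_simple_comparison():
    cmp_ = SimpleComparison('10', '<', '$(total)')  # constant on the left...
    assert cmp_.variable == '$(total)'              # ...so the sides are swapped
    assert cmp_.operator == SimpleComparison.GT     # and '<' becomes 'greaterthan'
    cmp_.negate()
    assert cmp_.operator == SimpleComparison.LE     # negation of '>' is '<='
    return cmp_.emit()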
class LogicalValueContext(object):
"""
Context to process intermediate code generation for logical
expressions and values
"""
def __init__(self, codeBlock):
super(LogicalValueContext, self).__init__()
self.codeBlock = codeBlock
def procNegatedValue(self, value):
"""
Processes the negation of an expression value: !(expression)
"""
# import here to avoid cyclic import problems
from .control import IfStatement
valueToNegate = self.procValue(value.value)
if valueToNegate.startswith('$'):
# the value is a variable. It's necessary to produce
# an "if" statement
condition = SimpleComparison('$(%s)' % valueToNegate, '==', '0')
ifBlock = [Assignment(typedefs.INT, valueToNegate, '1')]
elseBlock = [Assignment(typedefs.INT, valueToNegate, '0')]
ifStm = IfStatement(self.codeBlock)
ifStm.selfGenerated(condition, ifBlock, elseBlock)
self.codeBlock.addStatements(ifStm)
return valueToNegate
else:
return '1' if int(valueToNegate) == 0 else '0'
def procRelationalExpression(self, value):
"processes a relational expression"
# import here to avoid cyclic import problems
from .control import IfStatement
exprResult = self.codeBlock.currentScope().autoInt()
leftVal = self.procValue(value.left)
rightVal = self.procValue(value.right)
conditional = SimpleComparison(leftVal, value.operator, rightVal)
        ifBlock = [Assignment(typedefs.INT, exprResult, '1')]
elseBlock = [Assignment(typedefs.INT, exprResult, '0')]
ifStm = IfStatement(self.codeBlock)
ifStm.selfGenerated(conditional, ifBlock, elseBlock)
self.codeBlock.addStatements(ifStm)
return "$
|
(%s)" % exprResult
def procLogicalExpression(self, value):
"processes a logical expression"
exprResult = self.codeBlock.currentScope().autoInt()
logExpr = LogicalExpr(value.left, value.operator, value.right)
logExpr.preConditionalCode(self.codeBlock, exprResult)
return "$(%s)" % exprResult
def procValue(self, value):
"processes a logical value, generating intermediate code"
result = None
if not isinstance(value, typedefs.Value):
result = str(value)
elif isinstance(value, typedefs.Constant):
result = str(value)
elif isinstance(value, typedefs.VarValue):
result = value.value
elif isinstance(value, typedefs.FunctionReturnV
|
lukas-hetzenecker/home-assistant
|
tests/components/dexcom/test_config_flow.py
|
Python
|
apache-2.0
| 4,420
| 0
|
"""Test the Dexcom config flow."""
from unittest.mock import patch
from pydexcom import AccountError, SessionError
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.dexcom.const import DOMAIN, MG_DL, MMOL_L
from homeassistant.const import CONF_UNIT_OF_MEASUREMENT, CONF_USERNAME
from tests.common import MockConfigEntry
from tests.components.dexcom import CONFIG
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom.create_session",
return_value="test_session_id",
), patch(
"homeassistant.components.dexcom.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == CONFIG[CONF_USERNAME]
assert result2["data"] == CONFIG
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_account_error(hass):
"""Test we handle account error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom",
side_effect=AccountError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_session_error(hass):
"""Test we handle session error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom",
side_effect=SessionError,
):
|
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass):
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_option_flow_default(hass):
"""Test config flow options."""
entry = MockConfigEntry(
domain=DOMAIN,
data=CONFIG,
options=None,
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_UNIT_OF_MEASUREMENT: MG_DL,
}
async def test_option_flow(hass):
"""Test config flow options."""
entry = MockConfigEntry(
domain=DOMAIN,
data=CONFIG,
options={CONF_UNIT_OF_MEASUREMENT: MG_DL},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_UNIT_OF_MEASUREMENT: MMOL_L},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_UNIT_OF_MEASUREMENT: MMOL_L,
}
|
eltoncarr/tubular
|
tubular/tests/test_utils.py
|
Python
|
agpl-3.0
| 3,467
| 0.001154
|
"""
Tests of the utility code.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from copy import copy
import boto
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale import Tag
import six
def create_asg_with_tags(asg_name, tags, ami_id="ami-abcd1234", elbs=None):
"""
Create an ASG with the given name, tags and AMI. This is meant to be
used in tests that are decorated with the @mock_autoscaling moto decorator.
Arguments:
asg_name(str): The name of the new auto-scaling group.
tags(dict): A dict mapping tag names to tag values.
        ami_id(str): The ID of the AMI that should be deployed.
        elbs(list): Optional list of ELB names to attach to the group.
Returns:
boto.ec2.autoscale.group.AutoScalingGroup
"""
tag_list = [
Tag(
key=k,
value=v,
resource_id=asg_name,
|
propagate_at_launch=True
) for k, v in six.iteritems(tags)
]
if elbs is None:
elbs = []
# Create asgs
conn = boto.ec2.autoscale.connect_to_region('us-east-1')
config = LaunchConfiguration(
name='{}_lc'.format(asg_name),
image_id=ami_id,
instance_type='t2.medium',
)
    conn.create_launch_configuration(config)
group = AutoScalingGroup(
name=asg_name,
availability_zones=['us-east-1c', 'us-east-1b'],
default_cooldown=60,
desired_capacity=2,
load_balancers=elbs,
health_check_period=100,
health_check_type="EC2",
max_size=2,
min_size=2,
launch_config=config,
placement_group="test_placement",
vpc_zone_identifier='subnet-1233abcd',
termination_policies=["OldestInstance", "NewestInstance"],
tags=tag_list,
)
conn.create_auto_scaling_group(group)
# Each ASG tag that has 'propagate_at_launch' set to True is *supposed* to be set on the instances.
# However, it seems that moto (as of 0.4.30) does not properly set the tags on the instances created by the ASG.
# So set the tags on the ASG instances manually instead.
ec2_conn = boto.connect_ec2()
for asg in conn.get_all_groups():
if asg.name == asg_name:
asg_instance_ids = [instance.instance_id for instance in asg.instances]
for instance_id in asg_instance_ids:
ec2_conn.create_tags(instance_id, tags)
return group
def create_elb(elb_name):
"""
Method to create an Elastic Load Balancer.
"""
boto_elb = boto.connect_elb()
zones = ['us-east-1a', 'us-east-1b']
ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
load_balancer = boto_elb.create_load_balancer(elb_name, zones, ports)
instance_ids = ['i-4f8cf126', 'i-0bb7ca62']
load_balancer.register_instances(instance_ids)
return load_balancer
def clone_elb_instances_with_state(elb, state):
"""
Shallow clone an ELB and gives the instances inside the state provided
Arguments:
elb(iterable): The ELB containing the instances
state(string): The state the instances inside the ELB should have. Should be either "OutOfService"
or "InService"
Returns: an elb object
"""
elb_copy = copy(elb)
for idx, instance in enumerate(elb):
elb_copy[idx] = copy(instance)
elb_copy[idx].state = state
return elb_copy
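
# --- Editor's note: illustrative sketch, not part of the original module. ---
# The helpers above are meant to run under moto's mocked AWS backends. A minimal usage
# sketch (the ASG name, ELB name and tag values are made up; assumes a moto version
# whose mock_autoscaling/mock_ec2/mock_elb mocks are available, used here as context
# managers):
def _editor_demo_create_tagged_asg():
    from moto import mock_autoscaling, mock_ec2, mock_elb
    with mock_autoscaling(), mock_ec2(), mock_elb():
        create_elb('demo-elb')
        group = create_asg_with_tags('demo-asg', {'environment': 'stage'}, elbs=['demo-elb'])
        # The helper always provisions a 2-instance group (see min_size/max_size above).
        assert group.desired_capacity == 2
        return group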
|
robin900/sqlalchemy
|
test/orm/test_deferred.py
|
Python
|
mit
| 30,663
| 0.003131
|
import sqlalchemy as sa
from sqlalchemy import testing, util
from sqlalchemy.orm import mapper, deferred, defer, undefer, Load, \
load_only, undefer_group, create_session, synonym, relationship, Session,\
joinedload, defaultload, aliased, contains_eager, with_polymorphic
from sqlalchemy.testing import eq_, AssertsCompiledSQL, assert_raises_message
from test.orm import _fixtures
from .inheritance._poly_fixtures import Company, Person, Engineer, Manager, \
Boss, Machine, Paperwork, _Polymorphic
class DeferredTest(AssertsCompiledSQL, _fixtures.FixtureTest):
def test_basic(self):
"""A basic deferred load."""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
o = Order()
self.assert_(o.description is None)
q = create_session().query(Order).order_by(Order.id)
def go():
l = q.all()
o2 = l[2]
x = o2.description
self.sql_eq_(go, [
("SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.description AS orders_description "
"FROM orders WHERE orders.id = :param_1",
{'param_1':3})])
def test_defer_primary_key(self):
"""what happens when we try to defer the primary key?"""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'id': deferred(orders.c.id)})
# right now, it's not that graceful :)
q = create_session().query(Order)
assert_raises_message(
sa.exc.NoSuchColumnError,
"Could not locate",
q.first
)
def test_unsaved(self):
"""Deferred loading does not kick in when just PK cols are set."""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o = Order()
sess.add(o)
o.id = 7
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_synonym_group_bug(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'isopen':synonym('_isopen', map_column=True),
'description':deferred(orders.c.description, group='foo')
})
sess = create_session()
o1 = sess.query(Order).get(1)
eq_(o1.description, "order 1")
def test_unsaved_2(self):
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o = Order()
sess.add(o)
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_unsaved_group(self):
"""Deferred loading doesn't kick in when just PK cols are set"""
orders, Order = self.tables.orders, self.classes.Order
|
mapper(Order, orders, properties=dict(
description=deferred(orders.c.description, group='primary'),
opened=deferred(orders.c.isopen, group='primary')))
sess = create_session()
o = Order()
sess.add(o)
o.id = 7
|
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_unsaved_group_2(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=dict(
description=deferred(orders.c.description, group='primary'),
opened=deferred(orders.c.isopen, group='primary')))
sess = create_session()
o = Order()
sess.add(o)
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_save(self):
Order, orders = self.classes.Order, self.tables.orders
m = mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o2 = sess.query(Order).get(2)
o2.isopen = 1
sess.flush()
def test_group(self):
"""Deferred load with a group"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('addrident', deferred(orders.c.address_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='primary'))
]))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.id AS orders_id "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders WHERE orders.id = :param_1",
{'param_1':3})])
o2 = q.all()[2]
eq_(o2.description, 'order 3')
assert o2 not in sess.dirty
o2.description = 'order 3'
def go():
sess.flush()
self.sql_count_(0, go)
def test_preserve_changes(self):
"""A deferred load operation doesn't revert modifications on attributes"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties = {
'userident': deferred(orders.c.user_id, group='primary'),
'description': deferred(orders.c.description, group='primary'),
'opened': deferred(orders.c.isopen, group='primary')
})
sess = create_session()
o = sess.query(Order).get(3)
assert 'userident' not in o.__dict__
o.description = 'somenewdescription'
eq_(o.description, 'somenewdescription')
def go():
eq_(o.opened, 1)
self.assert_sql_count(testing.db, go, 1)
eq_(o.description, 'somenewdescription')
assert o in sess.dirty
def test_commits_state(self):
"""
When deferred elements are loaded via a group, they get the proper
CommittedState and don't result in changes being committed
"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties = {
'userident': deferred(orders.c.user_id, group='primary'),
'description': deferred(orders.c.description, group='primary'),
'opened': deferred(orders.c.isopen, group='primary')})
sess = create_session()
o2 = sess.query(Order).get(3)
# this will load the group of attributes
eq_(o2.description, 'order 3')
assert o2 not in sess.dirty
# this will mark it as 'dirty', but nothing actually changed
o2.description = 'order 3'
# therefore the flush() shouldn't actually issue any SQL
self.assert_sql_count(testing.db, sess.flush, 0)
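
    # --- Editor's note: illustrative sketch, not part of the original test suite. ---
    # The tests above exercise mapper-level deferral; the same columns can also be
    # forced to load eagerly per-query with options. A hypothetical companion test in
    # the same style (prefixed with an underscore so the runner skips it):
    def _editor_demo_undefer_option(self):
        Order, orders = self.classes.Order, self.tables.orders
        mapper(Order, orders, properties={
            'description': deferred(orders.c.description)})
        sess = create_session()
        # undefer() pulls the deferred column into the initial SELECT, so touching
        # .description afterwards should emit no additional SQL.
        o1 = sess.query(Order).options(undefer(Order.description)).order_by(Order.id).first()
        def go():
            eq_(o1.description, 'order 1')
        self.sql_count_(0, go)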
def test_map_selectable_wo_deferred(self):
"""test mapping to a selectable with deferred cols,
the selectable doesn't include the deferred col.
"""
Order, orders = self.classes.Order, self.tables.orders
order_select = sa.select([
orders.c.id,
orders.c.user_id,
orders.c.address_id,
orders.c.description,
orders.c.isopen]).alias()
mapper(Order, order_select, proper
|
MTG/pycompmusic
|
test/essentia/rhythmTest.py
|
Python
|
agpl-3.0
| 306
| 0.006536
|
#!/usr/bin/env python
import rhythm
re = rhythm.RhythmExtract()
# fname = '/media/Data/Data/CompMusicDB/Carnatic/audio/Aneesh_Vidyashankar/Pure_Expressions/7_Jagadoddharana.mp3'
fname = '/media/Code/UPFWork/PhD/Data/CMCMDa/mp3/adi/10014_1313_Bhagyadalakshmi.mp3'
results = re.run(fname)
print("All done")
|
anaran/kuma
|
kuma/wiki/managers.py
|
Python
|
mpl-2.0
| 8,955
| 0.000335
|
from datetime import date, datetime, timedelta
from django.core import serializers
from django.db import models
import bleach
from constance import config
from .constants import (ALLOWED_TAGS, ALLOWED_ATTRIBUTES, ALLOWED_STYLES,
TEMPLATE_TITLE_PREFIX)
from .content import parse as parse_content
from .queries import TransformQuerySet
class TransformManager(models.Manager):
def get_queryset(self):
return TransformQuerySet(self.model)
class BaseDocumentManager(models.Manager):
"""Manager for Documents, assists for queries"""
def clean_content(self, content_in, use_constance_bleach_whitelists=False):
allowed_hosts = config.KUMA_WIKI_IFRAME_ALLOWED_HOSTS
blocked_protocols = config.KUMA_WIKI_HREF_BLOCKED_PROTOCOLS
out = (parse_content(content_in)
.filterIframeHosts(allowed_hosts)
.filterAHrefProtocols(blocked_protocols)
.serialize())
if use_constance_bleach_whitelists:
tags = config.BLEACH_ALLOWED_TAGS
attributes = config.BLEACH_ALLOWED_ATTRIBUTES
styles = config.BLEACH_ALLOWED_STYLES
else:
tags = ALLOWED_TAGS
attributes = ALLOWED_ATTRIBUTES
styles = ALLOWED_STYLES
out = bleach.clean(out, attributes=attributes, tags=tags,
styles=styles)
return out
def get_by_natural_key(self, locale, slug):
return self.get(locale=locale, slug=slug)
|
def get_by_stale_rendering(self):
|
"""Find documents whose renderings have gone stale"""
return (self.exclude(render_expires__isnull=True)
.filter(render_expires__lte=datetime.now()))
def allows_add_by(self, user, slug):
"""
Determine whether the user can create a document with the given
slug. Mainly for enforcing Template: editing permissions
"""
if (slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.add_template_document')):
return False
# NOTE: We could enforce wiki.add_document here, but it's implicitly
# assumed everyone is allowed.
return True
def filter_for_list(self, locale=None, category=None, tag=None,
tag_name=None, errors=None, noparent=None,
toplevel=None):
docs = (self.filter(is_template=False, is_redirect=False)
.exclude(slug__startswith='User:')
.exclude(slug__startswith='Talk:')
.exclude(slug__startswith='User_talk:')
.exclude(slug__startswith='Template_talk:')
.exclude(slug__startswith='Project_talk:')
.order_by('slug'))
if locale:
docs = docs.filter(locale=locale)
if category:
try:
docs = docs.filter(category=int(category))
except ValueError:
pass
if tag:
docs = docs.filter(tags__in=[tag])
if tag_name:
docs = docs.filter(tags__name=tag_name)
if errors:
docs = (docs.exclude(rendered_errors__isnull=True)
.exclude(rendered_errors__exact='[]'))
if noparent:
# List translated pages without English source associated
docs = docs.filter(parent__isnull=True)
if toplevel:
docs = docs.filter(parent_topic__isnull=True)
# Leave out the html, since that leads to huge cache objects and we
# never use the content in lists.
docs = docs.defer('html')
return docs
def filter_for_review(self, locale=None, tag=None, tag_name=None):
"""Filter for documents with current revision flagged for review"""
query = 'current_revision__review_tags__%s'
if tag_name:
query = {query % 'name': tag_name}
elif tag:
query = {query % 'in': [tag]}
else:
query = {query % 'name__isnull': False}
if locale:
query['locale'] = locale
return self.filter(**query).distinct()
def filter_with_localization_tag(self, locale=None, tag=None, tag_name=None):
"""Filter for documents with a localization tag on current revision"""
query = 'current_revision__localization_tags__%s'
if tag_name:
query = {query % 'name': tag_name}
elif tag:
query = {query % 'in': [tag]}
else:
query = {query % 'name__isnull': False}
if locale:
query['locale'] = locale
return self.filter(**query).distinct()
def dump_json(self, queryset, stream):
"""Export a stream of JSON-serialized Documents and Revisions
This is inspired by smuggler.views.dump_data with customizations for
Document specifics, per bug 747137
"""
objects = []
for doc in queryset.all():
rev = doc.current_or_latest_revision()
if not rev:
# Skip this doc if, for some reason, there's no revision.
continue
# Drop the pk and circular reference to rev.
doc.pk = None
doc.current_revision = None
objects.append(doc)
# Drop the rev pk
rev.pk = None
objects.append(rev)
# HACK: This is kind of awkward, but the serializer only accepts a flat
# list of field names across all model classes that get handled. So,
# this is a mashup whitelist of Document and Revision fields.
fields = (
# TODO: Maybe make this an *exclusion* list by getting the list of
# fields from Document and Revision models and knocking out what we
# don't want? Serializer doesn't support exclusion list directly.
'title', 'locale', 'slug', 'tags', 'is_template', 'is_localizable',
'parent', 'parent_topic', 'category', 'document', 'is_redirect',
'summary', 'content', 'comment',
'keywords', 'tags', 'toc_depth', 'is_approved',
'creator', # HACK: Replaced on import, but deserialize needs it
'is_mindtouch_migration',
)
serializers.serialize('json', objects, indent=2, stream=stream,
fields=fields, use_natural_keys=True)
def load_json(self, creator, stream):
"""Import a stream of JSON-serialized Documents and Revisions
This is inspired by smuggler.views.load_data with customizations for
Document specifics, per bug 747137
"""
counter = 0
objects = serializers.deserialize('json', stream)
for obj in objects:
# HACK: Dig up the deserializer wrapped model object & manager,
# because the deserializer wrapper bypasses some things we need to
# un-bypass here
actual = obj.object
mgr = actual._default_manager
actual.pk = None
if hasattr(mgr, 'get_by_natural_key'):
# If the model uses natural keys, attempt to find the pk of an
# existing record to overwrite.
try:
nk = actual.natural_key()
existing = mgr.get_by_natural_key(*nk)
actual.pk = existing.pk
except actual.DoesNotExist:
pass
# Tweak a few fields on the way through for Revisions.
# Don't do a type check here since that would require importing
if actual._meta.object_name == 'Revision':
actual.creator = creator
actual.created = datetime.now()
actual.save()
counter += 1
return counter
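
# --- Editor's note: illustrative sketch, not part of the original module. ---
# dump_json() and load_json() above are a pair: the dump writes each Document followed
# by its current Revision with primary keys stripped, and the load re-keys records via
# get_by_natural_key() so re-imports update existing rows instead of duplicating them.
# A hedged usage sketch (the Document model and the requesting user are assumptions,
# not defined in this module):
#
#   from StringIO import StringIO   # this module targets Python 2
#   buf = StringIO()
#   Document.objects.dump_json(Document.objects.filter(locale='en-US'), buf)
#   buf.seek(0)
#   imported_count = Document.objects.load_json(creator=request.user, stream=buf)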
class DocumentManager(BaseDocumentManager):
"""
The actual manager, which filters to show only non-deleted pages.
"""
def get_queryset(self):
return super(DocumentManager, self).get_queryset().filter(deleted=False)
class DeletedDocumentManager(BaseDocumentManager):
"""
Specialized man
|
mattrobenolt/django-sudo
|
sudo/__init__.py
|
Python
|
bsd-3-clause
| 246
| 0
|
"""
sudo
~~~~
:copyright: (c) 2020 by Matt Robenolt.
:license: BSD, see LICENSE for more details.
"""
try:
|
    VERSION = __import__("pkg_resources").get_distribution("sudo").version
except Exception: # pragma: no cover
VERSION = "unknown"
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/audit/auditmessages.py
|
Python
|
apache-2.0
| 6,791
| 0.035488
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditmessages(base_resource) :
""" Configuration for audit message resource. """
def __init__(self) :
self._loglevel = []
self._numofmesgs = 0
self._value = ""
self.___count = 0
@property
def loglevel(self) :
"""Audit log level filter, which specifies the types of events to display.
The following loglevels are valid:
* ALL - All events.
* EMERGENCY - Events that indicate an immediate crisis on the server.
* ALERT - Events that might require action.
* CRITICAL - Events that indicate an imminent server crisis.
* ERROR - Events that indicate some type of error.
* WARNING - Events that require action in the near future.
* NOTICE - Events that the administrator should know about.
* INFORMATIONAL - All but low-level events.
* DEBUG - All events, in extreme detail.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG.
"""
try :
return self._loglevel
except Exception as e:
raise e
@loglevel.setter
def loglevel(self, loglevel) :
"""Audit log level filter, which specifies the types of events to display.
The following loglevels are valid:
* ALL - All events.
* EMERGENCY - Events that indicate an immediate crisis on the server.
* ALERT - Events that might require action.
* CRITICAL - Events that indicate an imminent server crisis.
* ERROR - Events that indicate some type of error.
* WARNING - Events that require action in the near future.
* NOTICE - Events that the administrator should know about.
* INFORMATIONAL - All but low-level events.
* DEBUG - All events, in extreme detail.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG
"""
try :
self._loglevel = loglevel
except Exception as e:
raise e
@property
def numofmesgs(self) :
"""Number of log messages to be displayed.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 256.
"""
try :
return self._numofmesgs
except Exception as e:
raise e
@numofmesgs.setter
def numofmesgs(self, numofmesgs) :
"""Number of log messages to be displayed.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 256
"""
try :
self._numofmesgs = numofmesgs
except Exception as e:
raise e
@property
def value(self) :
"""The Audit message.
"""
try :
return self._value
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(auditmessages_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.auditmessages
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the auditmessages resources that are configured on netscaler.
"""
try :
if not name :
obj = auditmessages()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
""" Use this API to fetch all the auditmessages resources that are configured on netscaler.
# This uses auditmessages_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = auditmessages()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of auditmessages resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessages()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the auditmessages resources configured on NetScaler.
"""
try :
obj = auditmessages()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of auditmessages resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessages()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Loglevel:
ALL = "ALL"
EMERGENCY = "EMERGENCY"
ALERT = "ALERT"
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
NOTICE = "NOTICE"
INFORMATIONAL = "INFORMATIONAL"
DEBUG = "DEBUG"
class auditmessages_response(base_response) :
def __init__(self, length=1) :
self.auditmessages = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.auditmessages = [auditmessages() for _ in range(length)]
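
# --- Editor's note: illustrative sketch, not part of the original module. ---
# Typical use of the resource class above goes through a logged-in nitro_service
# client. The address and credentials below are placeholders, and the nitro_service
# import path and login() call are assumed from the same SDK generation as this file:
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#   client = nitro_service("192.0.2.10", "http")
#   client.login("nsroot", "nsroot")
#   recent = auditmessages.get(client)                            # all messages
#   errors = auditmessages.get_filtered(client, "loglevel:ERROR")
#   total = auditmessages.count(client)
#   client.logout()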
|
jgillis/casadi
|
documentation/examples/solvers/exacthessian.py
|
Python
|
lgpl-3.0
| 2,328
| 0.020189
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
#! Exact Hessian
#! =====================
from casadi import *
from numpy import *
import casadi as c
#! We will investigate the use of an exact Hessian with the help of the Rosenbrock function
x=SX("x")
y=SX("y")
obj = (1-x)**2+100*(y-x**2)**2
#! We choose to add a single constraint
constr = x**2+y**2
f=SXFunction([vertcat([x,y])],[obj])
g=SXFunction([vertcat([x,y])],[constr])
solver = IpoptSolver(f,g)
#! We need the hessian of the lagrangian.
#! A problem with n decision variables and m constraints gives us a hessian of size n x n
sigma=SX("sigma") # A scalar factor
lambd=SX("lambd") # Multipier of the problem, shape m x 1.
xy = vertcat([x,y])
h=SXFunction([xy,lambd,sigma],[sigma*hessian(obj,xy)+lambd*hessian(constr,xy)])
#! We solve the problem with an exact hessian
solver = IpoptSolver(f,g,h)
solver.init()
solver.input("lbx").set([-10]*2)
solver.input("ubx").set([10]*2)
solver.input("lbg").set([0])
solver.input("ubg").set([1])
solver.solve()
for sol in array(solver.output()):
print "%.15f" % sol
#! To compare the behaviour of convergence, we solve the same problem without exact hessian
solver = IpoptSolver(f,g)
solver.init()
solver.input("lbx").set([-10]*2)
solver.input("ubx").set([10]*2)
solver.input("lbg").set([0])
solver.input("ubg").set([1])
solver.solve()
for sol in array(solver.output()):
print "%.15f" % sol
|
mansenfranzen/notebooks
|
Confidence Interval Simulation Bokeh/model.py
|
Python
|
mit
| 2,316
| 0.003886
|
"""This module contains the data handler."""
import numpy as np
import tssim
from bokeh.models import ColumnDataSource
def create_source(series):
return ColumnDataSource(data=dict(x=series.index.values, y=series.values))
def dummy1():
ts = tssim.TimeSeries(start="2017-04-04", freq="D", periods=1100)
    ts.add("Noise", lambda x: x * 0 + np.random.normal(0, 0.25, size=x.shape[0]))
ts.add("Log", lambda x: np.log(x))
ts.add("Increase", lambda x: x * 0.01, condition=lambda x: x < 1)
return ts.generate().values
def dummy2():
ts = tssim.TimeSeries(start="2017-04-04", freq="D", periods=1100)
ts.add("Noise", lambda x: x * 0 + np.random.normal(0, 0.25, size=x.shape[0]))
return ts.generate().values
def sources():
return [PlotData("Mixed", dummy1()),
PlotData("Noise", dummy2())]
def table_source(source=False):
columns = ("x", "lower CI", "upper CI", "mean")
result = {column: [] for column in columns}
if source:
return ColumnDataSource(result)
else:
return result
class DataSource:
def __init__(self, data):
self.data = data
self.sync = ColumnDataSource(self.data)
def update(self):
self.sync.data = self.data
class PlotData:
def __init__(self, label, series):
self.label = label
self.series = series
self.source = create_source(series)
empty = dict(x=[], y=[])
self.start = DataSource(empty.copy())
self.end = DataSource(empty.copy())
self.future = DataSource(empty.copy())
self.markers = DataSource(empty.copy())
self.marked = DataSource(empty.copy())
self.ci = DataSource(empty.copy())
self.table = DataSource(table_source())
def update(self):
to_update = ("start", "end", "future", "markers", "marked", "ci", "table")
for item in to_update:
getattr(self, item).update()
def reset(self):
empty = dict(x=[], y=[])
self.start.data.update(empty.copy())
self.end.data.update(empty.copy())
self.future.data.update(empty.copy())
self.markers.data.update(empty.copy())
self.marked.data.update(empty.copy())
self.ci.data.update(empty.copy())
self.table.update(table_source())
self.update()
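
# --- Editor's note: illustrative sketch, not part of the original module. ---
# PlotData keeps plain dicts in each DataSource.data and only pushes them to the Bokeh
# ColumnDataSource when update() is called, so a view can stage changes to several
# sources and sync them in one step. A hedged sketch of how a view might consume it
# (the figure setup is an assumption, not taken from this project):
def _editor_demo_plot(plot_data):
    from bokeh.plotting import figure
    fig = figure(title=plot_data.label)
    fig.line('x', 'y', source=plot_data.source)           # the simulated series
    fig.circle('x', 'y', source=plot_data.markers.sync)   # user-placed markers
    # Stage a marker in the plain dict, then push it to the synced ColumnDataSource.
    plot_data.markers.data.update(x=[0.0], y=[1.0])
    plot_data.update()
    return fig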
|
pkdevbox/trac
|
trac/web/session.py
|
Python
|
bsd-3-clause
| 22,324
| 0.000134
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2014 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
import time
from trac.admin.api import AdminCommandError, IAdminCommandProvider, \
console_date_format, get_console_locale
from trac.core import Component, ExtensionPoint, TracError, implements
from trac.util import hex_entropy, lazy
from trac.util.datefmt import get_datetime_format_hint, format_date, \
parse_date, to_datetime, to_timestamp
from trac.util.text import print_table
from trac.util.translation import _
from trac.web.api import IRequestHandler, is_valid_default_handler
UPDATE_INTERVAL = 3600 * 24 # Update session last_visit time stamp after 1 day
PURGE_AGE = 3600 * 24 * 90 # Purge session after 90 days idle
COOKIE_KEY = 'trac_session'
# Note: as we often manipulate both the `session` and the
# `session_attribute` tables, there's a possibility of table
# deadlocks (#9705). We try to prevent them to happen by always
# accessing the tables in the same order within the transaction,
# first `session`, then `session_attribute`.
class DetachedSession(dict):
def __init__(self, env, sid):
dict.__init__(self)
self.env = env
self.sid = None
if sid:
self.get_session(sid, authenticated=True)
else:
self.authenticated = False
self.last_visit = 0
self._new = True
self._old = {}
    def __setitem__(self, key, value):
dict.__setitem__(self, key, unicode(value))
def set(self, key, value, default=None):
"""Set a variable in the session, or remove it if it's equal to the
default value.
"""
        value = unicode(value)
if default is not None:
default = unicode(default)
if value == default:
self.pop(key, None)
return
dict.__setitem__(self, key, value)
def get_session(self, sid, authenticated=False):
self.env.log.debug("Retrieving session for ID %r", sid)
with self.env.db_query as db:
self.sid = sid
self.authenticated = authenticated
self.clear()
for last_visit, in db("""
SELECT last_visit FROM session
WHERE sid=%s AND authenticated=%s
""", (sid, int(authenticated))):
self._new = False
self.last_visit = int(last_visit or 0)
self.update(db("""
SELECT name, value FROM session_attribute
WHERE sid=%s and authenticated=%s
""", (sid, int(authenticated))))
self._old = self.copy()
break
else:
self.last_visit = 0
self._new = True
self._old = {}
def save(self):
items = self.items()
if not self._old and not items:
# The session doesn't have associated data, so there's no need to
# persist it
return
authenticated = int(self.authenticated)
now = int(time.time())
# We can't do the session management in one big transaction,
# as the intertwined changes to both the session and
# session_attribute tables are prone to deadlocks (#9705).
# Therefore we first we save the current session, then we
# eventually purge the tables.
session_saved = False
with self.env.db_transaction as db:
# Try to save the session if it's a new one. A failure to
# do so is not critical but we nevertheless skip the
# following steps.
if self._new:
self.last_visit = now
self._new = False
# The session might already exist even if _new is True since
# it could have been created by a concurrent request (#3563).
try:
db("""INSERT INTO session (sid, last_visit, authenticated)
VALUES (%s,%s,%s)
""", (self.sid, self.last_visit, authenticated))
except self.env.db_exc.IntegrityError:
self.env.log.warning('Session %s already exists', self.sid)
db.rollback()
return
# Remove former values for session_attribute and save the
# new ones. The last concurrent request to do so "wins".
if self._old != self:
if self._old.get('name') != self.get('name') or \
self._old.get('email') != self.get('email'):
self.env.invalidate_known_users_cache()
if not items and not authenticated:
# No need to keep around empty unauthenticated sessions
db("DELETE FROM session WHERE sid=%s AND authenticated=0",
(self.sid,))
db("""DELETE FROM session_attribute
WHERE sid=%s AND authenticated=%s
""", (self.sid, authenticated))
self._old = dict(self.items())
# The session variables might already have been updated by a
# concurrent request.
try:
db.executemany("""
INSERT INTO session_attribute
(sid,authenticated,name,value)
VALUES (%s,%s,%s,%s)
""", [(self.sid, authenticated, k, v)
for k, v in items])
except self.env.db_exc.IntegrityError:
self.env.log.warning('Attributes for session %s already '
'updated', self.sid)
db.rollback()
return
session_saved = True
# Purge expired sessions. We do this only when the session was
# changed as to minimize the purging.
if session_saved and now - self.last_visit > UPDATE_INTERVAL:
self.last_visit = now
mintime = now - PURGE_AGE
with self.env.db_transaction as db:
# Update the session last visit time if it is over an
# hour old, so that session doesn't get purged
self.env.log.info("Refreshing session %s", self.sid)
db("""UPDATE session SET last_visit=%s
WHERE sid=%s AND authenticated=%s
""", (self.last_visit, self.sid, authenticated))
self.env.log.debug('Purging old, expired, sessions.')
db("""DELETE FROM session_attribute
WHERE authenticated=0 AND sid IN (
SELECT sid FROM session
WHERE authenticated=0 AND last_visit < %s
)
""", (mintime,))
# Avoid holding locks on lot of rows on both session_attribute
# and session tables
with self.env.db_transaction as db:
db("""
DELETE FROM session
WHERE authenticated=0 AND last_visit < %s
""", (mintime,))
class Session(DetachedSession):
"""Basic session handling and per-session storage."""
def __init__(self, env, req)
|
eddiejessup/pizza
|
src/dump.py
|
Python
|
gpl-2.0
| 39,268
| 0.001248
|
import sys
import commands
import re
import glob
import types
from os import popen
from math import * # any function could be used by set()
import numpy as np
oldnumeric = False
try:
from DEFAULTS import PIZZA_GUNZIP
except:
PIZZA_GUNZIP = "gunzip"
class dump:
def __init__(self, *list):
self.snaps = []
self.nsnaps = self.nselect = 0
self.names = {}
self.tselect = tselect(self)
self.aselect = aselect(self)
self.atype = "type"
self.bondflag = 0
self.bondlist = []
self.triflag = 0
self.trilist = []
self.lineflag = 0
self.linelist = []
self.objextra = None
# flist = list of all dump file names
words = list[0].split()
self.flist = []
for word in words:
self.flist += glob.glob(word)
if len(self.flist) == 0 and len(list) == 1:
raise StandardError, "no dump file specified"
if len(list) == 1:
self.increment = 0
self.read_all()
else:
self.increment = 1
self.nextfile = 0
self.eof = 0
def read_all(self):
for file in self.flist:
if file[-3:] == ".gz":
f = popen("%s -c %s" % (PIZZA_GUNZIP, file), 'r')
else:
f = open(file)
snap = self.read_snapshot(f)
while snap:
self.snaps.append(snap)
print snap.time,
sys.stdout.flush()
snap = self.read_snapshot(f)
f.close()
# sort entries by timestep, cull duplicates
self.snaps.sort(self.compare_time)
self.cull()
self.nsnaps = len(self.snaps)
print "read %d snapshots" % self.nsnaps
# select all timesteps and atoms
self.tselect.all()
# print column assignments
if len(self.names):
print "assigned columns:", self.names2str()
else:
print "no column assignments made"
# if snapshots are scaled, unscale them
if (not self.names.has_key("x")) or \
(not self.names.has_key("y")) or \
(not self.names.has_key("z")):
print "dump scaling status is unknown"
elif self.nsnaps > 0:
if self.scale_original == 1:
self.unscale()
elif self.scale_original == 0:
print "dump is already unscaled"
else:
print "dump scaling status is unknown"
# read next snapshot from list of files
def next(self):
if not self.increment:
raise StandardError, "cannot read incrementally"
# read next snapshot in current file using eof as pointer
# if fail, try next file
# if new snapshot time stamp already exists, read next snapshot
while 1:
f = open(self.flist[self.nextfile], 'rb')
f.seek(self.eof)
snap = self.read_snapshot(f)
if not snap:
self.nextfile += 1
if self.nextfile == len(self.flist):
return -1
f.close()
self.eof = 0
continue
self.eof = f.tell()
f.close()
try:
self.findtime(snap.time)
continue
except:
break
# select the new snapshot with all its atoms
self.snaps.append(snap)
snap = self.snaps[self.nsnaps]
snap.tselect = 1
snap.nselect = snap.natoms
for i in xrange(snap.natoms):
snap.aselect[i] = 1
self.nsnaps += 1
self.nselect += 1
return snap.time
# read a single snapshot from file f
# return snapshot or 0 if failed
# for first snapshot only:
# assign column names (file must be self-describing)
# set scale_original to 0/1/-1 for unscaled/scaled/unknown
# convert xs,xu to x in names
def read_snapshot(self, f):
try:
snap = Snap()
item = f.readline()
snap.time = int(f.readline().split()[0]) # just grab 1st field
item = f.readline()
snap.natoms = int(f.readline())
snap.aselect = np.zeros(snap.natoms)
item = f.readline()
words = item.split("BOUNDS ")
if len(words) == 1:
snap.boxstr = ""
else:
snap.boxstr = words[1].strip()
if "xy" in snap.boxstr:
snap.triclinic = 1
else:
snap.triclinic = 0
words = f.readline().split()
if len(words) == 2:
snap.xlo, snap.xhi, snap.xy = float(
words[0]), float(words[1]), 0.0
else:
snap.xlo, snap.xhi, snap.xy = \
float(words[0]), float(words[1]), float(words[2])
words = f.readline().split()
if len(words) == 2:
snap.ylo, snap.yhi, snap.xz = float(
words[0]), float(words[1]), 0.0
else:
snap.ylo, snap.yhi, snap.xz = \
float(words[0]), float(words[1]), float(words[2])
words = f.readline().split()
if len(words) == 2:
snap.zlo, snap.zhi, snap.yz = float(
words[0]), float(words[1]), 0.0
else:
snap.zlo, snap.zhi, snap.yz = \
                    float(words[0]), float(words[1]), float(words[2])
item = f.readline()
|
if len(self.names) == 0:
self.scale_original = -1
xflag = yflag = zflag = -1
words = item.split()[2:]
if len(words):
for i in range(len(words)):
if words[i] == "x" or words[i] == "xu":
xflag = 0
self.names["x"] = i
elif words[i] == "xs" or words[i] == "xsu":
xflag = 1
self.names["x"] = i
elif words[i] == "y" or words[i] == "yu":
yflag = 0
self.names["y"] = i
elif words[i] == "ys" or words[i] == "ysu":
yflag = 1
self.names["y"] = i
elif words[i] == "z" or words[i] == "zu":
zflag = 0
self.names["z"] = i
elif words[i] == "zs" or words[i] == "zsu":
zflag = 1
self.names["z"] = i
else:
self.names[words[i]] = i
if xflag == 0 and yflag == 0 and zflag == 0:
self.scale_original = 0
if xflag == 1 and yflag == 1 and zflag == 1:
self.scale_original = 1
if snap.natoms:
words = f.readline().split()
ncol = len(words)
for i in xrange(1, snap.natoms):
words += f.readline().split()
floats = map(float, words)
if oldnumeric:
atoms = np.zeros((snap.natoms, ncol), np.Float)
else:
atoms = np.zeros((snap.natoms, ncol), np.float)
start = 0
stop = ncol
for i in xrange(snap.natoms):
atoms[i] = floats[start:stop]
start = stop
stop += ncol
else:
atoms = None
snap.atoms = atoms
return snap
except:
return 0
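
    # --- Editor's note: illustrative reference, not part of the original module. ---
    # read_snapshot() above walks a standard LAMMPS text dump one snapshot at a time.
    # The layout it expects looks roughly like this (values made up):
    #
    #   ITEM: TIMESTEP
    #   1000
    #   ITEM: NUMBER OF ATOMS
    #   2
    #   ITEM: BOX BOUNDS pp pp pp
    #   0.0 10.0
    #   0.0 10.0
    #   0.0 10.0
    #   ITEM: ATOMS id type xs ys zs
    #   1 1 0.25 0.50 0.75
    #   2 1 0.30 0.10 0.90
    #
    # Because the coordinate columns are named xs/ys/zs, scale_original is set to 1 and
    # read_all() will later call unscale() to recover box-unit coordinates.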
# map atom column names
def map(self, *pairs):
if len(pairs) % 2 != 0:
raise StandardError, "dump map() requires pairs of mappings"
for i in range(0, len(pairs), 2):
j = i + 1
self.names[pairs[j]] =
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/ship/crafted/reactor/shared_base_reactor_subcomponent_mk5.py
|
Python
|
mit
| 499
| 0.044088
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
	result.template = "object/tangible/ship/crafted/reactor/shared_base_reactor_subcomponent_mk5.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","base_reactor_subcomponent_mk5")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
TheWardoctor/Wardoctors-repo
|
script.module.uncoded/lib/resources/lib/sources/de/horrorkino.py
|
Python
|
apache-2.0
| 3,418
| 0.005851
|
# -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.genre_filter = ['horror']
self.domains = ['horrorkino.do.am']
self.base_link = 'http://horrorkino.do.am/'
self.search_link = 'video/shv'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
r = client.request(urlparse.urljoin(self.base_link, url))
r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
|
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for i in r:
valid, host = source_utils.is_host_valid(i, hostDict)
|
if not valid: continue
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
|
tuxta/gameframe
|
GameFrame/EntryTextObject.py
|
Python
|
gpl-3.0
| 4,969
| 0
|
import pygame
from GameFrame import TextObject, Globals, Level
class EntryTextObject(TextObject):
def __init__(self, room: Level, x: int, y: int, max_len=4):
TextObject.__init__(self, room, x, y, '')
self.max_len = max_len
self.handle_key_events = True
self.accepting_input = True
self.active = True
def accept_input(self):
self.accepting_input = True
def set_focus(self, in_focus: bool):
self.active = in_focus
def key_pressed(self, key):
if self.accepting_input and self.active:
key_recognised = False
if key[pygame.K_a]:
self.text += 'A'
key_recognised = True
elif key[pygame.K_b]:
self.text += 'B'
key_recognised = True
elif key[pygame.K_c]:
self.text += 'C'
key_recognised = True
elif key[pygame.K_d]:
self.text += 'D'
key_recognised = True
elif key[pygame.K_e]:
self.text += 'E'
key_recognised = True
elif key[pygame.K_f]:
self.text += 'F'
key_recognised = True
elif key[pygame.K_g]:
self.text += 'G'
key_recognised = True
elif key[pygame.K_h]:
self.text += 'H'
key_recognised = True
elif key[pygame.K_i]:
self.text += 'I'
key_recognised = True
elif key[pygame.K_j]:
self.text += 'J'
key_recognised = True
elif key[pygame.K_k]:
self.text += 'K'
key_recognised = True
elif key[pygame.K_l]:
self.text += 'L'
key_recognised = True
elif key[pygame.K_m]:
self.text += 'M'
key_recognised = True
elif key[pygame.K_n]:
self.text += 'N'
key_recognised = True
elif key[pygame.K_o]:
self.text += 'O'
key_recognised = True
elif key[pygame.K_p]:
self.text += 'P'
key_recognised = True
elif key[pygame.K_q]:
self.text += 'Q'
key_recognised = True
elif key[pygame.K_r]:
self.text += 'R'
key_recognised = True
elif key[pygame.K_s]:
self.text += 'S'
key_recognised = True
elif key[pygame.K_t]:
self.text += 'T'
key_recognised = True
elif key[pygame.K_u]:
self.text += 'U'
key_recognised = True
elif key[pygame.K_v]:
self.text += 'V'
key_recognised = True
elif key[pygame.K_w]:
self.text += 'W'
key_recognised = True
elif key[pygame.K_x]:
self.text += 'X'
key_recognised = True
elif key[pygame.K_y]:
self.text += 'Y'
key_recognised = True
elif key[pygame.K_z]:
self.text += 'Z'
key_recognised = True
elif key[pygame.K_SPACE]:
self.text += ' '
key_recognised = True
elif key[pygame.K_1]:
self.text += '1'
key_recognised = True
elif key[pygame.K_2]:
self.text += '2'
key_recognised = True
elif key[pygame.K_3]:
self.text += '3'
key_recognised = True
elif key[pygame.K_4]:
self.text += '4'
key_recognised = True
elif key[pygame.K_5]:
self.text += '5'
key_recognised = True
elif key[pygame.K_6]:
self.text += '6'
key_recognised = True
elif key[pygame.K_7]:
self.text += '7'
key_recognised = True
elif key[pygame.K_8]:
self.text += '8'
key_recognised = True
elif key[pygame.K_9]:
self.text += '9'
key_recognised = True
elif key[pygame.K_0]:
self.text += '0'
key_recognised = True
elif key[pygame.K_BACKSPACE]:
if len(self.text) > 0:
self.text = self.text[:-1]
key_recognised = True
if key_recognised:
if len(self.text) > self.max_len:
self.text = self.text[:-1]
self.update_text()
Globals.player_name = self.text
self.accepting_input = False
self.set_timer(5, self.accept_input)
|
ryokbys/nap
|
pmd/force_params/QEq_params/extract_QEq_params.py
|
Python
|
mit
| 3,310
| 0.02145
|
#!/usr/bin/env python
"""
Extract atomic parameters for QEq potential.
Usage:
  extract_QEq_params.py [options] DATA_FILE NAME [NAME...]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
from docopt import docopt
__author__ = "RYO KOBAYASHI"
__version__ = "180112"
out_Coulomb= 'in.params.Coulomb'
def read_data_file(fname):
params = {}
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if line[0] == '#':
continue
data = line.split()
idx = int(data[0])
name = data[1]
ie1 = float(data[2])
ie2 = float(data[3])
ea = float(data[4])
rad = float(data[5])
en = float(data[6])
params[name] = [idx,name,ie1,ie2,ea,rad,en]
return params
def anum_to_range(atomic_number):
"""
Calculate and return the lower and upper limits of the charge of given atomic number.
"""
nstates = (0,2,8,18,32,50)
if atomic_number > 86:
raise ValueError('Atomic number greater than 86 is not available.')
elif atomic_number <= sum_array(nstates,1):
n = 1
elif atomic_number <= sum_array(nstates,2):
n = 2
    elif atomic_number <= sum_array(nstates,3):
n = 3
elif atomic_number <= sum_array(nstates,4):
n = 4
elif atomic_number <= sum_array(nstates,5):
n = 5
else:
raise ValueError('Atomic number is something wrong: ',atomic_number)
freedom = (0,2,6,10,14,18,22)
nval = atomic_number - sum_array(nstates,n-1)
if nval < sum_array(freedom,1):
l = 1
elif nval < sum_array(freedom,2):
l = 2
elif nval < sum_array(freedom,3):
l = 3
elif nval < sum_array(freedom,4):
l = 4
else:
l = 5
if not l <= n:
raise ValueError('not l<=n')
print('anum,n,l,nval=',atomic_number,n,l,nval)
nseat = sum_array(nstates,n) -sum_array(nstates,n-1)
nseatopen = nseat - nval
for il in range(l+1,n+1):
nseatopen -= freedom[il]
print('nseat,nseatopen=',nseat,nseatopen)
qlow = -float(nseatopen)
qup = float(min(nval, freedom[l]+freedom[l-1]))
return qlow,qup
def sum_array(array,n):
if len(array) < n+1:
raise ValueError('len(array) < n')
s = 0
for i in range(n+1):
s += array[i]
return s
def write_Coulomb_params(fname,params,specorder):
with open(fname,'w') as f:
#...declare it is 'variable_charge' Coulomb
f.write(' variable_charge \n')
n = 0
e0 = 0.0
for k in specorder:
n += 1
p = params[k]
anum = p[0]
name = p[1]
ie = p[2]
ea = -p[4]
xi = (ie+ea)/2
J = (ie-ea)
qlow,qup = anum_to_range(anum)
f.write('{0:4d} {1:5s}'.format(n,name)
+' {0:9.3f} {1:9.3f} {2:9.4f}'.format(xi,J,e0)
+' {0:5.1f} {1:5.1f}\n'.format(qlow,qup))
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['DATA_FILE']
specorder = [ name for name in args['NAME'] ]
params = read_data_file(fname)
write_Coulomb_params(out_Coulomb,params,specorder)
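# A minimal usage sketch, assuming the module above is importable under its file
# name extract_QEq_params; the data file name and species list below are
# hypothetical placeholders, not part of the original script.
from extract_QEq_params import anum_to_range, read_data_file, write_Coulomb_params

qlow, qup = anum_to_range(8)              # allowed charge range for oxygen (Z=8)
print(qlow, qup)
params = read_data_file('qeq_data.txt')   # hypothetical whitespace-separated table
write_Coulomb_params('in.params.Coulomb', params, ['O', 'Si'])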
|
velorientc/git_test7
|
tests/qt_repomanager_test.py
|
Python
|
gpl-2.0
| 2,859
| 0.001749
|
import mock, unittest
from mercurial import ui
from tortoisehg.hgqt import thgrepo
def mockrepo(ui, path):
m = mock.MagicMock(ui=ui, root=path)
m.unfiltered = lambda: m
return m
LOCAL_SIGNALS = ['repositoryOpened', 'repositoryClosed']
MAPPED_SIGNALS = ['configChanged', 'repositoryChanged', 'repositoryDestroyed']
class RepoManagerMockedTest(unittest.TestCase):
def setUp(self):
self.hgrepopatcher = mock.patch('mercurial.hg.repository', new=mockrepo)
self.watcherpatcher = mock.patch('tortoisehg.hgqt.thgrepo.RepoWatcher')
self.hgrepopatcher.start()
self.watcherpatcher.start()
self.repoman = thgrepo.RepoManager(ui.ui())
for signame in LOCAL_SIGNALS + MAPPED_SIGNALS:
slot = mock.Mock()
setattr(self, signame, slot)
getattr(self.repoman, signame).connect(slot)
def tearDown(self):
self.watcherpatcher.stop()
self.hgrepopatcher.stop()
thgrepo._repocache.clear()
def test_cached(self):
a1 = self.repoman.openRepoAgent('/a')
a2 = self.repoman.openRepoAgent('/a')
self.assertTrue(a1 is a2)
def test_release(self):
self.repoman.openRepoAgent('/a')
self.repoman.openRepoAgent('/a')
self.repoman.releaseRepoAgent('/a')
self.assertTrue(self.repoman.repoAgent('/a'))
self.repoman.releaseRepoAgent('/a')
self.assertFalse(self.repoman.repoAgent('/a'))
def test_signal_map(self):
a = self.repoman.openRepoAgent('/a')
for signame in MAPPED_SIGNALS:
getattr(a, signame).emit()
getattr(self, signame).assert_called_once_with('/a')
def test_disconnect_signal_on_close(self):
a = self.repoman.openRepoAgent('/a')
self.repoman.releaseRepoAgent('/a')
for signame in MAPPED_SIGNALS:
getattr(a, signame).emit()
self.assertFalse(getattr(self, signame).called)
def test_opened_signal(self):
        self.repoman.repositoryOpened.connect(
            lambda: self.assertTrue(self.repoman.repoAgent('/a')))
self.repoman.openRepoAgent('/a')
self.repositoryOpened.assert_called_once_with('/a')
self.repositoryOpened.reset_mock()
# emitted only if repository is actually instantiated (i.e. not cached)
self.repoman.openRepoAgent('/a')
self.assertFalse(self.repositoryOpened.called)
def test_closed_signal(self):
self.repoman.repositoryClosed.connect(
lambda: self.assertFalse(self.repoman.repoAgent('/a')))
self.repoman.openRepoAgent('/a')
self.repoman.openRepoAgent('/a')
self.repoman.releaseRepoAgent('/a')
self.assertFalse(self.repositoryClosed.called)
self.repoman.releaseRepoAgent('/a')
self.repositoryClosed.assert_called_once_with('/a')
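# The caching behaviour exercised by the tests above amounts to a reference
# count per repository path; the stripped-down sketch below illustrates that
# idea only and is not the actual RepoManager implementation.
class _RefCountedCache(object):
    def __init__(self):
        self._entries = {}  # path -> [agent, refcount]

    def open(self, path, factory):
        entry = self._entries.setdefault(path, [factory(path), 0])
        entry[1] += 1
        return entry[0]

    def release(self, path):
        entry = self._entries[path]
        entry[1] -= 1
        if entry[1] == 0:
            del self._entries[path]  # last reference released, drop the agent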
|
doktorinjh/WeatherPane
|
WU_Bridge_GitHub.py
|
Python
|
apache-2.0
| 4,290
| 0.007925
|
import urllib2
import json
#Get json data from Weather Underground by using IP address
f = urllib2.urlopen('http://api.wunderground.com/api/***KEY***/conditions/forecast/hourly/q/autoip.json')
json_string = f.read()
parsed_json = json.loads(json_string)
#Current Location and Observation Time
location = parsed_json['current_observation']['display_location']['full']
time = parsed_json['current_observation']['observation_time']
station = parsed_json['current_observation']['station_id']
#Current Conditions
current_temp = parsed_json['current_observation']['temp_f']
current_weather = parsed_json['current_observation']['weather']
current_precip = parsed_json['hourly_forecast'][0]['pop']
#Today's Forecast
today_high = parsed_json['forecast']['simpleforecast']['forecastday'][0]['high']['fahrenheit']
today_low = parsed_json['forecast']['simpleforecast']['forecastday'][0]['low']['fahrenheit']
today_weather = parsed_json['forecast']['simpleforecast']['forecastday'][0]['conditions']
today_precip = parsed_json['forecast']['txt_forecast']['forecastday'][0]['pop']
today_snow = parsed_json['forecast']['simpleforecast']['forecastday'][0]['snow_allday']['in']
#Tonight's Forecast
tonight_high = parsed_json['forecast']['simpleforecast']['forecastday'][0]['high']['fahrenheit']
tonight_low = parsed_json['forecast']['simpleforecast']['forecastday'][0]['low']['fahrenheit']
tonight_weather = parsed_json['forecast']['simpleforecast']['forecastday'][0]['conditions']
tonight_precip = parsed_json['forecast']['txt_forecast']['forecastday'][1]['pop']
tonight_snow = parsed_json['forecast']['simpleforecast']['forecastday'][0]['snow_night']['in']
#Tomorrow's Forecast
tomorrow_high = parsed_json['forecast']['simpleforecast']['forecastday'][1]['high']['fahrenheit']
tomorrow_low = parsed_json['forecast']['simpleforecast']['forecastday'][1]['low']['fahrenheit']
tomorrow_weather = parsed_json['forecast']['simpleforecast']['forecastday'][1]['conditions']
tomorrow_precip = parsed_json['forecast']['simpleforecast']['forecastday'][1]['pop']
tomorrow_snow = parsed_json['forecast']['simpleforecast']['forecastday'][1]['snow_allday']['in']
#Write output to python text file
#f = open( 'C:\Users\User\Desktop\WU_Test.txt', 'w' ) #For testing purposes
f = open( '/WU_Variables.py', 'w' ) #Change if directory is different
f.write("import sys" + '\n' )
f.write(""'\n')
f.write("sys.path.insert(0, '/usr/lib/python2.7/bridge/')" + '\n' )
f.write(""'\n')
f.write("from bridgeclient import BridgeClient as bridgeclient" + '\n' )
f.write(""'\n')
f.write("value = bridgeclient()" + '\n' )
f.write(""'\n')
f.write("value.put(\"location\",'{}')".format((location)) + '\n')
f.write("value.put(\"time\",'{}')".format((time)) + '\n')
f.write("value.put(\"station\",'{}')".format((station)) + '\n')
f.write(""'\n')
f.write("value.put(\"current_temp\",'{}')".format((current_temp)) + '\n')
f.write("value.put(\"current_weather\",'{}')".format((current_weather)) + '\n')
f.write("value.put(\"current_precip\",'{}')".format((current_precip)) + '\n')
f.write(""'\n')
f.write("value.put(\"today_high\",'{}')".format((today_high)) + '\n')
f.write("value.put(\"today_low\",'{}')".format((today_low)) + '\n')
f.write("value.put(\"today_weather\",'{}')".format((today_weather)) + '\n')
f.write("value.put(\"today_precip\",'{}')".format((today_precip)) + '\n')
f.write("value.put(\"today_snow\",'{}')".format((today_snow)) + '\n')
f.write(""'\n')
f.write("value.put(\"tonight_high\",'{}')".format((tonight_high)) + '\n')
f.write("value.put(\"tonight_low\",'{}')".format((tonight_low)) + '\n')
f.write("value.put(\"tonight_weather\",'{}')".format((tonight_weather)) + '\n')
f.write("value.put(\"tonight_precip\",'{}')".format((tonight_precip)) + '\n')
f.write("value.put(\"tonight_snow\",'{}')".format((tonight_snow)) + '\n')
f.write(""'\n')
f.write("value.put(\"tomorrow_high\",'{}')".format((tomorrow_high)) + '\n')
f.write("value.put(\"tomorrow_low\",'{}')".format((tomorrow_low)) + '\n')
f.write("value.put(\"tomorrow_weather\",'{}')".format((tomorrow_weather)) + '\n')
f.write("value.put(\"tomorrow_precip\",'{}')".format((tomorrow_precip)) + '\n')
f.write("value.put(\"tomorrow_snow\",'{}')".format((tomorrow_snow)) + '\n')
f.close()
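# The lookups above assume every key is present in the Weather Underground
# response; the helper below is a sketch (not part of the original bridge
# script) showing a defensive variant that degrades to an empty dict when a
# forecast day is missing instead of raising KeyError/IndexError.
def forecast_day(parsed, index):
    days = parsed.get('forecast', {}).get('simpleforecast', {}).get('forecastday', [])
    return days[index] if index < len(days) else {}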
|
StellarCN/py-stellar-base
|
stellar_sdk/xdr/operation_body.py
|
Python
|
apache-2.0
| 23,177
| 0.00164
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .allow_trust_op import AllowTrustOp
from .begin_sponsoring_future_reserves_op import BeginSponsoringFutureReservesOp
from .bump_sequence_op import BumpSequenceOp
from .change_trust_op import ChangeTrustOp
from .claim_claimable_balance_op import ClaimClaimableBalanceOp
from .clawback_claimable_balance_op import ClawbackClaimableBalanceOp
from .clawback_op import ClawbackOp
from .create_account_op import CreateAccountOp
from .create_claimable_balance_op import CreateClaimableBalanceOp
from .create_passive_sell_offer_op import CreatePassiveSellOfferOp
from .liquidity_pool_deposit_op import LiquidityPoolDepositOp
from .liquidity_pool_withdraw_op import LiquidityPoolWithdrawOp
from .manage_buy_offer_op import ManageBuyOfferOp
from .manage_data_op import ManageDataOp
from .manage_sell_offer_op import ManageSellOfferOp
from .muxed_account import MuxedAccount
from .operation_type import OperationType
from .path_payment_strict_receive_op import PathPaymentStrictReceiveOp
from .path_payment_strict_send_op import PathPaymentStrictSendOp
from .payment_op import PaymentOp
from .revoke_sponsorship_op import RevokeSponsorshipOp
from .set_options_op import SetOptionsOp
from .set_trust_line_flags_op import SetTrustLineFlagsOp
__all__ = ["OperationBody"]
@type_checked
class OperationBody:
""
|
"
XDR Source Code::
union switch (OperationType type)
{
    case CREATE_ACCOUNT:
CreateAccountOp createAccountOp;
case PAYMENT:
PaymentOp paymentOp;
case PATH_PAYMENT_STRICT_RECEIVE:
PathPaymentStrictReceiveOp pathPaymentStrictReceiveOp;
case MANAGE_SELL_OFFER:
ManageSellOfferOp manageSellOfferOp;
case CREATE_PASSIVE_SELL_OFFER:
CreatePassiveSellOfferOp createPassiveSellOfferOp;
case SET_OPTIONS:
SetOptionsOp setOptionsOp;
case CHANGE_TRUST:
ChangeTrustOp changeTrustOp;
case ALLOW_TRUST:
AllowTrustOp allowTrustOp;
case ACCOUNT_MERGE:
MuxedAccount destination;
case INFLATION:
void;
case MANAGE_DATA:
ManageDataOp manageDataOp;
case BUMP_SEQUENCE:
BumpSequenceOp bumpSequenceOp;
case MANAGE_BUY_OFFER:
ManageBuyOfferOp manageBuyOfferOp;
case PATH_PAYMENT_STRICT_SEND:
PathPaymentStrictSendOp pathPaymentStrictSendOp;
case CREATE_CLAIMABLE_BALANCE:
CreateClaimableBalanceOp createClaimableBalanceOp;
case CLAIM_CLAIMABLE_BALANCE:
ClaimClaimableBalanceOp claimClaimableBalanceOp;
case BEGIN_SPONSORING_FUTURE_RESERVES:
BeginSponsoringFutureReservesOp beginSponsoringFutureReservesOp;
case END_SPONSORING_FUTURE_RESERVES:
void;
case REVOKE_SPONSORSHIP:
RevokeSponsorshipOp revokeSponsorshipOp;
case CLAWBACK:
ClawbackOp clawbackOp;
case CLAWBACK_CLAIMABLE_BALANCE:
ClawbackClaimableBalanceOp clawbackClaimableBalanceOp;
case SET_TRUST_LINE_FLAGS:
SetTrustLineFlagsOp setTrustLineFlagsOp;
case LIQUIDITY_POOL_DEPOSIT:
LiquidityPoolDepositOp liquidityPoolDepositOp;
case LIQUIDITY_POOL_WITHDRAW:
LiquidityPoolWithdrawOp liquidityPoolWithdrawOp;
}
"""
def __init__(
self,
type: OperationType,
create_account_op: CreateAccountOp = None,
payment_op: PaymentOp = None,
path_payment_strict_receive_op: PathPaymentStrictReceiveOp = None,
manage_sell_offer_op: ManageSellOfferOp = None,
create_passive_sell_offer_op: CreatePassiveSellOfferOp = None,
set_options_op: SetOptionsOp = None,
change_trust_op: ChangeTrustOp = None,
allow_trust_op: AllowTrustOp = None,
destination: MuxedAccount = None,
manage_data_op: ManageDataOp = None,
bump_sequence_op: BumpSequenceOp = None,
manage_buy_offer_op: ManageBuyOfferOp = None,
path_payment_strict_send_op: PathPaymentStrictSendOp = None,
create_claimable_balance_op: CreateClaimableBalanceOp = None,
claim_claimable_balance_op: ClaimClaimableBalanceOp = None,
begin_sponsoring_future_reserves_op: BeginSponsoringFutureReservesOp = None,
revoke_sponsorship_op: RevokeSponsorshipOp = None,
clawback_op: ClawbackOp = None,
clawback_claimable_balance_op: ClawbackClaimableBalanceOp = None,
set_trust_line_flags_op: SetTrustLineFlagsOp = None,
liquidity_pool_deposit_op: LiquidityPoolDepositOp = None,
liquidity_pool_withdraw_op: LiquidityPoolWithdrawOp = None,
) -> None:
self.type = type
self.create_account_op = create_account_op
self.payment_op = payment_op
self.path_payment_strict_receive_op = path_payment_strict_receive_op
self.manage_sell_offer_op = manage_sell_offer_op
self.create_passive_sell_offer_op = create_passive_sell_offer_op
self.set_options_op = set_options_op
self.change_trust_op = change_trust_op
self.allow_trust_op = allow_trust_op
self.destination = destination
self.manage_data_op = manage_data_op
self.bump_sequence_op = bump_sequence_op
self.manage_buy_offer_op = manage_buy_offer_op
self.path_payment_strict_send_op = path_payment_strict_send_op
self.create_claimable_balance_op = create_claimable_balance_op
self.claim_claimable_balance_op = claim_claimable_balance_op
self.begin_sponsoring_future_reserves_op = begin_sponsoring_future_reserves_op
self.revoke_sponsorship_op = revoke_sponsorship_op
self.clawback_op = clawback_op
self.clawback_claimable_balance_op = clawback_claimable_balance_op
self.set_trust_line_flags_op = set_trust_line_flags_op
self.liquidity_pool_deposit_op = liquidity_pool_deposit_op
self.liquidity_pool_withdraw_op = liquidity_pool_withdraw_op
def pack(self, packer: Packer) -> None:
self.type.pack(packer)
if self.type == OperationType.CREATE_ACCOUNT:
if self.create_account_op is None:
raise ValueError("create_account_op should not be None.")
self.create_account_op.pack(packer)
return
if self.type == OperationType.PAYMENT:
if self.payment_op is None:
raise ValueError("payment_op should not be None.")
self.payment_op.pack(packer)
return
if self.type == OperationType.PATH_PAYMENT_STRICT_RECEIVE:
if self.path_payment_strict_receive_op is None:
raise ValueError("path_payment_strict_receive_op should not be None.")
self.path_payment_strict_receive_op.pack(packer)
return
if self.type == OperationType.MANAGE_SELL_OFFER:
if self.manage_sell_offer_op is None:
raise ValueError("manage_sell_offer_op should not be None.")
self.manage_sell_offer_op.pack(packer)
return
if self.type == OperationType.CREATE_PASSIVE_SELL_OFFER:
if self.create_passive_sell_offer_op is None:
raise ValueError("create_passive_sell_offer_op should not be None.")
self.create_passive_sell_offer_op.pack(packer)
return
if self.type == OperationType.SET_OPTIONS:
if self.set_options_op is None:
raise ValueError("set_options_op should not be None.")
self.set_options_op.pack(packer)
return
if self.type == OperationType.CHANGE_TRUST:
if self.change_trust_op is None:
raise ValueError("change_trust_op should not be None
|
Aroliant/iBeon
|
python/ibeon.py
|
Python
|
mit
| 901
| 0.057714
|
import re
class ibeon:
def remSpaces(data):
        return data.replace(" ", "")
def remNumbers(data):
return ''.join([x for x in data if not x.isdigit()])
def remSymbols(data):
return re.sub('[^\w]', '', data)
    def onlyNumbers(data):
return re.sub('[^0-9]', '', data)
def onlyAlpabets(data):
return re.sub('[^A-Za-z]', '', data)
def countSpaces(data):
return data.count(' ')
def countNumbers(data):
return len(re.sub('[^0-9]', '', data))
def countAlpabets(data):
return len(re.sub('[^A-Za-z]', '', data))
def strNext(data):
ar = list(data)
r,i='',0
for x in ar:
i=ord(x)+1
i=str(chr(i))
r+=str(i)
return r
def strPrev(data):
ar = list(data)
r,i='',0
for x in ar:
i=ord(x)-1
i=str(chr(i))
r+=str(i)
return r
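# Quick usage sketch (assuming Python 3, where the self-less methods above
# behave as plain functions when called through the class):
print(ibeon.remNumbers('abc123'))   # 'abc'
print(ibeon.onlyNumbers('abc123'))  # '123'
print(ibeon.countSpaces('a b c'))   # 2
print(ibeon.strNext('abc'))         # 'bcd'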
|
Foxnox/robotique-delpeyroux-monseigne
|
src/direct_kinematics.py
|
Python
|
mit
| 1,409
| 0.034776
|
from math import *
from Vertex import *
#Length of the three subparts of the robot leg
L1 = 51.0
L2 = 63.7
L3 = 93.0
Alpha = 20.69 #Mecanic constraint on Theta 2
Beta = 5.06 #Mecanic constraint on Theta 3
# Check if the given float match with radian (between 2PI and -2PI)
def radValidation (radian):
return (radian <= 2 * pi and radian >= -2 * pi)
# Direct kinamatics for our considered robot (specific of our leg setting)
def leg_dk(theta1, theta2, theta3, l1=L1, l2=L2, l3=L3, alpha = Alpha, beta = Beta):
Angle = Vertex(theta1,theta2,theta3)
    #Modification of theta2 and theta3 according to the mechanical constraints
    theta2 += alpha
theta3 = 90-(alpha+beta+theta3)
#print "Angles : " + str(theta1) + " ; " + str(theta2) + " ; " + str(theta3)
theta1=radians(theta1)
theta2=-radians(theta2)
theta3=-radians(theta3)
    #Storing all the sinus and cosinus into variables in order to simplify and run the calculation only once
c_1 = cos(theta1)
c_2 = cos(theta2)
c_2_3 = cos(theta2 + theta3)
s_1 = sin(theta1)
s_2 = sin(theta2)
s_2_3 = sin(theta2 + theta3)
#calculation of the projections and the differences due to the robot setting
projection = l1 + (l2 * c_2) + (l3 * c_2_3)
#Calculation of the final position
Final = Vertex((projection * c_1), (projection * s_1), ((l2 * s_2) + (l3 * s_2_3)))
return Final
leg_dk(0, 0, 0)
leg_dk(90, 0, 0)
leg_dk(180, -30.501, -67.819)
leg_dk(0, -30.645, 38.501)
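# A small numerical cross-check of the horizontal projection used inside leg_dk
# for the rest pose (pure math on the module-level constants; the Vertex class
# is not needed here):
def rest_pose_projection():
    theta2_eff = -radians(0 + Alpha)                 # theta2 after the alpha offset
    theta3_eff = -radians(90 - (Alpha + Beta + 0))   # theta3 after the beta offset
    return L1 + L2 * cos(theta2_eff) + L3 * cos(theta2_eff + theta3_eff)

print(rest_pose_projection())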
|
openstack/tempest
|
tempest/api/volume/admin/test_group_snapshots.py
|
Python
|
apache-2.0
| 12,992
| 0
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# Copyright (C) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class BaseGroupSnapshotsTest(base.BaseVolumeAdminTest):
@classmethod
def skip_checks(cls):
super(BaseGroupSnapshotsTest, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
def _create_group_snapshot(self, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(
self.__class__.__name__ + '-Group_Snapshot')
group_snapshot = self.group_snapshots_client.create_group_snapshot(
**kwargs)['group_snapshot']
group_snapshot['group_id'] = kwargs['group_id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._delete_group_snapshot, group_snapshot)
waiters.wait_for_volume_resource_status(
self.group_snapshots_client, group_snapshot['id'], 'available')
return group_snapshot
def _delete_group_snapshot(self, group_snapshot):
self.group_snapshots_client.delete_group_snapshot(group_snapshot['id'])
vols = self.volumes_client.list_volumes(detail=True)['volumes']
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for vol in vols:
for snap in snapshots:
if (vol['group_id'] == group_snapshot['group_id'] and
vol['id'] == snap['volume_id']):
self.snapshots_client.wait_for_resource_deletion(
snap['id'])
self.group_snapshots_client.wait_for_resource_deletion(
group_snapshot['id'])
class GroupSnapshotsTest(BaseGroupSnapshotsTest):
"""Test group snapshot"""
volume_min_microversion = '3.14'
volume_max_microversion = 'latest'
@decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
def test_group_snapshot_create_show_list_delete(self):
"""Test create/show/list/delete group snapshot
1. Create volume type "volume_type1"
2. Create group type "group_type1"
3. Create group "group1" with "group_type1" and "volume_type1"
4. Create volume "volume1" with "volume_type1" and "group1"
5. Create group snapshot "group_snapshot1" with "group1"
6. Check snapshot created from "volume1" reaches available status
            7. Check the created group snapshot "group_snapshot1" is in the list
of all group snapshots
8. Delete group snapshot "group_snapshot1"
"""
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
# Create group
        grp = self.create_group(group_type=group_type['id'],
                                volume_types=[volume_type['id']])
# Create volume
vol = self.create_volume(volume_type=volume_type['id'],
group_id=grp['id'])
# Create group snapshot
group_snapshot_name = data_utils.rand_name('group_snapshot')
group_snapshot = self._create_group_snapshot(
group_id=grp['id'], name=group_snapshot_name)
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if vol['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.snapshots_client, snap['id'], 'available')
self.assertEqual(group_snapshot_name, group_snapshot['name'])
# Get a given group snapshot
group_snapshot = self.group_snapshots_client.show_group_snapshot(
group_snapshot['id'])['group_snapshot']
self.assertEqual(group_snapshot_name, group_snapshot['name'])
# Get all group snapshots with details, check some detail-specific
# elements, and look for the created group snapshot
group_snapshots = self.group_snapshots_client.list_group_snapshots(
detail=True)['group_snapshots']
for grp_snapshot in group_snapshots:
self.assertIn('created_at', grp_snapshot)
self.assertIn('group_id', grp_snapshot)
self.assertIn((group_snapshot['name'], group_snapshot['id']),
[(m['name'], m['id']) for m in group_snapshots])
# Delete group snapshot
self._delete_group_snapshot(group_snapshot)
group_snapshots = self.group_snapshots_client.list_group_snapshots()[
'group_snapshots']
self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
[(m['name'], m['id']) for m in group_snapshots])
@decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
def test_create_group_from_group_snapshot(self):
"""Test creating group from group snapshot
1. Create volume type "volume_type1"
2. Create group type "group_type1"
3. Create group "group1" with "group_type1" and "volume_type1"
4. Create volume "volume1" with "volume_type1" and "group1"
5. Create group snapshot "group_snapshot1" with "group1"
6. Check snapshot created from "volume1" reaches available status
7. Create group "group2" from "group_snapshot1"
8. Check the volumes belonging to "group2" reach available status
9. Check "group2" reaches available status
"""
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
# Create Group
grp = self.create_group(group_type=group_type['id'],
volume_types=[volume_type['id']])
# Create volume
vol = self.create_volume(volume_type=volume_type['id'],
group_id=grp['id'])
# Create group_snapshot
group_snapshot_name = data_utils.rand_name('group_snapshot')
group_snapshot = self._create_group_snapshot(
group_id=grp['id'], name=group_snapshot_name)
self.assertEqual(group_snapshot_name, group_snapshot['name'])
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if vol['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.snapshots_client, snap['id'], 'available')
# Create Group from Group snapshot
grp_name2 = data_utils.rand_name('Group_from_snap')
grp2 = self.groups_client.create_group_from_source(
group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
self.addCleanup(self.delete_group, grp2['id'])
self.assertEqual(grp_name2, grp2['name'])
vols = self.volumes_client.list_volumes(detail=True)['volumes']
for vol in vols:
if vol['group_id'] == grp2['id']:
waiters.wait_for_volume_resource_status(
self.volumes_client, vol['id'], 'available')
waiters.wait_for_volume_resource_status(
self.groups_client, grp2['id'], 'available')
@decorators.idempotent_id('7d7fc000-0b4c-4376-a372-54411
|
project-callisto/callisto-core
|
callisto_core/notification/migrations/0005_rename_to_emailnotification.py
|
Python
|
agpl-3.0
| 408
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-24 15:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("notification", "0004_delete_old_email_notification")]
operations = [
migrations.RenameModel(
old_name="TempEmailNotificatio
|
n", new_name="EmailNotification"
)
]
|
SiLab-Bonn/pyBAR
|
pybar/testing/tools/create_fixture.py
|
Python
|
bsd-3-clause
| 1,622
| 0.003699
|
''' Script to reduce data files to create unit test fixtures.
'''
import tables as tb
def create_fixture(file_in, file_out, n_readouts, nodes):
with tb.open_file(file_in, 'r') as in_file:
with tb.open_file(file_out, 'w') as out_file:
in_file.copy_node('/configuration', out_file.root, recursive=True)
start, stop = None, None
if 'meta_data' in nodes:
node = in_file.get_node('/meta_data')
meta_data = node[:n_readouts]
try:
start, stop = meta_data['index_start'][0], meta_data['index_stop'][-1]
except IndexError:
start, stop = meta_data['hit_start'][0], meta_data['hit_stop'][-1]
t = out_file.create_table(out_file.root, name=node.name, description=node.description, filters=node.filters)
t.append(meta_data)
for n in nodes:
if n == 'meta_data':
continue
node = in_file.get_node('/' + n)
data = node[start:stop]
if type(node) == tb.earray.EArray:
earray = out_file.create_earray(out_file.root, name=node.name, atom=tb.UIntAtom(), shape=(0,), title=node.title, filters=node.filters)
earray.append(data)
if __name__ == '__main__':
    create_fixture(file_in=r'H:\Testbeam_07032016_LFCMOS\original_data\LFCMOS_1_14Neq\lfcmos_3_efficiency\117_lfcmos_3_ext_trigger_scan.h5',
file_out='small.h5',
n_readouts=100,
nodes=['raw_data', 'meta_data'])
|
DenisCarriere/mapillary
|
mapillary/geotag.py
|
Python
|
mit
| 6,563
| 0.001067
|
#!/usr/bin/python
# coding: utf8
from upload import create_file_list
import exifread
import math
import pexif
import re
import datetime
import time
from bs4 import BeautifulSoup
from geojson import FeatureCollection, Feature, LineString, Point
import json
def utc_to_localtime(utc_time):
utc_offset_timedelta = datetime.datetime.utcnow() - datetime.datetime.now()
return utc_time - utc_offset_timedelta
def get_lat_lng_time(path):
'''
Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lng).
GPX stores time in UTC, assume your camera used the local
timezone and convert accordingly.
'''
soup = BeautifulSoup(open(path))
points = []
last = None
for item in soup.find_all('trkpt'):
t = utc_to_localtime(convert_time(item.time.text))
t = time.mktime(t.timetuple())
if last:
# Ignore bad GPX data
time_offset = abs(last - t)
if time_offset < 15:
points.append([t, float(item['lat']), float(item['lon']), float(item.ele.text)])
last = t
points.sort()
return points
def convert_time(t):
"""Converts String into Datetime object"""
expression = r'\d+'
pattern = re.compile(expression)
match = pattern.findall(t)
if match:
year, month, day, hour, minute, second = match[0:6]
t = datetime.datetime(int(year),
int(month),
int(day),
int(hour),
int(minute),
int(round(float(second), 0)))
return t
def get_datetime_tag(tags):
for tag in [
'Image DateTime',
'EXIF DateTimeOriginal',
'EXIF DateTimeDigitized'
]:
time = str(tags.get(tag))
if time:
return convert_time(time)
def compute_bearing(start_lat, start_lng, end_lat, end_lng):
'''
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
'''
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lng = math.radians(start_lng)
end_lat = math.radians(end_lat)
end_lng = math.radians(end_lng)
    dLng = end_lng - start_lng
dPhi = math.log(
math.tan(end_lat / 2.0 + math.pi / 4.0) /
        math.tan(start_lat / 2.0 + math.pi / 4.0)
)
if abs(dLng) > math.pi:
if dLng > 0.0:
dLng = -(2.0 * math.pi - dLng)
else:
dLng = (2.0 * math.pi + dLng)
y = math.sin(dLng)*math.cos(end_lat)
x = math.cos(start_lat) * math.sin(end_lat) - math.sin(start_lat) *\
math.cos(end_lat) * math.cos(dLng)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing
def interpolate_lat_lng(points, timestamp):
'''
Return interpolated lat, lng and compass bearing for time t.
Points is a list of tuples (time, lat, lng, elevation).
'''
t = time.mktime(timestamp.timetuple())
# find the enclosing points in sorted list
if (t < points[0][0]) or (t >= points[-1][0]):
return None, None, None, None
raise ValueError("Time t not in scope of gpx file.")
for i, point in enumerate(points):
if t < point[0]:
if i > 0:
before = points[i-1]
else:
before = points[i]
after = points[i]
break
# time diff
dt_before = t - before[0]
dt_after = after[0] - t
# simple linear interpolation
lat = (before[1] * dt_after + after[1]*dt_before) / (dt_before + dt_after)
lng = (before[2] * dt_after + after[2]*dt_before) / (dt_before + dt_after)
bearing = compute_bearing(before[1], before[2], after[1], after[2])
if before[3] is not None:
elevation = (before[3] * dt_after + after[3] * dt_before) /\
(dt_before + dt_after)
else:
elevation = None
return lat, lng, bearing, elevation
def add_exif_using_timestamp(filename, points, offset_bearing=0, offset_time=0):
'''
Find lat, lng and bearing of filename and write to EXIF.
'''
# Read EXIF tags from file
with open(filename, 'rb') as f:
try:
tags = exifread.process_file(f)
except:
print filename
# Get timestamp and offset time with seconds
timestamp = get_datetime_tag(tags) \
- datetime.timedelta(seconds=offset_time)
# Get Coordinates from timestamp & GPX
lat, lng, bearing, altitude = interpolate_lat_lng(points, timestamp)
if bool(lat and lng):
# Add Geo EXIF to file
img = pexif.JpegFile.fromFile(filename)
# Offset Bearing
bearing = (bearing + offset_bearing) % 360
# Define Lat & Lng
img.set_geo(lat, lng)
img.set_altitude(altitude)
img.set_direction(bearing)
img.set_bearing(bearing)
# Overwrite Save File
img.writeFile(filename)
print('Saving file: %s' % filename)
else:
print('ERROR: %s' % filename)
def save_geojson_gpx(path_gpx):
# Parse all GPX points
gpx = get_lat_lng_time(path_gpx)
# Save as GeoJSON
geom_line = []
geom_point = []
for item in gpx:
geom_line.append((item[2], item[1]))
properties = {'timestamp': item[0], 'lat': item[1], 'lng': item[2]}
geom_point.append(Feature(geometry=Point((item[2], item[1])), properties=properties))
# LineString
with open("/home/denis/GIS/Mapillary/GPX_Line.geojson", 'w') as f:
fc = Feature(geometry=LineString(geom_line))
f.write(json.dumps(fc, indent=2))
with open("/home/denis/GIS/Mapillary/GPX_Point.geojson", 'w') as f:
fc = FeatureCollection(geom_point)
f.write(json.dumps(fc, indent=2))
class Geotag(object):
def __init__(self, path, path_gpx, **kwargs):
# Optional Parameters
offset_time = kwargs.get('time', 0)
offset_bearing = kwargs.get('bearing', 0)
# Parse all GPX points
gpx = get_lat_lng_time(path_gpx)
# Add EXIF to each indiviual file
for filepath in create_file_list(path):
add_exif_using_timestamp(filepath, gpx, offset_bearing, offset_time)
if __name__ == '__main__':
path = '/home/denis/GIS/Mapillary/Orleans_2/'
path_gpx = '/home/denis/GIS/Mapillary/Orleans_2.gpx'
#save_geojson_gpx(path_gpx)
gpx = get_lat_lng_time(path_gpx)
Geotag(path, path_gpx)
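# A self-contained illustration of the linear time-interpolation rule used in
# interpolate_lat_lng above (the weights are the time gaps to the two
# surrounding track points); the sample values below are made up:
def lerp_by_time(before, after, t):
    dt_before = t - before[0]
    dt_after = after[0] - t
    return (before[1] * dt_after + after[1] * dt_before) / (dt_before + dt_after)

print(lerp_by_time((0.0, 45.000), (10.0, 45.010), 2.5))  # -> 45.0025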
|
gtaylor/dott
|
src/game/parents/base_objects/base.py
|
Python
|
bsd-3-clause
| 25,944
| 0.001156
|
"""
Contains base level parents that aren't to be used directly.
"""
from twisted.internet.defer import inlineCallbacks, returnValue
from fuzzywuzzy.process import QRatio
from fuzzywuzzy import utils as fuzz_utils
from src.daemons.server.ansi import ANSI_HILITE, ANSI_NORMAL
from src.daemons.server.objects.exceptions import ObjectHasZoneMembers, NoSuchObject
from src.daemons.server.protocols.proxyamp import EmitToObjectCmd
#noinspection PyShadowingBuiltins
class BaseObject(object):
"""
This is the base parent for every in-game "object". Rooms, Players, and
Things are all considered objects. Behaviors here are very low level.
"""
# Holds this object's command table. Any objects inside of this object
# will check this for command matches before the global table.
local_command_table = None
# Same as above, but for admin-only commands.
local_admin_command_table = None
def __init__(self, mud_service, id, parent, name, description=None,
internal_description=None,
location_id=None, destination_id=None, zone_id=None,
aliases=None, originally_controlled_by_account_id=None,
controlled_by_account_id=None, attributes=None,
created_time=None):
"""
:param MudService mud_service: The MudService class running the game.
:param int id: A unique ID for the object, or None if this is
a new object.
:param str parent: The Python path to the parent class for this
instantiated object.
:param str name: The non-ASCII'd name.
:param str description: The object's description.
:keyword int location_id: The ID of the object this object resides within.
None if this object is location-less.
:keyword int destination_id: Used to determine where an exit leads.
:keyword int zone_id: Optional zone ID (ID of another BaseObject).
:keyword int originally_controlled_by_account_id: Account ID that
first controlled this object (if it was created in conjunction
with an account).
        :keyword int controlled_by_account_id: If this object is being controlled
by an account, this will be populated.
:keyword dict kwargs: All objects are instantiated with the values from
the DB as kwargs. Since the DB representation of all of an
objects attributes is just a dict, this works really well.
:keyword datetime.datetime created_time: The time the object was
created.
"""
self.mud_service = mud_service
# This mirrors the 'id' field in dott_objects. If this is set to None
# and the instance is saved, an insert is done.
self.id = id
self.name = name
self.description = description
self.internal_description = internal_description
self.parent = parent
self.location_id = location_id
self.destination_id = destination_id
self.zone_id = zone_id
self.aliases = aliases or []
self.originally_controlled_by_account_id = originally_controlled_by_account_id
self.controlled_by_account_id = controlled_by_account_id
# This stores all of the object's data. This includes core and
# userspace attributes.
self._attributes = attributes or {}
self.created_time = created_time
assert isinstance(self._attributes, dict)
def __str__(self):
return "<%s: %s (#%d)>" % (self.__class__.__name__, self.name, self.id)
def __repr__(self):
return self.__str__()
#
## Begin properties.
#
@property
def _object_store(self):
"""
Short-cut to the global object store.
:rtype: ObjectStore
:returns: Reference to the global object store instance.
"""
return self.mud_service.object_store
@property
def _command_handler(self):
"""
Short-cut to the global command handler.
:rtype: CommandHandler
:returns: Reference to the global command handler instance.
"""
return self.mud_service.command_handler
def _generic_id_to_baseobject_property_getter(self, attrib_name):
"""
A generic getter for attributes that store object IDs. Given an
object ID, retrieve it or None.
:rtype: BaseObject or None
:returns: The ``BaseObject`` instance for the attribute, or None
if there is no value.
:raises: NoSuchObject if the stored object ID has no match in
the DB.
"""
obj_id = getattr(self, attrib_name)
if obj_id:
#noinspection PyTypeChecker
return self._object_store.get_object(obj_id)
else:
return None
def _generic_baseobject_to_id_property_setter(self, attrib_name, obj_or_id):
"""
Sets this object's zone.
:param obj_or_id: The object or object ID to set as the
object's zone master.
:type obj_or_id: A ``BaseObject`` sub-class or an ``int``.
"""
if isinstance(obj_or_id, int):
# Already an int, assume this is an object ID.
setattr(self, attrib_name, obj_or_id)
elif isinstance(obj_or_id, basestring):
raise Exception("BaseObject.set_%s() can't accept strings for object IDs: %s" % (
attrib_name, obj_or_id))
elif obj_or_id is None:
setattr(self, attrib_name, None)
else:
# Looks like a BaseObject sub-class. Grab the object ID.
setattr(self, attrib_name, obj_or_id.id)
@property
def attributes(self):
"""
Redirects to the object's attributes dict.
:rtype: dict
"""
return self._attributes
def get_location(self):
"""
Determines the object's location and returns the instance representing
this object's location.
:returns: The ``BaseObject`` instance (sub-class) that this object
is currently in. Typically a ``RoomObject``, but can also be
other types.
:raises: NoSuchObject if the stored object ID has no match in
the DB.
"""
return self._generic_id_to_baseobject_property_getter('location_id')
def set_location(self, obj_or_id):
"""
Sets this object's location.
:param obj_or_id: The object or object ID to set as the
object's location.
:type obj_or_id: A ``BaseObject`` sub-class or a ``str``.
"""
if self.base_type == 'room':
# Rooms can't have locations.
return
self._generic_baseobject_to_id_property_setter('location_id', obj_or_id)
location = property(get_location, set_location)
def get_zone(self):
"""
Determines the object's zone and returns the instance representing
this object's zone.
:rtype: BaseObject or None
:returns: The ``BaseObject`` instance (sub-class) that is this object's
zone master object.
:raises: NoSuchObject if the stored object ID has no match in
the DB.
"""
        return self._generic_id_to_baseobject_property_getter('zone_id')
def set_zone(self, obj_or_id):
"""
        Sets this object's zone.
:param obj_or_id: The object or object ID to set as the
object's zone master.
:type obj_or_id: A ``BaseObject`` sub-class or an ``int``.
"""
self._generic_baseobject_to_id_property_setter('zone_id', obj_or_id)
zone = property(get_zone, set_zone)
#noinspection PyPropertyDefinition
@property
def base_type(self):
"""
        BaseObject's four primary sub-classes are Room, Player, Exit,
        and Thing. These are all considered the top-level children, and
        everything else will be children of them. Room, Player, Exit, and
        Thing are the only four valid base types, and each parent should
return one of the following for quick-and-easy type checking:
* ro
|
samurailens/sop
|
app/samplePythonScript/webapp.py
|
Python
|
gpl-2.0
| 609
| 0.026273
|
from gi.repository import Gtk
from gi.repository import WebKit
class BrowserView:
def __init__(self):
window = Gtk.Window()
window.connect('delete-event',Gtk.main_quit)
self.view = WebKit.WebView()
#self.view.load_uri('http://example.net')
#https://www.raspberrypi.org/documentation/remote-access/web-server/apache.md
        #default content in /var/www/index.html
self.view.load_uri('http://192.168.1.6')
window.add(self.view)
window.fullscreen()
window.show_all()
if __name__ == "__main__":
BrowserView()
Gtk.main()
|
jgrizou/robot_2WD
|
python/robotControl/Motors.py
|
Python
|
gpl-2.0
| 3,698
| 0.001082
|
import threading
import time
MOVEXCOUNT = 20
GETCOUNT = 21
COUNT = 22
RESETCOUNT = 23
RESETEDCOUNT = 24
SETPOSCONTROLSTATUS = 25
SETACCELERATION = 26
SETDECELERATION = 27
SETMAXSPEED = 28
SETSPEED = 40
GETSPEED = 41
SPEED = 42
SETPID = 43
SETPIDLIMIT = 44
class Motors(threading.Thread):
def __init__(self, cmdMessenger, freq, timeout=0.1):
threading.Thread.__init__(self)
self.daemon = True
self.isRunning = True
self.setFreq(freq)
self.timeout = timeout
self.countM0 = 0
self.countM1 = 0
self.resetedCount = []
self.speedM0 = 0
self.speedM1 = 0
self.cMes = cmdMessenger
self.cMes.attach(COUNT, self.updateCount)
self.cMes.attach(SPEED, self.updateSpeed)
self.cMes.attach(RESETEDCOUNT, self.handleResetedCount)
if not self.cMes.isRunning:
self.cMes.start()
self.setPosControlStatus(1)
self.setPID(0.,1., 0.)
self.setPIDLimit(50.)
self.acceleration = 100.
        self.deceleration = 20.
self.maxSpeed = 40.
self.isCountUpdated = True
self.isSpeedUpdated = True
def kill(self):
self.isRunning = False
def run(self):
while self.isRunning:
if self.isCountUpdated:
self.cMes.send(GETCOUNT)
if self.isSpeedUpdated:
self.cMes.send(GETSPEED)
time.sleep(self.sleepTime)
def setFreq(self, freq):
self.sleepTime = 1./freq
def moveXcount(self, nCountM0, nCountM1):
self.cMes.send(MOVEXCOUNT, int(nCountM0), int(nCountM1))
def getCountM0(self):
return self.countM0
def getCountM1(self):
return self.countM1
def getCounts(self):
return self.getCountM0(), self.getCountM1()
def setSpeed(self, speedM0, speedM1):
self.cMes.send(SETSPEED, float(speedM0), float(speedM1))
def getSpeedM0(self):
return self.speedM0
def getSpeedM1(self):
return self.speedM1
def getSpeeds(self):
return self.getSpeedM0(), self.getSpeedM1()
def updateCount(self, cmdList):
self.countM0 = int(cmdList[1])
self.countM1 = int(cmdList[2])
def updateSpeed(self, cmdList):
self.speedM0 = float(cmdList[1])
self.speedM1 = float(cmdList[2])
def handleResetedCount(self, cmdList):
self.resetedCount = cmdList[1:]
def resetCount(self):
self.cMes.send(RESETCOUNT)
        startTime = time.time()
inTime = True
while not self.resetedCount and inTime:
time.sleep(1e-6)
if time.time() - startTime > self.timeout:
inTime = False
if not inTime:
return [None, None]
tmp = self.resetedCount
self.resetedCount = []
return tmp
def setPosControlStatus(self, status):
self.positionControl = int(status)
self.cMes.send(SETPOSCONTROLSTATUS, self.positionControl)
    def setPID(self, KP, KI, KD):
self.KP = float(KP)
self.KI = float(KI)
self.KD = float(KD)
self.cMes.send(SETPID, self.KP, self.KI, self.KD)
def setPIDLimit(self, limit):
self.PIDLimit = float(limit)
self.cMes.send(SETPIDLIMIT, self.PIDLimit)
def setAcceleration(self, acc):
self.acceleration = float(acc)
self.cMes.send(SETACCELERATION, self.acceleration)
def setDeceleration(self, dec):
self.deceleration = float(dec)
        self.cMes.send(SETDECELERATION, self.deceleration)
def setMaxSpeed(self, speed):
self.maxSpeed = float(speed)
self.cMes.send(SETMAXSPEED, self.maxSpeed)
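# Minimal usage sketch. _FakeMessenger is a stand-in exposing the attach/send/
# isRunning/start interface Motors expects; it is not the real command messenger.
class _FakeMessenger(object):
    isRunning = True

    def start(self):
        pass

    def attach(self, cmd_id, callback):
        pass

    def send(self, cmd_id, *args):
        print(cmd_id, args)  # just echo what would go over the wire

motors = Motors(_FakeMessenger(), freq=10)
motors.setSpeed(20, 20)       # sends SETSPEED with two floats
motors.moveXcount(100, 100)   # sends MOVEXCOUNT with two ints
motors.kill()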
|
nkouevda/slack-rtm-bot
|
slack_rtm_bot/handlers/reaction.py
|
Python
|
mit
| 976
| 0.009221
|
import itertools
from .base import MessageHandler
from .. import settings
class ReactionHandler(MessageHandler):
  TRIGGER_ANCHOR = ''
TRIGGER_PREFIX = ''
TRIGGERS = sorted(
settings.EMOJI_REACTIONS.keys() + settings.MESSAGE_REACTIONS.keys())
  HELP = 'add emoji and message reactions'
def handle_message(self, event, triggers, query):
for trigger in triggers:
trigger = trigger.lower()
for reaction in self._get_reactions(settings.EMOJI_REACTIONS, trigger):
self.client.api_call(
'reactions.add',
name=reaction,
channel=event['channel'],
timestamp=event['ts'])
return '\n'.join(itertools.chain.from_iterable(
self._get_reactions(settings.MESSAGE_REACTIONS, trigger)
for trigger in triggers))
def _get_reactions(self, reaction_defs, trigger):
reactions = reaction_defs.get(trigger, [])
return [reactions] if isinstance(reactions, basestring) else reactions
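# Illustrative shapes for the two settings dicts consumed above; the values are
# hypothetical, the real ones live in slack_rtm_bot.settings. _get_reactions
# accepts either a single string or a list for each trigger.
EXAMPLE_EMOJI_REACTIONS = {'ship it': 'shipit'}                 # trigger -> emoji name(s)
EXAMPLE_MESSAGE_REACTIONS = {'good bot': ['you are welcome']}   # trigger -> reply line(s)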
|
matteius/parkle-api
|
accounts/models.py
|
Python
|
gpl-3.0
| 422
| 0.00237
|
from django.db import models
# Create your models here.
class ParklePlayer(models.Model):
""" Consider this our User model for a Registered Parkle Player.
"""
username = models.CharField(max_length=30, unique=True)
email = models.EmailField(unique=True)
    player_key = models.CharField(max_length=32, unique=True) # max length of uuid.hex
secret_key = models.CharField(max_length=32, unique=True)
|