repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ptrendx/mxnet | example/ssd/train.py | Python | apache-2.0 | 8,626 | 0.004637 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import mxnet as mx
import os
from train.train_net import train_net
def parse_args():
    """Parse command line options for SSD training.

    Defects fixed: removed stray dataset-separator '|' artifacts that had
    been spliced into the '--lr-steps' default and the '--log' argument,
    restoring valid Python.

    Returns
    -------
    argparse.Namespace
        Parsed options: data paths, network choice, optimizer
        hyper-parameters, evaluation thresholds, logging settings, etc.
    """
    parser = argparse.ArgumentParser(description='Train a Single-shot detection network')
    # data locations (.rec record files plus optional .lst lists)
    parser.add_argument('--train-path', dest='train_path', help='train record to use',
                        default=os.path.join(os.getcwd(), 'data', 'train.rec'), type=str)
    parser.add_argument('--train-list', dest='train_list', help='train list to use',
                        default="", type=str)
    parser.add_argument('--val-path', dest='val_path', help='validation record to use',
                        default=os.path.join(os.getcwd(), 'data', 'val.rec'), type=str)
    parser.add_argument('--val-list', dest='val_list', help='validation list to use',
                        default="", type=str)
    parser.add_argument('--network', dest='network', type=str, default='vgg16_reduced',
                        help='which network to use')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=32,
                        help='training batch size')
    # -1 disables resume/finetune; any other value is the epoch to load
    parser.add_argument('--resume', dest='resume', type=int, default=-1,
                        help='resume training from epoch n')
    parser.add_argument('--finetune', dest='finetune', type=int, default=-1,
                        help='finetune from epoch n, rename the model before doing this')
    parser.add_argument('--pretrained', dest='pretrained', help='pretrained model prefix',
                        default=os.path.join(os.getcwd(), 'model', 'vgg16_reduced'), type=str)
    parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
                        default=1, type=int)
    parser.add_argument('--prefix', dest='prefix', help='new model prefix',
                        default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
    parser.add_argument('--gpus', dest='gpus', help='GPU devices to train with',
                        default='0', type=str)
    parser.add_argument('--begin-epoch', dest='begin_epoch', help='begin epoch of training',
                        default=0, type=int)
    parser.add_argument('--end-epoch', dest='end_epoch', help='end epoch of training',
                        default=240, type=int)
    parser.add_argument('--frequent', dest='frequent', help='frequency of logging',
                        default=20, type=int)
    parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
                        help='set image shape')
    parser.add_argument('--label-width', dest='label_width', type=int, default=350,
                        help='force padding label width to sync across train and validation')
    # optimizer hyper-parameters
    parser.add_argument('--lr', dest='learning_rate', type=float, default=0.002,
                        help='learning rate')
    parser.add_argument('--momentum', dest='momentum', type=float, default=0.9,
                        help='momentum')
    parser.add_argument('--wd', dest='weight_decay', type=float, default=0.0005,
                        help='weight decay')
    # per-channel pixel means subtracted during preprocessing
    parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
                        help='red mean value')
    parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
                        help='green mean value')
    parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
                        help='blue mean value')
    parser.add_argument('--lr-steps', dest='lr_refactor_step', type=str, default='80, 160',
                        help='refactor learning rate at specified epochs')
    parser.add_argument('--lr-factor', dest='lr_refactor_ratio', type=float, default=0.1,
                        help='ratio to refactor learning rate')
    parser.add_argument('--freeze', dest='freeze_pattern', type=str, default="^(conv1_|conv2_).*",
                        help='freeze layer pattern')
    parser.add_argument('--log', dest='log_file', type=str, default="train.log",
                        help='save training log to file')
    parser.add_argument('--monitor', dest='monitor', type=int, default=0,
                        help='log network parameters every N iters if larger than 0')
    parser.add_argument('--pattern', dest='monitor_pattern', type=str, default=".*",
                        help='monitor parameter pattern, as regex')
    parser.add_argument('--num-class', dest='num_class', type=int, default=20,
                        help='number of classes')
    parser.add_argument('--num-example', dest='num_example', type=int, default=16551,
                        help='number of image examples')
    parser.add_argument('--class-names', dest='class_names', type=str,
                        default='aeroplane, bicycle, bird, boat, bottle, bus, \
                        car, cat, chair, cow, diningtable, dog, horse, motorbike, \
                        person, pottedplant, sheep, sofa, train, tvmonitor',
                        help='string of comma separated names, or text filename')
    parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
                        help='non-maximum suppression threshold')
    parser.add_argument('--overlap', dest='overlap_thresh', type=float, default=0.5,
                        help='evaluation overlap threshold')
    parser.add_argument('--force', dest='force_nms', action='store_true',
                        help='force non-maximum suppression on different class')
    parser.add_argument('--use-difficult', dest='use_difficult', action='store_true',
                        help='use difficult ground-truths in evaluation')
    parser.add_argument('--no-voc07', dest='use_voc07_metric', action='store_false',
                        help='dont use PASCAL VOC 07 11-point metric')
    args = parser.parse_args()
    return args
def parse_class_names(args):
    """Resolve class names from the parsed CLI options.

    ``args.class_names`` may either be a comma separated string of names or
    the path of a text file containing one class name per line.  Returns the
    list of names (validated against ``args.num_class``), or ``None`` when no
    names were supplied.
    """
    raw = args.class_names
    if not raw:
        return None
    if os.path.isfile(raw):
        # a file path was given: one class name per line
        with open(raw, 'r') as fin:
            names = [line.strip() for line in fin.readlines()]
    else:
        names = [token.strip() for token in raw.split(',')]
    assert len(names) == args.num_class, str(len(names))
    for label in names:
        assert len(label) > 0
    return names
if __name__ == '__main__':
args = parse_args()
# context list
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = [mx.cpu()] if not ctx else ctx
# class names if applicable
class_names = parse_class_names(args)
# start training
train_net(args.network, args.train_path,
args.num_class, args.batch_size,
args.data_shape, [args.mean_r, args.mean_g, args.mean_b],
args.resume, args.finetune, args.pretrained,
args.epoch, args.prefix, ctx, args.begin_epoch, args.end_epoch,
args.frequent, args.learning_rate, args.momentum, args.weight_decay,
args.lr_refactor_step, args.lr_refactor_ratio,
val_path=args.val_path,
num_example=args.num_example,
class_names=class_names,
l |
indautgrp/frappe | frappe/model/utils/rename_field.py | Python | mit | 4,528 | 0.024514 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model import no_value_fields
from frappe.utils.password import rename_password_field
def rename_field(doctype, old_fieldname, new_fieldname):
	"""Copy data from `old_fieldname` to `new_fieldname` for `doctype` and
	update dependent metadata (reports, user list settings, property
	setters, custom fields).

	This function assumes that doctype is already synced, i.e. the new
	field definition already exists in the meta.
	"""
	meta = frappe.get_meta(doctype, cached=False)
	new_field = meta.get_field(new_fieldname)
	if not new_field:
		# nothing to migrate into; bail out (Python 2 print statement)
		print "rename_field: " + (new_fieldname) + " not found in " + doctype
		return
	if new_field.fieldtype == "Table":
		# change parentfield of table mentioned in options
		# (child doctype is the first line of the Table field's options)
		frappe.db.sql("""update `tab%s` set parentfield=%s
			where parentfield=%s""" % (new_field.options.split("\n")[0], "%s", "%s"),
			(new_fieldname, old_fieldname))
	elif new_field.fieldtype not in no_value_fields:
		if meta.issingle:
			# Single doctypes store values as rows in tabSingles
			frappe.db.sql("""update `tabSingles` set field=%s
				where doctype=%s and field=%s""",
				(new_fieldname, doctype, old_fieldname))
		else:
			# copy field value (column-to-column within the doctype table)
			frappe.db.sql("""update `tab%s` set `%s`=`%s`""" % \
				(doctype, new_fieldname, old_fieldname))
		update_reports(doctype, old_fieldname, new_fieldname)
		update_users_report_view_settings(doctype, old_fieldname, new_fieldname)
		if new_field.fieldtype == "Password":
			rename_password_field(doctype, old_fieldname, new_fieldname)
	# update in property setter
	update_property_setters(doctype, old_fieldname, new_fieldname)
def update_reports(doctype, old_fieldname, new_fieldname):
	"""Rewrite saved Report Builder reports so references to
	`old_fieldname` on `doctype` (filters, columns and sort settings)
	point at `new_fieldname` instead.

	Defect fixed: removed stray dataset-separator '|' artifacts that had
	corrupted `return sort_by` and the filter comparison, restoring valid
	Python.
	"""
	def _get_new_sort_by(report_dict, report, key):
		# Translate a "DocType.fieldname" (or bare "fieldname") sort
		# expression; flags the report as updated when it changes.
		sort_by = report_dict.get(key) or ""
		if sort_by:
			sort_by = sort_by.split(".")
			if len(sort_by) > 1:
				if sort_by[0]==doctype and sort_by[1]==old_fieldname:
					sort_by = doctype + "." + new_fieldname
					report_dict["updated"] = True
			elif report.ref_doctype == doctype and sort_by[0]==old_fieldname:
				sort_by = doctype + "." + new_fieldname
				report_dict["updated"] = True
		if isinstance(sort_by, list):
			sort_by = '.'.join(sort_by)
		return sort_by

	# only user-defined Report Builder reports that mention both the old
	# field name and the doctype can be affected
	reports = frappe.db.sql("""select name, ref_doctype, json from tabReport
		where report_type = 'Report Builder' and ifnull(is_standard, 'No') = 'No'
		and json like %s and json like %s""",
		('%%%s%%' % old_fieldname , '%%%s%%' % doctype), as_dict=True)
	for r in reports:
		report_dict = json.loads(r.json)

		# update filters: each filter is [doctype, fieldname, operator, value]
		new_filters = []
		for f in report_dict.get("filters"):
			if f and len(f) > 1 and f[0] == doctype and f[1] == old_fieldname:
				new_filters.append([doctype, new_fieldname, f[2], f[3]])
				report_dict["updated"] = True
			else:
				new_filters.append(f)

		# update columns: each column is [fieldname, doctype]
		new_columns = []
		for c in report_dict.get("columns"):
			if c and len(c) > 1 and c[0] == old_fieldname and c[1] == doctype:
				new_columns.append([new_fieldname, doctype])
				report_dict["updated"] = True
			else:
				new_columns.append(c)

		# update sort by
		new_sort_by = _get_new_sort_by(report_dict, r, "sort_by")
		new_sort_by_next = _get_new_sort_by(report_dict, r, "sort_by_next")

		# persist only when something actually changed
		if report_dict.get("updated"):
			new_val = json.dumps({
				"filters": new_filters,
				"columns": new_columns,
				"sort_by": new_sort_by,
				"sort_order": report_dict.get("sort_order"),
				"sort_by_next": new_sort_by_next,
				"sort_order_next": report_dict.get("sort_order_next")
			})
			frappe.db.sql("""update `tabReport` set `json`=%s where name=%s""", (new_val, r.name))
def update_users_report_view_settings(doctype, ref_fieldname, new_fieldname):
	"""Rename `ref_fieldname` to `new_fieldname` inside every user's saved
	list-view column settings (stored as JSON in tabDefaultValue)."""
	user_report_cols = frappe.db.sql("""select defkey, defvalue from `tabDefaultValue` where
		defkey like '_list_settings:%'""")
	for defkey, defvalue in user_report_cols:
		updated = False
		columns = []
		# each saved column is a [fieldname, doctype] pair
		for fieldname, field_doctype in json.loads(defvalue):
			if fieldname == ref_fieldname and field_doctype == doctype:
				columns.append([new_fieldname, field_doctype])
				updated = True
			else:
				columns.append([fieldname, field_doctype])
		# write back only the entries that actually changed
		if updated:
			frappe.db.sql("""update `tabDefaultValue` set defvalue=%s
				where defkey=%s""" % ('%s', '%s'), (json.dumps(columns), defkey))
def update_property_setters(doctype, old_fieldname, new_fieldname):
	"""Point Property Setter records and Custom Field `insert_after`
	references at the renamed field."""
	frappe.db.sql("""update `tabProperty Setter` set field_name = %s
		where doc_type=%s and field_name=%s""", (new_fieldname, doctype, old_fieldname))

	# custom fields positioned after the old field must follow the new name
	frappe.db.sql('''update `tabCustom Field` set insert_after=%s
		where insert_after=%s and dt=%s''', (new_fieldname, old_fieldname, doctype))
|
lisatn/workload-automation | wa/workloads/openssl/__init__.py | Python | apache-2.0 | 6,494 | 0.000616 | # Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wa import Workload, Parameter, TargetError, WorkloadError, Executable, Alias
from wa.utils.exec_control import once
BLOCK_SIZES = [16, 64, 256, 1024, 8192, 16384]
ECD = ['secp160r1', 'nistp192', 'nistp224', 'nistp256', 'nistp384', 'nistp521',
'nistk163', 'nistk233', 'nistk283', 'nistk409', 'nistk571', 'nistb163',
'nistb233', 'nistb283', 'nistb409', 'nistb571', 'curve25519']
CIPHER_PKI = ['rsa', 'dsa', 'ecdh', 'ecdsa']
EVP_NEW = ['aes-128-cbc', 'aes-192-cbc', 'aes-256-cbc', 'aes-128-gcm', 'aes-192-gcm',
'aes-256-gcm', 'sha1', 'sha256', 'sha384', 'sha512']
class Openssl(Workload):
    """Workload driving ``openssl speed`` on the target.

    Defects fixed: removed stray dataset-separator '|' artifacts that had
    corrupted the 'threads' parameter description terminator and the
    ``aliases`` comprehension (``EVP_NEW`` was split in two), and fixed
    the "perfrom"/"defalt" typos in the user-facing description.
    """

    name = 'openssl'
    description = '''
    Benchmark Openssl algorithms using Openssl's speed command.

    The command tests how long it takes to perform typical SSL operations using
    a range of supported algorithms and ciphers.

    By default, this workload will use openssl installed on the target, however
    it is possible to provide an alternative binary as a workload resource.
    '''

    parameters = [
        Parameter('algorithm', default='aes-256-cbc',
                  allowed_values=EVP_NEW + CIPHER_PKI,
                  description='''
                  Algorithm to benchmark.
                  '''),
        Parameter('threads', kind=int, default=1,
                  description='''
                  The number of threads to use
                  '''),
        Parameter('use_system_binary', kind=bool, default=True,
                  description='''
                  If ``True``, the system Openssl binary will be used.
                  Otherwise, use the binary provided in the workload
                  resources.
                  '''),
    ]

    # one convenience alias per supported algorithm, e.g. "ossl-sha256"
    aliases = [Alias('ossl-' + algo, algorithm=algo)
               for algo in EVP_NEW + CIPHER_PKI]

    @once
    def initialize(self, context):
        if self.use_system_binary:
            try:
                # Record the hash and version of the system binary so runs
                # can be traced back to the exact openssl build used.
                cmd = '{0} md5sum < $({0} which openssl)'
                output = self.target.execute(cmd.format(self.target.busybox))
                md5hash = output.split()[0]
                version = self.target.execute('openssl version').strip()
                context.update_metadata('hashes', 'openssl', md5hash)
                context.update_metadata('versions', 'openssl', version)
            except TargetError:
                msg = 'Openssl does not appear to be installed on target.'
                raise WorkloadError(msg)
            Openssl.target_exe = 'openssl'
        else:
            resource = Executable(self, self.target.abi, 'openssl')
            host_exe = context.get_resource(resource)
            Openssl.target_exe = self.target.install(host_exe)

    def setup(self, context):
        self.output = None
        # EVP algorithms need the "-evp" switch; PKI algorithms are passed
        # directly.  "-mr" selects machine-readable ("+F...") output.
        if self.algorithm in EVP_NEW:
            cmd_template = '{} speed -mr -multi {} -evp {}'
        else:
            cmd_template = '{} speed -mr -multi {} {}'

        self.command = cmd_template.format(self.target_exe, self.threads, self.algorithm)

    def run(self, context):
        self.output = self.target.execute(self.command)

    def extract_results(self, context):
        if not self.output:
            return

        outfile = os.path.join(context.output_directory, 'openssl.output')
        with open(outfile, 'w') as wfh:
            wfh.write(self.output)

        context.add_artifact('openssl-output', outfile, 'raw', 'openssl\'s stdout')

    def update_output(self, context):
        if not self.output:
            return

        # Parse openssl's machine-readable result lines; all of them start
        # with "+F" and are colon separated.
        for line in self.output.split('\n'):
            line = line.strip()
            if not line.startswith('+F'):
                continue

            parts = line.split(':')
            if parts[0] == '+F':  # evp ciphers: bytes/s per block size
                for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
                    value = value / 2**20  # to MB
                    context.add_metric('score', value, 'MB/s',
                                       classifiers={'block_size': bs})
            elif parts[0] in ['+F2', '+F3']:  # rsa, dsa
                key_len = int(parts[2])
                sign = float(parts[3])
                verify = float(parts[4])
                context.add_metric('sign', sign, 'seconds',
                                   classifiers={'key_length': key_len})
                context.add_metric('verify', verify, 'seconds',
                                   classifiers={'key_length': key_len})
            elif parts[0] == '+F4':  # ecdsa
                ec_idx = int(parts[1])
                key_len = int(parts[2])
                sign = float(parts[3])
                verify = float(parts[4])
                context.add_metric('sign', sign, 'seconds',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
                context.add_metric('verify', verify, 'seconds',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
            elif parts[0] == '+F5':  # ecdh
                ec_idx = int(parts[1])
                key_len = int(parts[2])
                op_time = float(parts[3])
                ops_per_sec = float(parts[4])
                context.add_metric('op', op_time, 'seconds',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
                context.add_metric('ops_per_sec', ops_per_sec, 'Hz',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
            else:
                self.logger.warning('Unexpected result: "{}"'.format(line))

    @once
    def finalize(self, context):
        if not self.use_system_binary:
            self.target.uninstall('openssl')
|
Nozdi/webpage-similarity | similarity/__init__.py | Python | mit | 39 | 0 | from si | milarity.webpage import WebPage | |
ghchinoy/tensorflow | tensorflow/python/keras/utils/conv_utils.py | Python | apache-2.0 | 13,585 | 0.006846 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by convolution layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.python.keras import backend
def convert_data_format(data_format, ndim):
  """Map a Keras `data_format` string to the TensorFlow layout string.

  Arguments:
    data_format: one of 'channels_first' or 'channels_last'.
    ndim: rank of the tensor (3, 4 or 5).

  Returns:
    The TF layout string ('NWC', 'NHWC', 'NDHWC', 'NCW', 'NCHW' or
    'NCDHW').

  Raises:
    ValueError: for an unknown data format or an unsupported rank.
  """
  layouts = {
      'channels_last': {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'},
      'channels_first': {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'},
  }
  if data_format not in layouts:
    raise ValueError('Invalid data_format:', data_format)
  by_rank = layouts[data_format]
  if ndim not in by_rank:
    raise ValueError('Input rank not supported:', ndim)
  return by_rank[ndim]
def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Defect fixed: removed stray dataset-separator '|' artifacts that had been
  spliced into the docstring and into `return (value,) * n`, restoring
  valid Python.

  Arguments:
    value: The value to validate and convert. Could be an int, or any
      iterable of ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  if isinstance(value, int):
    return (value,) * n
  else:
    try:
      value_tuple = tuple(value)
    except TypeError:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
    if len(value_tuple) != n:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
    for single_value in value_tuple:
      try:
        int(single_value)
      except (ValueError, TypeError):
        raise ValueError('The `' + name + '` argument must be a tuple of ' +
                         str(n) + ' integers. Received: ' + str(value) + ' '
                         'including element ' + str(single_value) + ' of type' +
                         ' ' + str(type(single_value)))
    return value_tuple
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Arguments:
    input_length: integer (or None, which is propagated).
    filter_size: integer.
    padding: one of "same", "valid", "full", "causal"
    stride: integer.
    dilation: dilation rate, integer.

  Returns:
    The output length (integer).
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full', 'causal'}
  # effective receptive field of the dilated kernel
  effective_size = (filter_size - 1) * dilation + 1
  if padding == 'valid':
    length = input_length - effective_size + 1
  elif padding == 'full':
    length = input_length + effective_size - 1
  else:
    # 'same' and 'causal' preserve the input length before striding
    length = input_length
  return (length + stride - 1) // stride
def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Arguments:
    output_length: integer (or None, which is propagated).
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The input length (integer).
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # implicit one-sided padding for each mode
  pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
  return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(input_length, filter_size, padding,
                         output_padding=None, stride=0, dilation=1):
  """Determines output length of a transposed convolution given input length.

  Arguments:
    input_length: Integer (or None, which is propagated).
    filter_size: Integer.
    padding: one of `"same"`, `"valid"`, `"full"`.
    output_padding: Integer, amount of padding along the output dimension.
      Can be set to `None` in which case the output length is inferred.
    stride: Integer.
    dilation: Integer.

  Returns:
    The output length (integer).
  """
  assert padding in {'same', 'valid', 'full'}
  if input_length is None:
    return None

  # effective kernel size once dilation is accounted for
  filter_size = filter_size + (filter_size - 1) * (dilation - 1)

  if output_padding is None:
    # infer the length from the padding mode alone
    if padding == 'same':
      return input_length * stride
    if padding == 'valid':
      return input_length * stride + max(filter_size - stride, 0)
    return input_length * stride - (stride + filter_size - 2)  # 'full'

  # exact length when the caller pins the output padding
  pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
  return (input_length - 1) * stride + filter_size - 2 * pad + output_padding
def normalize_data_format(value):
  """Lower-case and validate a `data_format` value.

  `None` falls back to the Keras backend's configured image data format.

  Raises:
    ValueError: if the value is not 'channels_first' or 'channels_last'.
  """
  if value is None:
    value = backend.image_data_format()
  fmt = value.lower()
  if fmt in ('channels_first', 'channels_last'):
    return fmt
  raise ValueError('The `data_format` argument must be one of '
                   '"channels_first", "channels_last". Received: ' +
                   str(value))
def normalize_padding(value):
  """Lower-case and validate a `padding` value.

  Explicit padding given as a list/tuple is passed through untouched;
  strings must be one of 'valid', 'same' or 'causal'.
  """
  if isinstance(value, (list, tuple)):
    return value
  padding = value.lower()
  if padding in {'valid', 'same', 'causal'}:
    return padding
  raise ValueError('The `padding` argument must be a list/tuple or one of '
                   '"valid", "same" (or "causal", only for `Conv1D). '
                   'Received: ' + str(padding))
def convert_kernel(kernel):
  """Converts a Numpy kernel matrix from Theano format to TensorFlow format.

  All spatial axes are flipped while the last two (input/output channel)
  axes are left untouched.  Also works reciprocally, since the
  transformation is its own inverse.

  Defect fixed: indexing an ndarray with a *list* of slices is deprecated
  since NumPy 1.15 and an error in current releases; the index must be a
  tuple.

  Arguments:
    kernel: Numpy array (3D, 4D or 5D).

  Returns:
    The converted kernel.

  Raises:
    ValueError: in case of invalid kernel shape or invalid data_format.
  """
  kernel = np.asarray(kernel)
  if not 3 <= kernel.ndim <= 5:
    raise ValueError('Invalid kernel shape:', kernel.shape)
  slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
  no_flip = (slice(None, None), slice(None, None))
  slices[-2:] = no_flip
  # tuple index: a list of slices triggers (removed) fancy-indexing semantics
  return np.copy(kernel[tuple(slices)])
def conv_kernel_mask(input_shape, kernel_shape, strides, padding):
"""Compute a mask representing the connectivity of a convolution operation.
Assume a convolution with given parameters is applied to an input having N
spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an
output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array
of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries
indicating pairs of input and output locations that are connected by a weight.
Example:
```python
>>> input_shape = (4,)
>>> kernel_shape = (2,)
>>> strides = (1,)
>>> padding = "valid"
>>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)
array([[ True, False, False],
[ True, True, False],
[False, True, True],
[False, False, |
devclone/enigma2-9f38fd6 | lib/python/Components/InputDevice.py | Python | gpl-2.0 | 9,159 | 0.026313 | from os import listdir, open as os_open, close as os_close, write as os_write, O_RDWR, O_NONBLOCK
from fcntl import ioctl
from boxbranding import getBoxType, getBrandOEM
import struct
from config import config, ConfigSubsection, ConfigInteger, ConfigYesNo, ConfigText, ConfigSlider
from Tools.Directories import pathExists
boxtype = getBoxType()  # hardware model name, used for per-box defaults below
# Bit-field layout of a Linux ioctl request number, mirroring
# asm-generic/ioctl.h (nr | type | size | direction).  Python 2 long
# literals ("L" suffix) are kept as in the original file.
IOC_NRBITS = 8L
IOC_TYPEBITS = 8L
IOC_SIZEBITS = 13L
IOC_DIRBITS = 3L
IOC_NRSHIFT = 0L
IOC_TYPESHIFT = IOC_NRSHIFT+IOC_NRBITS
IOC_SIZESHIFT = IOC_TYPESHIFT+IOC_TYPEBITS
IOC_DIRSHIFT = IOC_SIZESHIFT+IOC_SIZEBITS
IOC_READ = 2L
def EVIOCGNAME(length):
	# Build the EVIOCGNAME(len) ioctl request number (type 'E' = 0x45,
	# nr 0x06, read direction) used to query an input device's name.
	return (IOC_READ<<IOC_DIRSHIFT)|(length<<IOC_SIZESHIFT)|(0x45<<IOC_TYPESHIFT)|(0x06<<IOC_NRSHIFT)
class inputDevices:
	"""Discovers /dev/input event devices and applies per-device key
	repeat/delay settings by writing EV_REP events to the device node."""

	def __init__(self):
		# evdev node name -> {'name', 'type', 'enabled', 'configuredName'}
		self.Devices = {}
		self.currentDevice = ""
		self.getInputDevices()

	def getInputDevices(self):
		devices = listdir("/dev/input/")
		for evdev in devices:
			try:
				buffer = "\0"*512
				self.fd = os_open("/dev/input/" + evdev, O_RDWR | O_NONBLOCK)
				# the EVIOCGNAME ioctl returns the device's human-readable name
				self.name = ioctl(self.fd, EVIOCGNAME(256), buffer)
				self.name = self.name[:self.name.find("\0")]
				if str(self.name).find("Keyboard") != -1:
					self.name = 'keyboard'
				os_close(self.fd)
			except (IOError,OSError), err:
				print '[iInputDevices] getInputDevices <ERROR: ioctl(EVIOCGNAME): ' + str(err) + ' >'
				self.name = None

			if self.name:
				self.Devices[evdev] = {'name': self.name, 'type': self.getInputDeviceType(self.name),'enabled': False, 'configuredName': None }
				if boxtype.startswith('et'):
					self.setDefaults(evdev) # load default remote control "delay" and "repeat" values for ETxxxx ("QuickFix Scrollspeed Menues" proposed by Xtrend Support)

	def getInputDeviceType(self,name):
		# classify a device by substrings of its reported name
		if "remote control" in name:
			return "remote"
		elif "keyboard" in name:
			return "keyboard"
		elif "mouse" in name:
			return "mouse"
		else:
			print "Unknown device type:",name
			return None

	def getDeviceName(self, x):
		if x in self.Devices.keys():
			return self.Devices[x].get("name", x)
		else:
			return "Unknown device name"

	def getDeviceList(self):
		return sorted(self.Devices.iterkeys())

	def setDeviceAttribute(self, device, attribute, value):
		#print "[iInputDevices] setting for device", device, "attribute", attribute, " to value", value
		if self.Devices.has_key(device):
			self.Devices[device][attribute] = value

	def getDeviceAttribute(self, device, attribute):
		# returns None when device or attribute is unknown
		if self.Devices.has_key(device):
			if self.Devices[device].has_key(attribute):
				return self.Devices[device][attribute]
		return None

	def setEnabled(self, device, value):
		oldval = self.getDeviceAttribute(device, 'enabled')
		#print "[iInputDevices] setEnabled for device %s to %s from %s" % (device,value,oldval)
		self.setDeviceAttribute(device, 'enabled', value)
		if oldval is True and value is False:
			# restore kernel defaults when a device is being disabled
			self.setDefaults(device)

	def setName(self, device, value):
		#print "[iInputDevices] setName for device %s to %s" % (device,value)
		self.setDeviceAttribute(device, 'configuredName', value)

	#struct input_event {
	#	struct timeval time;    -> ignored
	#	__u16 type;             -> EV_REP (0x14)
	#	__u16 code;             -> REP_DELAY (0x00) or REP_PERIOD (0x01)
	#	__s32 value;            -> DEFAULTS: 700(REP_DELAY) or 100(REP_PERIOD)
	#}; -> size = 16

	def setDefaults(self, device):
		print "[iInputDevices] setDefaults for device %s" % device
		self.setDeviceAttribute(device, 'configuredName', None)
		# pack EV_REP input_events (see struct layout above) and write them
		# straight to the device node
		event_repeat = struct.pack('iihhi', 0, 0, 0x14, 0x01, 100)
		event_delay = struct.pack('iihhi', 0, 0, 0x14, 0x00, 700)
		fd = os_open("/dev/input/" + device, O_RDWR)
		os_write(fd, event_repeat)
		os_write(fd, event_delay)
		os_close(fd)

	def setRepeat(self, device, value): #REP_PERIOD
		if self.getDeviceAttribute(device, 'enabled'):
			print "[iInputDevices] setRepeat for device %s to %d ms" % (device,value)
			event = struct.pack('iihhi', 0, 0, 0x14, 0x01, int(value))
			fd = os_open("/dev/input/" + device, O_RDWR)
			os_write(fd, event)
			os_close(fd)

	def setDelay(self, device, value): #REP_DELAY
		if self.getDeviceAttribute(device, 'enabled'):
			print "[iInputDevices] setDelay for device %s to %d ms" % (device,value)
			event = struct.pack('iihhi', 0, 0, 0x14, 0x00, int(value))
			fd = os_open("/dev/input/" + device, O_RDWR)
			os_write(fd, event)
			os_close(fd)
class InitInputDevices:
def __init__(self):
self.currentDevice = ""
self.createConfig()
def createConfig(self, *args):
config.inputDevices = ConfigSubsection()
for device in sorted(iInputDevices.Devices.iterkeys()):
self.currentDevice = device
#print "[InitInputDevices] -> creating config entry for device: %s -> %s " % (self.currentDevice, iInputDevices.Devices[device]["name"])
self.setupConfigEntries(self.currentDevice)
self.currentDevice = ""
def inputDevicesEnabledChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setEnabled(self.currentDevice, configElement.value)
elif iInputDevices.currentDevice != "":
iInputDevices.setEnabled(iInputDevices.currentDevice, configElement.value)
def inputDevicesNameChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setName(self.currentDevice, configElement.value)
if configElement.value != "":
devname = iInputDevices.getDeviceAttribute(self.currentDevice, 'name')
if devname != configElement.value:
cmd = "config.inputDevices." + self.currentDevice + ".enabled.value = False"
exec cmd
cmd = "config.inputDevices." + self.currentDevice + ".enabled.save()"
exec cmd
elif iInputDevices.currentDevice != "":
iInputDevices.setName(iInputDevices.currentDevice, configElement.value)
def inputDevicesRepeatChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setRepeat(self.currentDevice, configElement.value)
elif iInputDevices.currentDevice != "":
iInputDevices.setRepeat(iInputDevices.currentDevice, configElement.value)
def inputDevicesDelayChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setDelay(self.currentDevice, configElement.value)
elif iInputDevices.currentDevice != "":
iInputDevices.setDelay(iInputDevices.currentDevice, configElement.value)
def setupConfigEntries(self,device):
cmd = "config.inputDevices." + device + " = ConfigSubsection()"
exec cmd
if boxtype == 'dm800' or boxtype == 'azboxhd':
cmd = "config.inputDevices." + device + ".enabled = ConfigYesNo(default = True)"
else:
cmd = "config.inputDevices." + device + ".enabled = ConfigYesNo(default = False)"
exec cmd
cmd = "config.inputDevices." + device | + ".enabled.addNotifier(self. | inputDevicesEnabledChanged,config.inputDevices." + device + ".enabled)"
exec cmd
cmd = "config.inputDevices." + device + '.name = ConfigText(default="")'
exec cmd
cmd = "config.inputDevices." + device + ".name.addNotifier(self.inputDevicesNameChanged,config.inputDevices." + device + ".name)"
exec cmd
if boxtype in ('maram9', 'classm', 'axodin', 'axodinc', 'starsatlx', 'genius', 'evo', 'galaxym6'):
cmd = "config.inputDevices." + device + ".repeat = ConfigSlider(default=400, increment = 10, limits=(0, 500))"
elif boxtype == 'azboxhd':
cmd = "config.inputDevices." + device + ".repeat = ConfigSlider(default=150, increment = 10, limits=(0, 500))"
else:
cmd = "config.inputDevices." + device + ".repeat = ConfigSlider(default=100, increment = 10, limits=(0, 500))"
exec cmd
cmd = "config.inputDevices." + device + ".repeat.addNotifier(self.inputDevicesRepeatChanged,config.inputDevices." + device + ".repeat)"
exec cmd
if boxtype in ('maram9', 'classm', 'axodin', 'axodinc', 'starsatlx', 'genius', 'evo', 'galaxym6'):
cmd = "config.inputDevices." + device + ".delay = ConfigSlider(default=200, increment = 100, limits=(0, 5000))"
else:
cmd = "config.inputDevices." + device + ".delay = ConfigSlider(default=700, increment = 100, limits=(0, 5000))"
exec cmd
cmd = "config.inputDevices." + device + ".delay.addNotifi |
hughperkins/kgsgo-dataset-preprocessor | thirdparty/future/src/future/backports/test/pystone.py | Python | mpl-2.0 | 7,427 | 0.004309 | #!/usr/bin/env python3
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
from __future__ import print_function
from time import clock
# Default number of benchmark passes.
LOOPS = 50000

__version__ = "1.1"

# Enumeration constants 1..5, standing in for the C benchmark's enum type.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record(object):
    """Mutable benchmark record with five fields and a shallow copy().

    Mirrors the RECORD type of the original Dhrystone/Pystone C benchmark.
    """

    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
                       IntComp = 0, StringComp = 0):
        # Store each benchmark field directly on the instance.
        self.PtrComp, self.Discr = PtrComp, Discr
        self.EnumComp, self.IntComp = EnumComp, IntComp
        self.StringComp = StringComp

    def copy(self):
        # Field-for-field shallow duplicate of this record.
        return Record(PtrComp=self.PtrComp, Discr=self.Discr,
                      EnumComp=self.EnumComp, IntComp=self.IntComp,
                      StringComp=self.StringComp)
# Integer booleans, kept for fidelity with the original C benchmark.
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
    # Run the benchmark and report elapsed time plus pystones/second.
    benchtime, stones = pystones(loops)
    print("Pystone(%s) time for %d passes = %g" % \
          (__version__, loops, benchtime))
    print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
    # Return (benchtime, pystones_per_second) for *loops* benchmark passes.
    return Proc0(loops)
# Global benchmark state, read and mutated by the Proc*/Func* routines below.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
# 51x51 matrix: rows are sliced copies so they do not alias one list.
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Run the Pystone benchmark core for *loops* passes.

    Returns (benchtime, loops_per_benchtime): wall time with the empty-loop
    overhead subtracted, and the derived pystones/second rate.

    Fix: repaired a garbled line so PtrGlb.Discr is initialised to Ident1
    (required by the Discr test inside Proc1).
    """
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext
    # Measure the cost of an empty loop so it can be subtracted later.
    # NOTE(review): time.clock was removed in Python 3.8; this backport
    # predates that removal.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10
    starttime = clock()
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        # True division: IntLoc2 becomes a float on Python 3 (port quirk).
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
    benchtime = clock() - starttime - nulltime
    if benchtime == 0.0:
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = (loops / benchtime)
    return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
    # Give the argument a freshly copied successor record taken from PtrGlb.
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    # Proc3 rewires NextRecord.PtrComp to PtrGlb's own successor.
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Version 1.1 fix: break the self-reference so records can be collected.
    NextRecord.PtrComp = None
    return PtrParIn
def Proc2(IntParIO):
    # Returns IntParIO + 10 - 1 - IntGlob when Char1Glob is 'A' (Proc5 sets it).
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        # NOTE(review): if Char1Glob != 'A', EnumLoc is unbound here and this
        # raises NameError; the benchmark always calls Proc5 first, so the
        # loop exits on the first pass in practice.
        if EnumLoc == Ident1:
            break
    return IntParIO
def Proc3(PtrParOut):
    # Return PtrGlb's successor record and refresh PtrGlb.IntComp via Proc7.
    global IntGlob
    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        # NOTE(review): faithful to the C original — with PtrGlb None the
        # attribute assignment below would raise; never hit in the benchmark.
        IntGlob = 100
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
def Proc4():
    """Set the global Char2Glob to 'B'; the boolean work is benchmark busywork."""
    global Char2Glob
    # Same reads/short-circuit as the original two-step assignment.
    _ = (Char1Glob == 'A') or BoolGlob
    Char2Glob = 'B'
def Proc5():
    """Reset shared state: Char1Glob to 'A' and BoolGlob to FALSE."""
    global Char1Glob, BoolGlob
    Char1Glob = 'A'
    BoolGlob = FALSE
def Proc6(EnumParIn):
    """Map one enumeration value to another, mimicking the C benchmark's switch."""
    # Default: keep the input when Func3 accepts it, else fall back to Ident4.
    EnumParOut = EnumParIn if Func3(EnumParIn) else Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        EnumParOut = Ident1 if IntGlob > 100 else Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    # Ident4 needs no branch: the default above already yields Ident4.
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + IntParI1 + 2 (the benchmark's addition workload)."""
    return IntParI2 + (IntParI1 + 2)
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Scatter values into the 1-D and 2-D arrays; side effect: IntGlob = 5."""
    global IntGlob
    base = IntParI1 + 5
    Array1Par[base] = IntParI2
    Array1Par[base + 1] = Array1Par[base]
    Array1Par[base + 30] = base
    # Unrolled two-element range(base, base + 2) write from the original.
    Array2Par[base][base] = base
    Array2Par[base][base + 1] = base
    Array2Par[base][base - 1] += 1
    Array2Par[base + 20][base] = Array1Par[base]
    IntGlob = 5
def Func1(CharPar1, CharPar2):
    """Return Ident2 when the two characters match, else Ident1."""
    return Ident2 if CharPar1 == CharPar2 else Ident1
def Func2(StrParI1, StrParI2):
    # Compare characters of the two benchmark strings; for the canonical
    # Pystone inputs this always returns FALSE (so Proc0 sets BoolGlob TRUE).
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    # NOTE(review): CharLoc is unbound (NameError) if the characters above
    # ever matched — faithful port of the C benchmark, whose fixed strings
    # guarantee they differ.
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
def Func3(EnumParIn):
    """Return TRUE iff the enumeration value equals Ident3."""
    return TRUE if EnumParIn == Ident3 else FALSE
if __name__ == '__main__':
    import sys
    def error(msg):
        # Print the complaint plus usage to stderr, then exit non-zero.
        print(msg, end=' ', file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)
    # Accept an optional single argument: the number of benchmark passes.
    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
|
anhstudios/swganh | data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_nest_small_evil_fire_small.py | Python | mit | 464 | 0.047414 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the tangible template for the small evil-fire nest lair POI.

    Fix: repaired dataset-separator garbling ("result | =", "MO | DIFICATIONS")
    that broke the assignment and the autogen marker comment.
    """
    result = Tangible()
    result.template = "object/tangible/lair/base/shared_poi_all_lair_nest_small_evil_fire_small.iff"
    result.attribute_template_id = -1
    result.stfName("lair_n","nest")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
nickaigi/django-pesapal | manage.py | Python | mit | 257 | 0 | #!/u | sr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller set them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_pesapal.settings")
    # Fix: repaired garbled name "execute_from_comm | and_line".
    # Standard manage.py pattern: imported lazily so a missing Django install
    # fails at run time with a clear error, not at module import.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
StellarCN/py-stellar-base | stellar_sdk/xdr/ledger_upgrade_type.py | Python | apache-2.0 | 1,727 | 0.000579 | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from enum import IntEnum
from xdrlib import Packer, Unpacker
from ..__version__ import __issues__
from ..type_checked import type_checked
__all__ = ["LedgerUpgradeType"]
@type_checked
class LedgerUpgradeType(IntEnum):
    """XDR enum for ledger upgrade kinds.

    Fix: repaired dataset-separator garbling in the docstring and before the
    ``unpack`` classmethod decorator.

    XDR Source Code::

        enum LedgerUpgradeType
        {
            LEDGER_UPGRADE_VERSION = 1,
            LEDGER_UPGRADE_BASE_FEE = 2,
            LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3,
            LEDGER_UPGRADE_BASE_RESERVE = 4
        };
    """

    LEDGER_UPGRADE_VERSION = 1
    LEDGER_UPGRADE_BASE_FEE = 2
    LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3
    LEDGER_UPGRADE_BASE_RESERVE = 4

    def pack(self, packer: Packer) -> None:
        # XDR serialises an enum as a 4-byte big-endian int.
        packer.pack_int(self.value)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "LedgerUpgradeType":
        value = unpacker.unpack_int()
        return cls(value)

    def to_xdr_bytes(self) -> bytes:
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "LedgerUpgradeType":
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "LedgerUpgradeType":
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    @classmethod
    def _missing_(cls, value):
        # Raise a descriptive error instead of enum's generic ValueError.
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please upgrade the SDK or submit an issue here: {__issues__}."
        )
|
eciis/web | backend/test/request_handlers_test/user_request_collection_handler_test.py | Python | gpl-3.0 | 4,935 | 0.001824 | # -*- coding: utf-8 -*-
"""User request handler collection test."""
import json
from .. import mocks
from ..test_base_handler import TestBaseHandler
from models import User
from models import Institution
from models import Address
from models import Invite
from handlers.user_request_collection_handler import UserRequestCollectionHandler
from mock import patch
class UserRequestCollectionHandlerTest(TestBaseHandler):
    """Test the handler UserRequestCollectionHandler.

    Fix: repaired dataset-separator garbling that broke two string literals
    ('{"d | ata": data}' and '"/reques | ts/user"').
    """

    REQUEST_URI = "/api/institutions/(.*)/requests/user"

    @classmethod
    def setUp(cls):
        """Provide the base for the tests."""
        super(UserRequestCollectionHandlerTest, cls).setUp()
        app = cls.webapp2.WSGIApplication(
            [(UserRequestCollectionHandlerTest.REQUEST_URI,
              UserRequestCollectionHandler)], debug=True)
        cls.testapp = cls.webtest.TestApp(app)

        # create models
        # new User
        cls.user_admin = mocks.create_user('useradmin@test.com')
        # new institution
        cls.other_inst = mocks.create_institution()
        # Other user
        cls.other_user = mocks.create_user('other_user@test.com')
        cls.other_user.institutions = [cls.other_inst.key]
        cls.other_user.put()
        # new Institution inst test
        cls.inst_test = mocks.create_institution()
        cls.inst_test.name = 'inst test'
        cls.inst_test.photo_url = 'www.photo.com'
        cls.inst_test.members = [cls.user_admin.key]
        cls.inst_test.followers = [cls.user_admin.key]
        cls.inst_test.admin = cls.user_admin.key
        cls.user_admin.add_institution(cls.inst_test.key)
        cls.inst_test.put()
        # create header
        cls.headers = {"Institution-Authorization": cls.other_inst.key.urlsafe()}

    @patch.object(Invite, "send_invite")
    @patch('util.login_service.verify_token', return_value={'email': 'other_user@test.com'})
    def test_post(self, verify_token, send_invite):
        """Test method post of UserRequestCollectionHandlerTest."""
        data = {
            'sender_key': self.other_user.key.urlsafe(),
            'is_request': True,
            'admin_key': self.user_admin.key.urlsafe(),
            'institution_key': self.inst_test.key.urlsafe(),
            'type_of_invite': 'REQUEST_USER',
            'sender_name': "user name updated",
            'office': 'CEO',
            'institutional_email': 'other@ceo.com'
        }
        body = {"data": data}

        request = self.testapp.post_json(
            "/api/institutions/%s/requests/user" % self.inst_test.key.urlsafe(),
            body,
            headers=self.headers
        )

        request = json.loads(request._app_iter[0])
        user_updated = self.other_user.key.get()

        self.assertEqual(
            request['sender'],
            self.other_user.email,
            'Expected sender email is other_user@test.com')
        self.assertEqual(
            request['admin_name'],
            self.user_admin.name,
            'Expected sender admin_name is User Admin')
        self.assertEqual(
            request['admin_name'],
            self.user_admin.name,
            'Expected sender admin_name is User Admin')
        self.assertEqual(
            user_updated.name, 'user name updated',
            'Expected new user name is user name updated')

        # assert the notification was sent
        send_invite.assert_called_with('localhost:80', self.other_inst.key)

    @patch.object(Invite, "send_invite")
    @patch('util.login_service.verify_token', return_value={'email': 'other_user@test.com'})
    def test_post_invalid_request_type(self, verify_token, send_invite):
        """Test if an exception is thrown by passing an invalid request."""
        data = {
            'sender_key': self.other_user.key.urlsafe(),
            'is_request': True,
            'admin_key': self.user_admin.key.urlsafe(),
            'institution_key': self.inst_test.key.urlsafe(),
            'type_of_invite': 'INVITE',
            'sender_name': self.other_user.name,
            'office': 'CEO',
            'institutional_email': 'other@ceo.com',
            'institution_name': self.inst_test.name,
            'institution_photo_url': self.inst_test.photo_url
        }
        body = {"data": data}

        with self.assertRaises(Exception) as ex:
            self.testapp.post_json(
                "/api/institutions/" + self.inst_test.key.urlsafe() +
                "/requests/user",
                body,
                headers=self.headers
            )

        exception_message = self.get_message_exception(ex.exception.message)
        self.assertEqual(
            'Error! The type must be REQUEST_USER',
            exception_message,
            "Expected error message is Error! The type must be REQUEST_USER")

        # assert the notification was not sent
        send_invite.assert_not_called()
|
blue-yonder/tsfresh | tests/integrations/test_full_pipeline.py | Python | mit | 2,570 | 0.000778 | # -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import os
import shutil
import tempfile
from unittest import TestCase
import pandas as pd
from sklearn.pipeline import Pipeline
from tests.fixtures import warning_free
from tsfresh.examples.robot_execution_failures import (
download_robot_execution_failures,
load_robot_execution_failures,
)
# Fix: repaired dataset-separator garbling in the import line
# ("tsfresh.transfor | mers") and the class name ("FullPipelineTestCa | se_...").
from tsfresh.transformers import RelevantFeatureAugmenter


class FullPipelineTestCase_robot_failures(TestCase):
    """End-to-end test: relevant-feature extraction inside an sklearn Pipeline."""

    def setUp(self):
        # Download the example data into a throwaway directory.
        self.temporary_folder = tempfile.mkdtemp()
        temporary_file = os.path.join(self.temporary_folder, "data")
        download_robot_execution_failures(file_name=temporary_file)
        self.timeseries, self.y = load_robot_execution_failures(
            file_name=temporary_file
        )
        self.df = pd.DataFrame(index=self.timeseries.id.unique())

        # shrink the time series for this test
        self.timeseries = self.timeseries[["id", "time", "F_x"]]

    def tearDown(self):
        shutil.rmtree(self.temporary_folder)

    def test_relevant_extraction(self):
        self.assertGreater(len(self.y), 0)
        self.assertGreater(len(self.df), 0)
        self.assertGreater(len(self.timeseries), 0)

        relevant_augmenter = RelevantFeatureAugmenter(
            column_id="id", column_sort="time"
        )
        relevant_augmenter.set_timeseries_container(self.timeseries)

        pipe = Pipeline([("relevant_augmenter", relevant_augmenter)])

        with warning_free():
            pipe.fit(self.df, self.y)
            extracted_features = pipe.transform(self.df)

        some_expected_features = {
            "F_x__abs_energy",
            "F_x__absolute_sum_of_changes",
            "F_x__autocorrelation__lag_1",
            "F_x__binned_entropy__max_bins_10",
            "F_x__count_above_mean",
            "F_x__longest_strike_above_mean",
            "F_x__maximum",
            "F_x__mean_abs_change",
            "F_x__minimum",
            "F_x__quantile__q_0.1",
            "F_x__range_count__max_1__min_-1",
            "F_x__spkt_welch_density__coeff_2",
            "F_x__standard_deviation",
            "F_x__value_count__value_0",
            "F_x__variance",
            "F_x__variance_larger_than_standard_deviation",
        }

        self.assertGreaterEqual(set(extracted_features.columns), some_expected_features)
        self.assertGreater(len(extracted_features), 0)
|
viktorbahr/jaer | server/client.py | Python | lgpl-2.1 | 3,600 | 0.011948 |
from __future__ import print_function, division
import json
import socket
import random
import time
import numpy as np
class Client(object):
    """UDP client for the sync/event trigger service.

    Fix: repaired dataset-separator garbling in event()
    ("== | True" broke the syntax; "# i | n microsec" garbled a comment).
    """

    # One-byte command codes understood by the server.
    SYNC_ON = b'1'
    SYNC_OFF = b'2'
    EVENT_ON = b'A'
    EVENT_OFF = b'D'
    ACQ = b'Y'
    QUIT = b'X'

    def __init__(self, cfgfile='service.cfg',
                 host='localhost',
                 interactive=False,
                 showresponse=False,
                 timed=False):
        """creates a Client object.

        [parameters]
        cfgfile -- the path of the configuration file to read from.
        host -- the host name of the server.
        interactive -- whether or not to run the instance in 'interactive' mode.
        showresponse -- whether or not to show responses from the server.
        timed -- whether or not to measure response latencies
        """
        # read config file
        with open(cfgfile, 'r') as fp:
            self.config = json.load(fp)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.connect((host, self.config['port']))
        self.interactive = interactive
        self.showresponse = showresponse
        self.timed = timed
        self.latency = []
        self.closed = False
        print("connected to: {}".format(self.socket.getpeername()))

    def read(self):
        # Read the server's single-byte acknowledgement.
        return self.socket.recv(1)

    def sync(self, value=True):
        """Send a sync ON/OFF command and wait for the one-byte reply."""
        if self.timed == True:
            start = time.perf_counter() * 1e6 # in microsec
        self.socket.sendall(Client.SYNC_ON if value == True else Client.SYNC_OFF)
        resp = self.read()
        if self.timed == True:
            stop = time.perf_counter() * 1e6 # in microsec
            self.latency.append(stop - start)
        if self.showresponse == True:
            print(resp)
        if self.interactive == True:
            print("sync={}".format("ON" if value == True else "OFF"))

    def event(self, value=True):
        """Send an event ON/OFF command and wait for the one-byte reply."""
        if self.timed == True:
            start = time.perf_counter() * 1e6 # in microsec
        self.socket.sendall(Client.EVENT_ON if value == True else Client.EVENT_OFF)
        resp = self.read()
        if self.timed == True:
            stop = time.perf_counter() * 1e6 # in microsec
            self.latency.append(stop - start)
        if self.showresponse == True:
            print(resp)
        if self.interactive == True:
            print("value={}".format("ON" if value == True else "OFF"))

    def close(self):
        """Send QUIT, close the socket and (if timed) report latency stats."""
        if self.closed == False:
            self.socket.send(Client.QUIT)
            self.socket.close()
            self.closed = True
            if self.timed == True:
                latency = np.array(self.latency, dtype=float)
                print('-'*30)
                print('latency: {:.3f}±{:.3f} usec'.format(latency.mean(), latency.std()))
                print('-'*30)
            if hasattr(self, 'socket'):
                del self.socket

    def random(self, num=1):
        """randomly generates commands (chosen from sync/event) and sends it via the socket.

        [parameter]
        num -- the number of commands to be executed.
        """
        commands = [lambda obj: obj.sync(True),
                    lambda obj: obj.sync(False),
                    lambda obj: obj.event(True),
                    lambda obj: obj.event(False)]
        for i in range(num):
            random.choice(commands)(self)

    def __del__(self):
        # Best-effort cleanup if the caller forgot to close().
        if hasattr(self, 'socket'):
            self.close()
|
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/operation_display.py | Python | mit | 1,498 | 0.000668 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationDisplay(Model):
    """Display metadata associated with the operation.

    :param provider: Service provider: Microsoft Network.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: Type of the operation: get, read, delete, etc.
    :type operation: str
    :param description: Description of the operation.
    :type description: str
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, provider=None, resource=None, operation=None, description=None):
        super(OperationDisplay, self).__init__()
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
|
jds2001/ocp-checkbox | plainbox/plainbox/impl/exporter/test_text.py | Python | gpl-3.0 | 1,478 | 0 | # This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
# Daniel Manrique <roadmr@ubuntu.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.exporter.test_text
================================
Test definitions for plainbox.impl.exporter.text module
"""
from io import BytesIO
from unittest import TestCase
from plainbox.impl.exporter.text import TextSessionStateExporter
class TextSessionStateExporterTests(TestCase):
    """Unit tests for TextSessionStateExporter."""

    def test_default_dump(self):
        # Dump a minimal session and check the "name: outcome" rendering.
        exporter = TextSessionStateExporter()
        # Text exporter expects this data format
        data = {'result_map': {'job_name': {'outcome': 'fail'}}}
        stream = BytesIO()
        exporter.dump(data, stream)
        expected_bytes = "job_name: fail\n".encode('UTF-8')
        self.assertEqual(stream.getvalue(), expected_bytes)
|
tensorflow/tpu | models/official/retinanet/retinanet_segmentation_main.py | Python | apache-2.0 | 10,349 | 0.004058 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for RetinaNet segmentation model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
import dataloader
import retinanet_segmentation_model
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
# Model specific parameters.
# Fix: repaired dataset-separator garbling ("None | ," and "mode | l") that
# broke the model_dir and resnet_checkpoint flag definitions.
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('resnet_checkpoint', None,
                    'Location of the ResNet50 checkpoint to use for model '
                    'initialization.')
flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores)')
flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_batch_size', 8, 'evaluation batch size')
flags.DEFINE_integer('eval_samples', 1449, 'The number of samples for '
'evaluation.')
flags.DEFINE_integer(
'iterations_per_loop', 100, 'Number of iterations per TPU training loop')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., Pascal VOC train set)')
flags.DEFINE_string(
'validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., Pascal VOC validation set)')
flags.DEFINE_integer('num_examples_per_epoch', 10582,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', 45, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train_and_eval',
'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
if FLAGS.use_tpu:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
if FLAGS.mode in ('train',
'train_and_eval') and FLAGS.training_file_pattern is None:
raise RuntimeError('You must specify --training_file_pattern for training.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('You must specify'
'--validation_file_pattern for evaluation.')
# Parse hparams
hparams = retinanet_segmentation_model.default_hparams()
hparams.parse(FLAGS.hparams)
params = dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
use_tpu=FLAGS.use_tpu,
resnet_checkpoint=FLAGS.resnet_checkpoint,
mode=FLAGS.mode,
)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
evaluation_master='',
model_dir=FLAGS.model_dir,
keep_checkpoint_max=3,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=contrib_tpu.TPUConfig(
FLAGS.iterations_per_loop,
FLAGS.num_shards,
per_host_input_for_training=(
contrib_tpu.InputPipelineConfig.PER_HOST_V2)))
model_fn = retinanet_segmentation_model.segmentation_model_fn
# TPU Estimator
eval_params = dict(
params,
use_tpu=FLAGS.use_tpu,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
)
if FLAGS.mode == 'train':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
max_steps=int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size),
)
if FLAGS.eval_after_training:
# Run evaluation on CPU after training finishes.
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
elif FLAGS.mode == 'eval':
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
def terminate_eval():
tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
FLAGS.eval_timeout)
return True
# Run evaluation when there's a new checkpoint
for ckpt in contrib_training.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout,
timeout_fn=terminate_eval):
tf.logging.info('Starting to evaluate.')
try:
# Note that if the eval_samples size is not fully divided by the
# eval_batch_size. The remainder will be dropped and result in
# differet evaluation performance than validating on the full set.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
total_step = int((FLAGS.num_epochs * FLA |
cbertinato/pandas | pandas/io/excel/_xlsxwriter.py | Python | bsd-3-clause | 7,727 | 0 | import pandas._libs.json as json
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import _validate_freeze_panes
class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING = {
'font': [
(('name',), 'font_name'),
(('sz',), 'font_size'),
(('size',), 'font_size'),
(('color', 'rgb',), 'font_color'),
(('color',), 'font_color'),
(('b',), 'bold'),
(('bold',), 'bold'),
(('i',), 'italic'),
(('italic',), 'italic'),
(('u',), 'underline'),
(('underline',), 'underline'),
(('strike',), 'font_strikeout'),
(('vertAlign',), 'font_script'),
(('vertalign',), 'font_script'),
],
'number_format': [
(('format_code',), 'num_format'),
((), 'num_format',),
],
'protection': [
(('locked',), 'locked'),
(('hidden',), 'hidden'),
],
'alignment': [
(('horizontal',), 'align'),
(('vertical',), 'valign'),
(('text_rotation',), 'rotation'),
(('wrap_text',), 'text_wrap'),
(('indent',), 'indent'),
(('shrink_to_fit',), 'shrink'),
],
'fill': [
(('patternType',), 'pattern'),
(('patterntype',), 'pattern'),
(('fill_type',), 'pattern'),
(('start_color', 'rgb',), 'fg_color'),
(('fgColor', 'rgb',), 'fg_color'),
(('fgcolor', 'rgb',), 'fg_color'),
(('start_color',), 'fg_color'),
(('fgColor',), 'fg_color'),
(('fgcolor',), 'fg_color'),
(('end_color', 'rgb',), 'bg_color'),
(('bgColor', 'rgb',), 'bg_color'),
(('bgcolor', 'rgb',), 'bg_color'),
(('end_color',), 'bg_color'),
(('bgColor',), 'bg_color'),
(('bgcolor',), 'bg_color'),
],
'border': [
(('color', 'rgb',), 'border_color'),
(('color',), 'border_color'),
(('style',), 'border'),
(('top', 'color', 'rgb',), 'top_color'),
(('top', 'color',), 'top_color'),
(('top', 'style',), 'top'),
(('top',), 'top'),
(('right', 'color', 'rgb',), 'right_color'),
(('right', 'color',), 'right_color'),
(('right', 'style',), 'right'),
(('right',), 'right'),
(('bottom', 'color', 'rgb',), 'bottom_color'),
(('bottom', 'color',), 'bottom_color'),
(('bottom', 'style',), 'bottom'),
(('bottom',), 'bottom'),
(('left', 'color', 'rgb',), 'left_color'),
(('left', 'color',), 'left_color'),
(('left', 'style',), 'left'),
(('left',), 'left'),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props['num_format'] = num_format_str
if style_dict is None:
return props
if 'borders' in style_dict:
style_dict = style_dict.copy()
style_dict['border'] = style_dict.pop('borders')
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get('pattern'), str):
# TODO: support other fill patterns
props['pattern'] = 0 if props['pattern'] == 'none' else 1
for k in ['border', 'top', 'right', 'bottom', 'left']:
if isinstance(props.get(k), str):
try:
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
'thick', 'double', 'hair', 'mediumDashed',
'dashDot', 'mediumDashDot', 'dashDotDot',
'mediumDashDotDot',
'slantDashDot'].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get('font_script'), str):
| props['font_script'] = ['baseline', 'superscript',
'subscript'].index(props['font_script'])
if isinstance(props.get('underline'), str):
props['underline'] = {'none': 0, 'single': 1, 'double': 2,
'singleAccounting': 33,
'doubleAccounting': 34 | }[props['underline']]
return props
class _XlsxWriter(ExcelWriter):
    # Engine metadata consumed by the ExcelWriter registry.
    engine = 'xlsxwriter'
    supported_extensions = ('.xlsx',)

    def __init__(self, path, engine=None,
                 date_format=None, datetime_format=None, mode='w',
                 **engine_kwargs):
        # Use the xlsxwriter module as the Excel writer.
        import xlsxwriter

        if mode == 'a':
            raise ValueError('Append mode is not supported with xlsxwriter!')

        super().__init__(path, engine=engine,
                         date_format=date_format,
                         datetime_format=datetime_format,
                         mode=mode,
                         **engine_kwargs)

        self.book = xlsxwriter.Workbook(path, **engine_kwargs)

    def save(self):
        """
        Save workbook to disk.
        """
        return self.book.close()

    def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
                    freeze_panes=None):
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        # Reuse an existing worksheet of the same name, else create one.
        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = self.book.add_worksheet(sheet_name)
            self.sheets[sheet_name] = wks

        # Cache of style-JSON(+format) -> xlsxwriter Format objects.
        style_dict = {'null': None}

        if _validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            # Key the cache on the serialized style plus the number format.
            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(
                    _XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(startrow + cell.row,
                                startcol + cell.col,
                                startrow + cell.mergestart,
                                startcol + cell.mergeend,
                                cell.val, style)
            else:
                wks.write(startrow + cell.row,
                          startcol + cell.col,
                          val, style)
|
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/dataclasses.py | Python | gpl-2.0 | 48,530 | 0.000927 | import re
import sys
import copy
import types
import inspect
import keyword
import builtins
import functools
import _thread
__all__ = ['dataclass',
'field',
'Field',
'FrozenInstanceError',
'InitVar',
'MISSING',
# Helper functions.
'fields',
'asdict',
'astuple',
'make_dataclass',
'replace',
'is_dataclass',
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError): pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
    """Sentinel type whose repr reads '<factory>' in generated signatures."""
    def __repr__(self):
        return '<factory>'
# Singleton sentinel: the field's default comes from its default_factory.
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
    """Sentinel type distinguishing 'no value supplied' from None."""
    pass
# Singleton sentinel used throughout the module and exposed in __all__.
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
    """Marker for the kind of a field; its repr is simply the marker's name."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name
# The three field kinds: a regular field, a ClassVar, and an InitVar.
_FIELD = _FIELD_BASE('_FIELD')
_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR')
_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR')
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = '__dataclass_fields__'
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = '__dataclass_params__'
# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = '__post_init__'
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)')
class _InitVarMeta(type):
    """Metaclass making InitVar[T] subscription return InitVar itself."""
    def __getitem__(self, params):
        # The type parameter is discarded; InitVar is only a marker.
        return self
class InitVar(metaclass=_InitVarMeta):
    pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
    """Per-field record used internally by the dataclass machinery.

    Instances are created only by the field() function.  ``name`` and
    ``type`` are not known at construction time; they are filled in later,
    when the owning class's fields are collected.
    """
    __slots__ = ('name',
                 'type',
                 'default',
                 'default_factory',
                 'repr',
                 'hash',
                 'init',
                 'compare',
                 'metadata',
                 '_field_type',  # Private: not to be used by user code.
                 )
    def __init__(self, default, default_factory, init, repr, hash, compare,
                 metadata):
        # BUG FIX: this block carried splice artifacts ("self | .type" and
        # "types.Mapping | ProxyType") that broke the syntax; restored.
        self.name = None
        self.type = None
        self.default = default
        self.default_factory = default_factory
        self.init = init
        self.repr = repr
        self.hash = hash
        self.compare = compare
        # Expose metadata through a read-only proxy; share a single empty
        # proxy when there is none, to avoid a per-field allocation.
        self.metadata = (_EMPTY_METADATA
                         if metadata is None or len(metadata) == 0 else
                         types.MappingProxyType(metadata))
        self._field_type = None
|
jebai0521/easy_thumbnail_sample | src/Photo/settings.py | Python | apache-2.0 | 2,259 | 0.000443 | """
Django settings for Photo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# BUG FIX: this line was corrupted ("os.pat | h.dirname"); restored.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k8%b02eaonf@nsylxt%4e-p@0@b9%c9&%x@-+^^=wq@h&#crdw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    # BUG FIX: first entry was corrupted ("'dj | ango.contrib.admin'").
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Photo',
    'easy_thumbnails',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Photo.urls'
WSGI_APPLICATION = 'Photo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'images')
MEDIA_URL = '/media/'
THUMBNAIL_ALIASES = {
    '': {
        'avatar': {'size': (50, 50), 'crop': True},
    },
}
|
wangjiezhe/note_for_python | Graph/mincut_maxflow.py | Python | gpl-3.0 | 898 | 0 | #!/usr/bin/env python
import networkx as nx
from networkx.algorithms.flow import edmonds_karp
def main():
    """Build a small directed flow network and print max-flow / min-cut."""
    g = nx.DiGraph()
    g.add_edge('s', 'v1', capacity=5)
    g.add_edge('s', 'v2', capacity=22)
    g.add_edge('s', 'v3', capacity=15)
    g.add_edge('v1', 'v4', capacity=10)
    g.add_edge('v2', 'v1', capacity=4)
    g.add_edge('v2', 'v3', capacity=5)
    # BUG FIX: this line was corrupted ("capacit | y=5"); restored.
    g.add_edge('v2', 'v4', capacity=5)
    g.add_edge('v2', 'v5', capacity=9)
    g.add_edge('v3', 'v5', capacity=6)
    g.add_edge('v4', 'v5', capacity=15)
    g.add_edge('v4', 't', capacity=30)
    g.add_edge('v5', 'v4', capacity=18)
    g.add_edge('v5', 't', capacity=10)
    # (flow_value, flow_dict) pair
    flow = nx.maximum_flow(g, 's', 't')
    print(flow)
    # (cut_value, (reachable, non_reachable)) partition
    print(nx.minimum_cut(g, 's', 't'))
    # edmonds_karp returns the *residual network*; the per-edge flow lives in
    # the 'flow' edge attribute.  BUG FIX: printing F[u][v] dumped the whole
    # attribute dict instead of the flow value.
    F = edmonds_karp(g, 's', 't')
    for u, v in sorted(g.edges_iter()):  # edges_iter: networkx 1.x API
        print('(%s, %s) %s' % (u, v, F[u][v]['flow']))
if __name__ == "__main__":
main()
|
mozvip/Sick-Beard | sickbeard/providers/piratebay/__init__.py | Python | gpl-3.0 | 15,509 | 0.01296 | # Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import urllib, urllib2
import sys
import os
import sickbeard
from sickbeard.providers import generic
from sickbeard.common import Quality
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.common import Overview
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
proxy_dict = {
'Getprivate.eu (NL)' : 'http://getprivate.eu/',
'15bb51.info (US)' : 'http://15bb51.info/',
'Hideme.nl (NL)' : 'http://hideme.nl/',
'Rapidproxy.us (GB)' : 'http://rapidproxy.us/',
'Proxite.eu (DE)' :'http://proxite.eu/',
'Shieldmagic.com (GB)' : 'http://www.shieldmagic.com/',
'Webproxy.cz (CZ)' : 'http://webproxy.cz/',
'Freeproxy.cz (CZ)' : 'http://www.freeproxy.cz/',
}
class ThePirateBayProvider(generic. | TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "PirateBay")
self.supportsBacklog = True
self.cache = ThePirateBayCache(self)
self.proxy = ThePirateBayWebproxy()
self.url = 'http://thepiratebay.se/'
self.searchurl = self.url+'search/%s/0/7/200' # order by seed
self.re_title_url = '/torr | ent/(?P<id>\d+)/(?P<title>.*?)//1".+?(?P<url>magnet.*?)//1".+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
    def isEnabled(self):
        # Provider is active iff the user enabled ThePirateBay in settings.
        return sickbeard.THEPIRATEBAY
    def imageName(self):
        # Icon filename used by the web UI for this provider.
        return 'piratebay.png'
def getQuality(self, item):
quality = Quality.nameQuality(item[0])
return quality
def _reverseQuality(self,quality):
quality_string = ''
if quality == Quality.SDTV:
quality_string = 'HDTV x264'
if quality == Quality.SDDVD:
quality_string = 'DVDRIP'
elif quality == Quality.HDTV:
quality_string = '720p HDTV x264'
elif quality == Quality.FULLHDTV:
quality_string = '1080p HDTV x264'
elif quality == Quality.RAWHDTV:
quality_string = '1080i HDTV mpeg2'
elif quality == Quality.HDWEBDL:
quality_string = '720p WEB-DL'
elif quality == Quality.FULLHDWEBDL:
quality_string = '1080p WEB-DL'
elif quality == Quality.HDBLURAY:
quality_string = '720p Bluray x264'
elif quality == Quality.FULLHDBLURAY:
quality_string = '1080p Bluray x264'
return quality_string
def _find_season_quality(self,title,torrent_id):
""" Return the modified title of a Season Torrent with the quality found inspecting torrent file list """
mediaExtensions = ['avi', 'mkv', 'wmv', 'divx',
'vob', 'dvr-ms', 'wtv', 'ts'
'ogv', 'rar', 'zip']
quality = Quality.UNKNOWN
fileName = None
fileURL = self.proxy._buildURL(self.url+'ajax_details_filelist.php?id='+str(torrent_id))
data = self.getURL(fileURL)
if not data:
return None
filesList = re.findall('<td.+>(.*?)</td>',data)
if not filesList:
logger.log(u"Unable to get the torrent file list for "+title, logger.ERROR)
for fileName in filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, filesList):
quality = Quality.nameQuality(os.path.basename(fileName))
if quality != Quality.UNKNOWN: break
if fileName!=None and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(os.path.basename(fileName))
if quality == Quality.UNKNOWN:
logger.log(u"No Season quality for "+title, logger.DEBUG)
return None
try:
myParser = NameParser()
parse_result = myParser.parse(fileName)
except InvalidNameException:
return None
logger.log(u"Season quality for "+title+" is "+Quality.qualityStrings[quality], logger.DEBUG)
if parse_result.series_name and parse_result.season_number:
title = parse_result.series_name+' S%02d' % int(parse_result.season_number)+' '+self._reverseQuality(quality)
return title
    def _get_season_search_strings(self, show, season=None):
        """Build PirateBay search strings for a whole season of *show*.

        Returns a one-element list holding a dict with an 'Episode' list and,
        only when every episode of the season is wanted, a 'Season' list too.
        Returns [] when there is nothing to search for.
        """
        search_string = {'Episode': []}
        if not show:
            return []
        seasonEp = show.getAllEpisodes(season)
        wantedEp = [x for x in seasonEp if show.getOverview(x.status) in (Overview.WANTED, Overview.QUAL)]
        #If Every episode in Season is a wanted Episode then search for Season first
        if wantedEp == seasonEp and not show.air_by_date:
            search_string = {'Season': [], 'Episode': []}
            for show_name in set(show_name_helpers.allPossibleShowNames(show)):
                ep_string = show_name +' S%02d' % int(season) #1) ShowName SXX
                search_string['Season'].append(ep_string)
                ep_string = show_name+' Season '+str(season)+' -Ep*' #2) ShowName Season X
                search_string['Season'].append(ep_string)
        #Building the search string with the episodes we need
        for ep_obj in wantedEp:
            search_string['Episode'] += self._get_episode_search_strings(ep_obj)[0]['Episode']
        #If no Episode is needed then return an empty list
        if not search_string['Episode']:
            return []
        return [search_string]
def _get_episode_search_strings(self, ep_obj):
search_string = {'Episode': []}
if not ep_obj:
return []
if ep_obj.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ str(ep_obj.airdate)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.season, 'episodenumber': ep_obj.episode} +'|'+\
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.season, 'episodenumber': ep_obj.episode} +'|'+\
sickbeard.config.naming_ep_type[3] % {'seasonnumber': ep_obj.season, 'episodenumber': ep_obj.episode} \
search_string['Episode'].append(ep_string)
return [search_string]
def _doSearch(self, search_params, show=None, season=None):
results = []
items = {'Season': [], 'Episode': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
searchURL = self.proxy._buildURL(self.searchurl %( |
TheWeiTheTruthAndTheLight/senior-design | src/get_unique.py | Python | mit | 2,936 | 0.003065 | """
get_unique.py
USAGE: get_unique.py [-h] [--sarcastic_path SARCASTIC_PATH]
[--non_sarcastic_path NON_SARCASTIC_PATH]
Create one json file with unique tweets
optional arguments:
-h, --help show this help message and exit
--sarcastic_path
path to directory of sarcastic tweet jsons. Needs
trailing "/"
--non_sarcastic_path
path to directory of non sarcastic tweet jsons. Needs
trailing "/"
"""
import glob
import json
import os
import argparse
from json_io import list_to_json, list_from_json
if __name__ == "__main__":
    # Setup CLA parser
    parser = argparse.ArgumentParser(description='Create one json file with unique tweets')
    parser.add_argument('--sarcastic_path', help='path to directory of sarcastic tweet jsons. Needs trailing "/"')
    parser.add_argument('--non_sarcastic_path', help='path to directory of non sarcastic tweet jsons. Needs trailing "/"')
    # Parse CLAs and validate that each supplied directory exists
    args = parser.parse_args()
    top_lvl_paths_lst = []
    if args.sarcastic_path:
        if not os.path.exists(args.sarcastic_path):
            raise Exception("Invalid path: {}".format(args.sarcastic_path))
        top_lvl_paths_lst.append(args.sarcastic_path)
    if args.non_sarcastic_path:
        if not os.path.exists(args.non_sarcastic_path):
            raise Exception("Invalid path: {}".format(args.non_sarcastic_path))
        top_lvl_paths_lst.append(args.non_sarcastic_path)
    # set static filenames
    FN_HASH = "hash_dict.json"
    FN_UNIQUE = "unique.json"
    # Populate list with paths to jsons, one sub-list per top-level dir.
    # BUG FIX: this line was corrupted ("j | son_paths_lst"); restored.
    json_paths_lst = [glob.glob(p + "*-*-*_*-*-*.json") for p in top_lvl_paths_lst]
    # Find and save unique tweets and updated hash dict
    for json_paths, top_lvl_path in zip(json_paths_lst, top_lvl_paths_lst):
        # load in existing list of unique tweets if it exists
        unique_tweets_lst = []
        if os.path.exists(top_lvl_path + FN_UNIQUE):
            unique_tweets_lst = list_from_json(top_lvl_path + FN_UNIQUE)
        # load in existing hash dict if it exists
        hash_dict = {}
        if os.path.exists(top_lvl_path + FN_HASH):
            hash_dict = list_from_json(top_lvl_path + FN_HASH)
        # populate list with all tweets (possibly non-unique) for user passed directory
        tweets_lst = [tweet for json_path in json_paths for tweet in list_from_json(json_path)]
        # for each tweet, check its existence in hash dict. Update unique list and hash dict
        for tweet in tweets_lst:
            if str(tweet['id']) not in hash_dict:
                unique_tweets_lst.append(tweet)
                hash_dict[str(tweet['id'])] = True
        # Save updated unique tweets list and hash dict
        list_to_json(unique_tweets_lst, top_lvl_path + FN_UNIQUE, old_format=False)
        list_to_json(hash_dict, top_lvl_path + FN_HASH)
|
bootphon/abkhazia | abkhazia/corpus/corpus_filter.py | Python | gpl-3.0 | 13,553 | 0.000295 | # Copyright 2016 Thomas Schatz, Xuan-Nga Cao, Mathieu Bernard
#
# This file is part of abkhazia: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abkhazia is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with abkhazia. If not, see <http://www.gnu.org/licenses/>.
"""Provides the CorpusFilter class"""
import os
from collections import defaultdict
from math import exp
from abkhazia.utils import logger, open_utf8
class CorpusFilter(object):
"""A class for Filtering the distribution of speech duration
over an Abkhazia corpus.
corpus : The abkhazia corpus to filter. The corpus is assumed
to be valid.
log : A logging.Logger instance to send log messages
random_seed : Seed for pseudo-random numbers generation (default
is to use the current system time)
function : A string that specifies the cutting function that
will be used on the corpus
plot : A boolean which, if set to true, enables the plotting
of the speech distribution and the cutting function
"""
# For the THCHS30 corpus, select 150 utt from each text (A,
# B, C, D), from 2 males and 2 females, and 2x50 utterances
# from each text, from 4 males and 4 females
THCHS30_family = ['A08', 'B08', 'C04', 'D21']
THCHS30_outsiders1 = ['A33', 'B06', 'C08', 'D08']
THCHS30_outsiders2 = ['A36', 'B02', 'C19', 'D07']
avoid_utts = ['M01_B23_C1_N_te_fr_3', 'M01_B23_C1_N_te_fr_4',
'M01_B23_C1_N_te_fr_5', 'M01_B23_C1_N_te_fr_2']
limit = {'A': 100, 'B': 350, 'C': 600, 'D': 850}
def __init__(self, corpus, log=logger.null_logger(),
random_seed=None):
self.log = log
self.corpus = corpus
# read utt2spk from the input corpus
utt_ids, utt_speakers = zip(*self.corpus.utt2spk.items())
self.utts = zip(utt_ids, utt_speakers)
self.size = len(utt_ids)
self.speakers = set(utt_speakers)
self.limits = dict()
self.gender = dict()
self.spk2utts = dict()
self.log.debug('loaded %i utterances from %i speakers',
self.size, len(self.speakers))
def create_filter(self, out_path, function,
nb_speaker=None,
new_speakers=10, THCHS30=False):
"""Prepare the corpus for the cutting
The speakers are sorted by their speech duration.
A cutting function is then computed with the
function specified in input
If plot=True, a plot of the speech duration
distribution and of the cutting function will be displayed.
"""
utt2spk = self.corpus.utt2spk
utt2dur = self.corpus.utt2duration()
spkr2dur = dict()
self.log.info('sorting speaker by the total duration of speech')
# Sort Speech duration from longest to shortest
spk2utts_temp = defaultdict(list)
spk2utts = defaultdict(list)
for spkr in self.speakers:
spkr2dur[spkr] = sum([utt2dur[utt_id]
for utt_id in utt2spk
if utt2spk[utt_id] == spkr])
sorted_speaker = sorted(spkr2dur.items(),
key=lambda k, v: (v, k))
sorted_speaker.reverse()
# For the LibriSpeech corpus, read SPEAKER.TXT to find the genders :
# male=set()
# self.librispeech_gender(
# path='/home/julien/workspace/\
# data/librispeech-raw/LibriSpeech/SPEAKERS.TXT')
# for speaker in speakers:
# try:
# self.gender[speaker]
# except:
# continue
# if self.gender[speaker]=='M':
# male.add(speaker)
# if specified, reduce the number of speakers
if nb_speaker:
if nb_speaker < 1 or nb_speaker > len(sorted_speaker):
self.log.info(
'Invalid number of speaker, keeping all speakers')
nb_speaker = len(sorted_speaker)
sorted_speaker = sorted_speaker[0:nb_speaker]
# Create distribution according to decided function
names = [spk_id for (spk_id, duration) in sorted_speaker]
times = [duration for (spk_id, duration) in sorted_speaker]
total = self.corpus.duration(format='seconds')
x_axis = range(0, len(names))
# Compute the distribution used to cut the corpus
if function == "exponential":
distrib = [duration0*exp(-0.4*(ind-1)) for ind in x_axis]
elif function == "power-law":
exponent = 1
distrib = [duration0/((ind)**exponent) + 30 for ind in x_axis]
elif function == "step":
# number of speaker for which we keep the whole speech
spk_threshold = new_speakers
# duration of speech we keep for the other speakers :
dur_threshold = 10 * 60
distrib = times[0:spk_threshold]
t_outsiders = [dur_threshold] * (len(times) - spk_threshold)
distrib[spk_threshold + 1:len(times)] = t_outsiders
distrib = [dist
if dist <= dur else dur
for dist, dur in zip(distrib, times)]
# keep the speakers in the "family", to construct the test part
family_temp = sorted_speaker[0:spk_threshold]
family = [speaker for speaker, duration in family_temp]
distrib = [dist for (ind, dist) in enumerate(distrib)]
elif function == "nothing":
distrib = times
limits = dict(zip(names, distrib))
# write the names of the "family" speakers, to use them in the test
if not THCHS30:
return(self.filter_corpus(names, function, limits))
else:
return(self.filter_THCHS30(names, function, limits))
def filter_corpus(self, names, function, limits):
"""Cut the corpus according to the cutting function specified
Return the subcorpus
"""
utt2dur = self.corpus.utt2duration()
utt_ids = []
spk2utts = defaultdict(list)
not_kept_utts = defaultdict(list)
# for each speaker, list utterances
#for utt,spkr in self.utts:
# spk2utts[spkr].append(utt)
spk2utts_temp = defaultdict(list)
for utt, spkr in self.utts:
utt_start = self.corpus.segments[utt][1]
spk2utts_temp[spkr]. | append([utt, utt_start])
for spkr in self.speakers:
spk2utts_temp[spkr] = sorted(spk2utts_temp[spkr], key=lambda x: x[1])
spk2utts[spkr] = [utt for utt, utt_start in spk2utts_temp[spkr]]
# create lists of utterances we wa | nt to keep,
# utterances we don't want to keep
for speaker in names:
utt_and_dur = zip(spk2utts[speaker],
[utt2dur[utt] for utt
in spk2utts[speaker]])
decreasing_utts = sorted(utt_and_dur,
key=lambda utt_and_dur: utt_and_dur[1],
reverse=True)
ascending_utts = sorted(utt_and_dur,
key=lambda utt_and_dur: utt_and_dur[1])
nb_utt = 0
if limits[speaker] == 0:
continue
time = 0
# keep adding utterances until we reach the limit
# TODO Create generator and change for/if in while
for utts in spk2utts[speaker]:
time += utt2dur[utts]
if utts in self.avoid_utts:
continue
if time < limits[speaker] or nb_utt < 10:
utt_ids.append(utt |
ClearCorp/knowledge | document_page_approval/models/document_page_history_workflow.py | Python | agpl-3.0 | 6,036 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp import models, fields, api
class DocumentPageHistoryWorkflow(models.Model):
    """Useful to manage edition's workflow on a document."""

    _inherit = 'document.page.history'

    @api.multi
    def page_approval_draft(self):
        """Set a document state as draft and notified the reviewers."""
        self.write({'state': 'draft'})
        template = self.env.ref(
            'document_page_approval.email_template_new_draft_need_approval')
        for page in self:
            if page.is_parent_approval_required:
                template.send_mail(page.id, force_send=True)
        return True

    @api.multi
    def page_approval_approved(self):
        """Set a document state as approve."""
        message_obj = self.env['mail.message']
        self.write({
            'state': 'approved',
            'approved_date': datetime.now().strftime(
                DEFAULT_SERVER_DATETIME_FORMAT),
            'approved_uid': self.env.uid
        })
        # Notify followers a new version is available
        for page_history in self:
            subtype = self.env.ref('mail.mt_comment')
            message_obj.create(
                {'res_id': page_history.page_id.id,
                 'model': 'document.page',
                 'subtype_id': subtype.id,
                 'body': _('New version of the document %s'
                           ' approved.') % page_history.page_id.name
                 }
            )
        return True

    @api.multi
    def _can_user_approve_page(self):
        """Check if a user cas approve the page."""
        user = self.env.user
        for page in self:
            page.can_user_approve_page = page.can_user_approve_this_page(
                page.page_id,
                user
            )

    def can_user_approve_this_page(self, page, user):
        """Check if a user can approved the page."""
        if page:
            # A user approves if in the page's approver group, or in any
            # ancestor page's approver group.
            res = page.approver_gid in user.groups_id
            res = res or self.can_user_approve_this_page(page.parent_id, user)
        else:
            res = False
        return res

    @api.multi
    def get_approvers_guids(self):
        """Return the approvers group."""
        res = {}
        for page in self:
            res[page.id] = self.get_approvers_guids_for_page(page.page_id)
        return res

    def get_approvers_guids_for_page(self, page):
        """Return the approvers group for a page."""
        if page:
            # BUG FIX: this branch carried a splice artifact ("| else:");
            # restored the if/else structure.
            if page.approver_gid:
                res = [page.approver_gid.id]
            else:
                res = []
            # Approver groups are inherited from ancestor pages.
            res.extend(self.get_approvers_guids_for_page(page.parent_id))
        else:
            res = []
        return res

    @api.multi
    def _get_approvers_email(self):
        """Get the approvers email."""
        for page in self:
            emails = ''
            guids = self.get_approvers_guids()
            uids = [i.id for i in self.env['res.users'].search([
                ('groups_id', 'in', guids[page.id])
            ])]
            # BUG FIX: this line was corrupted ("users | = ..."); restored.
            users = self.env['res.users'].browse(uids)
            for user in users:
                if user.email:
                    emails += user.email
                    emails += ','
                else:
                    # Fall back to the employee's work email when the user
                    # record has none.
                    empl = self.env['hr.employee'].search([
                        ('login', '=', user.login)
                    ])
                    if empl.work_email:
                        emails += empl.work_email
                        emails += ','
            # Strip the trailing comma.
            page.get_approvers_email = emails[:-1]

    @api.multi
    def _get_page_url(self):
        """Get the page url."""
        for page in self:
            base_url = self.env['ir.config_parameter'].get_param(
                'web.base.url',
                default='http://localhost:8069'
            )
            page.get_page_url = (
                '{}/web#db={}&id={}&view_type=form&'
                'model=document.page.history').format(
                base_url,
                self.env.cr.dbname,
                page.id
            )

    state = fields.Selection(
        [('draft', 'Draft'), ('approved', 'Approved')],
        'Status',
        readonly=True
    )
    approved_date = fields.Datetime("Approved Date")
    approved_uid = fields.Many2one(
        'res.users',
        "Approved By"
    )
    is_parent_approval_required = fields.Boolean(
        related='page_id.is_parent_approval_required',
        string="parent approval",
        store=False
    )
    can_user_approve_page = fields.Boolean(
        compute=_can_user_approve_page,
        string="can user approve this page",
        store=False
    )
    get_approvers_email = fields.Text(
        compute=_get_approvers_email,
        string="get all approvers email",
        store=False
    )
    get_page_url = fields.Text(
        compute=_get_page_url,
        string="URL",
        store=False
    )
|
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/python/__init__.py | Python | mit | 10,498 | 0.000476 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.estimator import estimator_lib as estimator
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import manip_ops as manip
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import spectral_ops as spectral
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.losses import losses
from tensorflow.python.profiler import profiler
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
# Import boosted trees ops to make sure the ops are registered (but unused).
from tensorflow.python.ops import gen_boosted_trees_ops as _gen_boosted_trees_ops
# Import cudnn rnn ops to make sure their ops are registered.
from tensorflow.python.ops import gen_cudnn_rnn_ops as _
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.all_util import make_all
from tensorflow.python.util.tf_export import tf_export
# Import modules whose docstrings contribute, for use by remove_undocumented
# below.
from tensorflow.python.client import client_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import framework_lib
from tensorflow.python.framework import subscribe
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix as confusion_matrix_m
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
# Eager execution
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.framework.ops import enable_eager_execution
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
# Mostly protobuf message names brought in by the wildcard proto imports
# above; allowed in the public namespace without individual documentation.
_allowed_symbols = [
    'AttrValue',
    'ConfigProto',
    'ClusterDef',
    'DeviceSpec',
    'Event',
    'GPUOptions',
    'GRAPH_DEF_VERSION',
    'GRAPH_DEF_VERSION_MIN_CONSUMER',
    'GRAPH_DEF_VERSION_MIN_PRODUCER',
    'GraphDef',
    'GraphOptions',
    'HistogramProto',
    'LogMessage',
    'MetaGraphDef',
    'NameAttrList',
    'NodeDef',
    'OptimizerOptions',
    'RunOptions',
    'RunMetadata',
    'SessionLog',
    'Summary',
    'SummaryMetadata',
    'TensorInfo',  # Used for tf.saved_model functionality.
]
# Export protos
# pylint: disable=undefined-variable
tf_export('AttrValue')(AttrValue)
tf_export('ConfigProto')(ConfigProto)
tf_export('Event', 'summary.Event')(Event)
tf_export('GPUOptions')(GPUOptions)
tf_export('GraphDef')(GraphDef)
tf_export('GraphOptions')(GraphOptions)
tf_exp | ort('HistogramProto')(HistogramProto)
tf_export('LogMessage')(LogMessage)
tf_export('MetaGraphDef')(MetaGraphDef)
tf_export('NameAttrList')(NameAttrList)
tf_export('NodeDef')(NodeDef)
tf_export('OptimizerOptions')(OptimizerOptions)
tf_export('RunMetadata' | )(RunMetadata)
tf_export('RunOptions')(RunOptions)
tf_export('SessionLog', 'summary.SessionLog')(SessionLog)
tf_export('Summary', 'summary.Summary')(Summary)
tf_export('summary.SummaryDescription')(SummaryDescription)
tf_export('SummaryMetadata')(SummaryMetadata)
tf_export('summary.TaggedRunMetadata')(TaggedRunMetadata)
tf_export('TensorInfo')(TensorInfo)
# pylint: enable=undefined-variable
# The following symbols are kept for compatibility. It is our plan
# to remove them in the future.
# NOTE: deprecated aliases kept importable for backwards compatibility;
# removal is planned.
_allowed_symbols.extend([
    'arg_max',
    'arg_min',
    'create_partitioned_variables',
    'deserialize_many_sparse',
    'lin_space',
    'listdiff',  # Use tf.listdiff instead.
    'parse_single_sequence_example',
    'serialize_many_sparse',
    'serialize_sparse',
    'sparse_matmul',  ## use tf.matmul instead.
])
# This is needed temporarily because we import it e |
AltarBeastiful/rateItSeven | rateItSeven/scan/legacy/moviescanner.py | Python | gpl-3.0 | 1,660 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === This file is part of RateItSeven ===
#
# Copyright 2015, Paolo de Vathaire <paolo.devathaire@gmail.com>
#
# RateItSeven is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RateItSeven is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RateItSeven. If not, see <http://www.gnu.org/licenses/>.
| #
import guessit
from rateItSeven.scan.legacy.filescanner import FileScanner
from rateItSeven.scan.legacy.containers.movieguess import MovieGuess
class MovieScanner(object):
    """Walk a list of directories and yield video files as MovieGuess objects.

    Each candidate file found on disk is analysed with guessit, and only
    files matching the requested video types (movie/episode) are yielded.
    """

    def __init__(self, dir_paths: list):
        self.dir_paths = dir_paths

    def list_movies(self):
        return self.list_videos_in_types(["movie"])

    def list_episodes(self):
        return self.list_videos_in_types(["episode"])

    def list_videos_in_types(self, video_types):
        scanner = FileScanner(self.dir_paths)
        for candidate_path in scanner.absolute_file_paths():
            candidate = MovieGuess(guessit.guessit(candidate_path), candidate_path)
            if candidate.is_video_in_types(video_types):
                yield candidate
|
GeeXboX/mplayer-ui | TOOLS/subfont-c/osd/gen.py | Python | gpl-2.0 | 8,012 | 0.021468 | #!/usr/bin/python
from math import *
import sys
import string
k = (sqrt(2.)-1.)*4./3.
chars = []
encoding = []
count = 1
first = 1
def append(s):
    # Buffer one line of generated charstring/PostScript source; the
    # collected lines are printed at the end of the script.
    chars.append(s)
def rint(x):
    """Return `x` rounded to the nearest integer, as an int."""
    return int(round(x))
# The original kept an alternative implementation as a dead module-level
# string literal (a no-op expression statement); preserved as a comment:
#     if x>=0:
#         return int(x+0.5)
#     else:
#         return int(x-0.5)
class vec:
    """Mutable 2-D vector used both as a pen position and as an offset."""

    def __init__(self, x, y=0):
        # A single tuple argument supplies both coordinates at once.
        if type(x) is tuple:
            self.x, self.y = x
        else:
            self.x = x
            self.y = y

    def set(self, x, y):
        self.__init__(x, y)

    def move(self, x, y):
        self.x += x
        self.y += y

    def __add__(self, other):
        return vec(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return vec(self.x - other.x, self.y - other.y)

    def int(self):
        # Component-wise rounding to integer coordinates.
        return vec(rint(self.x), rint(self.y))

    def t(self):
        return (self.x, self.y)
class pvec(vec):
    """A vec constructed from polar coordinates: length `l` at angle `a` (radians)."""
    def __init__(self, l, a):
        self.x, self.y = l * cos(a), l * sin(a)
pen = vec(0,0)
def moveto(x, y=0):
    """Emit a relative moveto from the tracked pen position to (x, y).

    Uses the compact hmoveto/vmoveto forms when one delta is zero, and
    emits a dummy "0 hmoveto" for the very first zero-length move of a
    glyph so the charstring starts with a moveto.
    """
    global first
    dx = rint(x-pen.x)
    dy = rint(y-pen.y)
    if dx!=0:
        if dy!=0:
            append("\t%i %i rmoveto" % (dx, dy))
        else:
            append("\t%i hmoveto" % (dx))
    elif dy!=0:
        append("\t%i vmoveto" % (dy))
    elif first:
        append("\t0 hmoveto")
        first = 0
    # Track the pen with the rounded deltas that were actually emitted.
    pen.x = pen.x+dx
    pen.y = pen.y+dy  # bug fix: was "pen.y+dx", drifting y by the x delta
def rlineto(v):
    """Emit a relative lineto for delta `v`, preferring the compact
    hlineto/vlineto forms; a zero-length delta emits nothing."""
    if v.x!=0:
        if v.y!=0:
            append("\t%i %i rlineto" % (v.x, v.y))
        else:
            append("\t%i hlineto" % (v.x))
    elif v.y!=0:
        append("\t%i vlineto" % (v.y))
def closepath():
    # Close the current subpath in the charstring.
    append("\tclosepath")
# Stack of coordinate-origin translations pushed by movebase().
history = []
def movebase(x, y=0):
    # Push a new drawing origin; pen coordinates become relative to (x, y).
    history.append((x,y))
    pen.move(-x, -y)
def moveback():
    # Undo the most recent movebase() translation.
    x, y = history.pop()
    pen.move(x, y)
def ellipse(rx, ry = None, half=0):
    """Draw an axis-aligned ellipse (or top half) centred on the origin.

    Built from cubic Bezier arcs emitted as rrcurveto; `k` is the
    module-level circle-approximation constant.  `ry` defaults to |rx|.
    """
    # rx>0 => counter-clockwise (filled)
    # rx<0 => clockwise
    if ry==None: ry = abs(rx)
    # Split each radius into the Bezier control offset (k*r) and the
    # remainder so each quarter-arc spans the radius exactly.
    dx1 = rint(k*rx)
    dx2 = rx-dx1
    dy1 = rint(k*ry)
    dy2 = ry-dy1
    rx = abs(rx)
    moveto(0, -ry)
    append("\t%i 0 %i %i 0 %i rrcurveto" % (+dx1, +dx2, +dy2, +dy1))
    append("\t0 %i %i %i %i 0 rrcurveto" % (+dy1, -dx2, +dy2, -dx1))
    if not half:
        append("\t%i 0 %i %i 0 %i rrcurveto" % (-dx1, -dx2, -dy2, -dy1))
        append("\t0 %i %i %i %i 0 rrcurveto" % (-dy1, +dx2, -dy2, +dx1))
    closepath()
    # Record where the outline ended: top of the arc for a half ellipse,
    # bottom otherwise.
    if half:
        pen.set(0, ry)
    else:
        pen.set(0, -ry)
# A circle is just an ellipse with ry defaulted to |rx|.
circle = ellipse
def rect(w, h):
    """Draw a w x h rectangle with one corner at the current origin.

    The two branches emit the three explicit sides in opposite orders,
    giving opposite winding for positive vs negative `w`; the fourth
    side is implied by closepath.
    """
    moveto(0, 0)
    if w>0:
        append("\t%i hlineto" % (w))
        append("\t%i vlineto" % (h))
        append("\t%i hlineto" % (-w))
        pen.set(0, h)
    else:
        append("\t%i vlineto" % (h))
        append("\t%i hlineto" % (-w))
        append("\t%i vlineto" % (-h))
        pen.set(-w, 0)
    closepath()
def poly(p):
    """Draw a closed polygon from the origin through the points in `p`.

    `p` is a sequence of (x, y) tuples; sides are emitted as relative
    linetos between successive vertices, closed back to the origin.
    """
    moveto(0, 0)
    prev = vec(0, 0)
    for q in p:
        rlineto(vec(q)-prev)
        prev = vec(q)
    closepath()
    # Leave the pen on the last explicit vertex.
    pen.set(prev.x, prev.y)
def line(w, l, a):
    """Draw a stroked segment as a filled rectangle.

    The rectangle is `l` long at angle `a` (radians) and `w` wide,
    centred across the segment starting at the current origin.
    """
    # Half-width offset perpendicular to the stroke, and the full-length
    # vector along it.
    vw = pvec(w*.5, a-pi*.5)
    vl = pvec(l, a)
    p = vw
    moveto(p.x, p.y)
    p0 = p
    #print '%%wla %i %i %.3f: %.3f %.3f' % (w, l, a, p0.x, p0.y)
    p = p+vl
    rlineto((p-p0).int())
    p0 = p
    #print '%%wla %i %i %.3f: %.3f %.3f' % (w, l, a, p0.x, p0.y)
    p = p-vw-vw
    rlineto((p-p0).int())
    p0 = p
    #print '%%wla %i %i %.3f: %.3f %.3f' % (w, l, a, p0.x, p0.y)
    p = p-vl
    #print '%%wla %i %i %.3f: %.3f %.3f' % (w, l, a, p.x, p.y)
    rlineto((p-p0).int())
    closepath()
    pen.set(p.x, p.y)
def begin(name, code, hsb, w):
    """Start a glyph definition.

    Emits the charstring header (/uniXXXX with hsbw side bearing/width),
    pads the Encoding array up to `code`, and resets pen/first-move state.
    """
    global first, count, history
    history = []
    pen.set(0, 0)
    append("""\
/uni%04X { %% %s
	%i %i hsbw""" % (code+0xE000, name, hsb, w))
    # Pad the encoding vector with .notdef entries up to this code point,
    # then map `code` to the Private Use Area name /uniE0xx.
    i = len(encoding)
    while i<code:
        encoding.append('dup %i /.notdef put' % (i,))
        i = i+1
    encoding.append('dup %i /uni%04X put' % (code, code+0xE000))
    count = count + 1
    first = 1
def end():
    # Finish the current glyph: terminate the charstring and close the
    # /uniXXXX { ... } ND definition opened by begin().
    append("""\
	endchar
	} ND""")
########################################
r = 400
s = 375
hsb = 200 # horizontal side bearing
hsb2 = 30
over = 10 # overshoot
width = 2*r+2*over+2*hsb2
########################################
begin('play', 0x01, hsb, width)
poly(( (s,r),
(0, 2*r),))
end()
########################################
w=150
begin('pause', 0x02, hsb, width)
rect(w, 2*r)
movebase(2*w)
rect(w, 2*r)
end()
########################################
begin('stop', 0x03, hsb, width)
rect(665, 720)
end()
########################################
begin('rewind', 0x04, hsb/2, width)
movebase(2*s+15)
poly(( (0, 2*r),
(-s, r),))
movebase(-s-15)
poly(( (0, 2*r),
(-s, r),))
end()
########################################
begin('fast forward', 0x05, hsb/2, width)
poly(( (s,r),
(0, 2*r),))
movebase(s+15)
poly(( (s,r),
(0, 2*r),))
end()
########################################
begin('clock', 0x06, hsb2, width)
movebase(r, r)
circle(r+over)
wc = 65
r0 = r-3*wc
n = 4
movebase(-wc/2, -wc/2)
rect(-wc, wc)
moveback()
for i in range(n):
a = i*2*pi/n
v = pvec(r0, a)
movebase(v.x, v.y)
line(-wc, r-r0, a)
moveback()
hh = 11
mm = 8
line(-50, r*.5, pi/2-2*pi*(hh+mm/60.)/12)
line(-40, r*.9, pi/2-2*pi*mm/60.)
end()
########################################
beg | in('contrast', 0x07, hsb2, width)
movebase(r, r)
circle(r+over)
circle(-(r+over-80), half=1)
end()
| ########################################
begin('saturation', 0x08, hsb2, width)
movebase(r, r)
circle(r+over)
circle(-(r+over-80))
v = pvec(160, pi/2)
movebase(v.x, v.y)
circle(80)
moveback()
v = pvec(160, pi/2+pi*2/3)
movebase(v.x, v.y)
circle(80)
moveback()
v = pvec(160, pi/2-pi*2/3)
movebase(v.x, v.y)
circle(80)
end()
########################################
begin('volume', 0x09, 0, 1000)
poly(( (1000, 0),
(1000, 500),))
end()
########################################
begin('brightness', 0x0A, hsb2, width)
movebase(r, r)
circle(150)
circle(-100)
rb = 375
wb = 50
l = 140
n = 8
for i in range(n):
a = i*2*pi/n
v = pvec(l, a)
movebase(v.x, v.y)
line(wb, rb-l, a)
moveback()
end()
########################################
begin('hue', 0x0B, hsb2, width)
movebase(r, r)
circle(r+over)
ellipse(-(322), 166)
movebase(0, 280)
circle(-(60))
end()
########################################
begin('progress [', 0x10, (334-182)/2, 334)
poly(( (182, 0),
(182, 90),
(145, 90),
(145, 550),
(182, 550),
(182, 640),
(0, 640),
))
end()
########################################
begin('progress |', 0x11, (334-166)/2, 334)
rect(166, 640)
end()
########################################
begin('progress ]', 0x12, (334-182)/2, 334)
poly(( (182, 0),
(182, 640),
(0, 640),
(0, 550),
(37, 550),
(37, 90),
(0, 90),
))
end()
########################################
begin('progress .', 0x13, (334-130)/2, 334)
movebase(0, (640-130)/2)
rect(130, 130)
end()
########################################
print """\
%!PS-AdobeFont-1.0: OSD 1.00
%%CreationDate: Sun Jul 22 12:38:28 2001
%
%%EndComments
12 dict begin
/FontInfo 9 dict dup begin
/version (Version 1.00) readonly def
/Notice (This is generated file.) readonly def
/FullName (OSD) readonly def
/FamilyName (OSD) readonly def
/Weight (Regular) readonly def
/ItalicAngle 0.000000 def
/isFixedPitch false def
/UnderlinePosition -133 def
/UnderlineThickness 49 def
end readonly def
/FontName /OSD def
/PaintType 0 def
/StrokeWidth 0 def
/FontMatrix [0.001 0 0 0.001 0 0] def
/FontBBox {0 -10 1000 810} readonly def
/Encoding 256 array"""
print string.join(encoding, '\n')
i = len(encoding)
while i<256:
print 'dup %i /.notdef put' % i
i = i+1
print """\
readonly def
currentdict end
currentfile eexec
dup /Private 15 dict dup begin
/RD{string currentfile exch readstring pop}executeonly def
/ND{noaccess def}executeonly def
/NP{noaccess put}executeonly def
/ForceBold false def
/BlueValues [ -10 0 800 810 640 650 720 730 ] def
/StdHW [ 65 ] def
/StdVW [ 65 ] def
/StemSnapH [ 65 800 ] def
/StemSnapV [ 65 150 ] def
/MinFeature {16 16} def
/password 5839 def
/Subrs 1 array
dup 0 {
return
} NP
ND
2 index
/CharStrings %i dict dup begin""" % count
print """\
/.notdef {
0 400 hsbw
endchar
} ND"""
print string.join(chars, '\n')
print """\
end
end
readonly put
noaccess put
dup/FontName get exch definefont pop
mark currentfile closefile"""
|
hfp/tensorflow-xsmm | tensorflow/python/ops/control_flow_util.py | Python | apache-2.0 | 12,264 | 0.0106 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilty functions for control flow.
This file is necessary to avoid cyclic dependencies between ops.py and
control_flow_ops.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from tensorflow.python.platform import tf_logging as logging
ENABLE_CONTROL_FLOW_V2 = (os.getenv("TF_ENABLE_CONTROL_FLOW_V2", "0") != "0" or
os.getenv("TF_ENABLE_COND_V2", "0") != "0" or
os.getenv("TF_ENABLE_WHILE_V2", "0") != "0" or
os.getenv("TF_ENABLE_TENSOR_ARRAY_V2", "0") != "0")
def EnableControlFlowV2(graph):
  """Returns whether control flow v2 should be used in `graph`."""
  # Enable new control flow in FuncGraphs (but not legacy _FuncGraphs).
  # TODO(skyewm): do something better than hasattr without messing up imports.
  return ENABLE_CONTROL_FLOW_V2 or (
      graph.building_function and not hasattr(graph, "_captured"))
def IsInXLAContext(op):
  """Returns true if `op` is marked for XLA compilation.

  True when the op carries the _XlaCompile attr, or when it lives inside
  an XLA control flow context.
  """
  try:
    xla_compile = op.get_attr("_XlaCompile")
    if xla_compile: return True
  except ValueError:
    # get_attr raises ValueError when the attr is not present.
    pass
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingXLAContext(ctxt) is not None
def InXlaContext(graph):
  """Returns true if `graph`'s current control flow context is in XLA."""
  ctxt = graph._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingXLAContext(ctxt) is not None
def GraphOrParentsInXlaContext(graph):
  """Returns true if `graph` or any of its outer graphs is in XLA."""
  while True:
    if InXlaContext(graph): return True
    try:
      graph = graph.outer_graph
    except AttributeError:
      # Reached a graph with no outer_graph: not inside XLA anywhere.
      return False
def IsInWhileLoop(op):
  """Returns true if `op`'s control flow context is (in) a while loop."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingWhileContext(ctxt) is not None
def IsInCond(op):
  """Returns true if `op`'s control flow context is (in) a cond."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingCondContext(ctxt) is not None
def IsSwitch(op):
  """Return true if `op` is a Switch."""
  return op.type in ("Switch", "RefSwitch")
def IsMerge(op):
  """Return true if `op` is a Merge."""
  return op.type in ("Merge", "RefMerge")
def IsLoopEnter(op):
  """Returns true if `op` is an Enter."""
  return op.type in ("Enter", "RefEnter")
def IsLoopExit(op):
  """Return true if `op` is an Exit."""
  return op.type in ("Exit", "RefExit")
def IsCondSwitch(op):
  """Return true if `op` is the Switch for a conditional."""
  if not IsSwitch(op):
    return False
  if not op.outputs:
    return False
  # Switch nodes are not part of the cond control flow context that they
  # represent, so consider the consumers of its outputs to determine if it is
  # cond switch or not. A switch is a cond switch iff all its consumers are in
  # cond contexts.
  is_cond_switch = True
  for o in op.outputs:
    for c in o.consumers():
      ctxt = c._get_control_flow_context()  # pylint: disable=protected-access
      if IsLoopEnter(c):
        # A consuming loop Enter sits in the loop's context; compare
        # against its outer context instead.
        ctxt = ctxt.outer_context
      is_cond_switch = is_cond_switch and (ctxt is not None and
                                           ctxt.IsCondContext())
  return is_cond_switch
def IsCondMerge(op):
  """Return true if `op` is the Merge for a conditional."""
  if not IsMerge(op):
    return False
  if not op.inputs:
    return False
  # Merge nodes are not part of the cond control flow context that they
  # represent, so consider the inputs to the merge of to determine if it is
  # cond merge or not: A merge is a cond merge iff all its inputs are in
  # cond contexts.
  is_cond_merge = True
  for i in op.inputs:
    # Fixed garbled extraction: the call below and the conjunction were
    # split by dataset separators ("GetOut | putContext", "an | d").
    ctxt = GetOutputContext(i.op)
    is_cond_merge = is_cond_merge and ctxt is not None and ctxt.IsCondContext()
  return is_cond_merge
def IsLoopSwitch(op):
  """Return true if `op` is the Switch for a while loop."""
  if IsSwitch(op):
    ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
    # A Switch in a while context that is not a cond Switch drives the loop.
    return ctxt is not None and ctxt.IsWhileContext() and not IsCondSwitch(op)
  return False
def IsLoopMerge(op):
  """Return true if `op` is the Merge for a while loop."""
  if IsMerge(op):
    ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
    return ctxt is not None and ctxt.IsWhileContext() and not IsCondMerge(op)
  return False
def IsLoopConstantEnter(op):
  """Return true iff op is a loop invariant."""
  # Loop-invariant values enter the loop through an Enter marked constant.
  return IsLoopEnter(op) and op.get_attr("is_constant")
def GetLoopConstantEnter(value):
  """Return the enter op if we can infer `value` to be a loop invariant."""
  # Walk back through value-preserving ops (Switch/Identity and their Ref
  # variants) to the producing op.
  id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"}
  op = value.op
  while op.type in id_ops:
    op = op.inputs[0].op
  return op if IsLoopConstantEnter(op) else None
def GetOutputContext(op):
  """Return the control flow context for the output of an op."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  # Exit nodes usually have a control flow context, except in the case where the
  # exit node was imported via import_graph_def (in which case no nodes have
  # control flow contexts).
  if ctxt is not None and IsLoopExit(op):
    # An Exit's output lives outside the loop, hence the outer context.
    ctxt = ctxt.outer_context
  return ctxt
def GetContainingWhileContext(ctxt, stop_ctxt=None):
  """Returns the first ancestor WhileContext of `ctxt`.

  Returns `ctxt` itself when it is a WhileContext, or None when `ctxt` is
  not inside a while loop.

  Args:
    ctxt: ControlFlowContext
    stop_ctxt: ControlFlowContext, optional. If provided, the search ends
      as soon as `stop_ctxt` is reached.

  Returns:
    The most nested WhileContext containing `ctxt` (possibly `ctxt`
    itself), `stop_ctxt` if encountered during the walk, or None.
  """
  node = ctxt
  while node:
    if node.IsWhileContext():
      return node
    if node == stop_ctxt:
      return node
    node = node.outer_context
  return None
def GetContainingXLAContext(ctxt):
  """Returns `ctxt` or its nearest enclosing XLAContext, or None.

  Args:
    ctxt: ControlFlowContext

  Returns:
    The most nested XLAContext containing `ctxt` (possibly `ctxt`
    itself), or None when there is no enclosing XLA context.
  """
  node = ctxt
  while node:
    if node.IsXLAContext():
      return node
    node = node.outer_context
  return None
def GetContainingCondContext(ctxt):
  """Returns `ctxt` or its nearest enclosing CondContext, or None.

  Args:
    ctxt: ControlFlowContext

  Returns:
    The most nested CondContext containing `ctxt` (possibly `ctxt`
    itself), or None when `ctxt` is not inside a cond.
  """
  node = ctxt
  while node:
    if node.IsCondContext():
      return node
    node = node.outer_context
  return None
def IsContainingContext(ctxt, maybe_containing_ctxt):
  """Returns true if `maybe_containing_ctxt` is or contains `ctxt`."""
  node = ctxt
  while node is not maybe_containing_ctxt:
    if node is None:
      return False
    node = node.outer_context
  return True
def OpInContext(op, ctxt):
  """Returns true if `ctxt` is or contains `op`'s control flow context."""
  return IsContainingContext(op._get_control_flow_context(), ctxt)  # pylint: disable=protected-access
def TensorInContext(tensor, ctxt):
  """Returns true if `ctxt` is or contains the context of `tensor`'s op."""
  return OpInContext(tensor.op, ctxt)
def CheckInputFromValidContext(op, input_op):
"""Returns whether `input_op` can be used from `op`s context.
Conceptually, only inputs from op's while context or any ancestor while
context (including outside of any context) are valid. In practice, there ar |
nijinashok/sos | sos/plugins/manageiq.py | Python | gpl-2.0 | 3,030 | 0 | # -*- python -*-
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc., Pep Turró Mauri <pep@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin
import os.path
class ManageIQ(Plugin, RedHatPlugin):
    """ManageIQ/CloudForms related information
    """

    plugin_name = 'manageiq'

    miq_dir = '/var/www/miq/vmdb'

    packages = ('cfme',)

    # Version/identity marker files collected verbatim.
    files = (
        os.path.join(miq_dir, 'BUILD'),
        os.path.join(miq_dir, 'GUID'),
        os.path.join(miq_dir, 'VERSION')
    )

    # Config files to collect from miq_dir/config/
    miq_conf_dir = os.path.join(miq_dir, "config")
    miq_conf_files = [
        'application.rb',
        'boot.rb',
        'environment.rb',
        'preinitializer.rb',
        'routes.rb',
        'environments/metric_fu.rb',
        'environments/production.rb',
        'api.yml',
        'broker_notify_properties.tmpl.yml',
        'capacity.tmpl.yml',
        'dashboard.yml',
        'event_handling.tmpl.yml',
        'hostdefaults.tmpl.yml',
        'mongrel_cluster.yml',
        'mongrel_win.yml',
        'storage.tmpl.yml',
        'vmdb.tmpl.yml',
        'vmdb.yml.db',
        'event_handling.yml.db',
        'lighttpd.conf',
        'replication.conf'
    ]

    # Log files to collect from miq_dir/log/
    miq_log_dir = os.path.join(miq_dir, "log")
    miq_log_files = [
        'appliance_console.log',
        'api.log',
        'audit.log',
        'automation.log',
        'aws.log',
        'evm.log',
        'fog.log',
        'miq_ntpdate.log',
        'mongrel.log',
        'policy.log',
        'prince.log',
        'production.log',  # fixed garbled filename
        'rhevm.log',
        'scvmm.log',
        'top_output.log',
        'vim.log',
        'vmdb_restart.log',
        'vmstat_output.log',  # duplicate entry removed
        'apache/miq_apache.log',
        'apache/ssl_access.log',
        'apache/ssl_error.log',
        'apache/ssl_request.log',
        'apache/ssl_mirror_request.log',  # fixed garbled filename
        'apache/ssl_mirror_error.log',
        'apache/ssl_mirror_access_error.log',
        'gem_list.txt',
        'last_startup.txt',
        'package_list_rpm.txt',
        'vendor_gems.txt'
    ]

    def setup(self):
        """Register the marker, config and log files for collection."""
        if self.get_option("all_logs"):
            # Turn each log name into a glob so logrotated copies are
            # collected too.  A list comprehension (not a lazy map object)
            # keeps the attribute reusable and Python-3 safe.
            self.miq_log_files = [name + '*' for name in self.miq_log_files]
        self.add_copy_spec(list(self.files))
        self.add_copy_spec([
            os.path.join(self.miq_conf_dir, x) for x in self.miq_conf_files
        ])
        self.add_copy_spec([
            os.path.join(self.miq_log_dir, x) for x in self.miq_log_files
        ])
# vim: set et ts=4 sw=4 :
|
martbhell/wasthereannhlgamelastnight | src/lib/pyasn1_modules/rfc7906.py | Python | mit | 18,921 | 0.002537 | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# NSA's CMS Key Management Attributes
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc7906.txt
# https://www.rfc-editor.org/errata/eid5850
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules import rfc2634
from pyasn1_modules import rfc4108
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc6010
from pyasn1_modules import rfc6019
from pyasn1_modules import rfc7191
MAX = float('inf')
# Imports From RFC 2634
id_aa_contentHint = rfc2634.id_aa_contentHint
ContentHints = rfc2634.ContentHints
id_aa_securityLabel = rfc2634.id_aa_securityLabel
SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
SecurityClassification = rfc2634.SecurityClassification
ESSPrivacyMark = rfc2634.ESSPrivacyMark
SecurityCategories= rfc2634.SecurityCategories
ESSSecurityLabel = rfc2634.ESSSecurityLabel
# Imports From RFC 4108
id_aa_communityIdentifiers = rfc4108.id_aa_communityIdentifiers
CommunityIdentifier = rfc4108.CommunityIdentifier
CommunityIdentifiers = rfc4108.CommunityIdentifiers
# Imports From RFC 5280
AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
Name = rfc5280.Name
Certificate = rfc5280.Certificate
GeneralNames = rfc5280.GeneralNames
GeneralName = rfc5280.GeneralName
SubjectInfoAccessSyntax = rfc5280.SubjectInfoAccessSyntax
id_pkix = rfc5280.id_pkix
id_pe = rfc5280.id_pe
id_pe_subjectInfoAccess = rfc5280.id_pe_subjectInfoAccess
# Imports From RFC 6010
CMSContentConstraints = rfc6010.CMSContentConstraints
# Imports From RFC 6019
BinaryTime = rfc6019.BinaryTime
id_aa_binarySigningTime = rfc6019.id_aa_binarySigningTime
BinarySigningTime = rfc6019.BinarySigningTime
# Imports From RFC 5652
Attribute = rfc5652.Attribute
CertificateSet = rfc5652.CertificateSet
CertificateChoices = rfc5652.CertificateChoices
id_contentType = rfc5652.id_contentType
ContentType = rfc5652.ContentType
id_messageDigest = rfc5652.id_messageDigest
MessageDigest = rfc5652.MessageDigest
# Imports From RFC 7191
SIREntityName = rfc7191.SIREntityName
id_aa_KP_keyPkgIdAndReceiptReq = rfc7191.id_aa_KP_keyPkgIdAndReceiptReq
KeyPkgIdentifierAndReceiptReq = rfc7191.KeyPkgIdentifierAndReceiptReq
# Key Province Attribute
# OID for the version 2 key-province attribute.
id_aa_KP_keyProvinceV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.71')
class KeyProvinceV2(univ.ObjectIdentifier):
    # KeyProvinceV2 ::= OBJECT IDENTIFIER
    pass
# Prototype Attribute carrying a KeyProvinceV2 value.
aa_keyProvince_v2 = Attribute()
aa_keyProvince_v2['attrType'] = id_aa_KP_keyProvinceV2
aa_keyProvince_v2['attrValues'][0] = KeyProvinceV2()
# Manifest Attribute
id_aa_KP_manifest = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.72')
class ShortTitle(char.PrintableString):
    # ShortTitle ::= PrintableString
    pass
class Manifest(univ.SequenceOf):
    # Manifest ::= SEQUENCE SIZE (1..MAX) OF ShortTitle
    pass
Manifest.componentType = ShortTitle()
Manifest.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
aa_manifest = Attribute()
aa_manifest['attrType'] = id_aa_KP_manifest
aa_manifest['attrValues'][0] = Manifest()
# Key Algorithm Attribute
id_kma_keyAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.1')
class KeyAlgorithm(univ.Sequence):
pass
KeyAlgorithm.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyAlg', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('checkWordAlg', univ.ObjectIdentifier( | ).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('crcAlg', univ.ObjectIdentifier().subtype(
| implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
aa_keyAlgorithm = Attribute()
aa_keyAlgorithm['attrType'] = id_kma_keyAlgorithm
aa_keyAlgorithm['attrValues'][0] = KeyAlgorithm()
# User Certificate Attribute
# X.500 userCertificate attribute (OID 2.5.4.36) holding a Certificate.
id_at_userCertificate = univ.ObjectIdentifier('2.5.4.36')
aa_userCertificate = Attribute()
aa_userCertificate['attrType'] = id_at_userCertificate
aa_userCertificate['attrValues'][0] = Certificate()
# Key Package Receivers Attribute
id_kma_keyPkgReceiversV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.16')
class KeyPkgReceiver(univ.Choice):
    # KeyPkgReceiver ::= CHOICE { sirEntity [0] SIREntityName,
    #                             community [1] CommunityIdentifier }
    pass
KeyPkgReceiver.componentType = namedtype.NamedTypes(
    namedtype.NamedType('sirEntity', SIREntityName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('community', CommunityIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyPkgReceiversV2(univ.SequenceOf):
    # KeyPkgReceiversV2 ::= SEQUENCE SIZE (1..MAX) OF KeyPkgReceiver
    pass
KeyPkgReceiversV2.componentType = KeyPkgReceiver()
KeyPkgReceiversV2.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
aa_keyPackageReceivers_v2 = Attribute()
aa_keyPackageReceivers_v2['attrType'] = id_kma_keyPkgReceiversV2
aa_keyPackageReceivers_v2['attrValues'][0] = KeyPkgReceiversV2()
# TSEC Nomenclature Attribute
id_kma_TSECNomenclature = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.3')
# Edition identifiers: character or numeric editions, singly or as ranges.
class CharEdition(char.PrintableString):
    # CharEdition ::= PrintableString
    pass
class CharEditionRange(univ.Sequence):
    # CharEditionRange ::= SEQUENCE { firstCharEdition, lastCharEdition }
    pass
CharEditionRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstCharEdition', CharEdition()),
    namedtype.NamedType('lastCharEdition', CharEdition())
)
class NumEdition(univ.Integer):
    # NumEdition ::= INTEGER (0..308915776)
    pass
NumEdition.subtypeSpec = constraint.ValueRangeConstraint(0, 308915776)
class NumEditionRange(univ.Sequence):
    # NumEditionRange ::= SEQUENCE { firstNumEdition, lastNumEdition }
    pass
NumEditionRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstNumEdition', NumEdition()),
    namedtype.NamedType('lastNumEdition', NumEdition())
)
class EditionID(univ.Choice):
    # EditionID ::= CHOICE over char/num editions and ranges
    # (context tags [1]-[4]).
    pass
EditionID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('char', univ.Choice(componentType=namedtype.NamedTypes(
        namedtype.NamedType('charEdition', CharEdition().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('charEditionRange', CharEditionRange().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
    ))
    ),
    namedtype.NamedType('num', univ.Choice(componentType=namedtype.NamedTypes(
        namedtype.NamedType('numEdition', NumEdition().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.NamedType('numEditionRange', NumEditionRange().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
    ))
    )
)
# Register numbers, singly or as ranges (context tags [5]-[6]).
class Register(univ.Integer):
    # Register ::= INTEGER (0..2147483647)
    pass
Register.subtypeSpec = constraint.ValueRangeConstraint(0, 2147483647)
class RegisterRange(univ.Sequence):
    # RegisterRange ::= SEQUENCE { firstRegister, lastRegister }
    pass
RegisterRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstRegister', Register()),
    namedtype.NamedType('lastRegister', Register())
)
class RegisterID(univ.Choice):
    # RegisterID ::= CHOICE { register [5], registerRange [6] }
    pass
RegisterID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('register', Register().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
    namedtype.NamedType('registerRange', RegisterRange().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)))
)
# Segment numbers, singly or as ranges (context tags [7]-[8]).
class SegmentNumber(univ.Integer):
    # SegmentNumber ::= INTEGER (1..127)
    pass
SegmentNumber.subtypeSpec = constraint.ValueRangeConstraint(1, 127)
class SegmentRange(univ.Sequence):
    # SegmentRange ::= SEQUENCE { firstSegment, lastSegment }
    pass
SegmentRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstSegment', SegmentNumber()),
    namedtype.NamedType('lastSegment', SegmentNumber())
)
class SegmentID(univ.Choice):
    # SegmentID ::= CHOICE { segmentNumber [7], segmentRange [8] }
    pass
SegmentID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('segmentNumber', SegmentNumber().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
    namedtype.NamedType('segmentRange', SegmentRange().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)))
)
class TSECNomenclature(univ.Sequence):
pass
TSECNomenclature.componentType = namedtype.NamedTypes(
namedtype.NamedType('shortTitle', ShortTitle()),
namedtyp |
pybee/toga-iOS | toga_iOS/window.py | Python | bsd-3-clause | 1,335 | 0.002247 | from toga.interface.window import Window as WindowInterface
from .container import Container
from .libs import *
from . import dialogs
class Window(WindowInterface):
_IMPL_CLASS = UIWindow
_CONTAINER_CLASS = Container
_DIALOG_MODULE = dialogs
def __init__(self, title=None, position=(100, 100), size=(640, 480), toolbar=None, resizeable=True, closeable=True, minimizable=True):
super().__init__(title=None, position=(100, 100), size=(640, 480), toolbar=None, resizeable=True, closeable=False, minimizable=False)
self._create()
def create(self):
self._screen = UIScreen.mainScreen()
self._impl = self._IMPL_CLASS.alloc().initWithFrame_(self._screen.bounds)
self._impl._interface = self
self._controller = UIViewController.alloc().init()
self._impl.rootViewController = self._controller
d | ef _set_content(self, widget):
self._controller.view = self._container._impl
def _set_title(self, title):
pass
def show(self):
self._impl.makeKeyAndVisible()
# self._impl.visualizeConstraints_(self._impl.contentView().constraints())
# Do the | first layout render.
self.content._update_layout(
width=self._screen.bounds.size.width,
height=self._screen.bounds.size.height
)
|
appop/bitcoin | qa/rpc-tests/prioritise_transaction.py | Python | mit | 5,996 | 0.003169 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
from test_framework.test_framework import nealcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE
class PrioritiseTransactionTest(nealcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.txouts = gen_return_txouts()
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def run_test(self):
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined (lower
# the priority to ensure its not mined due to priority)
self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
print("Assert that prioritised transaction was mined")
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx != None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nod | es[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
| self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
print("Assert that de-prioritised transaction is still in mempool")
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free, low priority transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
txid = self.nodes[0].sendrawtransaction(tx_hex)
# A tx that spends an in-mempool tx has 0 priority, so we can use it to
# test the effect of using prioritise transaction for mempool acceptance
inputs = []
inputs.append({"txid": txid, "vout": 0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
try:
self.nodes[0].sendrawtransaction(tx2_hex)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
assert(tx2_id not in self.nodes[0].getrawmempool())
else:
assert(False)
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000 byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
print("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
assert(tx2_id in self.nodes[0].getrawmempool())
if __name__ == '__main__':
PrioritiseTransactionTest().main()
|
rhyswhitley/savanna_iav | src/figures/modchecks/test_phenology.py | Python | cc0-1.0 | 3,041 | 0.005919 | #!/usr/bin/env python2
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from os.path import expanduser
import scipy
def treegrass_frac(ndvi, day_rs):
"""
Process based on Donohue et al. (2009) to separate out tree and grass cover,
using moving windows (adapted here for daily time-step)
"""
# first calculate the 7-month moving minimum window across the time-series
# period = 7
fp1 = moving_something(np.min, ndvi, period=3, day_rs=day_rs)
# period = 9
fp2 = moving_something(lambda x: sum(x)/(9*day_rs), fp1, period=9, day_rs=day_rs)
fr1 = ndvi - fp2
ftree = [p2 - np.abs(r1) if r1 < 0 else p2 for p2, r1 in zip(fp2, fr1)]
fgrass = ndvi - ftree
return pd.DataFrame({'total':ndvi, 'tree':ftree, 'grass':fgrass})
def moving_something(_fun, tseries, period, day_rs=16, is_days=True):
"""
Applies a function to a moving window of the time-series:
ft_ = function([ f(t-N), f(t). f(t+N)])
"""
# if the time-series is at a day-time step, update the window to a step-size of 16 days
if is_days:
p0 = period*day_rs
else:
p0 = period
# find upper and lower bounds of the moving window
half = p0//2
tlen = len(tseries)
twin = [0]*tlen
for im in range(tlen):
# find the something for the window that satisfy the edge conditions
if im < half:
# fold back onto the end of the time-series
twin[im] = _fun(np.hstack([tseries[tlen-(half-im):tlen],\
tseries[0:im+half]]))
elif im > tlen | -half:
# fold back into the beginning of the time-series
twin[im] = _fun(np.hstack([tseries[im-half:tlen],\
tseries[0:half-(tlen-im)]]))
else:
twin[im] = _fun(tseries[im-half:im+half])
return twin
def import_one_year(file_name):
"""
Imports the one-year climatology, resetting time columns
as a multi-index pandas dataframe
"""
# universal time l | abels
time_label = ['Month', 'Day', 'Hour', 'Min']
# import data
clim_raw = pd.read_csv(clim_met_file)
# fix column names
clim_raw.columns = time_label + list(clim_raw.columns[4:])
# datetime column
clim_raw['DT'] = pd.date_range("2004-01-01", periods=len(clim_raw), freq="30min")
# index on time
clim_data = clim_raw.set_index('DT')
# return to user
return clim_data.ix[:, 4:]
def main():
climo_raw = import_one_year(clim_met_file).resample('D', how='mean')
phen = treegrass_frac(climo_raw["Lai_1km_new_smooth"], 30)
plt.plot(phen.tree*0.7, c='blue')
plt.plot(phen.grass*1.3, c='red')
plt.show()
return None
if __name__ == "__main__":
clim_met_file = expanduser("~/Dropbox/30 minute met driver climatology v12a HowardSprings.csv")
ec_tower_file = expanduser("~/Dropbox/30 minute met driver 2001-2015 v12a HowardSprings.csv")
main()
|
BrightnessMonitor/BrightnessMonitorClient | src/brightnessmonitorclient/raspberry/dbController.py | Python | mit | 2,396 | 0.002087 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys
import time
sqlite_file = '/var/db/BrightnessMonitor/db.sqlite'
table = 'data'
vartime = 'time'
vardata = 'data'
con = None
cur = None
def create():
'''
Creates database table 'data' with Integer values time and data
'''
try:
con = lite.connect(sqlite_file)
cur = con.cursor()
except lite.OperationalError:
print "create(): Error %s:" % lite.OperationalError.args[0]
except lite.Error, e:
print "create(): Error %s:" % e.args[0]
sys.exit(1)
with con:
cur.execute("CREATE TABLE IF NOT EXISTS {table} ({time} INTEGER, {data} INTEGER);".
format(table=table, time=vartime, data=vardata))
con.commit()
def drop_recreate_db():
delete()
create()
def insert(rawdata):
'''
Inserts data into data table, adds timestamp to data
Args:
rawdata: data to be inserted
'''
try:
con = lite.connect(sqlite_file)
cur = con.cursor()
# inserts time and raw data into database
# time represents the number of seconds since Jan 1, 1970 00:00:00
cur.execute("INSERT INTO {table} VALUES ({time}, {data})".
format(table=table, time=int(time.time()), data= rawdata))
con.commit()
except lite.OperationalError:
print "insert(): Error %s:" % lite.OperationalError.args[0]
except lite.Error, e:
print "insert(): Error %s:" % e.args[0]
sys.exit(1)
finally:
con.close()
def retrieve():
data = []
try:
con = lite.connect(sqlite_file)
cur = con.cursor()
except lite.OperationalError:
print "retrieve(): Error %s:" % lite.OperationalError.args[0]
except lite.Error, e:
print "retrieve(): Error %s:" % e.args[0]
sys.exit(1)
with con:
cur.execute('SELECT * FROM data')
table = cur.fetchall()
for row in table:
data.append(row)
return data
def delete():
try:
con = lite.connect(sqlite_file)
cur = con.cursor()
except lite.OperationalError:
print "delete(): Error %s:" % lite.OperationalError.args[0]
except lite.Error, e | :
print "delete: Error %s:" % e.args[0]
sys.exit(1)
with con:
cur.execute("DROP TABLE IF EXI | STS data") |
tseaver/google-cloud-python | monitoring/tests/unit/gapic/v3/test_metric_service_client_v3.py | Python | apache-2.0 | 16,399 | 0.000915 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import enums
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestMetricServiceClient(object):
def test_list_monitored_resource_descriptors(self):
# Setup Expected Response
next_page_token = ""
resource_descriptors_element = {}
resource_descriptors = [resource_descriptors_element]
expected_response = {
"next_page_token": next_page_token,
"resource_descriptors": resource_descriptors,
}
expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_monitored_resource_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.resource_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_monitored_resource_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_monitored_resource_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_monitored_resource_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name_2,
"type": type_,
"display_name": display_name,
"description": description,
}
expected_response = monitored_resource_pb2.MonitoredResourceDescriptor(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
| with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.monitored_resource_descriptor_path(
"[PROJECT]", "[MONITORED_RESOURCE_DESCRIP | TOR]"
)
response = client.get_monitored_resource_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_monitored_resource_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.monitored_resource_descriptor_path(
"[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]"
)
with pytest.raises(CustomException):
client.get_monitored_resource_descriptor(name)
def test_list_metric_descriptors(self):
# Setup Expected Response
next_page_token = ""
metric_descriptors_element = {}
metric_descriptors = [metric_descriptors_element]
expected_response = {
"next_page_token": next_page_token,
"metric_descriptors": metric_descriptors,
}
expected_response = metric_service_pb2.ListMetricDescriptorsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_metric_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.metric_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMetricDescriptorsRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_metric_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_metric_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_metric_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
unit = "unit3594628"
description = "description-1724546052"
display_name = "displayName1615086568"
|
gkotian/zulip | zerver/test_signup.py | Python | apache-2.0 | 20,333 | 0.00182 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from zilencer.models import Deployment
from zerver.models import (
get_realm, get_user_profile_by_email,
PreregistrationUser, Realm, ScheduledJob, UserProfile,
)
from zerver.lib.actions import (
create_stream_if_needed,
do_add_subscription,
set_default_streams,
)
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import enqueue_welcome_emails, one_click_unsubscribe_link
from zerver.lib.test_helpers import AuthedTestCase, find_key_by_email, queries_captured
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
import re
import ujson
from urlparse import urlparse
class PublicURLTest(TestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
for url in urls:
if method == "get":
response = self.client.get(url)
else:
response = self.client.post(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so becaus | e this Django test mechanism doesn't go
# through Tornado.
get_urls = {200: ["/accounts/home/", "/accounts/login/"],
302: ["/"],
401: ["/api/v1/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
],
}
post_urls = {200: ["/accounts | /login/"],
302: ["/accounts/logout/"],
401: ["/json/get_public_streams",
"/json/get_old_messages",
"/json/update_pointer",
"/json/send_message",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/remove",
"/json/subscriptions/exists",
"/json/subscriptions/add",
"/json/subscriptions/property",
"/json/get_subscribers",
"/json/fetch_api_key",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/send_message",
"/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
for status_code, url_set in get_urls.iteritems():
self.fetch("get", url_set, status_code)
for status_code, url_set in post_urls.iteritems():
self.fetch("post", url_set, status_code)
def test_get_gcid_when_not_configured(self):
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client.get("/api/v1/fetch_google_client_id")
self.assertEquals(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
)
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client.get("/api/v1/fetch_google_client_id")
self.assertEquals(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
)
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class LoginTest(AuthedTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
self.login("hamlet@zulip.com", "wrongpassword")
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
result = self.login("xxx@zulip.com", "xxx")
self.assertIn("Please enter a correct email and password", result.content)
def test_register(self):
realm = get_realm("zulip.com")
streams = ["stream_%s" % i for i in xrange(40)]
for stream in streams:
create_stream_if_needed(realm, stream)
set_default_streams(realm, streams)
with queries_captured() as queries:
self.register("test", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_length(queries, 67)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_register_deactivated(self):
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip.com")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test", "test")
self.assertIn("has been deactivated", result.content.replace("\n", " "))
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip.com")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login("hamlet@zulip.com")
self.assertIn("has been deactivated", result.content.replace("\n", " "))
def test_logout(self):
self.login("hamlet@zulip.com")
self.client.post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client.post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client.post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_register_first_user_with_invites(self):
"""
The first user in a realm has a special step in their signup workflow
for inviting coworkers. Do as realistic an end-to-end test as we can
without Tornado running.
"""
username = "user1"
password = "test"
domain = "test.com"
email = "user1@test.com"
# Create a new realm to ensure that we're the first user in it.
Realm.objects.create(domain=domain, name="Test Inc.")
# Start the signup process by supplying an email address.
result = self.client.post('/accounts/home/', {'email': email})
# Check the redirect telling you to check your mail for a confirmation
# link.
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s@%s" % (username, domain)))
result = |
start-jsk/jsk_apc | jsk_2016_01_baxter_apc/node_scripts/fcn_mask_for_label_names.py | Python | bsd-3-clause | 5,525 | 0 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from chainer import cuda
import chainer.serializers as S
from chainer import Variable
from fcn.models import FCN32s
import numpy as np
import cv_bridge
import jsk_ap | c2016_common
from jsk_topic_tools import ConnectionBasedTransport
from jsk_topic_tools.log_utils import logwarn_throttle
from jsk_topic_tools.log_utils import jsk_logwarn
import message_filters
import rospy
from sensor_msgs.msg import Image
from skimage.color import label2rgb
from skimage.transform import resize
clas | s FCNMaskForLabelNames(ConnectionBasedTransport):
mean_bgr = np.array((104.00698793, 116.66876762, 122.67891434))
def __init__(self):
super(self.__class__, self).__init__()
# set target_names
self.target_names = ['background'] + \
[datum['name']
for datum in jsk_apc2016_common.get_object_data()]
n_class = len(self.target_names)
assert n_class == 40
# load model
self.gpu = rospy.get_param('~gpu', 0)
chainermodel = rospy.get_param('~chainermodel')
self.model = FCN32s(n_class=n_class)
S.load_hdf5(chainermodel, self.model)
if self.gpu != -1:
self.model.to_gpu(self.gpu)
jsk_logwarn('>> Model is loaded <<')
while True:
self.tote_contents = rospy.get_param('~tote_contents', None)
if self.tote_contents is not None:
break
logwarn_throttle(10, 'param ~tote_contents is not set. Waiting..')
rospy.sleep(0.1)
self.label_names = rospy.get_param('~label_names')
jsk_logwarn('>> Param is set <<')
self.pub = self.advertise('~output', Image, queue_size=1)
self.pub_debug = self.advertise('~debug', Image, queue_size=1)
def subscribe(self):
self.sub_img = message_filters.Subscriber(
'~input', Image, queue_size=1, buff_size=2**24)
self.sub_mask = message_filters.Subscriber(
'~input/mask', Image, queue_size=1, buff_size=2**24)
sync = message_filters.ApproximateTimeSynchronizer(
[self.sub_img, self.sub_mask], queue_size=100, slop=0.1)
sync.registerCallback(self._callback)
def unsubscribe(self):
self.sub_img.unregister()
self.sub_mask.unregister()
def _callback(self, img_msg, mask_msg):
bridge = cv_bridge.CvBridge()
bgr_img = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
mask_img = bridge.imgmsg_to_cv2(mask_msg, desired_encoding='mono8')
if mask_img.size < 1:
logwarn_throttle(10, 'Too small sized image')
return
logwarn_throttle(10, '[FCNMaskForLabelNames] >> Start Processing <<')
if mask_img.ndim == 3 and mask_img.shape[2] == 1:
mask_img = mask_img.reshape(mask_img.shape[:2])
if mask_img.shape != bgr_img.shape[:2]:
jsk_logwarn('Size of mask and color image is different.'
'Resizing.. mask {0} to {1}'
.format(mask_img.shape, bgr_img.shape[:2]))
mask_img = resize(mask_img, bgr_img.shape[:2],
preserve_range=True).astype(np.uint8)
blob = bgr_img - self.mean_bgr
blob = blob.transpose((2, 0, 1))
x_data = np.array([blob], dtype=np.float32)
if self.gpu != -1:
x_data = cuda.to_gpu(x_data, device=self.gpu)
x = Variable(x_data, volatile=True)
self.model(x)
pred_datum = cuda.to_cpu(self.model.score.data[0])
candidate_labels = [self.target_names.index(name)
for name in self.tote_contents]
label_pred_in_candidates = pred_datum[candidate_labels].argmax(axis=0)
label_pred = np.zeros_like(label_pred_in_candidates)
for idx, label_val in enumerate(candidate_labels):
label_pred[label_pred_in_candidates == idx] = label_val
label_pred[mask_img == 0] = 0 # set bg_label
label_viz = label2rgb(label_pred, bgr_img, bg_label=0)
label_viz = (label_viz * 255).astype(np.uint8)
debug_msg = bridge.cv2_to_imgmsg(label_viz, encoding='rgb8')
debug_msg.header = img_msg.header
self.pub_debug.publish(debug_msg)
output_mask = np.ones(mask_img.shape, dtype=np.uint8)
output_mask *= 255
for label_val, label_name in enumerate(self.target_names):
if label_name in self.label_names:
assert label_name == 'kleenex_paper_towels'
assert label_val == 21
label_mask = ((label_pred == label_val) * 255).astype(np.uint8)
contours, hierachy = cv2.findContours(
label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(output_mask, contours, -1, 255, -1)
# output_mask[label_pred == label_val] = False
# output_mask = output_mask.astype(np.uint8)
# output_mask[output_mask == 1] = 255
output_mask[mask_img == 0] = 0
output_mask_msg = bridge.cv2_to_imgmsg(output_mask, encoding='mono8')
output_mask_msg.header = img_msg.header
self.pub.publish(output_mask_msg)
logwarn_throttle(10, '[FCNMaskForLabelNames] >> Finshed processing <<')
if __name__ == '__main__':
rospy.init_node('fcn_mask_for_label_names')
FCNMaskForLabelNames()
rospy.spin()
|
mohamedhagag/dvit-odoo8 | hr_annual_increase/__init__.py | Python | agpl-3.0 | 90 | 0.022222 | ##python package initial | ization
# -*- coding: utf-8 -*-
import models
#import calcula | tion
|
kubernetes-client/python | kubernetes/client/models/v2beta2_metric_spec.py | Python | apache-2.0 | 7,901 | 0 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2beta2MetricSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'container_resource': 'V2beta2ContainerResourceMetricSource',
        'external': 'V2beta2ExternalMetricSource',
        'object': 'V2beta2ObjectMetricSource',
        'pods': 'V2beta2PodsMetricSource',
        'resource': 'V2beta2ResourceMetricSource',
        'type': 'str'
    }

    attribute_map = {
        'container_resource': 'containerResource',
        'external': 'external',
        'object': 'object',
        'pods': 'pods',
        'resource': 'resource',
        'type': 'type'
    }

    def __init__(self, container_resource=None, external=None, object=None, pods=None, resource=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V2beta2MetricSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._container_resource = None
        self._external = None
        self._object = None
        self._pods = None
        self._resource = None
        self._type = None
        self.discriminator = None

        if container_resource is not None:
            self.container_resource = container_resource
        if external is not None:
            self.external = external
        if object is not None:
            self.object = object
        if pods is not None:
            self.pods = pods
        if resource is not None:
            self.resource = resource
        self.type = type

    @property
    def container_resource(self):
        """Gets the container_resource of this V2beta2MetricSpec.  # noqa: E501

        :return: The container_resource of this V2beta2MetricSpec.  # noqa: E501
        :rtype: V2beta2ContainerResourceMetricSource
        """
        return self._container_resource

    @container_resource.setter
    def container_resource(self, container_resource):
        """Sets the container_resource of this V2beta2MetricSpec.

        :param container_resource: The container_resource of this V2beta2MetricSpec.  # noqa: E501
        :type: V2beta2ContainerResourceMetricSource
        """
        self._container_resource = container_resource

    @property
    def external(self):
        """Gets the external of this V2beta2MetricSpec.  # noqa: E501

        :return: The external of this V2beta2MetricSpec.  # noqa: E501
        :rtype: V2beta2ExternalMetricSource
        """
        return self._external

    @external.setter
    def external(self, external):
        """Sets the external of this V2beta2MetricSpec.

        :param external: The external of this V2beta2MetricSpec.  # noqa: E501
        :type: V2beta2ExternalMetricSource
        """
        self._external = external

    @property
    def object(self):
        """Gets the object of this V2beta2MetricSpec.  # noqa: E501

        :return: The object of this V2beta2MetricSpec.  # noqa: E501
        :rtype: V2beta2ObjectMetricSource
        """
        return self._object

    @object.setter
    def object(self, object):
        """Sets the object of this V2beta2MetricSpec.

        :param object: The object of this V2beta2MetricSpec.  # noqa: E501
        :type: V2beta2ObjectMetricSource
        """
        self._object = object

    @property
    def pods(self):
        """Gets the pods of this V2beta2MetricSpec.  # noqa: E501

        :return: The pods of this V2beta2MetricSpec.  # noqa: E501
        :rtype: V2beta2PodsMetricSource
        """
        return self._pods

    @pods.setter
    def pods(self, pods):
        """Sets the pods of this V2beta2MetricSpec.

        :param pods: The pods of this V2beta2MetricSpec.  # noqa: E501
        :type: V2beta2PodsMetricSource
        """
        self._pods = pods

    @property
    def resource(self):
        """Gets the resource of this V2beta2MetricSpec.  # noqa: E501

        :return: The resource of this V2beta2MetricSpec.  # noqa: E501
        :rtype: V2beta2ResourceMetricSource
        """
        return self._resource

    @resource.setter
    def resource(self, resource):
        """Sets the resource of this V2beta2MetricSpec.

        :param resource: The resource of this V2beta2MetricSpec.  # noqa: E501
        :type: V2beta2ResourceMetricSource
        """
        self._resource = resource

    @property
    def type(self):
        """Gets the type of this V2beta2MetricSpec.  # noqa: E501

        type is the type of metric source.  It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled  # noqa: E501

        :return: The type of this V2beta2MetricSpec.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V2beta2MetricSpec.

        type is the type of metric source.  It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled  # noqa: E501

        :param type: The type of this V2beta2MetricSpec.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # dict.items() behaves identically on Python 2 and 3 here, so the
        # per-call dependency on six is unnecessary.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2beta2MetricSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2beta2MetricSpec):
            return True

        return self.to_dict() != other.to_dict()
|
28064212D/Prueba | Prueba.py | Python | gpl-2.0 | 1,593 | 0.03076 | import urllib2
def sumaDos():
    # NOTE(review): the name says "suma" (sum) but the expression multiplies;
    # this prints 200.  Confirm whether 10+20 was intended.
    print 10*20
def division(a,b):
    # Print the quotient of a and b.
    # NOTE(review): under Python 2, '/' floor-divides when both operands are
    # ints, so division(10, 20) prints 0, not 0.5 -- confirm that is intended.
    result=a/b
    print result
def cast():
    # Demo of numeric literals and casting.  The locals i and f and the
    # int(10.6) result (truncates toward zero -> 10) are all unused.
    i=10
    f=10.5
    int(10.6)
    print "Funcion: "
def main():
    # First of several main() definitions in this module; later definitions
    # shadow this one, so it never runs via the __main__ guard.
    sumaDos()
    division(10, 20)
def cast():
    # NOTE(review): redefines the earlier cast(); only this version survives.
    # Walks a list, a tuple and a dict, printing elements, then key/value pairs.
    lista=[1,2,3,"Hola",{"key1":"K","key2":"John"},(1,2,3)]
    tupla=(1,2,3)
    diccionario={"key1":"K","key2":"John","key3":"Peter"}
    print "Lista"
    for i in lista:
        print i
    # Appended after the iteration above, so "Hello" is never printed.
    lista.append("Hello")
    print "Tupla"
    for i in tupla:
        print i
    print "Diccionario"
    for i in diccionario:
        # Iterating a dict yields its keys.
        print i
    for k,v in diccionario.items():
        print "%s %s"%(k,v)
class Estudiante(object):
    """Simple student model holding a name (nombre) and an age (edad).

    The original body was corrupted by a stray ' | ' extraction artifact
    inside esMayor(); this restores the intended if/else logic and
    simplifies it to a direct boolean expression.
    """

    def __init__(self, nombre, edad):
        """Store the student's name and age."""
        self.nombre = nombre
        self.edad = edad

    def hola(self):
        """Return the student's name (used by callers to greet them)."""
        return self.nombre

    def esMayor(self):
        """Return True if the student is of legal age (18 or older)."""
        return self.edad >= 18
def EXCEPTION():
    # Demonstrates exception handling: 3/0 raises ZeroDivisionError,
    # which the (deliberately broad) handler reports as "Error".
    try:
        3/0
    except Exception:
        print "Error"
def main():
    """Exercise the Estudiante class, count to 10, then run the
    exception-handling demo.

    Restored from a corrupted line (a stray ' | ' artifact split the
    Estudiante constructor call).  print is written call-style, which is
    valid in both Python 2 and Python 3 for a single argument.
    """
    e = Estudiante("K", 20)
    print("Hola %s" % e.hola())
    if e.esMayor():
        print("Es mayor de edad")
    else:
        print("No es mayor de edad")
    # Count from 0 to 10 inclusive.
    contador = 0
    while contador <= 10:
        print(contador)
        contador += 1
    EXCEPTION()
def getWeb():
    # Fetch the school homepage and dump its HTML to stdout (Python 2 urllib2).
    try:
        web = urllib2.urlopen("http://itjiquilpan.edu.mx/")
        print web.read()
        web.close()
    except urllib2.HTTPError, e:
        # HTTP-level failure (4xx/5xx response).  Note the old-style
        # "except X, e" Python 2 syntax here vs "as e" below.
        print e
    except urllib2.URLError as e:
        # Network-level failure (DNS, refused connection, ...).
        print e
def main():
    # Shadows the previous main(); itself shadowed by the final main() below,
    # so getWeb() is never reached from the __main__ guard.
    getWeb()
def main():
    # Last definition wins: the script entry point only runs cast().
    cast()
# Script entry point; dispatches to whichever main() was defined last.
if __name__ == "__main__":
    main()
|
industrydive/premailer | premailer/__init__.py | Python | bsd-3-clause | 124 | 0 | from __future__ import absolute_import, uni | code_literals
from .premailer import Premailer, transform
__version__ = '2.9. | 4'
|
anhstudios/swganh | data/scripts/templates/object/building/poi/shared_dathomir_singingmtnclanpatrol_large2.py | Python | mit | 467 | 0.047109 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Building template for the Dathomir Singing Mountain Clan
    patrol POI (large variant 2).

    The template path string was corrupted by a stray ' | ' extraction
    artifact; restored to match the .iff asset named by this module.
    """
    result = Building()
    result.template = "object/building/poi/shared_dathomir_singingmtnclanpatrol_large2.iff"
    result.attribute_template_id = -1
    result.stfName("poi_n","base_poi_building")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
SchrodingersGat/kicad-footprint-generator | scripts/Connector/Connector_Molex/conn_molex_micro-fit-3.0_tht_top_dual_row.py | Python | gpl-3.0 | 11,943 | 0.007117 | #!/usr/bin/env python
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
#sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from math import sqrt
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
# --- Series / part identification -------------------------------------------
series = "Micro-Fit_3.0"
series_long = 'Micro-Fit 3.0 Connector System'
manufacturer = 'Molex'
orientation = 'V'  # vertical (top entry)
number_of_rows = 2
# Per-variant mechanical data; C_minus_B is the peg span minus the pin span
# (dimensions from the referenced Molex drawing).
variant_params = {
    'solder_mounting':{
        'mount_pins': 'solder', # remove this
        'datasheet': 'http://www.molex.com/pdm_docs/sd/430450212_sd.pdf',
        'C_minus_B': 6,
        'part_code': "43045-{n:02}12",
        'alternative_codes': [
            "43045-{n:02}13",
            "43045-{n:02}24"
        ]
    },
}
pins_per_row_range = range(1,13)
pitch = 3.0      # [mm] contact pitch
drill = 1.02     # [mm] signal-pin drill
peg_drill = 1.02 # [mm] mounting-peg drill
pad_to_pad_clearance = 1.5 # Voltage rating is up to 600V (http://www.molex.com/pdm_docs/ps/PS-43045.pdf)
max_annular_ring = 0.5
min_annular_ring = 0.15
# Start from the largest pad that preserves the requested pad-to-pad
# clearance, then clamp each axis so the annular ring stays within
# [min_annular_ring, max_annular_ring].
pad_size = [pitch - pad_to_pad_clearance, pitch - pad_to_pad_clearance]
if pad_size[0] - drill < 2*min_annular_ring:
    pad_size[0] = drill + 2*min_annular_ring
if pad_size[0] - drill > 2*max_annular_ring:
    pad_size[0] = drill + 2*max_annular_ring
if pad_size[1] - drill < 2*min_annular_ring:
    pad_size[1] = drill + 2*min_annular_ring
if pad_size[1] - drill > 2*max_annular_ring:
    pad_size[1] = drill + 2*max_annular_ring
# A pad that ends up square is rendered as a circle instead of an oval.
pad_shape=Pad.SHAPE_OVAL
if pad_size[1] == pad_size[0]:
    pad_shape=Pad.SHAPE_CIRCLE
def generate_one_footprint(pins_per_row, variant, configuration):
mpn = variant_params[variant]['part_code'].format(n=pins_per_row*number_of_rows)
alt_mpn = [code.format(n=pins_per_row*number_of_rows) for code in variant_params[variant]['alternative_codes']]
# handle arguments
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pins_per_row, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("Molex {:s}, {:s} (compatible alternatives: {:s}), {:d} Pins per row ({:s}), generated with kicad-footprint-generator".format(series_long, mpn, ', '.join(alt_mpn), pins_per_row, variant_params[variant]['datasheet']))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
#kicad_mod.setAttribute('smd')
########################## Dimensions ##############################
B = (pins_per_row-1)*pitch
A = B + 6.65
C = B + variant_params[variant]['C_minus_B']
pad_row_1_y = 0
pad1_x = 0
peg1_x = (B-C)/2
peg2_x = (B+C)/2
peg_y = pad_row_1_y + pitch + 0.94
tab_w = 1.4
tab_l = 1.4
body_edge={
'left': (B-A)/2,
'right': (A+B)/2,
'top': -2.47+0.5
}
body_edge['bottom'] = body_edge['top'] + (7.37-0.5)
y_top_min = -2.47
chamfer={'x': 1.2, 'y': 0.63}
############################# Pads ##################################
#
# Pegs
#
kicad_mod.append(Pad(at=[peg1_x, peg_y], number="",
type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE, size=peg_drill,
drill=peg_drill, layers=Pad.LAYERS_NPTH))
kicad_mod.append(Pad(at=[peg2_x, peg_y], number="",
type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE, size=peg_drill,
drill=peg_drill, layers=Pad.LAYERS_NPTH))
#
# Add pads
#
optional_pad_params = {}
if configuration['kicad4_compatible']:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_RECT
else:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_ROUNDRECT
for row_idx in range(2):
kicad_mod.append(PadArray(
start=[pad1_x, pad_row_1_y+pitch*row_idx], initial=row_idx*pins_per_row+1,
pincount=pins_per_row, increment=1, x_spacing=pitch, size=pad_size,
type=Pad.TYPE_THT, shape=pad_shape, layers=Pad.LAYERS_THT, drill=drill,
**optional_pad_params))
######################## Fabrication Layer ###########################
main_body_poly= [
{'x': body_edge['left'] + chamfer['x'], 'y': body_edge['top']},
{'x': body_edge['left'] + chamfer['x'], 'y': y_top_min},
{'x': body_edge['left'], 'y': y_top_min},
{'x': body_edge['left'], 'y': body_edge['bottom']},
{'x': body_edge['right'], 'y': body_edge['bottom']},
{'x': body_edge['right'], 'y': y_top_min},
{'x': body_edge['right'] - chamfer['x'], 'y': y_top_min},
{'x': body_edge['right'] - chamfer['x'], 'y': body_edge['top']},
{'x': body_edge['left'] + chamfer['x'], 'y': body_edge['top']}
]
kicad_mod.append(PolygoneLine(polygone=main_body_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
kicad_mod.append(Line(
start={
'x': body_edge['left'],
'y': body_edge['top'] + chamfer['y']
},
end={
'x': body_edge['left'] + chamfer['x'],
'y': body_edge['top']
},
width=configuration['fab_line_width'], layer="F.Fab"
))
kicad_mod.append(Line(
start={
'x': body_edge['right'],
'y': body_edge['top'] + chamfer['y']
},
end={
'x': body_edge['right'] - chamfer['x'],
'y': body_edge['top']
},
width=configuration['fab_line_width'], layer="F.Fab"
))
tab_poly = [
{'x': B/2-tab_l/2, 'y': body_edge['bottom']},
{'x': B/2-tab_l/2, 'y': body_edge['bottom'] + tab_w},
{'x': B/2+tab_l/2, 'y': body_edge['bottom'] + tab_w},
{'x': B/2+tab_l/2, 'y': body_edge['bottom']},
]
kicad_mod.append(PolygoneLine(polygone=tab_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
p1m_sl = 1
p1m_poly = tab_poly = [
{'x': pad1_x - p1m_sl/2, 'y': body_edge['top']},
{'x': pad1_x, 'y': body_edge['top'] + p1m_sl/sqrt(2)},
{'x': pad1_x + p1m_sl/2, 'y': body_edge['top']}
]
kicad_mod.append(PolygoneLine(polygone=tab_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
############################ SilkS ##################################
# Top left corner
silk_pad_off = configuration['silk_pad_clearance'] + configuration['silk_line_width']/2
ymp_top = peg_y - peg_drill/2 - silk_pad_off
ymp_bottom = peg_y + peg_drill/2 + silk_pad_off
off = configuration['silk_fab_offset']
poly_s_b = [
{'x': body_edge['left'] - off, 'y': ymp_bottom},
{'x': body_edge['left'] - off, 'y': body_edge['bottom'] + off},
{'x': body_edge['right'] + off, 'y': body_edge['bottom'] + off},
{'x': body_edge['right'] + off, 'y': ymp_bottom},
]
kicad_mod.append(PolygoneLine(polygone=poly_s_b,
width=configuration['silk_line_width'], layer="F.SilkS"))
poly_s_t = [
{'x': body_edge['left'] - off, 'y': ymp_top},
{'x': body_edge['left'] - off, 'y': y_top_min - off}, |
albertoconnor/website | wordpress_importer/__init__.py | Python | mit | 74 | 0 | default_ | app_config = 'wordpress_importer.apps.WordpressImporterAppConfig' | |
sasha-gitg/python-aiplatform | google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py | Python | apache-2.0 | 35,143 | 0.001366 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers
from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport
from .client import IndexEndpointServiceClient
class IndexEndpointServiceAsyncClient:
"""A service for managing Vertex AI's IndexEndpoints."""
_client: IndexEndpointServiceClient
DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT
index_path = staticmethod(IndexEndpointServiceClient.index_path)
parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path)
index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path)
parse_index_endpoint_path = staticmethod(
IndexEndpointServiceClient.parse_index_endpoint_path
)
common_billing_account_path = staticmethod(
IndexEndpointServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
IndexEndpointServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
IndexEndpointServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
IndexEndpointServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
IndexEndpointServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path)
parse_common_project_path = staticmethod(
IndexEndpointServiceClient.parse_common_project_path
)
common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path)
parse_common_location_path = staticmethod(
IndexEndpointServiceClient.parse_common_location_path
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            IndexEndpointServiceAsyncClient: The constructed client.
        """
        # Delegate to the sync client's classmethod, rebinding its underlying
        # function to this async class so the async constructor is used.
        return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs)  # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            IndexEndpointServiceAsyncClient: The constructed client.
        """
        # Delegate to the sync client's classmethod, rebinding its underlying
        # function to this async class so the async constructor is used.
        return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

    # Convenience alias for callers holding a JSON key file path.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> IndexEndpointServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            IndexEndpointServiceTransport: The transport used by the client instance.
        """
        # The async client delegates all RPC plumbing to the wrapped sync client.
        return self._client.transport
get_transport_class = functools.partial(
type(IndexEndpointServiceClient).get_transport_class,
type(IndexEndpointServiceClient),
)
    def __init__(
        self,
        *,
        credentials: ga_credentials.Credentials = None,
        transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio",
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the index endpoint service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.IndexEndpointServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All configuration and channel setup is handled by the wrapped sync
        # client, constructed here with an async (grpc_asyncio) transport.
        self._client = IndexEndpointServiceClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )
async def create_index_endpoint(
self,
request: index_end | point_service.CreateIndexEndpointRe |
rtucker-mozilla/mozilla_inventory | mozdns/mozbind/tests/build_tests.py | Python | bsd-3-clause | 10,431 | 0.000096 | # These tests are similar to the ones in the scripts directory. They not ran on
# real data so the testing db needs to be filled with info.
import os
from django.test.client import Client
from django.test import TestCase
from mozdns.soa.models import SOA
from mozdns.domain.models import Domain
from mozdns.address_record.models import AddressRecord
from mozdns.view.models import View
from mozdns.tests.utils import random_label, random_byte
from mozdns.mozbind.builder import DNSBuilder, BuildError
from mozdns.tests.utils import create_fake_zone
from core.task.models import Task
from scripts.dnsbuilds.tests.build_tests import BuildScriptTests
class MockBuildScriptTests(BuildScriptTests, TestCase):
    def setUp(self):
        """Seed the test DB with the forward/reverse domains the DNS builder
        expects, then defer to BuildScriptTests.setUp for the directories."""
        Domain.objects.get_or_create(name="arpa")
        Domain.objects.get_or_create(name="in-addr.arpa")
        self.r1, _ = Domain.objects.get_or_create(name="10.in-addr.arpa")
        Domain.objects.get_or_create(name="com")
        Domain.objects.get_or_create(name="mozilla.com")
        # NOTE(review): 'cleint' looks like a typo for 'client'; left as-is in
        # case other code references the misspelled attribute.
        self.cleint = Client()
        super(MockBuildScriptTests, self).setUp()
        self.stop_update_file = '/tmp/fake/stop.update'
"""Return a valid set of data"""
return {
'root_domain': '{0}.{0}.mozilla.com'.format(
random_label() + random_str),
'soa_primary': 'ns1.mozilla.com',
'soa_contact': 'noc.mozilla.com',
'nameserver_1': 'ns1.mozilla.com',
'nameserver_2': 'ns2.mozilla.com',
'nameserver_3': 'ns3.mozilla.com',
'ttl_1': random_byte(),
'ttl_2': random_byte(),
'ttl_3': random_byte(),
}
    def test_build_zone(self):
        """Smoke test: repeatedly building while new zones are added between
        builds should succeed each time."""
        create_fake_zone('asdf1')
        b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
                       LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
                       FIRST_RUN=True, PUSH_TO_PROD=False,
                       STOP_UPDATE_FILE=self.stop_update_file)
        b.build_dns()
        create_fake_zone('asdf2')
        b.build_dns()
        create_fake_zone('asdf3')
        create_fake_zone('asdf4')
        b.build_dns()
        create_fake_zone('asdf5')
        b.build_dns()
def test_change_a_record(self):
root_domain = create_fake_zone('asdfz1')
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update_file)
b.build_dns() # This won't check anything in since PUSH_TO_PROD==False
self.assertEqual((26, 0), b.svn_lines_changed(b.PROD_DIR))
b.PUSH_TO_PROD = True
b.build_dns() # This checked stuff in
# no lines should have changed
b.build_dns()
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
# Now add a record.
a, c = AddressRecord.objects.get_or_create(
label='', domain=root_domain, ip_str="10.0.0.1", ip_type='4'
)
a.views.add(View.objects.get_or_create(name='private')[0])
if not c:
a.ttl = 8
a.save()
self.assertTrue(SOA.objects.get(pk=root_domain.soa.pk).dirty)
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = False # Task isn't deleted
b.build_dns() # Serial get's incrimented
self.assertEqual(
SOA.objects.get(pk=root_domain.soa.pk).serial, tmp_serial + 1
)
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
# added new record (1) and new serials (2 for both views), old serials
# removed.
self.assertEqual((3, 2), b.svn_lines_changed(b.PROD_DIR))
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
b.PUSH_TO_PROD = True
b.build_dns()
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
# Serial is again incremented because PUSH_TO_PROD was False during the
# last build. When PUSH_TO_PROD is false, no scheduled tasts are
# deleted so we should still see this soa being rebuil | t.
self.assertEqual(
SOA.objects.get(pk=root_domain.soa.pk).serial, tmp_seria | l + 1
)
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
# no lines should have changed if we would have built again
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = False
b.build_dns()
self.assertEqual(SOA.objects.get(pk=root_domain.soa.pk).serial,
tmp_serial)
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
    def test_one_file_svn_lines_changed(self):
        """svn_lines_changed reports (added, removed) counts for a single
        file as it grows and shrinks between checkins."""
        b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
                       LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
                       FIRST_RUN=True, PUSH_TO_PROD=False,
                       STOP_UPDATE_FILE=self.stop_update_file)
        test_file = os.path.join(self.prod_dir, 'test')
        with open(test_file, 'w+') as fd:
            fd.write('line 1\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((1, 0), lc)
        b.svn_checkin(lc)
        with open(test_file, 'w+') as fd:
            fd.write('line 1\nline 2\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((1, 0), lc)
        b.svn_checkin(lc)
        with open(test_file, 'w+') as fd:
            fd.write('line 1\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((0, 1), lc)
        b.svn_checkin(lc)
    def test_too_many_config_lines_changed(self):
        """The builder raises BuildError when too many zones disappear in one
        build -- a safety net against accidentally mass-deleting config."""
        create_fake_zone('asdf86')
        root_domain1 = create_fake_zone('asdf87')
        root_domain2 = create_fake_zone('asdf88')
        root_domain3 = create_fake_zone('asdf89')
        b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
                       LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
                       FIRST_RUN=True, PUSH_TO_PROD=True,
                       STOP_UPDATE_FILE=self.stop_update_file)
        b.build_dns()
        for ns in root_domain1.nameserver_set.all():
            ns.delete()
        b.build_dns()  # One zone removed should be okay
        for ns in root_domain2.nameserver_set.all():
            ns.delete()
        for ns in root_domain3.nameserver_set.all():
            ns.delete()
        self.assertRaises(BuildError, b.build_dns)
    def test_two_file_svn_lines_changed(self):
        """svn_lines_changed aggregates (added, removed) counts across
        multiple files changed between checkins."""
        b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
                       LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
                       FIRST_RUN=True, PUSH_TO_PROD=False,
                       STOP_UPDATE_FILE=self.stop_update_file)
        test1_file = os.path.join(self.prod_dir, 'test1')
        test2_file = os.path.join(self.prod_dir, 'test2')
        with open(test1_file, 'w+') as fd:
            fd.write('line 1.1\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((1, 0), lc)
        b.svn_checkin(lc)
        with open(test1_file, 'w+') as fd:
            fd.write('line 1.1\nline 1.2\n')
        with open(test2_file, 'w+') as fd:
            fd.write('line 2.1\nline 2.2\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((3, 0), lc)
        b.svn_checkin(lc)
        with open(test1_file, 'w+') as fd:
            fd.write('line 1\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((1, 2), lc)
        b.svn_checkin(lc)
        with open(test1_file, 'w+') as fd:
            fd.write('line 1.1\nline 1.2\n')
        with open(test2_file, 'w+') as fd:
            fd.write('line 2.3\nline 2.4\n')
        lc = b.svn_lines_changed(b.PROD_DIR)
        self.assertEqual((4, 3), lc)
        b.svn_checkin(lc)
def test_svn_ |
googleinterns/nlu-seq2graph | src/trainer.py | Python | apache-2.0 | 15,345 | 0.007559 | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Trainer.
Usage:
Training:
trainer --data_folder ~/workspace/seq2graph/spider \
--epochs 500 --save_path ~/workspace/seq2graph/seq2seq_savedmodel
Predicting:
trainer --save_model_path ~/workspace/seq2graph/seq2seq_savedmodel \
--predict ~/workspace/seq2graph/spider/train.record \
--predict_output ~/tmp/seq2seq_train.txt
"""
from absl import app
from absl import flags
from absl import logging
import os
import json
import sys
import shutil
import time
import tensorflow as tf
from vocabulary import Vocabulary
from dataset import build_dataset
from training_utils import NoamSchedule
from training_utils import SequenceLoss
from transformer import Transformer
# Command-line flags controlling training and prediction runs.
flags.DEFINE_string("data_spec", None, "Path to training data spec.")
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_integer("model_dim", 128, "Model dim.")
flags.DEFINE_integer("epochs", 10, "Num of epochs.")
flags.DEFINE_integer("beam_size", 1, "beam size.")
flags.DEFINE_float("dropout", 0.2, "Dropout rate.")
flags.DEFINE_string("save_model_path", None, "Save model path.")
flags.DEFINE_string(
    "predict", None,
    "Init model from save_model_path and run prediction on the data set,")
flags.DEFINE_string("predict_output", None, "Prediction output.")
flags.DEFINE_bool("eager_run", False, "Run in eager mode for debugging.")
# Module-level flag handle used throughout this file.
FLAGS = flags.FLAGS
@tf.function
def process_one_batch(model,
                      loss_fn,
                      examples,
                      tgt_vocab,
                      optimizer,
                      token_accuracy,
                      exact_accuracy,
                      is_train=True):
  """Runs one forward (and, when is_train, backward) pass over a batch.

  Updates token_accuracy / exact_accuracy in place and returns
  (loss, predictions, beam_predictions); beam_predictions is None unless
  beam search ran (FLAGS.beam_size > 1 and not is_train, in which case
  loss is 0 and no gradients are applied).

  NOTE(review): assumes examples["tgt_token_ids"] is an int tensor of shape
  [batch_sz, max_num_tgt_tokens] (per the comment below) -- confirm against
  the dataset builder.
  """
  with tf.GradientTape() as tape:
    # Shape: [batch_sz, max_num_tgt_tokens, tgt_vocab_size + max_num_src_tokens]
    tgt_token_ids = examples["tgt_token_ids"]
    predictions = None
    beam_predictions = None
    if FLAGS.beam_size > 1 and is_train is False:
      # Beam-search decoding (eval only): pick, per example, the beam whose
      # score at its first EOS (or last position if no EOS) is highest.
      beam_predictions, scores = model(examples,
                                       beam_size=FLAGS.beam_size,
                                       is_train=is_train)
      eos_index = tf.cast(tf.equal(beam_predictions,
                                   tgt_vocab.token2idx[tgt_vocab.EOS]),
                          dtype=tf.int32)
      last_index = tf.cast(tf.equal(tf.range(beam_predictions.shape[2]),
                                    beam_predictions.shape[2] - 1),
                           dtype=tf.int32)
      # Beams with no EOS fall back to scoring at the final position.
      non_eos_index = 1 - tf.reduce_sum(eos_index, axis=-1)
      non_eos_index = tf.expand_dims(non_eos_index, 2) * tf.expand_dims(
          last_index, 0)
      eos_index += non_eos_index
      eos_scores = tf.reshape(
          tf.boolean_mask(scores, tf.cast(eos_index, dtype=tf.bool)),
          [-1, FLAGS.beam_size])
      _, best_scores_index = tf.math.top_k(eos_scores, k=1)
      predictions = tf.cast(tf.squeeze(
          tf.gather_nd(beam_predictions,
                       tf.expand_dims(best_scores_index, 2),
                       batch_dims=1), 1),
                            dtype=tf.int64)
      loss = 0
    else:
      # Greedy/teacher-forced path: per-token argmax over the logits.
      prediction_logits = model(examples, is_train=is_train)
      loss = loss_fn(prediction_logits, tgt_token_ids)
      predictions = tf.argmax(prediction_logits, axis=-1)
  if is_train:
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
  # Shape: [batch_sz, max_num_tgt_tokens]
  # PAD positions are masked out of the token-level accuracy.
  oov_mask = tf.not_equal(tgt_token_ids, tgt_vocab.token2idx[tgt_vocab.PAD])
  token_accuracy.update_state(tgt_token_ids,
                              predictions,
                              sample_weight=tf.cast(oov_mask,
                                                    dtype=tf.float32))
  # An example counts as an exact match when every non-PAD token is correct.
  exact_match = tf.cast(
      tf.reduce_all(tf.logical_or(tf.equal(tgt_token_ids, predictions),
                                  tf.logical_not(oov_mask)),
                    axis=1), tf.float32)
  exact_accuracy.update_state(tf.ones_like(exact_match), exact_match)
  return loss, predictions, beam_predictions
def train(model_type, epochs, train_set, dev_set, src_vocab, tgt_vocab, hparams,
save_model_path):
optimizer = tf.keras.optimizers.Adam(
learning_rate=NoamSchedule(FLAGS.model_dim))
model = model_type(src_vocab, tgt_vocab, hparams)
loss_fn = SequenceLoss(tgt_vocab)
if save_model_path:
try:
shutil.rmtree(save_model_path)
except:
| pass
os.mkdir(save_model_path)
best_token_accuracy_train = 0
best_token_accuracy_train_epoch = 0
best_token_accuracy_dev = 0
best_token_accuracy_dev_epoch = 0
for epoch in range(epochs):
train_start = time.time()
total_train_loss = 0
token_accuracy_train = tf.keras.metrics.Accuracy()
exact_accuracy_train = tf.keras.metrics.Accuracy()
for batch, examples in enumerate(train_set):
batch_loss, _, _ = process_one_batch(model, loss_fn, examples, tgt_vocab,
optimizer, token_accuracy_train,
exact_accuracy_train)
total_train_loss += batch_loss
if batch % 100 == 0:
logging.info("Epoch {} Batch {} Loss {:.4f}".format(
epoch + 1, batch, batch_loss.numpy()))
num_train_batch = batch + 1
dev_start = time.time()
total_dev_loss = 0
token_accuracy_dev = tf.keras.metrics.Accuracy()
exact_accuracy_dev = tf.keras.metrics.Accuracy()
for batch, examples in enumerate(dev_set):
batch_loss, _, _ = process_one_batch(model,
loss_fn,
examples,
tgt_vocab,
optimizer,
token_accuracy_dev,
exact_accuracy_dev,
is_train=False)
total_dev_loss += batch_loss
# break
num_dev_batch = batch + 1
epoch_finish = time.time()
token_accuracy_train_val = token_accuracy_train.result().numpy()
exact_accuracy_train_val = exact_accuracy_train.result().numpy()
token_accuracy_dev_val = token_accuracy_dev.result().numpy()
exact_accuracy_dev_val = exact_accuracy_dev.result().numpy()
if best_token_accuracy_train < token_accuracy_train_val:
best_token_accuracy_train = token_accuracy_train_val
best_token_accuracy_train_epoch = epoch + 1
write_model = False
if best_token_accuracy_dev < token_accuracy_dev_val:
best_token_accuracy_dev = token_accuracy_dev_val
best_token_accuracy_dev_epoch = epoch + 1
write_model = True
message = " ".join([
"Epoch {} Train Loss {:.4f} TokenAcc {:.4f} ExactMatch {:.4f}".format(
epoch + 1, total_train_loss / num_train_batch,
token_accuracy_train_val, exact_accuracy_train_val),
"best {:.4f}@{}".format(best_token_accuracy_train,
best_token_accuracy_train_epoch),
"Dev Loss {:.4f} TokenAcc {:.4f} ExactMatch {:.4f}".format(
total_dev_loss / num_dev_batch, token_accuracy_dev_val,
exact_accuracy_dev_val),
"best {:.4f}@{}".format(best_token_accuracy_dev,
best_token_accuracy_dev_epoch)
])
logging.info(message)
if save_model_path:
with open(os.path.join(save_model_path, "log"), "a") as log_f:
print(message, file=log_f)
if write_model:
if save_model_path:
src_vocab.save(os.path.join(save_model_path, |
danleyb2/Instagram-API | InstagramAPI/src/http/Response/Objects/Comment.py | Python | mit | 1,671 | 0 | from .User import User
class Comment(object):
def __init__(self, commentData):
self.status = None
self.username_id = None
self.created_at_utc = None
self.created_at = None
self.b | it_flags = None
self.user = None
self.comment = None
self.pk = None
self.type = None
self.media_id = None
self.status = commentData['status']
if 'user_id' in commentData and commentData['user_id']:
self.username_id = commentData['user_id']
self.created_at_utc = commentData['created_at_utc']
self.created_at = comme | ntData['created_at']
if 'bit_flags' in commentData and commentData['bit_flags']:
self.bit_flags = commentData['bit_flags']
self.user = User(commentData['user'])
self.comment = commentData['text']
self.pk = commentData['pk']
if 'type' in commentData and commentData['type']:
self.type = commentData['type']
if 'media_id' in commentData and commentData['media_id']:
self.media_id = commentData['media_id']
def getStatus(self):
return self.status
def getUsernameId(self):
return self.username_id
def getCreatedAtUtc(self):
return self.created_at_utc
def created_at(self):
return self.created_at
def getBitFlags(self):
return self.bit_flags
def getUser(self):
return self.user
def getComment(self):
return self.comment
def getCommentId(self):
return self.pk
def getType(self):
return self.type
def getMediaId(self):
return self.media_id
|
smithchristian/arcpy-create-chainages | main.py | Python | mit | 2,746 | 0.005462 | #-------------------------------------------------------------------------------
# Name: Main.py
# Purpose: This script creates chainages from a single or mutile line
#
# Author: smithc5
#
# Created: 10/02/2015
# Copyright: (c) smithc5 2015
# Licence: <your licence>
#------------------------------------------------------------------------------
import os
import arcpy
import sys
import traceback
from modules import create_chainages
source_align_location = arcpy.GetParameterAsText(0)
# Variable to store the location of the original | source alignment.
database_locatio | n = arcpy.GetParameterAsText(1)
# Variable to store the location where the database is created to store the.
# feature classes.
chainage_distance = arcpy.GetParameterAsText(2)
new_fc_name = os.path.basename(source_align_location[:-4])
# New name for the copied feature class. Original name minus file extension
database_name = "{}.gdb".format(new_fc_name)
# Variable to store the name of the .gdb to store the feature classes.
DATABASE_FLIEPATH = os.path.join(database_location, database_name)
new_fc_filepath = os.path.join(DATABASE_FLIEPATH, new_fc_name)
# New file path to the copied feature class
new_fc_filepath_with_m = "{0}_M".format(new_fc_filepath)
# New file path to the copied feature class
chainage_feature_class = "{0}_Chainages".format(new_fc_filepath)
# This is the output feature class to store the chainages.
def main():
try:
create_chainages.check_if_gdb_exist(DATABASE_FLIEPATH)
create_chainages.create_gdb(database_location, database_name)
create_chainages.copy_features(source_align_location, new_fc_filepath)
create_chainages.create_route(new_fc_filepath, "Name", new_fc_filepath_with_m)
create_chainages.create_chainages(new_fc_filepath_with_m, chainage_distance,
database_location, new_fc_filepath_with_m,
DATABASE_FLIEPATH, chainage_feature_class)
except:
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
pymsg = "PYTHON ERRORS:\nTraceback Info:\n{0}\nError Info:\n {1}: {2}\n".format(tbinfo,
str(sys.exc_type),
str(sys.exc_value))
msgs = "ARCPY ERRORS:\n{}\n".format(arcpy.GetMessages(2))
arcpy.AddError(msgs)
arcpy.AddError(pymsg)
print msgs
print pymsg
arcpy.AddMessage(arcpy.GetMessages(1))
print arcpy.GetMessages(1)
if __name__ == '__main__':
main()
|
cysuncn/python | spark/crm/PROC_A_SUBJECT_D002015.py | Python | gpl-3.0 | 3,179 | 0.015479 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_SUBJECT_D002015').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#处理需要使用的日期
etl_date = sys.argv[1]
#etl日期
V_DT = etl_date
#上一日日期
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#月初日期
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#上月末日期
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10位日期
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
ACRM_F_CI_ASSET_BUSI_PROTO = sqlContext.read.parquet(hdfs+'/ACRM_F_CI_ASSET_BUSI_PROTO/*')
ACRM_F_CI_ASSET_BUSI_PROTO.registerTempTable("ACRM_F_CI_ASSET_BUSI_PROTO")
#任务[21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT CAST(A.CUST_ID AS VARCHAR(32)) AS CUST_ID
,CAST('' AS VARCHAR(20)) AS ORG_ID --插入的空值,包顺龙2017/05/13
,CAST('D002015' AS VARCHAR(20)) AS INDEX_CODE
,CAST(SUM(TAKE_CGT_LINE) AS DECIMAL(22,2)) AS INDEX_VALUE
,CAST(SUBSTR(V_DT, 1, 7) AS VARCHAR(7)) AS YEAR_MONTH
,CAST(V_DT AS DATE) AS ETL_DATE
,CAST(A.CUST_T | YP AS VARCHAR(5)) AS CUST_TYPE
,CAST(A.FR_ID AS VARCHAR(5)) AS FR_ID
FROM ACRM_F_CI_ASSET_BUSI | _PROTO A
WHERE A.BAL > 0
AND A.LN_APCL_FLG = 'N'
AND(A.PRODUCT_ID LIKE '1010%'
OR A.PRODUCT_ID LIKE '1030%'
OR A.PRODUCT_ID LIKE '1040%'
OR A.PRODUCT_ID LIKE '1050%'
OR A.PRODUCT_ID LIKE '1060%'
OR A.PRODUCT_ID LIKE '1070%'
OR A.PRODUCT_ID LIKE '2010%'
OR A.PRODUCT_ID LIKE '2020%'
OR A.PRODUCT_ID LIKE '2030%'
OR A.PRODUCT_ID LIKE '2040%'
OR A.PRODUCT_ID LIKE '2050%')
GROUP BY A.CUST_ID
,A.CUST_TYP
,A.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_TARGET_D002015 = sqlContext.sql(sql)
ACRM_A_TARGET_D002015.registerTempTable("ACRM_A_TARGET_D002015")
dfn="ACRM_A_TARGET_D002015/"+V_DT+".parquet"
ACRM_A_TARGET_D002015.cache()
nrows = ACRM_A_TARGET_D002015.count()
ACRM_A_TARGET_D002015.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_TARGET_D002015.unpersist()
ACRM_F_CI_ASSET_BUSI_PROTO.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_TARGET_D002015/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_TARGET_D002015 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
|
mick-d/nipype | nipype/interfaces/ants/utils.py | Python | bsd-3-clause | 11,113 | 0.00189 | # -*- coding: utf-8 -*-
"""ANTS Apply Transforms interface
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os
from ..base import TraitedSpec, File, traits, InputMultiPath
from .base import ANTSCommand, ANTSCommandInputSpec
class AverageAffineTransformInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True,
position=0, desc='image dimension (2 or 3)')
output_affine_transform = File(argstr='%s', mandatory=True, position=1,
desc='Outputfname.txt: the name of the resulting transform.')
transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True,
position=3, desc='transforms to average')
class AverageAffineTransformOutputSpec(TraitedSpec):
affine_transform = File(exists=True, desc='average transform file')
class AverageAffineTransform(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import AverageAffineTransform
>>> avg = AverageAffineTransform()
>>> avg.inputs.dimension = 3
>>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat']
>>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat'
>>> avg.cmdline # doctest: +ALLOW_UNICODE
'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat'
"""
_cmd = 'AverageAffineTransform'
input_spec = AverageAffineTransformInputSpec
output_spec = AverageAffineTransformOutputSpec
def _format_arg(self, opt, spec, val):
return super(AverageAffineTransform, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['affine_transform'] = os.path.abspath(
self.inputs.output_affine_transform)
return outputs
class AverageImagesInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', mandatory=True,
position=0, desc='image dimension (2 or 3)')
output_average_image = File(
"average.nii", argstr='%s', position=1, usedefault=True, hash_files=False,
desc='the name of the resulting image.')
normalize = traits.Bool(
argstr="%d", mandatory=True, position=2,
desc='Normalize: if true, the 2nd image is divided by its mean. '
'This will select the largest image to average into.')
images = InputMultiPath(
File(exists=True), argstr='%s', mandatory=True, position=3,
desc='image to apply transformation to (generally a coregistered functional)')
class AverageImagesOutputSpec(TraitedSpec):
output_average_image = File(exists=True, desc='average image file')
class AverageImages(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import AverageImages
>>> avg = AverageImages()
>>> avg.inputs.dimension = 3
>>> avg.inputs.output_average_image = "average.nii.gz"
>>> avg.inputs.normalize = True
>>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii']
>>> avg.cmdline # doctest: +ALLOW_UNICODE
'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii'
"""
_cmd = 'AverageImages'
input_spec = AverageImagesInputSpec
output_spec = AverageImagesOutputSpec
def _format_arg(self, opt, spec, val):
return super(AverageImages, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_average_image'] = os.path.realpath(
self.inputs.output_average_image)
return outputs
class MultiplyImagesInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0,
desc='image dimension (2 or 3)')
first_input = File(argstr='%s', exists=True,
mandatory=True, position=1, desc='image 1')
second_input = traits.Either(
File(exists=True), traits.Float, argstr='%s', mandatory=True, position=2,
desc='image 2 or multiplication weight')
output_product_image = File(argstr='%s', mandatory=True, position=3,
desc='Outputfname.nii.gz: the name of the resulting image.')
class MultiplyImagesOutputSpec(TraitedSpec):
output_product_image = File(exists=True, desc='average image file')
class MultiplyImages(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import MultiplyImages
>>> test = MultiplyImages()
>>> test.inputs.dimension = 3
>>> test.inputs.first_input = 'moving2.nii'
>>> test.inputs.second_input = 0.25
>>> test.inputs.output_product_image = "out.nii"
>>> test.cmdline # doctest: +ALLOW_UNICODE
'MultiplyImages 3 moving2.nii 0.25 out.nii'
"""
_cmd = 'MultiplyImages'
input_spec = MultiplyImagesInputSpec
output_spec = MultiplyImagesOutputSpec
def _format_arg(self, opt, spec, val):
return super(MultiplyImages, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_product_image'] = os.path.abspath(
self.inputs.output_product_image)
return outputs
class CreateJacobianDeterminantImageInputSpec(ANTSCommandInputSpec):
imageDimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True,
position=0, desc='image dimension (2 or 3)')
deformationField = File(argstr='%s', exists=True, mandatory=True,
position=1, desc='deformation transformation file')
outputImage = File(argstr='%s', mandatory=True,
position=2,
desc='output filename')
doLogJacobian = traits.Enum(0, 1, argstr='%d', position=3,
desc='return the log jacobian')
useGeometric = traits.Enum(0, 1, argstr='%d', position= | 4,
desc='return the geometric jacobian')
class CreateJacobianDeterminantImageOutputSpec(TraitedSpec):
jacobian_image = File(exists=True, desc='jacobian image')
class CreateJacobianDeterminantImage(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import CreateJacobianDeterminantImage
>>> jacobian = | CreateJacobianDeterminantImage()
>>> jacobian.inputs.imageDimension = 3
>>> jacobian.inputs.deformationField = 'ants_Warp.nii.gz'
>>> jacobian.inputs.outputImage = 'out_name.nii.gz'
>>> jacobian.cmdline # doctest: +ALLOW_UNICODE
'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz'
"""
_cmd = 'CreateJacobianDeterminantImage'
input_spec = CreateJacobianDeterminantImageInputSpec
output_spec = CreateJacobianDeterminantImageOutputSpec
def _format_arg(self, opt, spec, val):
return super(CreateJacobianDeterminantImage, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['jacobian_image'] = os.path.abspath(
self.inputs.outputImage)
return outputs
class AffineInitializerInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, usedefault=True, position=0, argstr='%s',
desc='dimension')
fixed_image = File(exists=True, mandatory=True, position=1, argstr='%s',
desc='reference image')
moving_image = File(exists=True, mandatory=True, position=2, argstr='%s',
desc='moving image')
out_file = File('transform.mat', usedefault=True, position=3, argstr='%s',
desc='output transform file')
# Defaults in antsBrainExtraction.sh -> 15 0.1 0 10
search_factor = traits.Float(15.0, usedefault=True, position=4, argstr='%f',
desc='increments (degrees) for affine search')
|
DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py | Python | mit | 9,206 | 0.000978 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def BatchMatMul(a, b):
# A numpy implementation of tf.batch_matmul().
if a.ndim < 3:
return n | p.dot(a, b)
# Get the number of matrices.
n | = np.prod(a.shape[:-2])
assert n == np.prod(b.shape[:-2])
a_flat = np.reshape(a, tuple([n]) + a.shape[-2:])
b_flat = np.reshape(b, tuple([n]) + b.shape[-2:])
c_flat_shape = [n, a.shape[-2], b.shape[-1]]
c_flat = np.empty(c_flat_shape)
for i in range(n):
c_flat[i, :, :] = np.dot(a_flat[i, :, :], b_flat[i, :, :])
return np.reshape(c_flat, a.shape[:-1] + b_flat.shape[-1:])
def BatchRegularizedLeastSquares(matrices, rhss, l2_regularization=0.0):
# A numpy implementation of regularized least squares solver using
# the normal equations.
matrix_dims = matrices.shape
matrices_transposed = np.swapaxes(matrices, -2, -1)
rows = matrix_dims[-2]
cols = matrix_dims[-1]
if rows >= cols:
preconditioner = l2_regularization * np.identity(cols)
gramian = BatchMatMul(matrices_transposed, matrices) + preconditioner
inverse = np.linalg.inv(gramian)
left_pseudo_inverse = BatchMatMul(inverse, matrices_transposed)
return BatchMatMul(left_pseudo_inverse, rhss)
else:
preconditioner = l2_regularization * np.identity(rows)
gramian = BatchMatMul(matrices, matrices_transposed) + preconditioner
inverse = np.linalg.inv(gramian)
right_pseudo_inverse = BatchMatMul(matrices_transposed, inverse)
return BatchMatMul(right_pseudo_inverse, rhss)
class MatrixSolveLsOpTest(tf.test.TestCase):
def _verifySolve(self, x, y):
for np_type in [np.float32, np.float64]:
a = x.astype(np_type)
b = y.astype(np_type)
np_ans, _, _, _ = np.linalg.lstsq(a, b)
for fast in [True, False]:
with self.test_session():
tf_ans = tf.matrix_solve_ls(a, b, fast=fast).eval()
self.assertEqual(np_ans.shape, tf_ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, tf_ans)
tf_r_norm = np.sum(tf_r * tf_r)
np_r = b - BatchMatMul(a, np_ans)
np_r_norm = np.sum(np_r * np_r)
self.assertAllClose(np_r_norm, tf_r_norm)
# Check solution.
if fast or a.shape[0] >= a.shape[1]:
# We skip this test for the underdetermined case when using the
# slow path, because Eigen does not return a minimum norm solution.
# TODO(rmlarsen): Enable this check for all paths if/when we fix
# Eigen's solver.
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def _verifySolveBatch(self, x, y):
# Since numpy.linalg.lsqr does not support batch solves, as opposed
# to numpy.linalg.solve, we just perform this test for a fixed batch size
# of 2x3.
for np_type in [np.float32, np.float64]:
a = np.tile(x.astype(np_type), [2, 3, 1, 1])
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]])
for dim1 in range(2):
for dim2 in range(3):
np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq(
a[dim1, dim2, :, :], b[dim1, dim2, :, :])
for fast in [True, False]:
with self.test_session():
tf_ans = tf.batch_matrix_solve_ls(a, b, fast=fast).eval()
self.assertEqual(np_ans.shape, tf_ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, tf_ans)
tf_r_norm = np.sum(tf_r * tf_r)
np_r = b - BatchMatMul(a, np_ans)
np_r_norm = np.sum(np_r * np_r)
self.assertAllClose(np_r_norm, tf_r_norm)
# Check solution.
if fast or a.shape[-2] >= a.shape[-1]:
# We skip this test for the underdetermined case when using the
# slow path, because Eigen does not return a minimum norm solution.
# TODO(rmlarsen): Enable this check for all paths if/when we fix
# Eigen's solver.
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def _verifyRegularized(self, x, y, l2_regularizer):
for np_type in [np.float32, np.float64]:
# Test with a single matrix.
a = x.astype(np_type)
b = y.astype(np_type)
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
tf_ans = tf.matrix_solve_ls(a,
b,
l2_regularizer=l2_regularizer,
fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
# Test with a 2x3 batch of matrices.
a = np.tile(x.astype(np_type), [2, 3, 1, 1])
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
tf_ans = tf.batch_matrix_solve_ls(a,
b,
l2_regularizer=l2_regularizer,
fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def testSquare(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testOverdetermined(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.], [5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testUnderdetermined(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2., 3], [4., 5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session():
matrix = tf.constant([[1., 0.], [0., 1.]])
rhs = tf.constant([[1., 0.]])
with self.assertRaises(ValueError):
tf.matrix_solve_ls(matrix, rhs)
with self.assertRaises(ValueError):
tf.batch_matrix_solve_ls(matrix, rhs)
def testEmpty(self):
full = np.array([[1., 2.], [3., 4.], [5., 6.]])
empty0 = np.empty([3, 0])
empty1 = np.empty([0, 2])
for fast in [True, False]:
with self.test_session():
|
philterphactory/prosthetic-runner | base_prosthetic.py | Python | mit | 3,132 | 0.008301 | # Copyright (C) 2011 Philter Phactory Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X
# CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Philter Phactory Ltd. shall
# not be used in advertising or otherwise to promote the sale, use or other
# dealings in this Software without prior written authorization from Philter
# Phactory Ltd..
#
import datetime
import random
import re
import logging
try: from django.utils import simplejson as json
except ImportError: import json
class Prosthetic(object):
"""Base class for all prosthetics. Implements some defaults - prosthetics
run once every 2 hours, and do nothing. They also have no post-oauth
callback view, so anyone attaching a ptk will just be sent to the 'success'
page.
"""
def __init__(self, token):
self.token = token
def get(self, path, params = {}):
return self.token.get_json( path, params )
def post(self, path, params = {}):
return self.token.post( path, params )
# this will be called every run. return True if you did something
def act(self, force=False):
return False
def post_oauth_callback(self):
return None
# run throttle
@classmethod
def time_between_runs(cls):
# default to four hours
return 3600 * 4
def persist_state(f):
"""If your ptk wants state, and you don't want to manage it yourself,
use this as a decorator on your 'act' implementation. It'll store JSON in
the 'data' property of the weavr token, presenting it as the 'state' property
of the prosthetic instance.
@persist_state
def act(self, force):
self.state | ["foo"] = "bar"
return "persisted state!"
"""
def wrap(self, *args, **argv):
if self.token.data:
self.state = json.loads(self.token.data)
else:
self.state = {}
| try:
ret = f(self, *args, **argv)
finally:
self.token.data = json.dumps(self.state)
# we can rely on the runner to save the token
return ret
return wrap
|
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/test_property.py | Python | gpl-3.0 | 8,501 | 0.002823 | # Test case for property
# more tests are in test_descr
import sys
import unittest
class PropertyBase(Exception):
pass
class PropertyGet(PropertyBase):
pass
class PropertySet(PropertyBase):
pass
class PropertyDel(PropertyBase):
pass
class BaseClass(object):
def __init__(self):
self._spam = 5
@property
def spam(self):
"""BaseClass.getter"""
return self._spam
@spam.setter
def spam(self, value):
self._spam = value
@spam.deleter
def spam(self):
del self._spam
class SubClass(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""SubClass.getter"""
raise PropertyGet(self._spam)
@spam.setter
def spam(self, value):
raise PropertySet(self._spam)
@spam.deleter
def spam(self):
raise PropertyDel(self._spam)
class PropertyDocBase(object):
_spam = 1
def _get_spam(self):
return self._spam
spam = property(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
@Prope | rtyDocBase.spam.getter
def spam(self):
"""The decorator does not use this doc string"""
return self._spam
class PropertySubNewGetter(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""new docstring"""
return 5
class PropertyNewGetter(object):
@property
def spam(self):
"""original docstring"""
return 1
@spam.getter
def spam(self):
"""new docstring"""
return 8
class Proper | tyWritableDoc(object):
@property
def spam(self):
"""Eggs"""
return "eggs"
class PropertyTests(unittest.TestCase):
def test_property_decorator_baseclass(self):
# see #1620
base = BaseClass()
self.assertEqual(base.spam, 5)
self.assertEqual(base._spam, 5)
base.spam = 10
self.assertEqual(base.spam, 10)
self.assertEqual(base._spam, 10)
delattr(base, "spam")
self.assertTrue(not hasattr(base, "spam"))
self.assertTrue(not hasattr(base, "_spam"))
base.spam = 20
self.assertEqual(base.spam, 20)
self.assertEqual(base._spam, 20)
def test_property_decorator_subclass(self):
# see #1620
sub = SubClass()
self.assertRaises(PropertyGet, getattr, sub, "spam")
self.assertRaises(PropertySet, setattr, sub, "spam", None)
self.assertRaises(PropertyDel, delattr, sub, "spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_subclass_doc(self):
sub = SubClass()
self.assertEqual(sub.__class__.spam.__doc__, "SubClass.getter")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_baseclass_doc(self):
base = BaseClass()
self.assertEqual(base.__class__.spam.__doc__, "BaseClass.getter")
def test_property_decorator_doc(self):
base = PropertyDocBase()
sub = PropertyDocSub()
self.assertEqual(base.__class__.spam.__doc__, "spam spam spam")
self.assertEqual(sub.__class__.spam.__doc__, "spam spam spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_getter_doc_override(self):
newgettersub = PropertySubNewGetter()
self.assertEqual(newgettersub.spam, 5)
self.assertEqual(newgettersub.__class__.spam.__doc__, "new docstring")
newgetter = PropertyNewGetter()
self.assertEqual(newgetter.spam, 8)
self.assertEqual(newgetter.__class__.spam.__doc__, "new docstring")
def test_property___isabstractmethod__descriptor(self):
for val in (True, False, [], [1], '', '1'):
class C(object):
def foo(self):
pass
foo.__isabstractmethod__ = val
foo = property(foo)
self.assertIs(C.foo.__isabstractmethod__, bool(val))
# check that the property's __isabstractmethod__ descriptor does the
# right thing when presented with a value that fails truth testing:
class NotBool(object):
def __bool__(self):
raise ValueError()
__len__ = __bool__
with self.assertRaises(ValueError):
class C(object):
def foo(self):
pass
foo.__isabstractmethod__ = NotBool()
foo = property(foo)
C.foo.__isabstractmethod__
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_builtin_doc_writable(self):
p = property(doc='basic')
self.assertEqual(p.__doc__, 'basic')
p.__doc__ = 'extended'
self.assertEqual(p.__doc__, 'extended')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_doc_writable(self):
sub = PropertyWritableDoc()
self.assertEqual(sub.__class__.spam.__doc__, 'Eggs')
sub.__class__.spam.__doc__ = 'Spam'
self.assertEqual(sub.__class__.spam.__doc__, 'Spam')
# Issue 5890: subclasses of property do not preserve method __doc__ strings
class PropertySub(property):
"""This is a subclass of property"""
class PropertySubSlots(property):
"""This is a subclass of property that defines __slots__"""
__slots__ = ()
class PropertySubclassTests(unittest.TestCase):
def test_slots_docstring_copy_exception(self):
try:
class Foo(object):
@PropertySubSlots
def spam(self):
"""Trying to copy this docstring will raise an exception"""
return 1
except AttributeError:
pass
else:
raise Exception("AttributeError not raised")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_docstring_copy(self):
class Foo(object):
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return 1
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_setter_copies_getter_docstring(self):
class Foo(object):
def __init__(self): self._spam = 1
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return self._spam
@spam.setter
def spam(self, value):
"""this docstring is ignored"""
self._spam = value
foo = Foo()
self.assertEqual(foo.spam, 1)
foo.spam = 2
self.assertEqual(foo.spam, 2)
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
class FooSub(Foo):
@Foo.spam.setter
def spam(self, value):
"""another ignored docstring"""
self._spam = 'eggs'
foosub = FooSub()
self.assertEqual(foosub.spam, 1)
foosub.spam = 7
self.assertEqual(foosub.spam, 'eggs')
self.assertEqual(
FooSub.spam.__doc__,
"spam wrapped in property subclass")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_new_getter_new_docstring(self):
class Foo(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
@spam.getter
def spam(self):
"""a new docstring"""
return 2
self.assertEqual(Foo.spam.__doc__, "a new docstring")
class FooBase(object):
@PropertySub
def spam(self):
|
nikdoof/test-auth | app/eve_api/models/corporation.py | Python | bsd-3-clause | 3,689 | 0.002982 | from django.db import models
from django.contrib.auth.models import Group
from gargoyle import gargoyle
from eve_api.models import EVEAPIModel
from eve_api.app_defines import *
class EVEPlayerCorporation(EVEAPIModel):
    """
    Represents a player-controlled corporation. Updated from a mixture of
    the alliance and corporation API pullers.
    """
    name = models.CharField(max_length=255, blank=True, null=True)
    ticker = models.CharField(max_length=15, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    url = models.URLField(verify_exists=False, blank=True, null=True)
    ceo_character = models.ForeignKey('eve_api.EVEPlayerCharacter', blank=True, null=True)
    alliance = models.ForeignKey('eve_api.EVEPlayerAlliance', blank=True, null=True)
    alliance_join_date = models.DateField(blank=True, null=True)
    tax_rate = models.FloatField(blank=True, null=True)
    member_count = models.IntegerField(blank=True, null=True)
    shares = models.IntegerField(blank=True, null=True)

    # Logo generation stuff
    logo_graphic_id = models.IntegerField(blank=True, null=True)
    logo_shape1 = models.IntegerField(blank=True, null=True)
    logo_shape2 = models.IntegerField(blank=True, null=True)
    logo_shape3 = models.IntegerField(blank=True, null=True)
    logo_color1 = models.IntegerField(blank=True, null=True)
    logo_color2 = models.IntegerField(blank=True, null=True)
    logo_color3 = models.IntegerField(blank=True, null=True)

    # Auth group associated with this corporation, if any.
    group = models.ForeignKey(Group, blank=True, null=True)

    @property
    def directors(self):
        """ Return a queryset of corporate Directors """
        return self.eveplayercharacter_set.filter(roles__name="roleDirector")

    @property
    def api_keys(self):
        """ Returns the number of characters with stored API keys """
        return self.eveplayercharacter_set.filter(eveaccount__isnull=False).distinct().count()

    @property
    def active_api_keys(self):
        """ Returns the number of characters with stored and active API keys """
        return self.eveplayercharacter_set.filter(eveaccount__isnull=False, eveaccount__api_status=API_STATUS_OK).distinct().count()

    @property
    def director_api_keys(self):
        """Queryset of directors holding a usable (corp/full) active API key.

        With the 'eve-cak' feature switch on, corporation-scoped keys are
        also accepted; otherwise only full keys qualify.
        """
        if gargoyle.is_active('eve-cak'):
            return self.directors.filter(eveaccount__isnull=False, eveaccount__api_keytype__in=[API_KEYTYPE_CORPORATION, API_KEYTYPE_FULL], eveaccount__api_status=API_STATUS_OK)
        else:
            return self.directors.filter(eveaccount__isnull=False, eveaccount__api_keytype=API_KEYTYPE_FULL, eveaccount__api_status=API_STATUS_OK)

    @property
    def api_key_coverage(self):
        """ Returns the percentage coverage of API keys for the corporation's members """
        # Check if we have a full director key, see if we can base our assumptions off what is in auth already
        if self.director_api_keys.count():
            membercount = self.eveplayercharacter_set.count()
        else:
            # Fall back to the member count reported by the EVE API.
            membercount = self.member_count
        if self.api_keys and membercount:
            return (float(self.api_keys) / membercount) * 100
        else:
            return float(0.0)

    @property
    def average_sp(self):
        """Average total skill points across known member characters."""
        return self.eveplayercharacter_set.aggregate(models.Avg('total_sp'))['total_sp__avg']

    @models.permalink
    def get_absolute_url(self):
        return ('eveapi-corporation', [self.pk])

    class Meta:
        app_label = 'eve_api'
        verbose_name = 'Corporation'
        verbose_name_plural = 'Corporations'

    def __unicode__(self):
        if self.name:
            return self.name
        else:
            return u"Corp #%d" % self.id
|
jainanisha90/WeVoteServer | elected_office/urls.py | Python | mit | 1,030 | 0.007767 | # elected_office/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import url
from . import views_admin
urlpatterns = [
# views_admin
url(r'^$', views_admin.elected_office_list_view, name='elected_office_list', ),
url(r'^delete/$', views_admin.elected_office_delete_process_view, name='elected_office_delete_process'),
url(r'^edit_process/$', views_admin.elected_office_edit_process_view, name='elected_office_edit_process'),
url(r'^new/$', views_admin.elected_office_new_view, name='elected_office_new'),
url(r'^update/$', views_admin.elected_off | ice_update_view, name='elected_office_update'),
url(r'^real_time | _status/$', views_admin.elected_office_update_status, name='elected_office_update_status'),
url(r'^(?P<elected_office_id>[0-9]+)/edit/$', views_admin.elected_office_edit_view, name='elected_office_edit'),
url(r'^(?P<elected_office_id>[0-9]+)/summary/$', views_admin.elected_office_summary_view,
name='elected_office_summary'),
]
|
jeremiah-c-leary/vhdl-style-guide | vsg/rules/instantiation/rule_024.py | Python | gpl-3.0 | 526 | 0.001901 |
from vsg import deprecated_rule
class rule_024(deprecated_rule.Rule):
    '''
    This rule has been split into:

    * `generic_map_008 <generic_map_rules.html#generic-map-008>`_

    * `port_map_008 <port_map_rules.html#port-map-008>`_
    '''

    def __init__(self):
        deprecated_rule.Rule.__init__(self, 'instantiation', '024')
        # Message shown to users who still reference the deprecated rule id.
        self.message.append('Rule ' + self.unique_id + ' has been split into rules:')
        self.message.append('    generic_map_008')
        # NOTE(review): the class docstring names ``port_map_008`` but this
        # message says ``port_map_005`` -- confirm which rule id is correct
        # before changing either string.
        self.message.append('    rule port_map_005')
|
matthiask/django-content-editor | content_editor/contents.py | Python | bsd-3-clause | 2,812 | 0.000356 | from itertools import chain
from operator import attrgetter
__all__ = ("Contents", "contents_for_items", "contents_for_item")
class Contents:
    """Plugin items bucketed by region key and sorted by ``ordering``.

    ``regions`` is a sequence of region objects exposing ``key`` (and
    ``inherited`` for :meth:`inherit_regions`).  Items passed to
    :meth:`add` must expose ``region`` and ``ordering`` attributes.
    Sorting is performed lazily, on first read access.
    """

    def __init__(self, regions):
        self.regions = regions
        self._sorted = False
        self._contents = {}
        for region in regions:
            self._contents[region.key] = []
        self._unknown_region_contents = []

    def add(self, content):
        """File *content* under its region; unknown regions are set aside."""
        self._sorted = False
        bucket = self._contents.get(content.region)
        if bucket is None:
            self._unknown_region_contents.append(content)
        else:
            bucket.append(content)

    def _sort(self):
        by_ordering = attrgetter("ordering")
        for region_key in list(self._contents):
            self._contents[region_key] = sorted(
                self._contents[region_key], key=by_ordering
            )
        self._sorted = True

    def _ensure_sorted(self):
        # Defer sorting until contents are actually read.
        if not self._sorted:
            self._sort()

    def __getattr__(self, key):
        """Attribute access by region key; unknown keys yield ``[]``."""
        if key.startswith("_"):
            raise AttributeError(f"Invalid region key {key!r} on {self!r}")
        self._ensure_sorted()
        return self._contents.get(key, [])

    def __getitem__(self, key):
        """Item access by region key; unknown keys yield ``[]``."""
        if key.startswith("_"):
            raise KeyError(f"Invalid region key {key!r} on {self!r}")
        self._ensure_sorted()
        return self._contents.get(key, [])

    def __iter__(self):
        """Iterate all known-region items in declared region order."""
        self._ensure_sorted()
        return chain.from_iterable(
            self._contents[region.key] for region in self.regions
        )

    def __len__(self):
        return sum(len(items) for items in self._contents.values())

    def inherit_regions(self, contents):
        """Fill empty, inheritable regions from another ``Contents``."""
        for region in self.regions:
            if region.inherited and not self[region.key]:
                # The other instance hands over its lists already sorted.
                self._contents[region.key] = contents[region.key]
def contents_for_items(items, plugins, *, regions=None):
    """Fetch and group plugin contents for several *items* at once.

    Returns a dict mapping each item to a :class:`Contents` instance,
    filled from every plugin queryset and optionally restricted to the
    given *regions*.
    """
    contents = {item: Contents(regions or item.regions) for item in items}
    items_dict = {item.pk: item for item in contents}
    for plugin in plugins:
        queryset = plugin.get_queryset().filter(parent__in=contents.keys())
        if regions is not None:
            queryset = queryset.filter(region__in=[region.key for region in regions])
        # Seed the queryset's known-related-objects mapping for the
        # ``parent`` field so ``obj.parent`` below resolves to the items we
        # already hold (Django-internal API; presumably avoids one query
        # per row -- verify against the Django version in use).
        queryset._known_related_objects.setdefault(
            plugin._meta.get_field("parent"), {}
        ).update(items_dict)
        for obj in queryset:
            contents[obj.parent].add(obj)
    return contents
def contents_for_item(item, plugins, *, inherit_from=None, regions=None):
    """Return the :class:`Contents` for *item*, optionally inheriting regions.

    ``inherit_from`` is an iterable of ancestor items; any region that is
    marked as inherited and is empty on *item* is filled from the first
    ancestor (in the given order) that has content for it.
    """
    ancestors = list(inherit_from) if inherit_from else []
    all_contents = contents_for_items(
        [item] + ancestors, plugins=plugins, regions=regions
    )
    contents = all_contents[item]
    # Use a distinct loop name: the original shadowed the ``item`` parameter
    # here, which obscured which object was being operated on.
    for ancestor in ancestors:
        contents.inherit_regions(all_contents[ancestor])
    return contents
|
Horta/limix | limix/sh/_hash.py | Python | apache-2.0 | 582 | 0 | from __future__ import division
# # TODO: document those functions
# def array_hash(X):
# wr | iteable = X.flags.writeable
# X.flags.writeable = False
# h = hash(X.tobytes())
# X.flags.writeable = writeable
# return h
def filehash(filepath):
    r"""Compute the SHA-256 hex digest of the file at *filepath*.

    The file is read in fixed-size chunks so arbitrarily large files can
    be hashed without loading them fully into memory.
    """
    import hashlib

    BUF_SIZE = 65536
    sha256 = hashlib.sha256()
    # ``with`` guarantees the handle is closed (the original leaked it),
    # and the two-argument ``iter`` form replaces the while/break loop.
    with open(filepath, "rb") as f:
        for data in iter(lambda: f.read(BUF_SIZE), b""):
            sha256.update(data)
    return sha256.hexdigest()
|
bettocr/rpi-proyecto-final | opt/luz.py | Python | bsd-3-clause | 874 | 0.052632 | #!/usr/bin/env python
# Realizado por: Roberto Arias (@bettocr)
#
# Permite encender y apagar luces leds
#
import RPi.GPIO as GPIO, time, os
GPIO.setmode(GPIO.BCM)
on = 0 # luces encendidas
MAX=5200 # luminocidad maxima antes de encender el led, entre mayor mas oscuro
PIN=23 # pin al relay
PINRC=24 #pin que lee la photocell
GPIO.setup(PIN,GPIO.OUT)
def RCtime (RCpin):
    """Measure the charge time of the RC circuit on *RCpin*.

    Drives the pin low to discharge the capacitor, waits, then counts
    busy-loop iterations until the pin reads HIGH again.  Per the module
    comments, a larger reading corresponds to less light on the photocell.
    """
    reading = 0
    GPIO.setup(RCpin, GPIO.OUT)
    GPIO.output(RCpin, GPIO.LOW)  # discharge the capacitor
    time.sleep(0.1)
    GPIO.setup(RCpin, GPIO.IN)

    # Busy-wait until the capacitor has charged enough to read HIGH.
    while (GPIO.input(RCpin) == GPIO.LOW):
        reading += 1
    return reading
# Main loop: switch the relay (LED lights) on when the photocell reading
# exceeds MAX (dark enough) and off again once it drops back below, using
# ``on`` to avoid re-driving the pin every iteration.
while True:
    #print RCtime(24)
    luz = RCtime(PINRC)
    if luz > MAX:
        GPIO.output(PIN,True)
        on = 1
    if luz < MAX and on == 1:
        GPIO.output(PIN,False)
        on = 0
|
ging/keystone | keystone/tests/ksfixtures/appserver.py | Python | apache-2.0 | 2,648 | 0 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable | law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS | OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import fixtures
from paste import deploy
from keystone.common import environment
from keystone import config
CONF = config.CONF
MAIN = 'main'
ADMIN = 'admin'
class AppServer(fixtures.Fixture):
    """A fixture for managing an application server instance.

    Loads a Paste-deploy application from *config*, serves it on
    *host*:*port* (port 0 picks a free port), optionally with SSL, and
    writes the actual bound port back into the keystone config.
    """

    def __init__(self, config, name, cert=None, key=None, ca=None,
                 cert_required=False, host='127.0.0.1', port=0):
        super(AppServer, self).__init__()
        self.config = config          # paste config URI/path
        self.name = name              # paste app section (MAIN or ADMIN)
        self.cert = cert              # server certificate file
        self.key = key                # server private key file
        self.ca = ca                  # CA bundle for client verification
        self.cert_required = cert_required  # require client certificates
        self.host = host
        self.port = port

    def setUp(self):
        super(AppServer, self).setUp()
        app = deploy.loadapp(self.config, name=self.name)
        self.server = environment.Server(app, self.host, self.port)
        self._setup_SSL_if_requested()
        self.server.start(key='socket')

        # some tests need to know the port we ran on.
        self.port = self.server.socket_info['socket'][1]
        self._update_config_opt()
        self.addCleanup(self.server.stop)

    def _setup_SSL_if_requested(self):
        # SSL is only enabled when cert, key and ca are all supplied.
        # TODO(dstanek): fix environment.Server to take a SSLOpts instance
        # so that the params are either always set or not
        if (self.cert is not None and
                self.ca is not None and
                self.key is not None):
            self.server.set_ssl(certfile=self.cert,
                                keyfile=self.key,
                                ca_certs=self.ca,
                                cert_required=self.cert_required)

    def _update_config_opt(self):
        """Updates the config with the actual port used."""
        opt_name = self._get_config_option_for_section_name()
        CONF.set_override(opt_name, self.port)

    def _get_config_option_for_section_name(self):
        """Maps Paster config section names to port option names."""
        return {'admin': 'admin_port', 'main': 'public_port'}[self.name]
|
lordwolfchild/subnetmapper | pySubnetMapConvert.py | Python | gpl-3.0 | 4,646 | 0.016143 | #!/usr/bin/python
from plistlib import *
import socket, struct
from ctypes import *
from optparse import OptionParser
# Command-line driver: convert a SubnetMapper v1 plist file (infile) into
# the v2 XML format (outfile).
usage = "usage: %prog [options] infile outfile"
parser = OptionParser(usage)
(options,args)=parser.parse_args()
if len(args)!=2:
    parser.error("incorrect number of arguments")
try:
    with open(args[0]) as f: pass
except IOError as e:
    parser.error('input file does not exist')
pl = readPlist(args[0])
outpfile = open(args[1],"w")
# Root index list of the archived subnet objects inside the plist.
subnet_indices = pl['$objects'][pl['$top']['root']['CF$UID']]['NS.objects'];
outpfile.write(unicode('<?xml version="1.0" encoding="UTF-8"?>\n'))
outpfile.write(unicode('<SubnetMap fileformat="2" writer="SubnetMapper" version="2.0.0">\n'))
for subnet_index in subnet_indices:
    if (pl['$objects'][pl['$objects'][subnet_index['CF$UID']]['$class']['CF$UID']]['$classname']=='Subnet'):
        # Found a subnet
        sn_color_index = pl['$objects'][subnet_index['CF$UID']]['SubnetColor']['CF$UID'];
        sn_IP_index = pl['$objects'][subnet_index['CF$UID']]['SubnetIP']['CF$UID'];
        sn_NM_index = pl['$objects'][subnet_index['CF$UID']]['SubnetNM']['CF$UID'];
        sn_identifier_index = pl['$objects'][subnet_index['CF$UID']]['SubnetIdentifier']['CF$UID'];
        sn_description_index = pl['$objects'][subnet_index['CF$UID']]['SubnetDescription']['CF$UID'];
        sn_notes_index = pl['$objects'][subnet_index['CF$UID']]['SubnetNotes']['CF$UID'];
        # Available Keys: SubnetIdentifier, SubnetIP, SubnetDescription
        #                 SubnetColor, SubnetNM, SubnetNotes
        # We start with the interesting part, the color. It is saved as a string
        # representation of the color channels as float values. 1 is max, so
        # we have to split the floats up to single values, convert them to
        # real floats, map them to the value range of 0-255 and pack them
        # back into a string which we can use:
        # unwrap the string and clean it up
        colordata = pl['$objects'][sn_color_index]['NSRGB'].data.strip('\x00')
        # split it over whitespaces
        colordata_split = colordata.split()
        # now convert the single channels to integer values between 0 and 255.
        color_r = int(float(colordata_split[0])*255.0);
        color_g = int(float(colordata_split[1])*255.0);
        color_b = int(float(colordata_split[2])*255.0);
        # now pack everything back into a friendly colorstring as we all know
        # it from the interwebs.
        color_str = "<color>#{0:02X}{1:02X}{2:02X}</color>".format(color_r,color_g,color_b);
        # Ok, I lied a little bit, another interesting part is the ip address
        # and the netmask. In the original fileformat they are saved as integers
        # and need to be converted into strings in the classical 4-byte long,
        # dot-seperated format.
        # get the integer
        ip_int=pl['$objects'][sn_IP_index]
        # convert it into a ctype uint32 and get a pointer to it
        i = c_uint32(ip_int)
        pi = pointer(i)
        # cast the pointer to a 4 byte array and retrieve the bytes for
        # string conversion.
        pic=cast(pi,POINTER(c_ubyte*4))
        # store the finished string for later processing
        # (bytes are read most-significant first, i.e. network order)
        ip_str="<address>{0}.{1}.{2}.{3}</address>".format(pic[0][3], pic[0][2], pic[0][1], pic[0][0])
        # The same method can be used for the netmask.
        # get the integer
        nm_int=pl['$objects'][sn_NM_index]
        # convert it into a ctype uint32 and get a pointer to it
        i = c_uint32(nm_int)
        pi = pointer(i)
        # cast the pointer to a 4 byte array and retrieve the bytes for
        # string conversion.
        pic=cast(pi,POINTER(c_ubyte*4))
        # store the finished string for later processing
        nm_str="<netmask>{0}.{1}.{2}.{3}</netmask>".format(pic[0][3], pic[0][2], pic[0][1], pic[0][0])
        # Convert the notes to a standard text..
        notes_str = pl['$objects'][pl['$objects'][sn_notes_index]['NSString']['CF$UID']]
        # store the description and identifier
        identifier_str = pl['$objects'][sn_identifier_index]
        description_str = pl['$objects'][sn_description_index]
        # now just plug everything together. the
        # subnet_block variable stores the complete xml block for this subnet.
        subnet_block = unicode("",errors='ignore')
        subnet_block = u"   <identifier>{0}</identifier>\n   {1}\n   {2}\n   <description>{3}</description>\n   <notes>{4}</notes>\n   {5}\n".format(identifier_str,ip_str,nm_str,description_str,notes_str,color_str)
        outpfile.write(unicode('  <subnet ipversion="IPv4">\n'))
        outpfile.write(subnet_block.encode('utf-8'))
        outpfile.write(unicode('  </subnet>\n'))
outpfile.write(unicode('</SubnetMap>\n'))
outpfile.close()
|
google/uncertainty-baselines | baselines/jft/experiments/jft300m_vit_base16_finetune_cifar100.py | Python | apache-2.0 | 4,319 | 0.017365 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-B/16 finetuning on CIFAR.
"""
# pylint: enable=line-too-long
import ml_collections
# TODO(dusenberrymw): Open-source remaining imports.
def get_sweep(hyper):
    """Return the hyperparameter sweep: currently one trial with defaults."""
    # Below shows an example for how to sweep hyperparameters.
    # lr_grid = [1e-4, 3e-4, 6e-4, 1e-3, 1.3e-3, 1.6e-3, 2e-3]
    return hyper.product([
        # hyper.sweep('config.lr.base', lr_grid),
    ])
def get_config():
  """Config for fine-tuning a JFT-pretrained ViT-B/16 on CIFAR-100."""
  config = ml_collections.ConfigDict()

  # Fine-tuning dataset
  config.dataset = 'cifar100'
  config.val_split = 'train[98%:]'
  config.train_split = 'train[:98%]'
  config.num_classes = 100

  BATCH_SIZE = 512  # pylint: disable=invalid-name
  config.batch_size = BATCH_SIZE

  config.total_steps = 10_000

  INPUT_RES = 384  # pylint: disable=invalid-name
  pp_common = '|value_range(-1, 1)'
  # pp_common += f'|onehot({config.num_classes})'
  # To use ancestor 'smearing', use this line instead:
  pp_common += f'|onehot({config.num_classes}, key="label", key_result="labels")'  # pylint: disable=line-too-long
  pp_common += '|keep(["image", "labels"])'
  config.pp_train = f'decode|inception_crop({INPUT_RES})|flip_lr' + pp_common
  config.pp_eval = f'decode|resize({INPUT_RES})' + pp_common

  # OOD eval
  # ood_split is the data split for both the ood_dataset and the dataset.
  config.ood_datasets = ['cifar10', 'svhn_cropped']
  config.ood_num_classes = [10, 10]
  config.ood_split = 'test'
  config.ood_methods = ['msp', 'entropy', 'maha', 'rmaha']
  pp_eval_ood = []
  for num_classes in config.ood_num_classes:
    if num_classes > config.num_classes:
      # Note that evaluation_fn ignores the entries with all zero labels for
      # evaluation. When num_classes > n_cls, we should use onehot{num_classes},
      # otherwise the labels that are greater than n_cls will be encoded with
      # all zeros and then be ignored.
      pp_eval_ood.append(
          config.pp_eval.replace(f'onehot({config.num_classes}',
                                 f'onehot({num_classes}'))
    else:
      pp_eval_ood.append(config.pp_eval)
  config.pp_eval_ood = pp_eval_ood

  config.shuffle_buffer_size = 50_000  # Per host, so small-ish is ok.

  config.log_training_steps = 10
  config.log_eval_steps = 100
  # NOTE: eval is very fast O(seconds) so it's fine to run it often.
  config.checkpoint_steps = 1000
  config.checkpoint_timeout = 1

  config.prefetch_to_device = 2
  config.trial = 0

  # Model section
  # pre-trained model ckpt file
  # !!! The below section should be modified per experiment
  config.model_init = '/path/to/pretrained_model_ckpt.npz'
  # Model definition to be copied from the pre-training config
  config.model = ml_collections.ConfigDict()
  config.model.patches = ml_collections.ConfigDict()
  config.model.patches.size = [16, 16]
  config.model.hidden_size = 768
  config.model.transformer = ml_collections.ConfigDict()
  config.model.transformer.attention_dropout_rate = 0.
  config.model.transformer.dropout_rate = 0.
  config.model.transformer.mlp_dim = 3072
  config.model.transformer.num_heads = 12
  config.model.transformer.num_layers = 12
  config.model.classifier = 'token'  # Or 'gap'
  # This is "no head" fine-tuning, which we use by default
  config.model.representation_size = None

  # Optimizer section
  config.optim_name = 'Momentum'
  config.optim = ml_collections.ConfigDict()
  config.grad_clip_norm = 1.0
  config.weight_decay = None  # No explicit weight decay
  config.loss = 'softmax_xent'  # or 'sigmoid_xent'

  config.lr = ml_collections.ConfigDict()
  config.lr.base = 0.002
  config.lr.warmup_steps = 500
  config.lr.decay_type = 'cosine'
  return config
|
Azure/azure-sdk-for-python | sdk/automanage/azure-mgmt-automanage/azure/mgmt/automanage/operations/_configuration_profile_preferences_operations.py | Python | mit | 22,341 | 0.004431 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConfigurationProfilePreferencesOperations(object):
"""ConfigurationProfilePreferencesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~automanage_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the shared pipeline client, config and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # NOTE: auto-generated by AutoRest; regenerated code will overwrite
    # manual edits, so changes here should be kept to a minimum.
    def create_or_update(
        self,
        configuration_profile_preference_name,  # type: str
        resource_group_name,  # type: str
        parameters,  # type: "models.ConfigurationProfilePreference"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ConfigurationProfilePreference"
        """Creates a configuration profile preference.

        :param configuration_profile_preference_name: Name of the configuration profile preference.
        :type configuration_profile_preference_name: str
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param parameters: Parameters supplied to create or update configuration profile preference.
        :type parameters: ~automanage_client.models.ConfigurationProfilePreference
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConfigurationProfilePreference, or the result of cls(response)
        :rtype: ~automanage_client.models.ConfigurationProfilePreference
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ConfigurationProfilePreference"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-30-preview"
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'configurationProfilePreferenceName': self._serialize.url("configuration_profile_preference_name", configuration_profile_preference_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Serialize the body and issue the PUT through the shared pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ConfigurationProfilePreference')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated, 201 = created; anything else is an ARM error.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('ConfigurationProfilePreference', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ConfigurationProfilePreference', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfilePreferences/{configurationProfilePreferenceName}'}  # type: ignore
def get(
self,
configuration_profile_preference_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ConfigurationProfilePreference"
"""Get information about a configuration profile preference.
:param configuration_profile_preference_name: The configuration profile preference name.
:type configuration_profile_preference_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationProfilePreference, or the result of cls(response)
:rtype: ~automanage_client.models.ConfigurationProfilePreference
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfigurationProfilePreference"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30-preview"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'configurationProfilePreferenceName': self._serialize.url("configuration_profile_preference_name", configuration_profile_preference_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
|
chriszs/redash | redash/query_runner/influx_db.py | Python | bsd-2-clause | 2,575 | 0 | import logging
from redash.query_runner import *
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
    """Flatten InfluxDB ResultSet objects into Redash's JSON table format.

    A first pass collects the union of all column names (series columns
    plus tag keys) across every series so each emitted row exposes the
    same keys; a second pass builds one row dict per data point, filling
    tag columns from the series tags.
    """
    # Ordered set of column names: a dict preserves first-seen order and
    # replaces the original O(n^2) ``if column not in list`` scans.
    seen_columns = {}
    for result in results:
        for series in result.raw.get('series', []):
            for column in series['columns']:
                seen_columns.setdefault(column, None)
            for key in series.get('tags', {}):
                seen_columns.setdefault(key, None)
    result_columns = list(seen_columns)

    result_rows = []
    for result in results:
        for series in result.raw.get('series', []):
            tags = series.get('tags', {})
            # Map each series column to its first occurrence, matching
            # list.index() semantics for duplicate names but in O(1) lookups.
            column_index = {}
            for index, column in enumerate(series['columns']):
                column_index.setdefault(column, index)
            for point in series['values']:
                result_row = {}
                for column in result_columns:
                    if column in tags:
                        result_row[column] = tags[column]
                    elif column in column_index:
                        result_row[column] = point[column_index[column]]
                result_rows.append(result_row)

    return json_dumps({
        "columns": [{'name': c} for c in result_columns],
        "rows": result_rows
    })
class InfluxDB(BaseQueryRunner):
    """Redash query runner for InfluxDB clusters (DSN-configured)."""
    noop_query = "show measurements limit 1"

    @classmethod
    def configuration_schema(cls):
        # Only a DSN-style URL is required to connect.
        return {
            'type': 'object',
            'properties': {
                'url': {
                    'type': 'string'
                }
            },
            'required': ['url']
        }

    @classmethod
    def enabled(cls):
        # True only when the ``influxdb`` client library imported above.
        return enabled

    @classmethod
    def annotate_query(cls):
        return False

    @classmethod
    def type(cls):
        return "influxdb"

    def run_query(self, query, user):
        client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
        logger.debug("influxdb url: %s", self.configuration['url'])
        logger.debug("influxdb got query: %s", query)

        try:
            results = client.query(query)
            # A single ResultSet is normalised to a one-element list.
            if not isinstance(results, list):
                results = [results]

            json_data = _transform_result(results)
            error = None
        except Exception as ex:
            json_data = None
            # NOTE(review): ``ex.message`` is Python 2 only -- would need
            # ``str(ex)`` under Python 3; confirm the supported runtime.
            error = ex.message

        return json_data, error


register(InfluxDB)
|
zhaogp/oxygen | oxygen/wsgi.py | Python | mit | 390 | 0 | """
WSGI config for oxygen project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setd | efault("DJANGO_SETTIN | GS_MODULE", "oxygen.settings")
application = get_wsgi_application()
|
open-craft/edx-analytics-dashboard | analytics_dashboard/courses/tests/test_views/test_csv.py | Python | agpl-3.0 | 5,748 | 0.002088 | from ddt import ddt, data
from django.core.urlresolvers import reverse
from django.test import TestCase
import mock
from analyticsclient.exceptions import NotFoundError
from courses.tests import SwitchMixin
from courses.tests.test_views import ViewTestMixin, DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID
from courses.tests.utils import convert_list_of_dicts_to_csv, get_mock_api_enrollment_geography_data, \
get_mock_api_enrollment_data, get_mock_api_course_activity, get_mock_api_enrollment_age_data, \
get_mock_api_enrollment_education_data, get_mock_api_enrollment_gender_data
@ddt
# pylint: disable=abstract-method
class CourseCSVTestMixin(ViewTestMixin):
    """Shared assertions for the CSV download views.

    Subclasses set ``column_headings``, ``base_file_name``, ``api_method``
    (the analytics-client method to mock) and implement ``get_mock_data``.
    """
    client = None
    column_headings = None
    base_file_name = None

    def assertIsValidCSV(self, course_id, csv_data):
        response = self.client.get(self.path(course_id=course_id))

        # Check content type
        self.assertResponseContentType(response, 'text/csv')

        # Check filename
        csv_prefix = u'edX-DemoX-Demo_2014' if course_id == DEMO_COURSE_ID else u'edX-DemoX-Demo_Course'
        filename = '{0}--{1}.csv'.format(csv_prefix, self.base_file_name)
        self.assertResponseFilename(response, filename)

        # Check data
        self.assertEqual(response.content, csv_data)

    def assertResponseContentType(self, response, content_type):
        self.assertEqual(response['Content-Type'], content_type)

    def assertResponseFilename(self, response, filename):
        self.assertEqual(response['Content-Disposition'], 'attachment; filename="{0}"'.format(filename))

    def _test_csv(self, course_id, csv_data):
        # Patch the analytics API call to return the canned CSV payload.
        with mock.patch(self.api_method, return_value=csv_data):
            self.assertIsValidCSV(course_id, csv_data)

    @data(DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID)
    def test_response_no_data(self, course_id):
        # Create an "empty" CSV that only has headers
        csv_data = convert_list_of_dicts_to_csv([], self.column_headings)
        self._test_csv(course_id, csv_data)

    @data(DEMO_COURSE_ID, DEPRECATED_DEMO_COURSE_ID)
    def test_response(self, course_id):
        csv_data = self.get_mock_data(course_id)
        csv_data = convert_list_of_dicts_to_csv(csv_data)
        self._test_csv(course_id, csv_data)

    def test_404(self):
        # An unknown course must surface the API's NotFoundError as a 404.
        course_id = 'fakeOrg/soFake/Fake_Course'
        self.grant_permission(self.user, course_id)
        path = reverse(self.viewname, kwargs={'course_id': course_id})
        with mock.patch(self.api_method, side_effect=NotFoundError):
            response = self.client.get(path, follow=True)
            self.assertEqual(response.status_code, 404)
class CourseEnrollmentByCountryCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_geography'
column_headings = ['count', 'country', 'course_id', 'date']
base_file_name = 'enrollment-location'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_geography_data(course_id)
class CourseEnrollmentCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment'
column_headings = ['count', 'course_id', 'date']
base_file_name = 'enrollment'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_data(course_id)
class CourseEnrollmentModeCSVViewTests(SwitchMixin, CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment'
column_headings = ['count', 'course_id', 'date', 'audit', 'honor', 'professional', 'verified']
base_file_name = 'enrollment'
api_method = 'analyticsclient.course.Course.enrollment'
@classmethod
def setUpClass(cls):
cls.toggle_switch('display_verified_enrollment', True)
def get_mock_data(self, course_id):
return get_mock_api_enrollment_data(course_id)
class CourseEnrollmentDemographicsByAgeCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_demographics_age'
column_headings = ['birth_year', 'count', 'course_id', 'created', 'date']
base_file_name = 'enrollment-by-birth-year'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_age_data(course_id)
class CourseEnrollmentDemographicsByEducationCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_demographics_education'
column_headings = ['count', 'course_id', 'created', 'date', 'education_level.name', 'education_level.short_name']
base_file_name = 'enrollment-by-education'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_education_data(course_id)
class CourseEnrollmentByDemographicsGenderCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:enrollment_demographics_gender'
column_headings = ['count', 'course_id', 'created', 'date', 'gender']
base_file_name = 'enrollment-by-gender'
api_method = 'analyticsclient.course.Course.enrollment'
def get_mock_data(self, course_id):
return get_mock_api_enrollment_gender_data(course_id)
class CourseEngagementActivityTrendCSVViewTests(CourseCSVTestMixin, TestCase):
viewname = 'courses:csv:engagement_activity_trend'
column_headings = ['any', 'attempted_problem', 'course_id', 'interval_end', 'interval_start',
'played_video', 'posted_forum']
base_file_name = 'engagement-activity'
api_method = 'analyticsclient.course.Course.activity'
def get_mock_data(self, course_id):
return get_mock_api_course_activity(course_id)
|
hermanschaaf/mafan | bin/mafan_download.py | Python | mit | 291 | 0.003436 | from mafan import download_data
if __name__ == '__main__':
confirm = i | nput(
"You are about to download all dicti | onary files. Could be up to 50MB in total. Are you sure?\n (y/n) ")
if confirm == 'y' or confirm == 'yes':
download_data.download_traditional_word_list()
|
fpischedda/django-messages | django_messages/admin.py | Python | bsd-3-clause | 4,053 | 0.00074 | from django import forms
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.contrib import admin
from django.contrib.auth.models import Group
from django_messages.utils import get_user_model
User = get_user_model()
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from django_messages.models import Message
class MessageAdminForm(forms.ModelForm):
"""
Custom AdminForm to enable messages to groups and all users.
"""
group = forms.ChoiceField(label=_('group'), required=False,
help_text=_('Creates the message optionally for all users or a group of users.'))
def __init__(self, *args, **kwargs):
super(MessageAdminForm, self).__init__(*args, **kwargs)
self.fields['group'].choices = self._get_group_choices()
self.fields['recipient'].required = True
def _get_group_choices(self):
return [('', u'---------'), ('all', _('All users'))] + \
[(group.pk, group.name) for group in Group.objects.all()]
class Meta:
model = Message
fields = ('sender', 'recipient', 'group', 'parent_msg', 'subject',
'body', 'read_at', 'replied_at',
'sender_deleted_at', 'recipient_deleted_at')
class MessageAdmin(admin.ModelAdmin):
form = MessageAdminForm
fieldsets = (
(None, {
'fields': (
'sender',
('recipient', 'group'),
),
}),
(_('Message'), {
'fields': (
'parent_msg',
'subject', 'body',
),
'classes': ('monospace'),
}),
(_('Date/time'), {
'fields': (
'sent_at', 'read_at', 'replied_at',
'sender_deleted_at', 'recipient_deleted_at',
),
'classes': ('collapse', 'wide'),
}),
)
list_display = ('subject', 'sender', 'recipient', 'sent_at', 'read_at')
list_filter = ('sent_at', 'sender', 'recipient')
search_fields = ('subject', 'body')
raw_id_fields = ('sender', 'recipient', 'parent_msg')
def save_model(self, request, obj, form, change):
"""
Saves the message for the | recipient and looks in the form instance
for other possible recipients. Prevents duplication by excludin the
origin | al recipient from the list of optional recipients.
When changing an existing message and choosing optional recipients,
the message is effectively resent to those users.
"""
obj.save()
if notification:
# Getting the appropriate notice labels for the sender and
# recipients.
if obj.parent_msg is None:
sender_label = 'messages_sent'
recipients_label = 'messages_received'
else:
sender_label = 'messages_replied'
recipients_label = 'messages_reply_received'
# Notification for the sender.
notification.send([obj.sender], sender_label, {'message': obj})
if form.cleaned_data['group'] == 'all':
# send to all users
recipients = User.objects.exclude(pk=obj.recipient.pk)
else:
# send to a group of users
recipients = []
group = form.cleaned_data['group']
if group:
group = Group.objects.get(pk=group)
recipients.extend(
list(group.user_set.exclude(pk=obj.recipient.pk)))
# create messages for all found recipients
for user in recipients:
obj.pk = None
obj.recipient = user
obj.save()
if notification:
# Notification for the recipient.
notification.send([user], recipients_label, {'message': obj})
admin.site.register(Message, MessageAdmin)
|
shearern/PyWizard | src/py_wizard/console_wiz_iface/ConsoleNameQuestion.py | Python | mit | 276 | 0.007246 | '''
Creat | ed on Sep 20, 2013
@author: nshearer
'''
from ConsoleSimpleQuestion import ConsoleSimpleQuestion
class ConsoleNameQuestion(ConsoleSimpleQuestion):
def __init__(self, question) | :
super(ConsoleNameQuestion, self).__init__(question)
|
cuducos/getgist | getgist/local.py | Python | mit | 2,630 | 0 | import os
from click import confirm
from getgist import GetGistCommons
class LocalTools(GetGistCommons):
"""Helpers to deal with local files and local file system"""
def __init__(self, filename, assume_yes=False):
"""
Sets the file name to be used by the instance.
:param filename: (str) local file name (ro be read or written)
:param assume_yes: (bool) assume yes (or first option) for all prompts
return: (None)
"""
self.cwd = os.getcwd()
self.file_path = os.path.expanduser(filename)
self.filename = os.path.basename(filename)
self.assume_yes = assume_yes
def save(self, content):
"""
Save any given content to the instance file.
:param content: (str or bytes)
:return: (None)
"""
# backup existing file if needed
if os.path.exists(self.file_path) and not self.assume_yes:
message = "Overwrite existing {}? (y/n) "
if not confirm(message.format(self.filename)):
self.backup()
# write file
self.output("Saving " + self.filename)
with open(self.file_path, "wb") as handler:
if not isinstance(content, bytes):
content = bytes(content, "u | tf-8")
handler.write(content)
self.yeah("Done!")
def backup(self):
"""Backups files with the same name of the instance filename"""
count = 0
name = "{}.bkp".format(self.filename)
backup = os.path.join(self.cwd, name)
while os.path.exists(backup):
count += 1
name = "{}.bkp{}".format(self.filename, count)
backup = os.path.join(self.cwd, name)
self.hey("Moving exist | ing {} to {}".format(self.filename, name))
os.rename(os.path.join(self.cwd, self.filename), backup)
def read(self, file_path=None):
"""
Read the contents of a file.
:param filename: (str) path to a file in the local file system
:return: (str) contents of the file, or (False) if not found/not a file
"""
if not file_path:
file_path = self.file_path
# abort if the file path does not exist
if not os.path.exists(file_path):
self.oops("Sorry, but {} does not exist".format(file_path))
return False
# abort if the file path is not a file
if not os.path.isfile(file_path):
self.oops("Sorry, but {} is not a file".format(file_path))
return False
with open(file_path) as handler:
return handler.read()
|
psf/black | tests/data/fmtonoff.py | Python | mit | 9,645 | 0.015034 | #!/usr/bin/env python3
import asyncio
import sys
from third_party import X, Y, Z
from library import some_connection, \
some_decorator
# fmt: off
from third_party import (X,
Y, Z)
# fmt: on
f'trigger 3.6 mode'
# Comment 1
# Comment 2
# fmt: off
def func_no_args():
a; b; c
if True: raise RuntimeError
if False: ...
for i in range(10):
print(i)
continue
exec('new-style exec', {}, {})
return None
async def coroutine(arg, exec=False):
'Single-line docstring. Multiline is harder to reformat.'
async with some_connection() as conn:
await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
await asyncio.sleep(1)
@asyncio.coroutine
@some_decorator(
with_args=True,
many_args=[1,2,3]
)
def function_signature_stress_test(number:int,no_annotation=None,text:str='default',* ,debug:bool=False,**kwargs) -> str:
return text[number:-1]
# fmt: on
def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r''):
offset = attr.ib(default=attr.Factory( lambda: _r.uniform(1, 2)))
assert task._cancel_stack[:len(old_stack)] == old_stack
def spaces_types(a: int = 1, b: tuple = (), c: list = [], d: dict = {}, e: bool = True, f: int = -1, g: int = 1 if False else 2, h: str = "", i: str = r''): ...
def spaces2(result= _core.Value(None)):
...
something = {
# fmt: off
key: 'value',
}
def subscriptlist():
atom[
# fmt: off
'some big and',
'complex subscript',
# fmt: on
goes + here, andhere,
]
def import_as_names():
# fmt: off
from hello import a, b
'unformatted'
# fmt: on
def testlist_star_expr():
# fmt: off
a , b = *hello
'unformatted'
# fmt: on
def yield_expr():
# fmt: off
yield hello
'unformatted'
# fmt: on
'formatted'
# fmt: off
( yield hello )
'unformatted'
# fmt: on
def example(session):
# fmt: off
result = session\
.query(models.Customer.id)\
.filter(models.Customer.account_id == account_id,
models.Customer.email == email_address)\
.order_by(models.Customer.id.asc())\
.all()
# fmt: on
def off_and_on_without_data():
"""All comments here are technically on the same prefix.
The comments between will be formatted. This is a known limitation.
"""
# fmt: off
#hey, that won't work
# fmt: on
pass
def on_and_off_broken():
"""Another known limitation."""
# fmt: on
# fmt: off
this=should.not_be.formatted()
and_=indeed . it is not formatted
because . the . handling . inside . generate_ignored_nodes()
now . considers . multiple . fmt . directives . within . one . prefix
# fmt: on
# fmt: off
# ...but comments still get reformatted even though they should not be
# fmt: on
def long_lines():
if True:
typedargslist.extend(
gen_annotated_params(ast_args.kwonlyargs, ast_args.kw_defaults, parameters, implicit_default=True)
)
# fmt: off
a = (
unnecessary_bracket()
)
# fmt: on
_type_comment_re = re.compile(
r"""
^
[\t ]*
\#[ ]type:[ ]*
(?P<type>
[^#\t\n]+?
)
(?<!ignore) # note: this will force the non-greedy + in <type> to match
# a trailing space which is why we need the silliness below
(?<!ignore[ ]{1})(?<!ignore[ ]{2})(?<!ignore[ ]{3})(?<!ignore[ ]{4})
(?<!ignore[ ]{5})(?<!ignore[ ]{6})(?<!ignore[ ]{7})(?<!ignore[ ]{8})
(?<!ignore[ ]{9})(?<!ignore[ ]{10})
[\t ]*
(?P<nl>
(?:\#[^\n]*)?
\n?
)
$
""",
# fmt: off
re.MULTILINE|re.VERBOSE
# fmt: on
)
def single_literal_yapf_disable():
"""Black does not support this."""
BAZ = {
(1, 2, 3, 4),
(5, 6, 7, 8),
(9, 10, 11, 12)
} # yapf: disable
cfg.rule(
"Default", "address",
xxxx_xxxx=["xxx-xxxxxx-xxxxxxxxxx"],
xxxxxx="xx_xxxxx", xxxxxxx="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
xxxxxxxxx_xxxx=True, xxxxxxxx_xxxxxxxxxx=False,
xxxxxx_xxxxxx=2, xxxxxx_xxxxx_xxxxxxxx=70, xxxxxx_xxxxxx_xxxxx=True,
# fmt: off
xxxxxxx_xxxxxxxxxxxx={
"xxxxxxxx": {
"xxxxxx": False,
"xxxxxxx": False,
"xxxx_xxxxxx": "xxxxx",
},
"xxxxxxxx-xxxxx": {
"xxxxxx": False,
"xxxxxxx": True,
"xxxx_xxxxxx": "xxxxxx",
},
},
# fmt: on
xxxxxxxxxx_xxxxxxxxxxx_xxxxxxx_xxxxxxxxx=5
)
# fmt: off
yield 'hello'
# No formatting to the end of the file
l=[1,2,3]
d={'a':1,
'b':2}
# output
#!/usr/bin/env python3
import asyncio
import sys
from third_party import X, Y, Z
from library import some_connection, some_decorator
# fmt: off
from third_party import (X,
Y, Z)
# fmt: on
f"trigger 3.6 mode"
# Comment 1
# Comment 2
# fmt: off
def func_no_args():
a; b; c
if True: raise RuntimeError
if False: ...
for i in range(10):
print(i)
continue
exec('new-style exec', {}, {})
return None
async def coroutine(arg, exec=False):
'Single-line docstring. Multiline is harder to reformat.'
async with some_connection() as conn:
await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
await asyncio.sleep(1)
@asyncio.coroutine
@some_decorator(
with_args=True,
many_args=[1,2,3]
)
def function_signature_stress_test(number:int,no_annotation=None,text:str='default',* ,debug:bool=False,**kwargs) -> str:
return text[number:-1]
# fmt: on
def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r""):
offset = attr.ib(default=attr.Factory(lambda: _r.uniform(1, 2)))
assert task._cancel_stack[: len(old_stack)] == old_stack
def spaces_types(
a: int = 1,
b: tuple = (),
c: list = [],
d: dict = {},
e: bool = True,
f: int = -1,
g: int = 1 if False else 2,
h: str = "",
i: str = r"",
):
...
def spaces2(result=_core.Value(None)):
...
something = {
# fmt: off
key: 'value',
}
def subscriptlist():
atom[
# fmt: off
'some big and',
'complex subscript',
# fmt: on
goes + here,
andhere,
]
def import_as_names():
# fmt: off
from hello import a, b
'unformatted'
# fmt: on
def testlist_star_expr():
# fmt: off
a , b = *hello
'unformatted'
# fmt: on
def yield_expr():
# fmt: off
yield hello
'unformatted'
# fmt: on
"formatted"
# fmt: off
( yield hello )
'unformatted'
# fmt: on
def example(session):
# fmt: off
result = session\
.query(models.Customer.id)\
.filter(models.Customer.account_id == account_id,
models.Customer.email == email_address)\
.order_by(models.Customer.id.asc())\
.all()
# fmt: on
def off_and_on_without_data():
"""All comments here are technically on the same prefix.
The comments between will be formatted. This is a known limitation.
"""
# fmt: off
# hey, that won't work
# fmt: on
pass
def on_and_off_broken():
"""Another known limitation."""
# fmt: on
# fmt: off
this=should.not_be.formatted()
and_=indeed . it is not formatted
because . the . handling . inside . generate_ignored_nodes()
now . considers . multiple . fmt . directives . within . one . prefix
# fmt: on
# fmt: off
# ...but comments still get reformatted even though they should not be
# fmt: on
def long_lines():
if True:
typedargslist.extend(
gen_annotated_params(
ast_args.kwonlyargs,
ast_args.kw_defaults,
parameters,
implicit_default=True,
)
)
# fmt: off
| a = (
unnecessary_brack | et()
)
# fmt: on
_type_comment_re = re.compile(
r"""
^
[\t ]*
\#[ ]type:[ ]*
(?P<type>
[^#\t\n]+?
|
harrijs/gns3-server | gns3server/web/response.py | Python | gpl-3.0 | 3,573 | 0.001679 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import jsonschema
import aiohttp.web
import logging
import sys
import jinja2
from ..version import __version__
log = logging.getLogger(__name__)
renderer = jinja2.Environment(loader=jinja2.PackageLoader('gns3server', 'templates'))
class Response(aiohttp.web.Response):
def __init__(self, request=None, route=None, output_schema=None, headers={}, **kwargs):
self._route = route
self._output_schema = output_schema
self._request = request
headers['X-Route'] = self._route
headers['Server'] = "Python/{0[0]}.{0[1]} GNS3/{1}".format(sys.version_info, __version__)
super().__init__(headers=headers, **kwargs)
def start(self, request):
if log.getEffectiveLevel() == logging.DEBUG:
log.info("%s %s", request.method, request.path_qs)
log.debug("%s", dict(request.headers))
if isinstance(request.json, dict):
log.debug("%s", request.json)
log.info("Response: %d %s", self.status, self.reason)
log.debug(dict(self.headers))
if hasattr(self, 'body') and self.body is not None and self.headers["CONTENT-TYPE"] == "application/json":
log.debug(json.loads(self.body.decode('utf-8')))
return super().start(request)
def html(self, answer):
"""
Set the response content type to text/html and serialize
the content.
:param anwser The response as a Python object
"""
self.content_type = "text/html"
self.body = answer.encode('utf-8')
def template( | self, template_filename, **kwargs):
"""
Render a template
:params template: Template name
:params kwargs: Template parameters
"""
template = renderer.get_template(template_filename)
| kwargs["gns3_version"] = __version__
kwargs["gns3_host"] = self._request.host
self.html(template.render(**kwargs))
def json(self, answer):
"""
Set the response content type to application/json and serialize
the content.
:param anwser The response as a Python object
"""
self.content_type = "application/json"
if hasattr(answer, '__json__'):
answer = answer.__json__()
if self._output_schema is not None:
try:
jsonschema.validate(answer, self._output_schema)
except jsonschema.ValidationError as e:
log.error("Invalid output query. JSON schema error: {}".format(e.message))
raise aiohttp.web.HTTPBadRequest(text="{}".format(e))
self.body = json.dumps(answer, indent=4, sort_keys=True).encode('utf-8')
def redirect(self, url):
"""
Redirect to url
:params url: Redirection URL
"""
raise aiohttp.web.HTTPFound(url)
|
thaim/ansible | lib/ansible/modules/crypto/openssh_cert.py | Python | mit | 22,137 | 0.003072 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, David Kainz <dkainz@mgit.at> <dave.jokain@gmx.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: openssh_cert
author: "David Kainz (@lolcube)"
version_added: "2.8"
short_description: Generate OpenSSH host or user certificates.
description:
- Generate and regenerate OpenSSH host or user certificates.
requirements:
- "ssh-keygen"
options:
state:
description:
- Whether the host or user certificate should exist or not, taking action if the state is different from what is stated.
type: str
default: "present"
choices: [ 'present', 'absent' ]
type:
description:
- Whether the module should generate a host or a user certificate.
type: str
required: true
choices: ['host', 'user']
force:
description:
- Should the certificate be regenerated even if it already exists and is valid.
type: bool
default: false
path:
description:
- Path of the file containing the certificate.
type: path
required: true
signing_key:
description:
- The path to the private openssh key that is used for signing the public key in order to generate the certificate.
type: path
required: true
public_key:
description:
- The path to the public key that will be signed with the signing key in order to generate the certificate.
type: path
required: true
valid_from:
description:
- "The point in time the certificate is valid from. Time can be specified either as relative time or as absolute timestamp.
Time will always be interpreted as UTC. Valid formats are: C([+-]timespec | YYYY-MM-DD | YYYY-MM-DDTHH:MM:SS | YYYY-MM-DD HH:MM:SS | always)
where timespec can be an integer + C([w | d | h | m | s]) (e.g. C(+32w1d2h).
Note that if using relative time this module is NOT idempotent."
type: str
required: true
valid_to:
description:
- "The point in time the certificate is valid to. Time can be specified either as relative time or as absolute timestamp.
Time will always be interpreted as UTC. Valid formats are: C([+-]timespec | YYYY-MM-DD | YYYY-MM-DDTHH:MM:SS | YYYY-MM-DD HH:MM:SS | forever)
where timespec can be an integer + C([w | d | h | m | s]) (e.g. C(+32w1d2h).
Note that if using relative time this module is NOT idempotent."
type: str
required: true
valid_at:
description:
- "Check if the certificate is valid at a certain point in time. If it is not the certificate will be regenerated.
Time will always be interpreted as UTC. Mainly to be used with relative timespec for I(valid_from) and / or I(valid_to).
Note that if using relative time this module is NOT idempotent."
type: str
principals:
description:
- "Certificates may be limited to be valid for a set of principal (user/host) names.
By default, generated certificates are valid for all users or hosts."
type: list
elements: str
options:
description:
- "Specify certificate options when signing a key. The option that are valid for user certificates are:"
- "C(clear): Clear all enabled permissions. This is useful for clearing the default set of permissions so permissions may be added individually."
- "C(force-command=command): Forces the execution of command instead of any shell or
command specified by the user when the certificate is used for authentication."
- "C(no-agent-forwarding): Disable ssh-agent forwarding (permitted by default)."
- "C(no-port-forwarding): Disable port forwarding (permitted by default)."
- "C(no-pty Disable): PTY allocation (permitted by default)."
- "C(no-user-rc): Disable execution of C(~/.ssh/rc) by sshd (permitted by default)."
- "C(no-x11-forwarding): Disable X11 forwarding (permitted by default)"
- "C(permit-agent-forwarding): Allows ssh-agent forwarding."
- "C(permit-port-forwarding): Allows port forwarding."
- "C(permit-pty): Allows PTY allocation."
- "C(permit-user-rc): Allows execution of C(~/.ssh/rc) by sshd."
- "C(permit-x11-forwarding): Allows X11 forwarding."
- "C(source-address=address_list): Restrict the source addresses from which the certificate is considered valid.
The C(address_list) is a comma-separated list of one or more address/netmask pairs in CIDR format."
- "At present, no options are valid for host keys."
type: list
elements: str
identifier:
description:
- Specify the key identity when signing a public key. The identifier that is logged by the server when the certificate is used for authentication.
type: str
serial_number:
description:
- "Specify the certificate serial number.
The serial number is logged by the server when the certificate is used for authentication.
The certificate serial number may be used in a KeyRevocationList.
The serial number may be omitted for checks, but must be specified again for a new certificate.
Note: The default value set by ssh-keygen is 0."
type: int
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Generate an OpenSSH user certificate that is valid forever and for all users
- openssh_cert:
type: user
signing_key: /path/to/private_key
public_key: /path/to/public_key.pub
path: /path/to/certificate
valid_from: always
valid_to: forever
# Generate an OpenSSH host certificate that is valid for 32 weeks from now and will be regenerated
# if it is valid for less than 2 weeks from the time the module is being run
- openssh_cert:
type: host
signing_key: /path/to/private_key
public_key: /path/to/public_key.pub
path: /path/to/certificate
valid_from: +0s
valid_to: +32w
valid_at: +2w
# Generate an OpenSSH host certificate that is valid forever and only for example.com and examplehost
- openssh_cert:
type: host
signing_key: /path/to/private_key
public_key: /path/to/public_key.pub
path: /path/to/certificate
valid_from: always
valid_to: forever
principals:
- example.com
- examplehost
# Generate an OpenSSH host C | ertificate that is valid from 21.1.2001 to 21.1.2019
- openssh_cert: |
type: host
signing_key: /path/to/private_key
public_key: /path/to/public_key.pub
path: /path/to/certificate
valid_from: "2001-01-21"
valid_to: "2019-01-21"
# Generate an OpenSSH user Certificate with clear and force-command option:
- openssh_cert:
type: user
signing_key: /path/to/private_key
public_key: /path/to/public_key.pub
path: /path/to/certificate
valid_from: always
valid_to: forever
options:
- "clear"
- "force-command=/tmp/bla/foo"
'''
RETURN = '''
type:
description: type of the certificate (host or user)
returned: changed or success
type: str
sample: host
filename:
description: path to the certificate
returned: changed or success
type: str
sample: /tmp/certificate-cert.pub
info:
description: Information about the certificate. Output of C(ssh-keygen -L -f).
returned: change or success
type: list
'''
import os
import errno
import re
import tempfile
from datetime import datetime
from datetime import MINYEAR, MAXYEAR
from shutil import copy2
from shutil import rmtree
from ansible.module_utils.basic import AnsibleModule
from ansible.m |
mottosso/pyblish | pyblish/__init__.py | Python | lgpl-3.0 | 720 | 0 | """Pyblish initialisation
Attributes:
_registered_paths: Currently registered plug-in paths.
_registered_plugins: Currently registered plug-ins.
"""
from .version import version, version_info, __version__
_registered_paths = list()
_registered_callbacks = dict()
_registered_plugins = dict()
_registered_services = dict()
_registered_test = dict()
_registered_hosts = list()
_registered_targets = list()
_registered_gui = list()
__all__ = [
"version",
"version_info",
"__version__",
"_registered_paths",
"_registered_callbacks",
"_registered_plugins",
"_registered_services",
"_registered_test",
"_registered_hosts",
| "_registered_targets",
"_registe | red_gui",
]
|
bmcfee/gordon | gordon/db/gordon_db.py | Python | gpl-3.0 | 29,325 | 0.023257 | #!/usr/bin/env python
# Copyright (C) 2010 Douglas Eck
#
# This file is part of Gordon.
#
# Gordon is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gordon is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gordon. If not, see <http://www.gnu.org/licenses/>.
'''
Main Gordon namespace
Includes the database model and a set of utility functions for
validating the contents of the database, removing duplicates, etc.
'''
import numpy, os, logging
from shutil import move, copy
from sqlalchemy import func, select
from string import join, replace
from sys import platform, version
from time import sleep
from unicodedata import decomposition, normalize
from gordon.db.model import (Artist, Album, Annotation, Track, Collection, FeatureExtractor,
session, commit, album, Mbalbum_recommend,
execute_ | raw_sql)
#from gordon.db.config import DEF_GORDON_DIR, DEF_DBUSER, DEF_DBPASS, DEF_DBHOST
from gordon.db import config
from gordon.i | o import AudioFile
log = logging.getLogger('gordon')
log.info('Using gordon directory %s', config.DEF_GORDON_DIR)
#------------------
#Helper functions
#-------------------
#we have some values cached (album.trackcount, artist.trackcount)
#after we do our cascading deletes we need to update those things....
def reassign_artist(oldid,newid) :
"""Reassigns all tracks and albums from oldid to newid then
deletes old artist."""
oldartist = Artist.query.get(oldid)
newartist = Artist.query.get(newid)
if not oldartist :
raise ValueError('Bad id for oldartist')
if not newartist :
raise ValueError('Bad id for newartist')
print "Tracks"
for t in oldartist.tracks :
print t
if len(t.artists)==0 :
print 'Missing artist'
elif len(t.artists)==1 :
if t.artists[0]==oldartist :
t.artists[0]=newartist
t.artist=newartist.name
print 'Reassigned',oldartist,'to',newartist
else :
print 'Mismatched artist'
else :
print 'Multiple artists'
print "Albums"
for r in oldartist.albums :
print r
for idx in range(len(r.artists)) :
a = r.artists[idx]
if a==oldartist :
r.artists[idx]=newartist
print "Replaced",oldartist,"with",newartist
print "Updating trackcount"
newartist.update_trackcount()
session.delete(oldartist)
commit()
def delete_source(source) :
"""Deletes all tracks and albums for a given source. The source is
stored in the Track. Must be run interactively because it runs gordon_validate
which asks questions at the prompt"""
#This woudl be the "right" way to do it but it's slow because
#we update track count on every track
#for t in Track.query.filter_by(source=source) :
# print 'Deleting',t
# delete_track(t)
for t in Track.query.filter_by(source=source) :
print 'Deleting',t
session.delete(t)
commit()
gordon_validate()
def delete_album(album) :
#we handle cascading deletes ourselves for track and artist
for t in album.tracks :
artists = t.artists
session.delete(t)
for a in artists :
a.update_trackcount()
session.delete(album)
commit()
def delete_track(track) :
session.delete(track)
session.flush() #should this be a commit?
# Updating albums and artists too
for a in track.albums :
a.update_trackcount()
if a.trackcount==0 :
session.delete(a)
for a in track.artists :
a.update_trackcount()
if a.trackcount==0 :
session.delete(a)
commit()
def deaccent_unicode(s):
if not isinstance(s,unicode) :
return s
#save some time by checking if we already have an ascii-compatable string
if s==s.encode('ascii','replace') :
return s
#we don't have an ascii-compatable string so translate it
return s.translate(unaccented_map())
def delete(rec) :
session.delete(rec)
def command_with_output(cmd):
cmd = unicode(cmd,'utf-8')
#should this be a part of slashify or command_with_output?
if platform=='darwin' :
cmd = normalize('NFC',cmd)
child = os.popen(cmd.encode('utf-8'))
data = child.read()
# err = child.close()
child.close()
return data
def postgres_column_to_str(col) :
#transforms postgres column tos tring
st = join(col)
try :
st = unicode(st,'utf-8')
except :
log.warning('Could not translate string '+st+' into utf-8 unicode')
st = st.replace("'",'')
st = st.replace('"','')
st = st.replace('\n','')
return st
class unaccented_map(dict):
    """Translation table for ``unicode.translate`` that strips accents.

    Characters are resolved lazily: on first lookup the Unicode
    decomposition is consulted, falling back to ``CHAR_REPLACEMENT`` for
    letters with no useful decomposition.  Results are cached in the
    dict itself.  Depends on module globals ``decomposition`` (from
    ``unicodedata``) and ``version`` — TODO confirm both are imported
    at module level.
    """
    #Found on a newsgroup
    #Fredrik Lundh fredrik at pythonware.com
    #Fri Mar 24 20:37:03 CET 2006
    #import sys
    #import unicodedata
    # Letters whose Unicode decomposition does not yield a plain ASCII
    # base character, mapped to conventional ASCII spellings.
    CHAR_REPLACEMENT = {
        0xc6: u"AE", # LATIN CAPITAL LETTER AE
        0xd0: u"D",  # LATIN CAPITAL LETTER ETH
        0xd8: u"OE", # LATIN CAPITAL LETTER O WITH STROKE
        0xde: u"Th", # LATIN CAPITAL LETTER THORN
        0xdf: u"ss", # LATIN SMALL LETTER SHARP S
        0xe6: u"ae", # LATIN SMALL LETTER AE
        0xf0: u"d",  # LATIN SMALL LETTER ETH
        0xf8: u"oe", # LATIN SMALL LETTER O WITH STROKE
        0xfe: u"th", # LATIN SMALL LETTER THORN
        }
    def mapchar(self, key):
        """Resolve codepoint *key* to its de-accented replacement, caching it."""
        ch = self.get(key)
        if ch is not None:
            return ch
        ch = unichr(key)
        try:
            # first codepoint of the canonical decomposition, e.g. 'e' for 'é'
            ch = unichr(int(decomposition(ch).split()[0], 16))
        except (IndexError, ValueError):
            ch = self.CHAR_REPLACEMENT.get(key, ch)
        # uncomment the following line if you want to remove remaining
        # non-ascii characters
        # if ch >= u"\x80": return None
        self[key] = ch
        return ch
    # Python >= 2.5 supports dict.__missing__; older versions must hook
    # __getitem__ directly.
    if version >= "2.5":
        __missing__ = mapchar
    else:
        __getitem__ = mapchar
def _set_perms(path, perm, groupName=None) :
    """chmod *path* to *perm* and optionally chgrp it to *groupName*.

    *perm* is passed as a decimal literal (e.g. 775) and interpreted as
    octal by the ``chmod`` command line.  No-op on Windows.  Errors from
    the shell commands are silently ignored.
    """
    if os.name == 'nt': return #jorgeorpinel: may render file unreadable on Windows ... check out cacls for cmd.exe
    os.system('chmod %d "%s"' % (perm, path))
    #if os.system("chmod %d %s" % (perm, path))>0 :
    #    print "Error executing chmod %d on %s" % (perm, path)
    if groupName:
        os.system('chgrp %s "%s"' % (groupName, path))
        #if os.system("chgrp %s %s" % (groupName, path))>0 :
        #    print "Error changing group of %s to %s" % (path, groupName)
    pass
def make_subdirs_and_move(src,tgt) :
    """Create any missing parent directories of *tgt*, then move *src* there."""
    make_subdirs(tgt)
    move(src,tgt)
def make_subdirs_and_copy(src, tgt) :
    """Create missing parent dirs of *tgt*, copy *src* there, set mode 664."""
    make_subdirs(tgt)
    copy(src, tgt)
    _set_perms(tgt, 664)
def make_subdirs(tgt) :
    """Create every missing parent directory of path *tgt*.

    Walks the absolute path component by component (the final component
    is assumed to be a file and is not created), creating each missing
    directory with mode 775.  Python 2 only (``<>`` operator, print
    statement).
    """
    parts = os.path.abspath(tgt).split(os.sep)
    subdir = '' if os.name <> 'nt' else parts[0] #jorgeorpinel: part[0] is the drive letter in Windows
    for part in parts[1:len(parts)-1] :
        subdir = subdir + os.sep + part
        if not os.path.isdir(subdir) :
            print ' * creating dir', subdir
            os.mkdir(subdir)
            _set_perms(subdir, 775) #jorgeorpinel: no effect on Windows
def get_albumcover_filename(aid) :
    """Return the relative cover-image path for album *aid*."""
    directory = get_tiddirectory(aid)
    return '{0}/A{1}_cover.jpg'.format(directory, str(aid))
def get_full_albumcovername(aid, gordonDir=config.DEF_GORDON_DIR) :
    """Returns the full album cover name.

    If gordonDir is not provided, we use config.DEF_GORDON_DIR as the prefix.
    The cover lives under ``<gordonDir>/data/covers/``.
    """
    return os.path.join(gordonDir, 'data', 'covers', get_albumcover_filename(aid))
def get_full_audiofilename(tid,gordonDir=config.DE |
molly/brandeis | tests/testvalidator.py | Python | gpl-3.0 | 3,946 | 0.007856 | # -*- coding: utf-8 -*-
# Brandeis - A tool to convert plaintext court cases (from the lochner
# tool: http://gitorious.org/lochner/) to wikitext.
#
# Copyright (C) 2013 Molly White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from validator import Validator
from bexceptions import *
import os, unittest
class TestFileValidation(unittest.TestCase):
    '''Test functions that validate the input files.

    Each test writes a scratch ``buffer.txt`` file and runs the
    ``Validator`` against it.
    '''

    def setUp(self):
        pass

    def tearDown(self):
        # Guard against a test that failed before opening the buffer;
        # close() on an already-closed file object is a harmless no-op.
        buf = getattr(self, 'buffer', None)
        if buf is not None:
            buf.close()

    def testGoodTitlePlacement(self):
        '''A title at the start of the file must validate.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('\n\n\t\t\t\t<h1>Person v. Person - 100 U.S. 25 (2000)</h1>')
        v = Validator('buffer.txt')
        try:
            v.validateTitlePlacement()
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed
            self.fail('Validator did not pass a good title.')

    def testPoorlyPlacedTitle(self):
        '''A title preceded by other markup must be rejected.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('\n\n\t\t\t\t<div></div><h1>Person v. Person - 100 U.S. 25 (2000)</h1>')
        v = Validator('buffer.txt')
        with self.assertRaises(BadTitle, msg='Validator passed a title that was not at the '
                               'beginning of the file.'):
            v.validateTitlePlacement()

    def testNoTitle(self):
        '''A file with no title at all must be rejected.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('\n\n\t\t\t')
        v = Validator('buffer.txt')
        with self.assertRaises(BadTitle, msg='Validator passed a file with no title.'):
            v.validateTitlePlacement()

    def testGoodTitleParts(self):
        '''A well-formed "A v. B - vol U.S. page (year)" title must validate.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('\t\t\t\t<h1>Foo v. Bar - 100 U.S. 200 (2013)</h1><div>Extra stuff</div>')
        v = Validator('buffer.txt')
        try:
            v.validateTitleParts()
        except Exception:
            self.fail('Validator did not pass a title with good parts.')

    def testIdentifyCaseGroup(self):
        '''A title with no "v." must be flagged as a group of cases.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('\t\t\t<h1>Group of Cases - 100 U.S. 200 (2013)</h1>\t\t\t')
        v = Validator('buffer.txt')
        with self.assertRaises(GroupedCase, msg='Validator failed to identify a group of cases'
                               ' as such.'):
            v.validateTitleParts()

    def testBadTitleDate(self):
        '''A three-digit year must be rejected.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('<h1>Foo v. Bar - 100 U.S. 200 (203)</h1>')
        v = Validator('buffer.txt')
        with self.assertRaises(BadTitle, msg='Validator passed a title containing an improperly'
                               'formatted date.'):
            v.validateTitleParts()

    def testBadTitleNumber(self):
        '''A title missing the volume number must be rejected.'''
        with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
            self.buffer.write('<h1>Foo v. Bar - U.S. 200 (2013)</h1>')
        v = Validator('buffer.txt')
        with self.assertRaises(BadTitle, msg='Validator passed a title containing an improperly'
                               'formatted case number.'):
            v.validateTitleParts()
if __name__ == "__main__":
    # unittest.main() calls sys.exit(), so in the original code the
    # os.remove() cleanup below it could never run.  A try/finally
    # guarantees the scratch file written by the tests is removed.
    try:
        unittest.main()
    finally:
        try:
            os.remove('buffer.txt')
        except OSError:
            # file was never created (e.g. no test ran) -- nothing to do
            pass
cloudbase/compute-hyperv | hyperv/tests/unit/test_migrationops.py | Python | apache-2.0 | 25,443 | 0 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova import exception
from os_win import exceptions as os_win_exc
from oslo_utils import units
from hyperv.nova import constants
from hyperv.nova import migrationops
from hyperv.tests import fake_instance
from hyperv.tests.unit import test_base
class MigrationOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V MigrationOps class."""
_FAKE_DISK = 'fake_disk'
_FAKE_TIMEOUT = 10
_FAKE_RETRY_INTERVAL = 5
def setUp(self):
super(MigrationOpsTestCase, self).setUp()
self.context = 'fake-context'
self._migrationops = migrationops.MigrationOps()
self._migrationops._hostutils = mock.MagicMock()
self._migrationops._vmops = mock.MagicMock()
self._migrationops._vmutils = mock.MagicMock()
self._migrationops._pathutils = mock.Mock()
self._migrationops._vhdutils = mock.MagicMock()
self._migrationops._pathutils = mock.MagicMock()
self._migrationops._volumeops = mock.MagicMock()
self._migrationops._imagecache = mock.MagicMock()
self._migrationops._block_dev_man = mock.MagicMock()
    def _check_migrate_disk_files(self, shared_storage=False):
        """Exercise _migrate_disk_files and verify its pathutils calls.

        When *shared_storage* is True the source and destination dirs are
        the same, so the code is expected to copy into a ``_tmp`` dir and
        then move files back; otherwise it copies straight to the remote
        instance dir.
        """
        instance_path = 'fake/instance/path'
        dest_instance_path = 'remote/instance/path'
        # first get_instance_dir() call returns the local dir, second the
        # remote one
        self._migrationops._pathutils.get_instance_dir.side_effect = (
            instance_path, dest_instance_path)
        get_revert_dir = (
            self._migrationops._pathutils.get_instance_migr_revert_dir)
        check_shared_storage = (
            self._migrationops._pathutils.check_dirs_shared_storage)
        check_shared_storage.return_value = shared_storage
        self._migrationops._pathutils.exists.return_value = True
        fake_disk_files = [os.path.join(instance_path, disk_name)
                           for disk_name in
                           ['root.vhd', 'configdrive.vhd', 'configdrive.iso',
                            'eph0.vhd', 'eph1.vhdx']]
        expected_get_dir = [mock.call(mock.sentinel.instance_name),
                            mock.call(mock.sentinel.instance_name,
                                      mock.sentinel.dest_path)]
        expected_move_calls = [mock.call(instance_path,
                                         get_revert_dir.return_value)]
        self._migrationops._migrate_disk_files(
            instance_name=mock.sentinel.instance_name,
            disk_files=fake_disk_files,
            dest=mock.sentinel.dest_path)
        self._migrationops._pathutils.exists.assert_called_once_with(
            dest_instance_path)
        check_shared_storage.assert_called_once_with(
            instance_path, dest_instance_path)
        get_revert_dir.assert_called_with(mock.sentinel.instance_name,
                                          remove_dir=True, create_dir=True)
        if shared_storage:
            # shared storage: files go to a temporary sibling dir first,
            # then get moved back over the original instance dir
            fake_dest_path = '%s_tmp' % instance_path
            expected_move_calls.append(mock.call(fake_dest_path,
                                                 instance_path))
            self._migrationops._pathutils.rmtree.assert_called_once_with(
                fake_dest_path)
        else:
            fake_dest_path = dest_instance_path
        self._migrationops._pathutils.makedirs.assert_called_once_with(
            fake_dest_path)
        check_remove_dir = self._migrationops._pathutils.check_remove_dir
        check_remove_dir.assert_called_once_with(fake_dest_path)
        self._migrationops._pathutils.get_instance_dir.assert_has_calls(
            expected_get_dir)
        self._migrationops._pathutils.copy.assert_has_calls(
            mock.call(fake_disk_file, fake_dest_path)
            for fake_disk_file in fake_disk_files)
        self.assertEqual(len(fake_disk_files),
                         self._migrationops._pathutils.copy.call_count)
        self._migrationops._pathutils.move_folder_files.assert_has_calls(
            expected_move_calls)
    def test_migrate_disk_files(self):
        """Disk migration to a distinct destination directory."""
        self._check_migrate_disk_files()
    def test_migrate_disk_files_same_host(self):
        """Disk migration when source and destination share storage."""
        self._check_migrate_disk_files(shared_storage=True)
@mock.patch.object(migrationops.MigrationOps,
'_cleanup_failed_disk_migration')
def test_migrate_disk_files_exception(self, mock_ | cleanup):
instance_path = 'fake/instance/path'
fake_dest_path = '%s_tmp' % instance_path
self._migrationops._pathutils.get_instance_dir.return_value = (
instance_path)
get_revert_dir = (
self._migrationops._pathutils.get_instance_migr_revert_dir)
self._migrationops._hostutils.get_local_ips.return_value = [
mock.sentin | el.dest_path]
self._migrationops._pathutils.copy.side_effect = IOError(
"Expected exception.")
self.assertRaises(IOError, self._migrationops._migrate_disk_files,
instance_name=mock.sentinel.instance_name,
disk_files=[self._FAKE_DISK],
dest=mock.sentinel.dest_path)
mock_cleanup.assert_called_once_with(instance_path,
get_revert_dir.return_value,
fake_dest_path)
    def test_cleanup_failed_disk_migration(self):
        """Cleanup must restore files from the revert dir and delete both dirs."""
        self._migrationops._pathutils.exists.return_value = True
        self._migrationops._cleanup_failed_disk_migration(
            instance_path=mock.sentinel.instance_path,
            revert_path=mock.sentinel.revert_path,
            dest_path=mock.sentinel.dest_path)
        expected = [mock.call(mock.sentinel.dest_path),
                    mock.call(mock.sentinel.revert_path)]
        self._migrationops._pathutils.exists.assert_has_calls(expected)
        move_folder_files = self._migrationops._pathutils.move_folder_files
        move_folder_files.assert_called_once_with(
            mock.sentinel.revert_path, mock.sentinel.instance_path)
        self._migrationops._pathutils.rmtree.assert_has_calls([
            mock.call(mock.sentinel.dest_path),
            mock.call(mock.sentinel.revert_path)])
    def test_check_target_flavor(self):
        """Resizing to a flavor with a smaller root disk must roll back."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.flavor.root_gb = 1
        mock_flavor = mock.MagicMock(root_gb=0)
        self.assertRaises(exception.InstanceFaultRollback,
                          self._migrationops._check_target_flavor,
                          mock_instance, mock_flavor)
    def test_check_and_attach_config_drive(self):
        """When the instance has a config drive, it must be re-attached."""
        mock_instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['system_metadata'])
        mock_instance.config_drive = 'True'
        self._migrationops._check_and_attach_config_drive(
            mock_instance, mock.sentinel.vm_gen)
        self._migrationops._vmops.attach_config_drive.assert_called_once_with(
            mock_instance,
            self._migrationops._pathutils.lookup_configdrive_path.return_value,
            mock.sentinel.vm_gen)
def test_check_and_attach_config_drive_unknown_path(self):
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
instance.config_drive = 'True'
self._migrationops._pathutils.lookup_configdrive_path.return_value = (
None)
self.assertRaises(exception.ConfigDriveNotFound,
self._migrationops._check_and_attach_config_drive,
instance,
|
paplorinc/intellij-community | python/testData/inspections/PyTypeCheckerInspection/NamedTupleBaseClass.py | Python | apache-2.0 | 186 | 0.010753 | from collections im | port namedtuple
class C(namedtuple('C', ['foo', 'bar'])):
    """Named-tuple subclass with fields ``foo`` and ``bar``.

    (Repaired: the base-class expression was corrupted by a stray ``|``.)
    """
    pass
def f(x):
    """Return the pair ``(x.foo, x.bar)`` from any object exposing both."""
    foo_value = x.foo
    bar_value = x.bar
    return foo_value, bar_value
def g():
    """Build a sample ``C`` instance and project it through ``f``."""
    return f(C(foo=0, bar=1))
print(g())  # prints the sample instance's (foo, bar) pair: (0, 1)
korbonits/kaggle-tau-to-three-muons | ensemble_model.py | Python | mit | 5,133 | 0.003117 | import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.advanced_activations import PReLU
from keras.models import Sequential
from keras.utils import np_utils
from hep_ml.losses import BinFlatnessLossFunction
from hep_ml.gradientboosting import UGradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
trainingFilePath = 'training.csv'
testFilePath = 'test.csv'
def get_training_data():
    """Parse the training CSV at module-level ``trainingFilePath``.

    Returns a tuple ``(ids, data, y)`` where *ids* is a list of row id
    strings, *data* is a float ndarray of the kept feature columns, and
    *y* is a float ndarray of the ``signal`` labels.  Columns listed in
    ``filter_out`` are dropped.
    """
    filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal',
                  'SPDhits', 'IP', 'IPSig', 'isolationc']
    data = []
    y = []
    ids = []
    # with-statement fixes the leaked file handle in the original code
    with open(trainingFilePath) as f:
        for i, l in enumerate(f):
            if i == 0:
                labels = l.rstrip().split(',')
                label_indices = dict((l, i) for i, l in enumerate(labels))
                continue
            values = l.rstrip().split(',')
            filtered = [float(v) for v, label_name in zip(values, labels)
                        if label_name not in filter_out]
            label = values[label_indices['signal']]
            ID = values[0]
            data.append(filtered)
            y.append(float(label))
            ids.append(ID)
    return ids, np.array(data), np.array(y)
def get_test_data():
    """Parse the test CSV at module-level ``testFilePath``.

    Returns ``(ids, data)``: row id strings and a float ndarray of the
    kept feature columns (same ``filter_out`` list as the training
    parser).
    """
    filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal',
                  'SPDhits', 'IP', 'IPSig', 'isolationc']
    data = []
    ids = []
    # with-statement fixes the leaked file handle in the original code
    with open(testFilePath) as f:
        for i, l in enumerate(f):
            if i == 0:
                labels = l.rstrip().split(',')
                continue
            values = l.rstrip().split(',')
            filtered = [float(v) for v, label_name in zip(values, labels)
                        if label_name not in filter_out]
            ID = values[0]
            data.append(filtered)
            ids.append(ID)
    return ids, np.array(data)
def preprocess_data(X, scaler=None):
    """Standardize *X*; fit a fresh StandardScaler when none is supplied.

    Returns ``(X_transformed, scaler)`` so the fitted scaler can be
    reused on the test set.
    """
    if not scaler:
        scaler = StandardScaler().fit(X)
    transformed = scaler.transform(X)
    return transformed, scaler
# get training data
# (repaired: the unpack and the first seed call were corrupted by stray
# '|' characters in the original source)
ids, X, y = get_training_data()
print('Data shape:', X.shape)

# shuffle the data: re-seeding with the same value before each shuffle
# keeps the rows of X aligned with their labels in y
np.random.seed(248)
np.random.shuffle(X)
np.random.seed(248)
np.random.shuffle(y)

print('Signal ratio:', np.sum(y) / y.shape[0])

# preprocess the data (fit the scaler here; it is reused on the test set)
X, scaler = preprocess_data(X)
y = np_utils.to_categorical(y)

# split into training / evaluation data (97% / 3%)
nb_train_sample = int(len(y) * 0.97)
X_train = X[:nb_train_sample]
X_eval = X[nb_train_sample:]
y_train = y[:nb_train_sample]
y_eval = y[nb_train_sample:]

print('Train on:', X_train.shape[0])
print('Eval on:', X_eval.shape[0])
# deep pyramidal MLP, narrowing with depth
# NOTE(review): Dense(n_in, n_out) positional pairs are the old Keras 0.x
# API -- this script will not run on modern Keras without porting.
model = Sequential()
model.add(Dropout(0.15))
model.add(Dense(X_train.shape[1],200))
model.add(PReLU((200,)))
model.add(Dropout(0.13))
model.add(Dense(200, 150))
model.add(PReLU((150,)))
model.add(Dropout(0.12))
model.add(Dense(150,100))
model.add(PReLU((100,)))
model.add(Dropout(0.11))
model.add(Dense(100, 50))
model.add(PReLU((50,)))
model.add(Dropout(0.09))
model.add(Dense(50, 30))
model.add(PReLU((30,)))
model.add(Dropout(0.07))
model.add(Dense(30, 25))
model.add(PReLU((25,)))
# two-way softmax output: background vs signal
model.add(Dense(25, 2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# train model
model.fit(X_train, y_train, batch_size=128, nb_epoch=75, validation_data=(X_eval, y_eval), verbose=2, show_accuracy=True)
# generate submission
ids, X = get_test_data()
print('Data shape:', X.shape)
# reuse the scaler fitted on the training data
X, scaler = preprocess_data(X, scaler)
# column 1 of the softmax output is the signal probability
predskeras = model.predict(X, batch_size=256)[:, 1]
print("Load the training/test data using pandas")
train = pd.read_csv(trainingFilePath)
test = pd.read_csv(testFilePath)
print("Eliminate SPDhits, which makes the agreement check fail")
features = list(train.columns[1:-5])
print("Train a UGradientBoostingClassifier")
loss = BinFlatnessLossFunction(['mass'], n_bins=15, uniform_label=0)
clf = UGradientBoostingClassifier(loss=loss, n_estimators=50, subsample=0.1,
max_depth=6, min_samples_leaf=10,
learning_rate=0.1, train_features=features, random_state=11)
clf.fit(train[features + ['mass']], train['signal'])
fb_preds = clf.predict_proba(test[features])[:,1]
print("Train a Random Forest model")
rf = RandomForestClassifier(n_estimators=250, n_jobs=-1, criterion="entropy", random_state=1)
rf.fit(train[features], train["signal"])
print("Train a XGBoost model")
params = {"objective": "binary:logistic",
"eta": 0.2,
"max_depth": 10,
"min_child_weight": 1,
"silent": 1,
"colsample_bytree": 0.7,
"seed": 1}
num_trees=300
gbm = xgb.train(params, xgb.DMatrix(train[features], train["signal"]), num_trees)
print("Make predictions on the test set")
test_probs = (0.30*rf.predict_proba(test[features])[:,1]) + (0.30*gbm.predict(xgb.DMatrix(test[features])))+(0.30*predskeras) + (0.10*fb_preds)
submission = pd.DataFrame({"id": test["id"], "prediction": test_probs})
submission.to_csv("rf_xgboost_keras.csv", index=False)
|
JohannesV/Fall_Prevention_2013 | Data and scripts/plot_acc_vector.py | Python | apache-2.0 | 1,112 | 0.022482 | import matplotlib.pyplot as plt
import math
import peak_algorithm as pa
def _load_series(path):
    """Read one float per line from *path* into a list."""
    values = []
    with open(path) as fh:
        for line in fh:
            values.append(float(line.strip()))
    return values

# (repaired: the a_x.txt with-statement was corrupted by a stray '|';
# the three identical loops are now one helper, which also stops
# shadowing the builtin name 'file')
z_data = _load_series('a_z.txt')
x_data = _load_series('a_x.txt')
y_data = _load_series('a_y.txt')
# acceleration magnitude per sample: sqrt(x^2 + y^2 + z^2)
# (replaces the Python-2-only xrange index loop; assumes the three axis
# lists have equal length -- zip truncates to the shortest otherwise)
vector = [math.sqrt(x * x + y * y + z * z)
          for x, y, z in zip(x_data, y_data, z_data)]
#plt.plot(vector)
#s_vector_1 = pa.smooth_moving(vector, 1)
#plt.plot(s_vector_1)
#s_vector_2 = pa.smooth_moving(vector, 2)
#plt.plot(s_vector_2)
# smooth with window 3 and plot (repaired: the call name was corrupted
# by a stray '|' in the original source)
s_vector_3 = pa.smooth_moving(vector, 3)
plt.plot(s_vector_3)
#s_vector_4 = pa.smooth_moving(vector, 4)
#plt.plot(s_vector_4)
#s_vector_5 = pa.smooth_moving(vector, 5)
#plt.plot(s_vector_5)
#peaks = pa.find_peaks(time_series=s_vector_5)
#plt.plot(peaks)
#plt.hlines(th, 0, 4500)
# Print peaks
#for peak in peaks:
#    plt.vlines(peak, min(vector), max(vector), color='r')
plt.title('Accelerometer Vector Length')
plt.show()
ericstalbot/movespy | movespy/ratetable.py | Python | bsd-2-clause | 8,746 | 0.01635 | # Copyright (c) 2013, Resource Systems Group, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
''' This module defines two functions:
- getRateTable
- getAverageSpeedRateTable
Use these functions to generate lookup tables for emissions rates.
'''
import moves
st_ids = (11,21,31,32,41,42,43,51,52,53,54,61,62)
def getRateTable(activity,
                 options,
                 operating_mode_ids = None,
                 source_type_ids = None):
    #to do: add option to select pollutants (for faster runs)
    '''Get an emissions rate lookup table.

    Parameters

    - activity: a dictionary structured exactly like the activity parameter
      to the moves.Moves initializer, except that it does not have the
      `links` key.  NOTE: this function mutates it by setting `links`.

    - options: a dictionary structured like the options parameter to the
      moves.Moves initializer, except that only the 'breakdown' key is
      allowed, and the only value allowed in the list is 'process'

    - operating_mode_ids: a sequence of operating mode IDs to be included in
      the lookup table. Optional; if it is not included all operating modes
      will be included. Limiting the number of operating modes will make
      this function go faster

    - source_type_ids: as above, but for source type IDs.

    Return Value

    Returns a list of dictionaries.  Each dictionary has keys for opmode,
    pollutant, source, and quantity; and optionally for process.  All
    quantity values are for one vehicle hour, and are either in grams or kJ.
    '''
    if operating_mode_ids is None:
        operating_mode_ids = (0,1,11,12,13,14,15,16,21,22,23,24,25,
                              27,28,29,30,33,35,37,38,39,40,501)
    if source_type_ids is None:
        source_type_ids = st_ids
    # every requested source type needs an age distribution
    assert all([st in activity['age_distr'] for st in source_type_ids])
    num_source_types = len(source_type_ids)
    sourcetypeshare = 1.0 / len(source_type_ids)
    # build one synthetic link per operating mode; the dead
    # "activity['links'] = None" assignment from the original was removed
    linkid = 5
    link_lookup = {}
    links = {}
    for opmode in operating_mode_ids:
        link_lookup[linkid] = opmode
        link = {'grade': 0.,
                'length': 15.,
                'road_type': 5,
                'speed': 30.,
                'volume': 2. * num_source_types} #one vehicle hour for each source type
        # (repaired: this line was corrupted by stray '|' characters)
        link['source_distr'] = dict.fromkeys(source_type_ids, sourcetypeshare)
        link['opmode_distr'] = dict.fromkeys(source_type_ids, {opmode:1.0})
        links[linkid] = link
        linkid += 1
    activity['links'] = links
    options2 = {'detail':'opmode',
                'breakdown':['source']}
    if 'process' in options.get('breakdown',[]):
        options2['breakdown'].append('process')
    m = moves.Moves(activity, options2)
    moves_out = m.run()
    ratetable = list(moves_out)
    #what could go wrong with the db table?
    #in the perfect world, there would be one row for every
    #combination opmode, pollutant, source, and process.
    #in the real world, it could be that if a certain combination
    #has a quantity of zero, the quanity could be None (null),
    #or maybe there is no row at all
    #so how to make it all agree?
    #for now, just set nulls to 0, and ignore the
    #possibility of missing rows
    for row in ratetable:
        # translate the synthetic link id back to its operating mode
        linkid = row.pop('link')
        opmode = link_lookup[linkid]
        row['opmode'] = opmode
        if row['quantity'] is None:
            row['quantity'] = 0.0
    return ratetable
def getAverageSpeedRateTable(activity,
roadtype_grade_speed,
source_type_ids = None):
'''Get an emissions rate lookup table.
Parameters
- activity: a dictionary structured exactly like the activity paramter
to the moves.Moves initializer, except that it does not have the `links` key.
- roadtype_grade_speed: a sequence of tuples. Each is tuple of road type id,
grade, and speed. Road type IDs can be found in the MOVES database. Grade
is given in percent, and speed in miles per hour.
- source_type_ids: a sequence of source type IDs to be included in the lookup
table. Optional; if it is not given all source types
will be included. Limiting the number of source types will make
this function go faster
Return Value
Returns a nested dict keyed by pollutant, source type, roadtype, grade, speed
All values are for one vehicle hour, and are either in grams or kJ.
Example::
activity = {'age_distr': dict.fromkeys((11,21,31,32,41,42,43,51,52,53,54,61,62),
{5: 1.0}),
'county': 50027,
'day_type': 5,
'hour': 16,
'month': 6,
'year': 2015}
table = getAverageSpeedRateTable(activity,
[(5, 1., 20.),(4, 1., 50.)])
'''
if source_type_ids is None:
source_type_ids = st_ids
assert all([st in activity['age_distr'] for st in source_type_ids])
sourcetypeshare = 1.0 / len(source_type_ids)
activity['links'] = None
linkid = 5
link_lookup = {}
links = {}
for roadtype, grade, speed in roadtype_grade_speed:
link_lookup[linkid] = dict(roadtype = roadtype,
grade = grade,
speed = speed)
link = {'grade': float(grade),
'length': float(speed) / 2.,
'road_type': roadtype,
'speed': float(speed), #was 30
'volume': 2.} #one vehicle hour
link['source_distr'] = dict.fromkeys(source_type_ids, sourcetypeshare)
links[linkid] = link
linkid += 1
activity['links'] = links
options = {'detail':'average',
'breakdown':['source']}
m = moves.Moves(activity, options)
moves_out = m.run()
ratetable = {}
for row in moves_out:
linkid = row['link']
roadtype = link_lookup[linkid]['roadtype']
grade = link_lookup[linkid]['grade']
speed = link_lookup[li |
enthought/graystruct | setup.py | Python | bsd-3-clause | 4,358 | 0 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe and Enthought Ltd
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
import os
import subprocess
from setuptools import setup
MAJOR = 0
MINOR = 1
MICRO = 2
IS_RELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
# Return the git revision as a string
def git_version():
    """Return ``(git_revision, git_count)`` for the current checkout.

    *git_revision* is the full SHA of HEAD (or "Unknown" when git is
    unavailable); *git_count* is the commit count derived from
    ``git describe --long`` with a ``rev-list --count`` fallback.
    """
    def _minimal_ext_cmd(cmd):
        # construct minimal environment so git output is stable
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, env=env,
        ).communicate()[0]
        return out

    def _fallback():
        # used when 'git describe' yields nothing (e.g. no tags yet)
        try:
            out = _minimal_ext_cmd(['git', 'rev-list', '--count', 'HEAD'])
            git_count = out.strip().decode('ascii')
        except OSError:
            git_count = '0'
        return git_count

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        git_revision = out.strip().decode('ascii')
    except OSError:
        # (repaired: this assignment was corrupted in the original source)
        git_revision = "Unknown"

    try:
        out = _minimal_ext_cmd(['git', 'describe', '--long'])
        out = out.strip().decode('ascii')
        if len(out) == 0:
            git_count = _fallback()
        else:
            # rsplit, not split: tag names may themselves contain hyphens;
            # only the last two fields are the count and abbreviated sha
            last_tag, git_count, _ = out.rsplit('-', 2)
    except (OSError, ValueError):
        git_count = '0'

    return git_revision, git_count
def write_version_py(filename='graystruct/_version.py'):
    """Generate the package version module and return the full version string.

    The dev-build suffix is derived from git when a ``.git`` dir exists,
    from an existing ``_version.py`` in a source distribution, or falls
    back to ``0``.
    """
    template = """\
# THIS FILE IS GENERATED FROM SETUP.PY
version = '{version}'
full_version = '{full_version}'
git_revision = '{git_revision}'
is_released = {is_released}
if not is_released:
    version = full_version
"""
    # Adding the git rev number needs to be done inside
    # write_version_py(), otherwise the import of graystruct._version
    # messes up the build under Python 3.
    fullversion = VERSION
    if os.path.exists('.git'):
        git_rev, dev_num = git_version()
    elif os.path.exists('graystruct/_version.py'):
        # must be a source distribution, use existing version file
        try:
            from graystruct._version import git_revision as git_rev
            from graystruct._version import full_version as full_v
        except ImportError:
            raise ImportError("Unable to import git_revision. Try removing "
                              "graystruct/_version.py and the build "
                              "directory before building.")
        import re
        # recover the dev number embedded in the previously written version
        match = re.match(r'.*?\.dev(?P<dev_num>\d+)\+.*', full_v)
        if match is None:
            dev_num = '0'
        else:
            dev_num = match.group('dev_num')
    else:
        git_rev = 'Unknown'
        dev_num = '0'
    if not IS_RELEASED:
        fullversion += '.dev{0}'.format(dev_num)
    with open(filename, "wt") as fp:
        fp.write(template.format(version=VERSION,
                                 full_version=fullversion,
                                 git_revision=git_rev,
                                 is_released=IS_RELEASED))
    return fullversion
if __name__ == "__main__":
install_requires = [
'graypy',
'structlog',
]
__version__ = write_version_py()
with open('README.rst') as fh:
long_description = fh.read()
setup(
name='graystruct',
version=__version__,
url='https://github.com/enthought/graystruct',
author='Enthought Ltd',
author_email='info@enthought.com',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: System :: Logging',
],
description=('Integration between structlog and graylog GELF, '
'provided by graypy'),
long_description=long_description,
license='BSD',
packages=['graystruct'],
install_requires=install_requires,
extras_require={'amqp': ['amqp==1.4.6']},
)
|
helfertool/helfertool | src/registration/feeds.py | Python | agpl-3.0 | 1,186 | 0.001686 | from django.conf import settings
from django.urls import reverse
from django_ical.views import ICalFeed
from registration.utils import get_or_404
class HelperFeed(ICalFeed):
    """iCal feed listing every shift a helper registered for at one event."""

    timezone = settings.TIME_ZONE
    product_id = "-//helfertool.org//Helfertool"

    def get_object(self, request, event_url_name, helper_pk):
        # NOTE(review): caching request-specific state on ``self`` assumes
        # the framework creates a fresh feed instance per request -- confirm
        # this holds for django_ical, otherwise concurrent requests may race.
        event, job, shift, helper = get_or_404(event_url_name, helper_pk=helper_pk)
        self._event = event
        self._helper = helper
        self._registered_link = reverse('registered', args=[event.url_name, helper.pk])
        return helper

    def file_name(self, helper):
        return "{}.ics".format(helper.event.name)

    def items(self, helper):
        return helper.shifts.all()

    def item_title(self, shift):
        # (repaired: 'shift' was corrupted by a stray '|' in the original)
        return "{} - {}".format(self._event.name, shift.job.name)

    def item_description(self, shift):
        return ""

    def item_link(self, shift):
        return self._registered_link

    def item_start_datetime(self, shift):
        return shift.begin

    def item_end_datetime(self, shift):
        return shift.end

    def item_guid(self, shift):
        return "{}#{}".format(self.item_link(shift), shift.pk)
|
rmadapur/networking-brocade | networking_brocade/vdx/tests/unit/ml2/drivers/brocade/test_brocade_l3_plugin.py | Python | apache-2.0 | 2,112 | 0 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import mock
from neutron.db import api as db
from neutron.i18n import _LI
from neutron.tests.unit.extensions import test_l3
from oslo_config import cfg
from oslo_context import context as oslo_context
from oslo_log import log as logging
from oslo_utils import importutils
LOG = logging.getLogger(__name__)
L3_SVC_PLUGIN = ('neutron.services.l3_router.'
'brocade.l3_router_plugin.BrocadeSVIPlugin')
class BrocadeSVIPlugin_TestCases(test_l3.TestL3NatBasePlugin):
    """Base test case that loads the Brocade SVI L3 plugin with a mocked
    switch connection, so no real device is contacted during setup."""
    def setUp(self):
        def mocked_brocade_init(self):
            # replaces BrocadeSVIPlugin.brocade_init: reads the switch
            # credentials from config but mocks out the NETCONF driver
            LOG.debug("brocadeSVIPlugin::mocked_brocade_init()")
            self._switch = {'address': cfg.CONF.ml2_brocade.address,
                            'username': cfg.CONF.ml2_brocade.username,
                            'password': cfg.CONF.ml2_brocade.password,
                            'rbridge_id': cfg.CONF.ml2_brocade.rbridge_id
                            }
            LOG.info(_LI("rbridge id %s"), self._switch['rbridge_id'])
            self._driver = mock.MagicMock()
        self.l3_plugin = importutils.import_object(L3_SVC_PLUGIN)
        # patch only for the duration of the parent setUp, which triggers
        # the plugin's brocade_init
        with mock.patch.object(self.l3_plugin,
                               'brocade_init', new=mocked_brocade_init):
            super(BrocadeSVIPlugin_TestCases, self).setUp()
        self.context = oslo_context.get_admin_context()
        self.context.session = db.get_session()
class TestBrocadeSVINatBase(test_l3.L3NatExtensionTestCase,
                            BrocadeSVIPlugin_TestCases):
    """Run the upstream L3 NAT extension tests against the Brocade SVI
    plugin.  (Repaired: the first base-class name was corrupted by a
    stray '|' in the original source.)"""
    pass
|
rgtjf/Semantic-Texual-Similarity-Toolkits | stst/modules/features.py | Python | mit | 5,022 | 0.001799 | # coding: utf8
from __future__ import print_function
import json
import os
import pyprind
from stst import utils
from stst import config
class Feature(object):
    def __init__(self,
                 load=True,
                 dimension=None,
                 **kwargs):
        """Initialize a feature extractor.

        :param load: when True, reuse a previously written feature file
            instead of re-extracting.
        :param dimension: optional feature dimensionality.
        :param kwargs: extra options kept for subclasses.
        """
        # load: reuse cached feature file if present
        self.load = load
        # feature files are named after the concrete subclass
        self.feature_name = self.__class__.__name__
        # self.feature_file = config.FEARURE_DIR + '/' + self.feature_name + '.txt'
        self.dimension = dimension
        self.kwargs = kwargs
def extract_dataset_instances(self, train_instances, train_file):
"""
../resources/data/sts-en-en/.. -> ../features/sts-en-en/..
"""
# Need This to help global function
self.train_file = train_file
# Re-define self.feature_file to prevent self.feature_file change two times (i.e., train and test)
train_file_name = os.path.basename(train_file)
train_file_name = os.path.splitext(train_file_name)[0]
self.feature_file = config.FEATURE_DIR + '/' + train_file_name + '/' + self.feature_name + ' | .txt'
return self.load_instances(train_instances)
def load_instances(self, train_instances):
""" extract features from train_set """
if self.load is False or not os.path.isfile(self.feature_file): |
print(self.feature_file)
''' extract features to features '''
features, infos = self.extract_instances(train_instances)
''' write features to file '''
Feature.write_feature_to_file(self.feature_file, features, infos)
''' load features from file '''
features, n_dim, n_instance = Feature.load_feature_from_file(self.feature_file)
return features, n_dim, n_instance
def extract_instances(self, train_instances):
""" extract features to features """
# first extract information from train_instance
# for only be used to extract data_set information and can reuse the pyprind
self.extract_information(train_instances)
features = []
infos = []
process_bar = pyprind.ProgPercent(len(train_instances))
for train_instance in train_instances:
process_bar.update()
feature, info = self.extract(train_instance) ##可变参数进行传递!
features.append(feature)
infos.append(info)
return features, infos
def extract_information(self, train_instances):
""" extract information from train_instances """
pass
def extract(self, train_instance):
""" extract feature from a instance """
pass
@staticmethod
def write_feature_to_file(feature_file, features, infos):
"""
write features string to file
"""
dim = len(features[0])
f_feature = utils.create_write_file(feature_file)
''' write features infomation to file '''
print(len(features), dim, file=f_feature)
''' write features string to file '''
for feature, info in zip(features, infos):
''' type(feature) is list '''
feature_string = Feature._feat_list_to_string(feature)
info_string = Feature._info_list_to_string(info)
print(feature_string + '\t#\t' + info_string, file=f_feature)
f_feature.close()
@staticmethod
def load_feature_from_file(feature_file):
"""
load features from file
"""
f_feature = utils.create_read_file(feature_file)
feature_information = f_feature.readline()
n_instance, n_dim = feature_information.strip().split()
n_instance, n_dim = int(n_instance), int(n_dim)
features = []
for feature in f_feature:
feature_string, instance_string = feature.split("\t#\t")
features.append(feature_string)
return features, n_dim, n_instance
@staticmethod
def _feat_list_to_string(feat_list):
"""
[0, 1, 0, 1] => 2:1 4:1
"""
feat_dict = {}
for index, item in enumerate(feat_list):
if item != 0:
feat_dict[index + 1] = item
transformed_list = [str(key) + ":" + str(feat_dict[key]) for key in sorted(feat_dict.keys())]
feat_string = " ".join(transformed_list)
return feat_string
@staticmethod
def _feat_string_to_list(feat_string, ndim):
feat_list = [0] * ndim
for feat in feat_string.split():
index, value = feat.split(':')
index = int(index) - 1
value = eval(value)
feat_list[index] = value
return feat_list
@staticmethod
def _info_list_to_string(info_list):
info_string = json.dumps(info_list)
return info_string
class CustomFeature(Feature):
def extract(self, train_instance):
"""
Extract features and info from one train instance
"""
features = 'features'
infos = 'infos'
return features, infos |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/media/audio/PRESUBMIT.py | Python | gpl-3.0 | 1,539 | 0.008447 | # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for media/audi | o/.
See http://dev.chromium.org/developers/how-tos/depottools/presub | mit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook modifies the CL description in order to run extra GPU
tests (in particular, the WebGL 2.0 conformance tests) in addition
to the regular CQ try bots. This test suite is too large to run
against all Chromium commits, but should be run against changes
likely to affect these tests.
"""
rietveld_obj = cl.RpcServer()
issue = cl.issue
description = rietveld_obj.get_description(issue)
if re.search(r'^CQ_INCLUDE_TRYBOTS=.*', description, re.M | re.I):
return []
bots = [
'master.tryserver.chromium.linux:linux_optional_gpu_tests_rel',
'master.tryserver.chromium.mac:mac_optional_gpu_tests_rel',
'master.tryserver.chromium.win:win_optional_gpu_tests_rel',
]
results = []
new_description = description
new_description += '\nCQ_INCLUDE_TRYBOTS=%s' % ';'.join(bots)
results.append(output_api.PresubmitNotifyResult(
'Automatically added optional GPU tests to run on CQ.'))
if new_description != description:
rietveld_obj.update_description(issue, new_description)
return results
|
jdahlin/pygobject | tests/test_signal.py | Python | lgpl-2.1 | 12,201 | 0.001229 | # -*- Mode: Python -*-
import gc
import unittest
import sys
from gi.repository import GObject
try:
import testhelper
except ImportError:
testhelper = None
from compathelper import _long
class C(GObject.GObject):
__gsignals__ = {'my_signal': (GObject.SignalFlags.RUN_FIRST, None,
(GObject.TYPE_INT,))}
def do_my_signal(self, arg):
self.arg = arg
class D(C):
def do_my_signal(self, arg2):
self.arg2 = arg2
C.do_my_signal(self, arg2)
class TestSignalCreation(unittest.TestCase):
# Bug 540376.
def test_illegals(self):
self.assertRaises(TypeError, lambda: GObject.signal_new('test',
None,
0,
None,
(GObject.TYPE_LONG,)))
class TestChaining(unittest.TestCase):
def setUp(self):
self.inst = C()
self.inst.connect("my_signal", self.my_signal_handler_cb, 1, 2, 3)
def my_signal_handler_cb(self, *args):
assert len(args) == 5
assert isinstance(args[0], C)
assert args[0] == self.inst
assert isinstance(args[1], int)
assert args[1] == 42
assert args[2:] == (1, 2, 3)
def testChaining(self):
self.inst.emit("my_signal", 42)
assert self.inst.arg == 42
def testChaining2(self):
inst2 = D()
inst2.emit("my_signal", 44)
assert inst2.arg == 44
assert inst2.arg2 == 44
# This is for bug 153718
class TestGSignalsError(unittest.TestCase):
def testInvalidType(self, *args):
def foo():
class Foo(GObject.GObject):
__gsignals__ = None
self.assertRaises(TypeError, foo)
gc.collect()
def testInvalidName(self, *args):
def foo():
class Foo(GObject.GObject):
__gsignals__ = {'not-exists': 'override'}
self.assertRaises(TypeError, foo)
gc.collect()
class TestGPropertyError(unittest.TestCase):
def testInvalidType(self, *args):
def foo():
class Foo(GObject.GObject):
__gproperties__ = None
self.assertRaises(TypeError, foo)
gc.collect()
def testInvalidName(self, *args):
def foo():
class Foo(GObject.GObject):
__gproperties__ = {None: None}
self.assertRaises(TypeError, foo)
gc.collect()
class TestList(unittest.TestCase):
def testListObject(self):
self.assertEqual(GObject.signal_list_names(C), ('my-signal',))
def my_accumulator(ihint, return_accu, handler_return, user_data):
"""An accumulator that stops emission when the sum of handler
returned values reaches 3"""
assert user_data == "accum data"
if return_accu >= 3:
return False, return_accu
return True, return_accu + handler_return
class Foo(GObject.GObject):
__gsignals__ = {
'my-acc-signal': (GObject.SignalFlags.RUN_LAST, GObject.TYPE_INT,
(), my_accumulator, "accum data"),
'my-other-acc-signal': (GObject.SignalFlags.RUN_LAST, GObject.TYPE_BOOLEAN,
(), GObject.signal_accumulator_true_handled)
}
class TestAccumulator(unittest.TestCase):
def testAccumulator(self):
inst = Foo()
inst.connect("my-acc-signal", lambda obj: 1)
inst.connect("my-acc-signal", lambda obj: 2)
## the value returned in the following handler will not be
## considered, because at this point the accumulator already
## reached its limit.
inst.connect("my-acc-signal", lambda obj: 3)
retval = inst.emit("my-acc-signal")
self.assertEqual(retval, 3)
def testAccumulatorTrueHandled(self):
inst = Foo()
inst.connect("my-other-acc-signal", self._true_handler1)
inst.connect("my-other-acc-signal", self._true_handler2)
## the following handler will not be called because handler2
## returns True, so it should stop the emission.
inst.connect("my-other-acc-signal", self._true_handler3)
self.__true_val = None
inst.emit("my-other-acc-signal")
self.assertEqual(self.__true_val, 2)
def _true_handler1(self, obj):
self.__true_val = 1
return False
def _true_handler2(self, obj):
self.__true_val = 2
return True
def _true_handler3(self, obj):
self.__true_val = 3
return False
class E(GObject.GObject):
__gsignals__ = {'signal': (GObject.SignalFlags.RUN_FIRST, None,
())}
def __init__(self):
GObject.GObject.__init__(self)
self.status = 0
def do_signal(self):
assert self.status == 0
self.status = 1
class F(GObject.GObject):
__gsignals__ = {'signal': (GObject.SignalFlags.RUN_FIRST, None,
())}
def __init__(self):
GObject.GObject.__init__(self)
self.status = 0
def do_signal(self):
self.status += 1
class TestEmissionHook(unittest.TestCase):
def testAdd(self):
self.hook = True
e = E()
e.connect('signal', self._callback)
GObject.add_emission_hook(E, "signal", self._emission_hook)
e.emit('signal')
self.assertEqual(e.status, 3)
def testRemove(self):
self.hook = False
e = E()
e.connect('signal', self._callback)
hook_id = GObject.add_emission_hook(E, "signal", self._emission_hook)
GObject.remove_emission_hook(E, "signal", hook_id)
e.emit('signal')
self.assertEqual(e.status, 3)
def _emission_hook(self, e):
self.assertEqual(e.status, 1)
| e.status = 2
def _callback(self, e):
if self.hook:
self.assertEqual(e.status, 2)
else:
self.assertEqual(e.status, 1)
e.status = 3
def testCallbackReturnFalse(self):
self.hook = False
obj = F()
def _emission_hook(obj):
obj.status += 1
return False
GObject.add_emission_hook(obj, "signal", _emission_hook)
obj.emit('signal')
obj.emit('signal')
| self.assertEqual(obj.status, 3)
def testCallbackReturnTrue(self):
self.hook = False
obj = F()
def _emission_hook(obj):
obj.status += 1
return True
hook_id = GObject.add_emission_hook(obj, "signal", _emission_hook)
obj.emit('signal')
obj.emit('signal')
GObject.remove_emission_hook(obj, "signal", hook_id)
self.assertEqual(obj.status, 4)
def testCallbackReturnTrueButRemove(self):
self.hook = False
obj = F()
def _emission_hook(obj):
obj.status += 1
return True
hook_id = GObject.add_emission_hook(obj, "signal", _emission_hook)
obj.emit('signal')
GObject.remove_emission_hook(obj, "signal", hook_id)
obj.emit('signal')
self.assertEqual(obj.status, 3)
class TestClosures(unittest.TestCase):
def setUp(self):
self.count = 0
def _callback(self, e):
self.count += 1
def testDisconnect(self):
e = E()
e.connect('signal', self._callback)
e.disconnect_by_func(self._callback)
e.emit('signal')
self.assertEqual(self.count, 0)
def testHandlerBlock(self):
e = E()
e.connect('signal', self._callback)
e.handler_block_by_func(self._callback)
e.emit('signal')
self.assertEqual(self.count, 0)
def testHandlerUnBlock(self):
e = E()
signal_id = e.connect('signal', self._callback)
e.handler_block(signal_id)
e.handler_unblock_by_func(self._callback)
e.emit('signal')
self.assertEqual(self.count, 1)
def testHandlerBlockMethod(self):
# Filed as #375589
class A:
def __init__(self):
self.a = 0
|
waipu/bakawipe | lib/sup/special.py | Python | gpl-3.0 | 2,073 | 0.00193 | from io import BytesIO
try:
import simplejson as json
except ImportError:
import json
try:
import msgpack
has_msgpack = True
except ImportError:
has_msgpack = False
class SpecialBase(object):
def __init__(self, *args, **kvargs):
self.encoding = 'utf-8'
def json(self, *args, **kvargs) -> object:
"Try to parse json from self and return it"
return json.loads(self, *args, **kvargs)
class SpecialBytes(SpecialBase, bytes):
def decode(self, encoding=None, errors='strict') -> str:
"bytes.decode with per-object default encoding"
return bytes.decode(
self, encoding if encoding else self.encoding,
errors)
def s_decode(self, encoding=None, errors='strict') -> SpecialBase:
"Same as decode, but returns SpecialString"
e = encoding if encoding else self.encoding
s = SpecialString(bytes.decode(self, e, errors))
s.encoding = e
return s
def msgpack(self, *args, **kvargs) -> object:
if not has_msgpack:
raise NotImplementedError('No msgpack library here')
return msgpack.unpackb(self)
def json(self, *args, **kvargs) -> object:
return self.s_decode().json(*args, **kvargs)
class SpecialString(SpecialBase, str):
def encode(self, encoding=None, errors='strict') -> bytes:
"str.encode with per-object default encoding"
return str.encode(
self, encoding if encoding else self.encoding,
errors)
def s_encode(self, encoding=None, errors='strict') -> SpecialBase:
"Same as encode, but returns SpecialBytes"
e = | encoding if encoding else self.encoding
b = SpecialBytes(str.encode(self, e, errors))
b.encoding = e
return b
class SpecialBytesIO(BytesIO):
def __init__(self, *args, **kvargs):
self.encoding = 'utf-8'
super().__init__(*args, **kvargs)
def s_getvalue(self) -> SpecialBytes: |
s = SpecialBytes(self.getbuffer())
s.encoding = self.encoding
return s
|
vuolter/pyload | src/pyload/plugins/accounts/RapidfileshareNet.py | Python | agpl-3.0 | 532 | 0.00188 | # -*- coding: utf-8 -*-
from ..base.xfs_account import XFSAccount
clas | s RapidfileshareNet(XFSAccount):
__name__ = "RapidfileshareNet"
__type__ = "account"
__version__ = "0.11"
__statu | s__ = "testing"
__description__ = """Rapidfileshare.net account plugin"""
__license__ = "GPLv3"
__authors__ = [("guidobelix", "guidobelix@hotmail.it")]
PLUGIN_DOMAIN = "rapidfileshare.net"
TRAFFIC_LEFT_PATTERN = r'>Traffic available today:</TD><TD><label for="name">\s*(?P<S>[\d.,]+)\s*(?:(?P<U>[\w^_]+))?'
|
bjodah/chempy | chempy/equilibria.py | Python | bsd-2-clause | 16,931 | 0.000886 | # -*- coding: utf-8 -*-
"""
Module collecting classes and functions for dealing with (multiphase) chemical
equilibria.
.. Note::
This module is provisional at the moment, i.e. the API is not stable and may
break without a deprecation cycle.
"""
import warnings
import numpy as np
from .chemistry import equilibrium_quotient, Equilibrium, Species
from .reactionsystem import ReactionSystem
from ._util import get_backend
from .util.pyutil import deprecated
from ._eqsys import EqCalcResult, NumSysLin, NumSysLog, NumSysSquare as _NumSysSquare
NumSysSquare = deprecated()(_NumSysSquare)
class EqSystem(ReactionSystem):
_BaseReaction = Equilibrium
_BaseSubstance = Species
def html(self, *args, **kwargs):
k = "color_categories"
kwargs[k] = kwargs.get(k, False)
return super(EqSystem, self).html(*args, **kwargs)
def eq_constants(self, non_precip_rids=(), eq_params=None, small=0):
if eq_params is None:
eq_params = [eq.param for eq in self.rxns]
return [
small if idx in non_precip_rids else eq for idx, eq in enumerate(eq_params)
]
def equilibrium_quotients(self, concs):
stoichs = self.stoichs()
return [equilibrium_quotient(concs, stoichs[ri, :]) for ri in range(self.nr)]
def stoichs_constants(
self, eq_params=None, rref=False, Matrix=None, backend=None, non_precip_rids=()
):
if eq_params is None:
eq_params = self.eq_constants()
if rref:
from pyneqsys.symbolic import linear_rref
be = get_backend(backend)
rA, rb = linear_rref(
self.stoichs(non_precip_rids), list(map(be.log, eq_params)), Matrix
)
return rA.tolist(), list(map(be.exp, rb))
else:
return (self.stoichs(non_precip_rids), eq_params)
def composition_conservation(self, concs, init_concs):
composition_vecs, comp_keys = self.composition_balance_vectors()
A = np.array(composition_vecs)
return (
comp_keys,
np.dot(A, self.as_per_substance_array(concs).T),
np.dot(A, self.as_per_substance_array(init_concs).T),
)
def other_phase_species_idxs(self, phase_idx=0):
return [
idx
for idx, s in enumerate(self.substances.values())
if s.phase_idx != phase_idx
]
@property
@deprecated(
last_supported_version="0.3.1",
will_be_missing_in="0.8.0",
use_instead=other_phase_species_idxs,
)
def precipitate_substance_idxs(self):
return [idx for idx, s in enumerate(self.substances.values()) if s.precipitate]
def phase_transfer_reaction_idxs(self, phase_idx=0):
return [
idx
for idx, rxn in enumerate(self.rxns)
if rxn.has_precipitates(self.substances)
]
@property
@deprecated(
last_supported_version="0.3.1",
will_be_missing_in="0.8.0",
use_instead=phase_transfer_reaction_idxs,
)
def precipitate_rxn_idxs(self):
return [
idx
for idx, rxn in enumerate(self.rxns)
if rxn.has_precipitates(self.substances)
]
def dissolved(self, concs):
"""Return dissolved concentrations"""
n | ew_concs = concs.copy()
for r in self.rxns:
if r.has_precipitates(self.substances):
net_stoich = np.asarray(r.net_stoich(self.substances))
s_net, s_sto | ich, s_idx = r.precipitate_stoich(self.substances)
new_concs -= new_concs[s_idx] / s_stoich * net_stoich
return new_concs
def _fw_cond_factory(self, ri, rtol=1e-14):
rxn = self.rxns[ri]
def fw_cond(x, p):
precip_stoich_coeff, precip_idx = rxn.precipitate_stoich(self.substances)[
1:3
]
q = rxn.Q(self.substances, self.dissolved(x))
k = rxn.equilibrium_constant()
if precip_stoich_coeff > 0:
return q * (1 + rtol) < k
elif precip_stoich_coeff < 0:
return q > k * (1 + rtol)
else:
raise NotImplementedError
return fw_cond
def _bw_cond_factory(self, ri, small):
rxn = self.rxns[ri]
def bw_cond(x, p):
precipitate_idx = rxn.precipitate_stoich(self.substances)[2]
if x[precipitate_idx] < small:
return False
else:
return True
return bw_cond
def _SymbolicSys_from_NumSys(
self, NS, conds, rref_equil, rref_preserv, new_eq_params=True
):
from pyneqsys.symbolic import SymbolicSys
import sympy as sp
ns = NS(
self,
backend=sp,
rref_equil=rref_equil,
rref_preserv=rref_preserv,
precipitates=conds,
new_eq_params=new_eq_params,
)
symb_kw = {}
if ns.pre_processor is not None:
symb_kw["pre_processors"] = [ns.pre_processor]
if ns.post_processor is not None:
symb_kw["post_processors"] = [ns.post_processor]
if ns.internal_x0_cb is not None:
symb_kw["internal_x0_cb"] = ns.internal_x0_cb
return SymbolicSys.from_callback(
ns.f,
self.ns,
nparams=self.ns + (self.nr if new_eq_params else 0),
**symb_kw
)
def get_neqsys_conditional_chained(
self, rref_equil=False, rref_preserv=False, NumSys=NumSysLin, **kwargs
):
from pyneqsys import ConditionalNeqSys, ChainedNeqSys
def factory(conds):
return ChainedNeqSys(
[
self._SymbolicSys_from_NumSys(
NS, conds, rref_equil, rref_preserv, **kwargs
)
for NS in NumSys
]
)
cond_cbs = [
(self._fw_cond_factory(ri), self._bw_cond_factory(ri, NumSys[0].small))
for ri in self.phase_transfer_reaction_idxs()
]
return ConditionalNeqSys(cond_cbs, factory)
def get_neqsys_chained_conditional(
self, rref_equil=False, rref_preserv=False, NumSys=NumSysLin, **kwargs
):
from pyneqsys import ConditionalNeqSys, ChainedNeqSys
def mk_factory(NS):
def factory(conds):
return self._SymbolicSys_from_NumSys(
NS, conds, rref_equil, rref_preserv, **kwargs
)
return factory
return ChainedNeqSys(
[
ConditionalNeqSys(
[
(self._fw_cond_factory(ri), self._bw_cond_factory(ri, NS.small))
for ri in self.phase_transfer_reaction_idxs()
],
mk_factory(NS),
)
for NS in NumSys
]
)
def get_neqsys_static_conditions(
self,
rref_equil=False,
rref_preserv=False,
NumSys=(NumSysLin,),
precipitates=None,
**kwargs
):
if precipitates is None:
precipitates = (False,) * len(self.phase_transfer_reaction_idxs())
from pyneqsys import ChainedNeqSys
return ChainedNeqSys(
[
self._SymbolicSys_from_NumSys(
NS, precipitates, rref_equil, rref_preserv, **kwargs
)
for NS in NumSys
]
)
def get_neqsys(self, neqsys_type, NumSys=NumSysLin, **kwargs):
new_kw = {"rref_equil": False, "rref_preserv": False}
if neqsys_type == "static_conditions":
new_kw["precipitates"] = None
for k in new_kw:
if k in kwargs:
new_kw[k] = kwargs.pop(k)
try:
NumSys[0]
except TypeError:
new_kw["NumSys"] = (NumSys,)
else:
new_kw["NumSys"] = NumSys
return getattr(self, "get_neqsys_" + neqsys_type)(**new_kw)
def non_precip_rids(self, precipitates):
|
frucker/CryoTUM | sub_vi-s/Equipment/PPMS/PPMSLabView/Socket_Server/qdinstrument.py | Python | gpl-3.0 | 2,308 | 0.001733 | import win32com.client
import pythoncom
class QDInstrument:
def __init__(self, instrument_type):
instrument_type = instrument_type.upper()
if instrument_type == 'DYNACOOL':
self._class_id = 'QD.MULTIVU.DYNACOOL.1'
elif instrument_type == 'PPMS':
self._class_id = 'QD.MULTIVU.PPMS.1'
elif instrument_type == 'VERSALAB':
self._class_id = 'QD.MULTIVU.VERSALAB.1'
elif instrument_type == 'MPMS3':
self._class_id = 'QD.MULTIVU.MPMS3.1'
else:
raise Exception('Unrecognized instrument type: {0}.'.format(instrument_type))
self._mvu = win32com.client.Dispatch(self._class_id)
def set_temperature(self, temperature, rate, mode):
"""Sets temperature and returns MultiVu error code"""
err = self._mvu.SetTemperature(temperature, rate, mode)
return err
def get_temperature(self):
"""Gets and returns temperature info as (MultiVu error, temperature, status)"""
arg0 = win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_R8, 0.0)
arg1 = win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_I4, 0)
err = self._mvu.GetTemperature(arg0, arg1)
# win32com reverses the arguments, so:
return err, arg1.value, arg0.value
def set_field(self, field, rate, approach, mode):
"""Sets field and returns MultiVu error code"""
err = self._mvu.SetField(field, rate, approach, mode)
return err
def get_field(self):
"""Gets and returns field info as (MultiVu error, field, status)"""
arg0 = win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_R8, 0.0)
arg | 1 = win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_I4, 0)
err = self._mvu.GetField(arg0, arg1)
# win32com reverses the arguments, so:
return err, arg1.value, arg0.value
def set_chamber(self, code):
"""Sets chamber and returns MultiVu error code"""
err = self._mvu.SetChamber(code)
return err
def get_chamber(self):
"""Gets chamber st | atus and returns (MultiVu error, status)"""
arg0 = win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_I4, 0)
err = self._mvu.GetChamber(arg0)
return err, arg0.value
|
ScienceMob/vmicroc | data/views.py | Python | gpl-2.0 | 1,284 | 0.000779 | from django import forms
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from data.models import ImportTask
class ImportTaskForm(forms.ModelForm):
class Meta:
model = ImportTask
fields = ('data_file',)
@staff_member_required
def upload(request):
if request.method == 'POST':
import_task_form = ImportTaskForm(data=request.POST, files=request.FILES)
if i | mport_task_form.is_valid():
import_task = import_task_form.save()
import_task.enqueue()
| messages.info(request, 'Data file queued for processing')
return HttpResponseRedirect(reverse('data_upload'))
else:
import_task_form = ImportTaskForm()
return render(request, 'data/upload.html', {
'import_task_form': import_task_form,
})
@staff_member_required
def enqueue(request, import_task_id):
import_task = ImportTask.objects.get(pk=import_task_id)
import_task.enqueue()
messages.info(request, 'Data file queued for processing')
return HttpResponseRedirect(reverse('admin:data_importtask_changelist'))
|
paulboal/pexpect-curses | demo/demo3.py | Python | mit | 1,476 | 0.023035 | #!/usr/bin/python
import pexpect
import sys
import logging
import vt102
import os
import time
def termcheck(child, timeout=0):
time.sleep(0.05)
try:
logging.debug("Waiting for EOF or timeout=%d"%timeout)
child.expect(pexpect.EOF, timeout=timeout)
except pexpect.exceptions.TIMEOUT:
logging.debug("Hit timeout and have %d characters in child.before"%len(child.before))
return child.before
def termkey(child, stream, screen, key, timeout=0):
logging.debug("Sending '%s' to child"%key)
child.send(key)
s = termcheck(child)
logging.debug("Sending child.before text to vt102 stream")
stream.process(child.before)
logging.debug("vt102 screen dump")
logging.debug(screen)
# START LOGGING
logging.basicConfig(filename='menu_demo.log',level=logging.DEBUG)
# SETUP VT102 EMULATOR
#rows, columns = os.popen('stty size', 'r').read().split()
rows, columns = (50,120)
stream=vt102.stream()
screen=vt102.screen((int(rows), int(columns)))
screen.attach(stream)
logging.debug("Setup vt102 with %d %d"%(int(rows),int(columns)))
logging.debug("Starting demo2.py child process...")
child = pexpect.spawn('./demo2.py', maxread=65536, dimensions=(int(rows),int(columns))) |
s = termcheck(child)
logging.debug("Sending child.before (len=%d) text to vt102 stream"%len(child.before))
stream.process(child.before)
logging.debug("vt102 screen dump")
logging.debug(screen)
termkey(child, stre | am, screen, "a")
termkey(child, stream, screen, "1")
logging.debug("Quiting...")
|
isomer/faucet | faucet/valve_of.py | Python | apache-2.0 | 11,359 | 0.000264 | """Utility functions to parse/create OpenFlow messages."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASISo
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import ipaddress
from ryu.lib import ofctl_v1_3 as ofctl
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
VLAN_GROUP_OFFSET = 4096
ROUTE_GROUP_OFFSET = VLAN_GROUP_OFFSET * 2
OFP_VERSIONS = [ofp.OFP_VERSION]
def ignore_port(port_num):
"""Return True if FAUCET should ignore this port.
Args:
port_num (int): switch port.
Returns:
bool: True if FAUCET should ignore this port.
"""
# 0xF0000000 and up are not physical ports.
return port_num > 0xF0000000
def is_flowmod(ofmsg):
"""Return True if flow message is a FlowMod.
Args:
ofmsg: ryu.ofproto.ofproto_v1_3_parser message.
Returns:
bool: True if is a FlowMod
"""
return isinstance(ofmsg, parser.OFPFlowMod)
def is_groupmod(ofmsg):
"""Return True if OF message is a GroupMod.
Args:
ofmsg: ryu.ofproto.ofproto_v1_3_parser message.
Returns:
bool: True if is a GroupMod
"""
return isinstance(ofmsg, parser.OFPGroupMod)
def is_flowdel(ofmsg):
"""Return True if flow message is a FlowMod and a delete.
Args:
ofmsg: ryu.ofproto.ofproto_v1_3_parser message.
Returns:
bool: True if is a FlowMod delete/strict.
"""
if (is_flowmod(ofmsg) and
(ofmsg.command == ofp.OFPFC_DELETE or
ofmsg.command == ofp.OFPFC_DELETE_STRICT)):
return True
return False
def is_groupdel(ofmsg):
"""Return True if OF message is a GroupMod and command is delete.
Args:
ofmsg: ryu.ofproto.ofproto_v1_3_parser message.
Returns:
bool: True if is a GroupMod delete
"""
if (is_groupmod(ofmsg) and
(ofmsg.command == ofp.OFPGC_DELETE)):
return True
return False
def is_groupadd(ofmsg):
"""Return True if OF message is a GroupMod and command is add.
Args:
ofmsg: ryu.ofproto.ofproto_v1_3_parser message.
Returns:
bool: True if is a GroupMod add
"""
if (is_groupmod(ofmsg) and
(ofmsg.command == ofp.OFPGC_ADD)):
return True
return False
def apply_actions(actions):
"""Return instruction that applies action list.
Args:
actions (list): list of OpenFlow actions.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPInstruction: instruction of actions.
"""
return parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)
def goto_table(table_id):
"""Return instruction to goto table.
Args:
table_id (int): table to goto.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPInstruction: goto instruction.
"""
return parser.OFPInstructionGotoTable(table_id)
def set_eth_src(eth_src):
"""Return action to set source Ethernet MAC address.
Args:
eth_src (str): source Ethernet MAC address.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionSetField: set field action.
"""
return parser.OFPActionSetField(eth_src=eth_src)
def set_eth_dst(eth_dst):
"""Return action to set destination Ethernet MAC address.
Args:
eth_src (str): destination Ethernet MAC address.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionSetField: set field action.
"""
return parser.OFPActionSetField(eth_dst=eth_dst)
def vid_present(vid):
"""Return VLAN VID with VID_PRESENT flag set.
Args:
vid (int): VLAN VID
Returns:
int: VLAN VID with VID_PRESENT.
"""
return vid | ofp.OFPVID_PRESENT
def set_vlan_vid(vlan_vid):
"""Set VLAN VID with VID_PRESENT flag set.
Args:
vid (int): VLAN VID
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionSetField: set VID with VID_PRESENT.
"""
return parser.OFPActionSetField(vlan_vid=vid_present(vlan_vid))
def push_vlan_act(vlan_vid, eth_type=ether.ETH_TYPE_8021Q):
"""Return OpenFlow action list to push Ethernet 802.1Q header with VLAN VID.
Args:
vid (int): VLAN VID
Returns:
list: actions to push 802.1Q header with VLAN VID set.
"""
return [
parser.OFPActionPushVlan(eth_type),
set_vlan_vid(vlan_vid),
]
def dec_ip_ttl():
"""Return OpenFlow action to decrement IP TTL.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionDecNwTtl: decrement IP TTL.
"""
return parser.OFPActionDecNwTtl()
def pop_vlan():
"""Return OpenFlow action to pop outermost Ethernet 802.1Q VLAN header.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionPopVlan: Pop VLAN.
"""
return parser.OFPActionPopVlan()
def output_port(port_num, max_len=0):
"""Return OpenFlow action to output to a port.
Args:
port_num (int): port to output to.
max_len (int): maximum length of packet to output (default no maximum).
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionOutput: output to port action.
"""
return parser.OFPActionOutput(port_num, max_len=max_len)
def output_in_port():
"""Return OpenFlow action to output out input por | t.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionOutput.
"""
return output_port(ofp.OFPP_IN_PORT)
def output_controller():
"""Return OpenFlow action | to packet in to the controller (max 128 bytes).
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionOutput: packet in action.
"""
return output_port(ofp.OFPP_CONTROLLER, 128)
def packetout(port_num, data):
"""Return OpenFlow action to packet out to dataplane from controller.
Args:
port_num (int): port to output to.
data (str): raw packet to output.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPActionOutput: packet out action.
"""
return parser.OFPPacketOut(
datapath=None,
buffer_id=ofp.OFP_NO_BUFFER,
in_port=ofp.OFPP_CONTROLLER,
actions=[output_port(port_num)],
data=data)
def barrier():
"""Return OpenFlow barrier request.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPBarrierRequest: barrier request.
"""
return parser.OFPBarrierRequest(None)
def table_features(body):
return parser.OFPTableFeaturesStatsRequest(
datapath=None, body=body)
def match(match_fields):
"""Return OpenFlow matches from dict.
Args:
match_fields (dict): match fields and values.
Returns:
ryu.ofproto.ofproto_v1_3_parser.OFPMatch: matches.
"""
return parser.OFPMatch(**match_fields)
def match_from_dict(match_dict):
null_dp = namedtuple('null_dp', 'ofproto_parser')
null_dp.ofproto_parser = parser
acl_match = ofctl.to_match(null_dp, match_dict)
return acl_match
def _match_ip_masked(ip):
if isinstance(ip, ipaddress.IPv4Network) or isinstance(ip, ipaddress.IPv6Network):
return (str(ip.network_address), str(ip.netmask))
else:
return (str(ip.ip), str(ip.netmask))
def build_match_dict(in_port=None, vlan=None,
eth_type=None, eth_src=None,
eth_dst=None, eth_dst_mask=None,
ipv6_nd_target=None, icmpv6_type=None,
nw_proto=None,
nw_src=None, nw_dst=None):
match_dict = {}
if in_port is no |
shuaizi/leetcode | leetcode-python/num315.py | Python | apache-2.0 | 1,742 | 0.000574 | __author__ = 'shuai'
class Solution(object):
    """LeetCode 315: count, for each element, the smaller elements to its right.

    Strategy: merge-sort records of (original index, value).  While merging,
    every time a right-half element is placed before remaining left-half
    elements, it is smaller than all of them; the running count of consumed
    right-half elements ("jump") is credited to each left-half element as it
    is placed.
    """

    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        records = [{'index': i, 'value': v} for i, v in enumerate(nums)]
        counts = [0] * len(nums)
        self.merge_sort(records, 0, len(records) - 1, counts)
        return counts

    def merge(self, nums, start, mid, end, ret):
        """Merge nums[start..mid] and nums[mid+1..end] in place.

        Accumulates, into ret[record index], how many right-half values were
        emitted before each left-half record.
        """
        tmp = [None] * (end - start + 1)
        i = start
        j = mid + 1
        k = 0
        jump = 0  # right-half elements already placed (all smaller than nums[i..mid])
        # This loop drains both halves, so no trailing copy loops are needed
        # (the original's post-loop while blocks were dead code).
        while i <= mid or j <= end:
            if i > mid:
                tmp[k] = nums[j]
                j += 1
                jump += 1
            elif j > end:
                tmp[k] = nums[i]
                ret[tmp[k]['index']] += jump
                i += 1
            elif nums[i]['value'] <= nums[j]['value']:
                tmp[k] = nums[i]
                ret[tmp[k]['index']] += jump
                i += 1
            else:
                tmp[k] = nums[j]
                j += 1
                jump += 1
            k += 1
        nums[start:end + 1] = tmp

    def merge_sort(self, nums, start, end, ret):
        """Recursively sort nums[start..end], accumulating counts into ret."""
        if start >= end:
            return
        # Floor division: `/` was Python-2-only here and breaks under Python 3.
        mid = (start + end) // 2
        self.merge_sort(nums, start, mid, ret)
        self.merge_sort(nums, mid + 1, end, ret)
        self.merge(nums, start, mid, end, ret)
        # Removed leftover debug statement `print nums` (also a py3 syntax error).
# Demo run: no element in [1, 1] has a smaller element to its right -> [0, 0].
# print() call form works under both Python 2 and 3 (the original used the
# py2-only print statement).
sol = Solution()
print(sol.countSmaller([1, 1]))
|
mooster311/Gigabot | cogs/events.py | Python | mit | 4,025 | 0.004969 | import discord
import injector
import database
import pymongo
class Events:
    """Event listeners: connection checks, command stats, and guild bookkeeping."""

    def __init__(self, bot):
        self.bot = bot

    async def on_connect(self):
        """Verify the database is reachable; log the bot out if it is not."""
        try:
            await database.client.server_info()
        except pymongo.errors.ServerSelectionTimeoutError as error:
            print("Unable to connect to database: {0}".format(error))
            await self.bot.logout()

    async def on_ready(self):
        """Log the bot's identity and apply the configured playing status."""
        print('Logged in as')
        print(self.bot.user.name)
        print(self.bot.user.id)
        print('------------')
        # Retrieve the playing status options in 'options.json'
        playing_status = injector.get_options()['other']['playing_status']
        # Set the bot's playing status
        await self.bot.change_presence(
            game=discord.Game(name=playing_status['name'],
                              url=playing_status['url'],
                              type=playing_status['type']))

    async def on_command(self, command):
        """Logs all commands that are sent"""
        # Strip the prefix and keep only the first word (the command name).
        # BUG FIX: the original used rsplit(' ', 1)[0], which only drops the
        # *last* word, so "!cmd arg1 arg2" was logged as "cmd arg1".
        name = command.message.content[1:].split(' ', 1)[0]
        guild_id = command.guild.id
        Command = database.Command()
        # Retrieve command from database
        document = await Command.find(name=name, guild_id=guild_id)
        if document is None:
            # First use on this guild: create the record with one use.
            await Command.create(name=name, guild_id=guild_id, uses=1)
        else:
            # Known command: bump its usage counter.
            await Command.increment(_id=document['_id'], uses=1)

    async def on_member_join(self, member):
        """Sends a welcome message to a user when they join"""
        guild = member.guild
        await guild.default_channel.send("Hello {0}, welcome to {1}".format(member.mention, guild.name))

    async def on_member_remove(self, member):
        """Sends a message informing other members about a member leaving"""
        guild = member.guild
        await guild.default_channel.send("Goodbye {0}...".format(member.mention))

    @staticmethod
    def _guild_fields(guild):
        """Map a guild object to the field set stored in the database.

        Shared by on_guild_join and on_guild_update so the two writes cannot
        drift apart (they previously duplicated this 12-field list).
        """
        return dict(guild_id=guild.id, name=guild.name, icon=guild.icon,
                    owner_id=guild.owner_id, unavailable=guild.unavailable,
                    mfa_level=guild.mfa_level, splash=guild.splash,
                    large=guild.large, icon_url=guild.icon_url,
                    splash_url=guild.splash_url, chunked=guild.chunked,
                    created_at=guild.created_at)

    async def on_guild_join(self, guild):
        """Retrieves basic information about the server and stores in database"""
        await database.Guild().update_or_create(**self._guild_fields(guild))
        # Read-only for @everyone, writable for the bot itself.
        overwrites = {
            guild.default_role: discord.PermissionOverwrite(send_messages=False),
            guild.me: discord.PermissionOverwrite(send_messages=True)
        }
        # BUG FIX: the original looked for 'bot-log' but created "not-log",
        # so a fresh "bot-log" channel was never actually created.
        if discord.utils.find(lambda m: m.name == 'bot-log', guild.channels) is None:
            await guild.create_text_channel("bot-log", overwrites=overwrites)
        if discord.utils.find(lambda m: m.name == 'starboard', guild.channels) is None:
            await guild.create_text_channel("starboard", overwrites=overwrites)

    async def on_guild_update(self, before, after):
        """Update the database with new guild data"""
        await database.Guild().update_or_create(**self._guild_fields(after))
def setup(bot):
    """Extension entry point used by discord.py's loader."""
    cog = Events(bot)
    bot.add_cog(cog)
|
FeiZhan/Algo-Collection | answers/other/in sap/Pascal's Triangle.py | Python | mit | 470 | 0.031915 | class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
: | rtype: List[List[int]]
"""
pascal = []
if numRows >= 1:
pascal.append([1])
for i in range(1, numRows):
pascal.append([pascal[-1][0]])
for j in range(1, len(pascal[-2])):
pascal[-1].append(pascal[-2][j - 1] + pascal[-2][j])
pascal[-1].append(pascal[-2][-1])
return p | ascal |
pizzapanther/Neutron-Sync | nsync_server/nstore/apps.py | Python | mit | 153 | 0.013072 | from django.apps import AppConfig
class NstoreConfig(AppConfig):
    """Django application configuration for the nstore app."""
    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'nsync_server.nstore'
|
pixelated-project/pixelated-dispatcher | pixelated/test/integration/smoke_test.py | Python | agpl-3.0 | 7,956 | 0.003394 | #
# Copyright (c) 2014 ThoughtWorks Deutschland GmbH
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import json
import unittest
import os
import threading
import time
import psutil
import requests
from tempdir import TempDir
from mock import patch
from pixelated.client.dispatcher_api_client import PixelatedDispatcherClient
from pixelated.proxy import DispatcherProxy
from pixelated.manager import DispatcherManager, SSLConfig, DEFAULT_PORT
from pixelated.test.util import EnforceTLSv1Adapter, cafile, certfile, keyfile
__author__ = 'fbernitt'
INHERIT = None
class SmokeTest(unittest.TestCase):
    """End-to-end smoke tests: manager + proxy servers on background threads."""
    # NOTE(review): these slot names belong to the nested Server class below,
    # not to SmokeTest; TestCase instances have a __dict__ anyway, so this
    # declaration has no effect here — consider moving it onto Server.
    __slots__ = ('_run_method', '_shutdown_method', '_thread_name', '_thread')
    class Server(object):
        """Context manager that runs a serve/shutdown pair on a daemon thread."""
        def __init__(self, run_method, shutdown_method, thread_name=None):
            self._run_method = run_method
            self._shutdown_method = shutdown_method
            self._thread_name = thread_name
            self._thread = None
        def _start_server(self):
            # Daemon thread so a hung server cannot keep the test process alive.
            self._thread = threading.Thread(target=self._run_method)
            self._thread.setDaemon(True)
            if self._thread_name:
                self._thread.setName(self._thread_name)
            self._thread.start()
        def __enter__(self):
            self._start_server()
            time.sleep(0.3)  # let server start
            return self
        def __exit__(self, exc_type, exc_val, exc_tb):
            self._shutdown_method()
            self._thread.join()
            self._kill_subprocesses()
        def _kill_subprocesses(self):
            # Reap any child processes the server spawned (e.g. agent processes).
            for child in psutil.Process(os.getpid()).children():
                try:
                    p = psutil.Process(child.pid)
                    p.kill()
                except psutil.Error:
                    # Child may already be gone; best-effort cleanup.
                    pass
    def setUp(self):
        self._tmpdir = TempDir()
        # HTTPS session that enforces at least TLSv1 on every request.
        self.ssl_request = requests.Session()
        self.ssl_request.mount('https://', EnforceTLSv1Adapter())
    def tearDown(self):
        self._tmpdir.dissolve()
    def _dispatcher_manager(self):
        """Build a Server wrapping a DispatcherManager that runs a fake mailpile."""
        fake_mailpile = os.path.join(os.path.dirname(__file__), 'fake_mailpile.py')
        ssl_config = SSLConfig(certfile(), keyfile())
        provider_ca = None
        server = DispatcherManager(self._tmpdir.name, fake_mailpile, ssl_config, 'leap provider hostname', provider_ca, mailpile_virtualenv=INHERIT)
        return SmokeTest.Server(server.serve_forever, server.shutdown, thread_name='PixelatedServerManager')
    def _dispatcher_proxy(self):
        """Build a Server wrapping a DispatcherProxy that talks to the manager."""
        dispatcher = DispatcherProxy(PixelatedDispatcherClient('localhost', DEFAULT_PORT, cacert=cafile(), assert_hostname=False), port=12345, certfile=certfile(),
                                     keyfile=keyfile())
        return SmokeTest.Server(dispatcher.serve_forever, dispatcher.shutdown, thread_name='PixelatedDispatcherProxy')
    def _method(self, method, url, form_data=None, json_data=None, timeout=5.0):
        # JSON requests are serialized as-is; form requests carry a static
        # XSRF token both as a cookie and a form field so they pass the
        # server's XSRF check.
        if json_data:
            headers = {'content-type': 'application/json'}
            data = json.dumps(json_data)
            cookies = None
        else:
            cookies = {'_xsrf': '2|7586b241|47c876d965112a2f547c63c95cbc44b1|1402910163'}
            headers = None
            data = form_data.copy()
            data['_xsrf'] = '2|7586b241|47c876d965112a2f547c63c95cbc44b1|1402910163'
        return method(url, data=data, headers=headers, cookies=cookies, timeout=timeout, verify=cafile())
    def get(self, url):
        # GET with the test CA bundle for certificate verification.
        return self.ssl_request.get(url, verify=cafile())
    def put(self, url, form_data=None, json_data=None):
        return self._method(self.ssl_request.put, url, form_data=form_data, json_data=json_data)
    def post(self, url, form_data=None, json_data=None):
        return self._method(self.ssl_request.post, url, form_data=form_data, json_data=json_data)
    @patch('pixelated.manager.LeapCertificate')
    @patch('pixelated.manager.LeapProvider')
    def test_dispatcher_run(self, leap_provider_mock, leap_certificate_mock):
        # Full agent lifecycle against the manager alone: create, list,
        # start, query runtime, hit the agent, stop.
        with self._dispatcher_manager():
            self.assertSuccess(
                self.post('https://localhost:4443/agents', json_data={'name': 'test', 'password': 'some password'}))
            self.assertSuccess(self.get('https://localhost:4443/agents'), json_body={
                'agents': [{'name': 'test', 'state': 'stopped', 'uri': 'http://localhost:4443/agents/test'}]})
            self.assertSuccess(
                self.put('https://localhost:4443/agents/test/state', json_data={'state': 'running'}))
            self.assertSuccess(self.get('https://localhost:4443/agents/test/runtime'),
                               json_body={'state': 'running', 'port': 5000})
            time.sleep(2)  # let mailpile start
            self.assertSuccess(self.get('http://localhost:5000/'))
            self.assertSuccess(
                self.put('https://localhost:4443/agents/test/state', json_data={'state': 'stopped'}))
    def test_dispatcher_starts(self):
        # Proxy alone must at least serve its login page.
        with self._dispatcher_proxy():
            self.assertSuccess(self.get('https://localhost:12345/auth/login'))
    @patch('pixelated.manager.LeapCertificate')
    @patch('pixelated.manager.LeapProvider')
    def test_server_dispatcher_combination(self, leap_provider_mock, leap_certificate_mock):
        # Manager and proxy together: create a user, start their agent,
        # then log in through the proxy.
        with self._dispatcher_manager():
            with self._dispatcher_proxy():
                # add user
                self.assertSuccess(
                    self.post('https://localhost:4443/agents', json_data={'name': 'test', 'password': 'some password'}))
                # try to login with agent down
                # self.assertError(302, self.post('https://localhost:12345/auth/login',
                # form_data={'username': 'test', 'password': 'test'}))
                # start agent
                self.assertSuccess(
                    self.put('https://localhost:4443/agents/test/state', json_data={'state': 'running'}))
                # let mailpile start
                time.sleep(1)
                self.assertMemoryUsage(
                    self.get('https://localhost:4443/stats/memory_usage'))
                try:
                    # try to login with agent up
                    self.assertSuccess(self.post('https://localhost:12345/auth/login',
                                                 form_data={'username': 'test', 'password': 'some password'}),
                                       body='Hello World!')
                finally:
                    # shutdown mailple
                    self.put('https://localhost:4443/agents/test/state', json_data={'state': 'stopped'})
    def assertSuccess(self, response, body=None, json_body=None):
        """Assert a 2xx response, optionally matching raw or JSON body."""
        status = response.status_code
        self.assertTrue(200 <= status < 300, msg='%d: %s' % (response.status_code, response.reason))
        if body:
            self.assertEqual(body, response.content)
        if json_body:
            self.assertEqual(json_body, response.json())
    def assertError(self, error_code, response):
        """Assert the response carries the exact expected error status code."""
        self.assertEqual(error_code, response.status_code,
                         'Expected status code %d but got %d' % (error_code, response.status_code))
    def assertMemoryUsage(self, response):
        """Assert the memory-usage endpoint reports exactly one agent."""
        self.assertSuccess(response)
        usage = response.json()
        self.assertEqual(1, len(usage['agents']))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.