repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
GFibrizo/TPS_7529 | TP1/Estadistico de orden K/k_heapsort.py | Python | apache-2.0 | 927 | 0.037756 | import heapq
def obtener_estadistico_orden_k(conjunto, k):
heap = conjunto[:]
heapq.heapify(heap)
elemento = None
for i in xrange(k+1):
elemento = heapq.heappop(heap)
return elemento
################################################################################
################################################################################
def test():
print obtener_estadistico_orden_k([1], 0) == 1
print obtener_est | adistico_orden_k([2,1], 1) == 2
print obtener_estadistico_orden_k([3,1,4,2,7], 3) == 4
print obtener_estadistico_o | rden_k([1,2,3,4,5,6,7,8], 0) == 1
print obtener_estadistico_orden_k([1,2,3,4,5,6,7,8], 7) == 8
print obtener_estadistico_orden_k([1,2,3,4,5,6,7,8], 4) == 5
################################################################################
################################################################################
test()
|
idan/oauthlib | tests/oauth2/rfc6749/clients/test_legacy_application.py | Python | bsd-3-clause | 6,427 | 0.002645 | # -*- coding: utf-8 -*-
import os
import urllib.parse as urlparse
from unittest.mock import patch
from oauthlib import signals
from oauthlib.oauth2 import LegacyApplicationClient
from tests.unittest import TestCase
@patch('time.time', new=lambda: 1000)
class LegacyApplicationClientTest(TestCase):
client_id = "someclientid"
client_secret = 'someclientsecret'
scope = ["/profile"]
kwargs = {
"some": "providers",
"require": "extra arguments"
}
username = "user_username"
password = "user_password"
body = "not=empty"
body_up = "not=empty&grant_type=password&username={}&password={}".format(username, password)
body_kwargs = body_up + "&some=providers&require=extra+arguments"
token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_in":3600,'
' "scope":"/profile",'
' "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter":"example_value"}')
token = {
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"expires_at": 4600,
"scope": scope,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
}
def test_request_body(self):
client = LegacyApplicationClient(self.client_id)
# Basic, no extra arguments
body = client.prepare_request_body(self.username, self.password,
body=self.body)
self.assertFormBodyEqual(body, self.body_up)
# With extra parameters
body = client.prepare_request_body(self.username, self.password,
body=self.body, **self.kwargs)
self.assertFormBodyEqual(body, self.body_kwargs)
def test_parse_token_response(self):
client = LegacyApplicationClient(self.client_id)
# Parse code and state
response = client.parse_request_body_response(self.token_json, scope=self.scope)
self.assertEqual(response, self.token)
self.assertEqual(client.access_token, response.get("access_token"))
self.assertEqual(client.refresh_token, response.get("refresh_token"))
self.assertEqual(client.token_type, response.get("token_type"))
# Mismatching state
self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '5'
token = client.parse_request_body_response(self.token_json, scope="invalid")
self.assertTrue(token.scope_changed)
scope_changes_recorded = []
def record_scope_change(sender, message, old, new):
scope_changes_recorded.append((message, old, new))
signals.scope_changed.connect(record_scope_change)
try:
client.parse_request_body_response(self.token_json, scope="invalid")
self.assertEqual(len(scope_changes_recorded), 1)
message, old, new = scope_changes_recorded[0]
self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
self.assertEqual(old, ['invalid'])
self.assertEqual(new, ['/profile'])
finally:
signals.scope_changed.disconnect(record_scope_change)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
def test_prepare_request_body(self):
"""
see issue #585
https://github.com/oauthlib/oauthlib/issues/585
"""
client = LegacyApplicationClient(self.client_id)
# scenario 1, default behavior to not include `client_id`
r1 = client.prepare_request_body(username=self.username, password=self.password)
self.assertIn(r1, ('grant_type=password&username={}&password={}'.format(self.username, self.password),
'grant_type=password&password={}&username={}'.format(self.password, self.username),
))
# scenario 2, include `client_id` in the body
r2 = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True)
r2_params = dict(urlparse.parse_qsl(r2, keep_blank_values=True))
self.assertEqual(len(r2_params.keys()), 4)
self.assertEqual(r2_params['grant_type'], 'password')
self.assertEqual(r2_params['username'], self.username)
self.assertEqual(r2_params['password'], self.password)
self.assertEqual(r2_params['client_id'], self.client_id)
# scenario 3, include `client_id` + `client_secret` in the body
r3 = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True, client_secret=self.client_secret)
r3_params = dict(urlparse.parse_qsl(r3, keep_blank_values=True))
self.assertEqual(len(r3_params.keys()), 5)
self.assertEqual(r3_params['grant_type'], 'password')
self.assertEqual(r3_params['username'], self.username)
self.assertEqual(r3_params['password'], self.password)
self.assertEqual(r3_params['client_id'], self.client_id)
self.assertEqual(r3_params['client_secret'], self.client_secret)
# scenario 4, `client_secret` is an empty string
r4 = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True, client_secret='')
| r4_params = dict(urlparse.parse_qsl(r4, keep_blank_values=True))
self.assertEqual(len(r4_params.keys()), 5)
self.assertEqual(r4_params['grant_type'], 'password')
self.assertEqual(r4_params['username'], self.username)
self.assertEqual(r4_params['password'], self.password)
self.assertEqual(r4_params['client_id'], self.client_id)
self.assertEqual(r4_params['client_secret'], '')
# scenario 4b`,` client_secret is `None`
r4b = client.prepare_req | uest_body(username=self.username, password=self.password, include_client_id=True, client_secret=None)
r4b_params = dict(urlparse.parse_qsl(r4b, keep_blank_values=True))
self.assertEqual(len(r4b_params.keys()), 4)
self.assertEqual(r4b_params['grant_type'], 'password')
self.assertEqual(r4b_params['username'], self.username)
self.assertEqual(r4b_params['password'], self.password)
self.assertEqual(r4b_params['client_id'], self.client_id)
|
Tatsh/libipa | setup.py | Python | mit | 555 | 0 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from setuptools import setup
setup(
name='libipa',
version='0.0.6',
author='Andrew Udvare',
autho | r_email='audvare@gmail.com',
packages=['ipa'],
scripts=['bin/ipa-unzip-bin', 'bin/ipa-dump-info'],
url='https://gith | ub.com/Tatsh/libipa',
license='LICENSE.txt',
description='Library to read IPA files (iOS application archives).',
test_suite='ipa.test',
long_description='No description.',
install_requires=[
'biplist>=0.7',
'six>=1.7.3',
],
)
|
aalitaiga/improved_wgan_training | gan_64x64.py | Python | mit | 23,661 | 0.009763 | import os, sys
sys.path.append(os.getcwd())
import time
import functools
import numpy as np
import tensorflow as tf
import sklearn.datasets
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.small_imagenet
import tflib.ops.layernorm
import tflib.plot
# Download 64x64 ImageNet at http://image-net.org/small/download.php and
# fill in the path to the extracted files here!
DATA_DIR = ''
if len(DATA_DIR) == 0:
raise Exception('Please specify path to data directory in gan_64x64.py!')
MODE = 'wgan-gp' # dcgan, wgan, wgan-gp, lsgan
DIM = 64 # Model dimensionality
CRITIC_ITERS = 5 # How many iterations to train the critic for
N_GPUS = 1 # Number of GPUs
BATCH_SIZE = 64 # Batch size. Must be a multiple of N_GPUS
ITERS = 200000 # How many iterations to train for
LAMBDA = 10 # Gradient penalty lambda hyperparameter
OUTPUT_DIM = 64*64*3 # Number of pixels in each iamge
lib.print_model_settings(locals().copy())
def GeneratorAndDiscriminator():
"""
Choose which generator and discriminator architecture to use by
uncommenting one of these lines.
"""
# Baseline (G: DCGAN, D: DCGAN)
return DCGANGenerator, DCGANDiscriminator
# No BN and constant number of filts in G
# return WGANPaper_CrippledDCGANGenerator, DCGANDiscriminator
# 512-dim 4-layer ReLU MLP G
# return FCGenerator, DCGANDiscriminator
# No normalization anywhere
# return functools.partial(DCGANGenerator, bn=False), functools.partial(DCGANDiscriminator, bn=False)
# Gated multiplicative nonlinearities everywhere
# return MultiplicativeDCGANGenerator, MultiplicativeDCGANDiscriminator
# tanh nonlinearities everywhere
# return functools.partial(DCGANGenerator, bn=True, nonlinearity=tf.tanh), \
# functools.partial(DCGANDiscriminator, bn=True, nonlinearity=tf.tanh)
# 101-layer ResNet G and D
# return ResnetGenerator, ResnetDiscriminator
raise Exception('You must choose an architecture!')
DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs, initialization='he')
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs, initialization='he')
return LeakyReLU(output)
def Batchnorm(name, axes, inputs):
if ('Discriminator' in name) and (MODE == 'wgan-gp'):
if axes != [0,2,3]:
raise Exception('Layernorm over non-standard axes is unsupported')
return lib.ops.layernorm.Layernorm(name,[1,2,3],inputs)
else:
return lib.ops.batchnorm.Batchnorm(name,axes,inputs,fused=True)
def pixcnn_gated_nonlinearity(a, b):
return tf.sigmoid(a) * tf.tanh(b)
def SubpixelConv2D(*args, **kwargs):
kwargs['output_dim'] = 4*kwargs['output_dim']
output = lib.ops.conv2d.Conv2D(*args, **kwargs)
output = tf.transpose(output, [0,2,3,1])
output = tf.depth_to_space(output, 2)
output = tf.transpose(output, [0,3,1,2])
return output
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim/2)
conv_1b = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim/2, output_dim=output_dim/2, stride=2)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim/2, output_dim=output_dim)
elif resample=='up':
conv_shortcut = SubpixelConv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim/2)
conv_1b = functools.partial(lib.ops.deconv2d.Deconv2D, input_dim=input_dim/2, output_dim=output_dim/2)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim/2, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim/2)
conv_1b = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim/2, output_dim=output_dim/2)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim/2, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
output = tf.nn.relu(output)
output = conv_1(name+'.Conv1', filter_size=1, inputs=output, he_init=he_init, weightnorm=False)
output = tf.nn.relu(output)
output = conv_1b(name+'.Conv1B', filter_size=filter_size, inputs=output, he_init=he_init, weightnorm=False)
output = tf.nn.relu(output)
output = conv_2(name+'.Conv2', filter_size=1, inputs=output, he_init=he_init, weightnorm=False, biases=False)
output = Batchnorm(name+'.BN', [0,2,3], output)
return shortcut + (0.3*output)
# ! Generators
def FCGenerator(n_samples, noise=None, FC_DIM=512):
if noise is None:
noise = tf.random_normal([n_samples, 128])
output = ReLULayer('Generator.1', 128, FC_DIM, noise)
output = ReLULayer('Generator.2', FC_DIM, FC_DIM, output)
output = ReLULayer('Generator.3', FC_DIM, FC_DIM, output)
output = ReLULayer('Generator.4', FC_DIM, FC_DIM, output)
output = lib.ops.linear.Linear('Generator.Out', FC_DIM, OUTPUT_DIM, output)
output = tf.tanh(output)
return output
def DCGANGenerator(n_samples, noise=None, dim=DIM, bn=True, nonlinearity=tf.nn.relu):
lib.ops.conv2d.set_weights_stdev(0.02)
lib.ops.deconv2d.set_weights_stdev(0.02)
lib.ops.linear.set_weights_stdev(0.02)
if noise is None:
noise = tf.random_normal([n_samples, 128])
output = lib.ops.linear.Linear('Generator.Input', 128, 4*4*8*dim, noise)
output = tf.reshape(output, [-1, 8*dim, 4, 4])
if bn:
output = Batchnorm('Generator.BN1', [0,2,3], output)
output = nonlinearity(output)
output = lib.ops.deconv2d.Deconv2D('Generator.2', 8*dim, 4*dim, 5, output)
if bn:
output = Batchnorm('Generator.BN2', [0,2,3], output)
output = nonlinearity(output)
output = lib.ops.deconv2d.Deconv2D('Generator.3', 4*dim, 2*dim, 5, output)
if bn:
output = Batchnorm('G | enerator.BN3', [0,2,3], output)
output = nonlinearity(output)
output = lib.ops.deconv2d.Deconv2D('Generator.4', 2*dim, dim, 5, output)
if bn:
output = Batchnorm('Generator.BN4', [0,2,3], output)
output = nonlinearity(output)
output = lib.ops.deconv2d.Deconv2D('Generator.5', dim, 3, 5, output)
output = tf.tanh(output)
lib.ops.conv2d.unset_weights_stdev()
| lib.ops.deconv2d.unset_weights_stdev()
lib.ops.linear.unset_weights_stdev()
return tf.reshape(output, [-1, OUTPUT_DIM])
def WGANPaper_CrippledDCGANGenerator(n_samples, noise=None, dim=DIM):
if noise is None:
noise = tf.random_normal([n_samples, 128])
output = lib.ops.linear.Linear('Generator.Input', 128, 4*4*dim, noise)
output = tf.nn.relu(output)
output = tf.reshape(output, [-1, dim, 4, 4])
output = lib.ops.deconv2d.Deconv2D('Generator.2', dim, dim, 5, output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.3', dim, dim, 5, output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.4', dim, dim, 5, output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.5', dim, 3, 5, output)
output = tf.tanh(output)
return tf.reshape(output, [-1, OUTP |
krthkj/learningPython | readjson.py | Python | mit | 157 | 0.012739 | import json
from p | print import pprint
json_data=open('jsonFormated')
#json_data=open('jsonFile')
data = json.load(json_data)
pprint(data)
json_dat | a.close()
|
DiptoDas8/Biponi | lib/python2.7/site-packages/braintree/unknown_payment_method.py | Python | mit | 206 | 0.009709 | import braintree
from braintree.resource import Resour | ce
class UnknownPaymentMethod(Resource):
def image_url(self):
return "https://assets.braintreegateway.com/payment_metho | d_logo/unknown.png"
|
apache/incubator-airflow | airflow/www/forms.py | Python | apache-2.0 | 7,018 | 0.002566 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from datetime import datetime as dt
import pendulum
from flask_appbuilder.fieldwidgets import (
BS3PasswordFieldWidget,
BS3TextAreaFieldWidget,
BS3TextFieldWidget,
Select2Widget,
)
from flask_appbuilder.forms import DynamicForm
from flask_babel import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import widgets
from wtforms.fields import Field, IntegerField, PasswordField, SelectField, StringField, TextAreaField
from wtforms.validators import InputRequired, Optional
from airflow.configuration import conf
from airflow.utils import timezone
from airflow.utils.types import DagRunType
from airflow.www.widgets import (
AirflowDateTimePickerROWidget,
AirflowDateTimePickerWidget,
BS3TextAreaROWidget,
BS3TextFieldROWidget,
)
class DateTimeWithTimezoneField(Field):
"""A text field which stores a `datetime.datetime` matching a format."""
widget = widgets.TextInput()
def __init__(self, label=None, validators=Non | e, datetime_format='%Y-%m-%d %H:%M:%S%Z', **kwargs):
super().__init__(label, validators, **kwargs)
self.format = datetime_fo | rmat
self.data = None
def _value(self):
if self.raw_data:
return ' '.join(self.raw_data)
if self.data:
return self.data.strftime(self.format)
return ''
def process_formdata(self, valuelist):
if not valuelist:
return
date_str = ' '.join(valuelist)
try:
# Check if the datetime string is in the format without timezone, if so convert it to the
# default timezone
if len(date_str) == 19:
parsed_datetime = dt.strptime(date_str, '%Y-%m-%d %H:%M:%S')
default_timezone = self._get_default_timezone()
self.data = default_timezone.convert(parsed_datetime)
else:
self.data = pendulum.parse(date_str)
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid datetime value'))
def _get_default_timezone(self):
current_timezone = conf.get("core", "default_timezone")
if current_timezone == "system":
default_timezone = pendulum.local_timezone()
else:
default_timezone = pendulum.timezone(current_timezone)
return default_timezone
class DateTimeForm(FlaskForm):
"""Date filter form needed for task views"""
execution_date = DateTimeWithTimezoneField("Logical date", widget=AirflowDateTimePickerWidget())
class DateTimeWithNumRunsForm(FlaskForm):
"""
Date time and number of runs form for tree view, task duration
and landing times
"""
base_date = DateTimeWithTimezoneField(
"Anchor date", widget=AirflowDateTimePickerWidget(), default=timezone.utcnow()
)
num_runs = SelectField(
"Number of runs",
default=25,
choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
),
)
class DateTimeWithNumRunsWithDagRunsForm(DateTimeWithNumRunsForm):
"""Date time and number of runs and dag runs form for graph and gantt view"""
execution_date = SelectField("DAG run")
class DagRunEditForm(DynamicForm):
"""Form for editing DAG Run.
We don't actually want to allow editing, so everything is read-only here.
"""
dag_id = StringField(lazy_gettext('Dag Id'), widget=BS3TextFieldROWidget())
start_date = DateTimeWithTimezoneField(lazy_gettext('Start Date'), widget=AirflowDateTimePickerROWidget())
end_date = DateTimeWithTimezoneField(lazy_gettext('End Date'), widget=AirflowDateTimePickerROWidget())
run_id = StringField(lazy_gettext('Run Id'), widget=BS3TextFieldROWidget())
state = StringField(lazy_gettext('State'), widget=BS3TextFieldROWidget())
execution_date = DateTimeWithTimezoneField(
lazy_gettext('Logical Date'),
widget=AirflowDateTimePickerROWidget(),
)
conf = TextAreaField(lazy_gettext('Conf'), widget=BS3TextAreaROWidget())
def populate_obj(self, item):
"""Populates the attributes of the passed obj with data from the form’s fields."""
super().populate_obj(item)
item.run_type = DagRunType.from_run_id(item.run_id)
if item.conf:
item.conf = json.loads(item.conf)
class TaskInstanceEditForm(DynamicForm):
"""Form for editing TaskInstance"""
dag_id = StringField(lazy_gettext('Dag Id'), validators=[InputRequired()], widget=BS3TextFieldROWidget())
task_id = StringField(
lazy_gettext('Task Id'), validators=[InputRequired()], widget=BS3TextFieldROWidget()
)
start_date = DateTimeWithTimezoneField(lazy_gettext('Start Date'), widget=AirflowDateTimePickerROWidget())
end_date = DateTimeWithTimezoneField(lazy_gettext('End Date'), widget=AirflowDateTimePickerROWidget())
state = SelectField(
lazy_gettext('State'),
choices=(
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
('up_for_retry', 'up_for_retry'),
),
widget=Select2Widget(),
validators=[InputRequired()],
)
execution_date = DateTimeWithTimezoneField(
lazy_gettext('Logical Date'),
widget=AirflowDateTimePickerROWidget(),
validators=[InputRequired()],
)
class ConnectionForm(DynamicForm):
"""Form for editing and adding Connection"""
conn_id = StringField(
lazy_gettext('Connection Id'), validators=[InputRequired()], widget=BS3TextFieldWidget()
)
description = StringField(lazy_gettext('Description'), widget=BS3TextAreaFieldWidget())
host = StringField(lazy_gettext('Host'), widget=BS3TextFieldWidget())
schema = StringField(lazy_gettext('Schema'), widget=BS3TextFieldWidget())
login = StringField(lazy_gettext('Login'), widget=BS3TextFieldWidget())
password = PasswordField(lazy_gettext('Password'), widget=BS3PasswordFieldWidget())
port = IntegerField(lazy_gettext('Port'), validators=[Optional()], widget=BS3TextFieldWidget())
extra = TextAreaField(lazy_gettext('Extra'), widget=BS3TextAreaFieldWidget())
|
compmem/ptsa | ptsa/data/rawbinwrapper.py | Python | gpl-3.0 | 9,972 | 0.005716 | #emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the PTSA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# local imports
from basewrapper import BaseWrapper
# global imports
import numpy as np
import string
import struct
import os
from glob import glob
class RawBinWrapper(BaseWrapper):
"""
Interface to data stored in binary format with a separate file for
each channel.
"""
def __init__(self,dataroot,samplerate=None,format='int16',gain=1):
"""Initialize the interface to the data. You must specify the
dataroot, which is a string that contains the path to and
root, up to the channel numbers, where the data are stored."""
# set up the basic params of the data
self._dataroot = dataroot
self._samplerate = samplerate
self._format = format
self._gain = gain
# see if can find them from a params file in dataroot
self._params = self._get_params(dataroot)
# set what we can get from the params
if self._params.has_key('samplerate'):
self._samplerate = self._params['samplerate']
if self._params.has_key('format'):
self._format = self._params['format']
if self._params.has_key('dataformat'):
self._format = self._params['dataformat']
if self._params.has_key('gain'):
self._gain = self._params['gain']
# set the nBytes and format str
if self._format == 'single':
self._nbytes = 4
self._fmt_str = 'f'
elif self._format == 'short' or self._format == 'int16':
self._nbytes = 2
self._fmt_str = 'h'
elif self._format == 'double':
self._nbytes = 8
self._fmt_str = 'd'
self._chanfiles = glob(self._dataroot+'.*[0-9]')
# sorting because the order of the output from glob is
# arbitrary (not strictly necessary, but nice to have
# consistency):
self._chanfiles.sort()
self._nchannels = len(self._chanfiles)
self._nsamples = None
# collate channel info:
numbers = []
names = []
for i in range(self._nchannels):
numbers.append(np.int(self._chanfiles[i].split('.')[-1]))
names.append(self._chanfiles[i].split('.')[-1])
self._channel_info = np.rec.fromarrays(
[numbers, names], names='number,name')
def _get_dataroot(self, channel=None):
# Same dataroot for all channels:
return self._dataroot
def _get_samplerate(self, channel=None):
# Same samplerate for all channels:
return self._samplerate
def _get_nsamples(self,channel=None):
# get the dimensions of the data
# must open a valid channel and seek to the end
if channel is not None:
raise NotImplementedError('Channel cannot be specified!')
if self._nsamples is None:
chanfile = open(self._chanfiles[0], 'rb')
chanfile.seek(0, 2)
if chanfile.tell() % self._nbytes != 0:
raise ValueError(
'File length does not correspond to data format!')
else:
self._nsamples = chanfile.tell()/self._nbytes
return self._nsamples
def _get_nchannels(self):
# get the dimensions of the data
# must loop through directory identifying valid channels
return self._nchannels
def _get_channel_info(self):
return self._channel_info
def _get_annotations(self):
# no annotations for raw data
annot = None
return annot
def _get_params(self,dataroot):
"""Get parameters of the data from the dataroot."""
params = {}
# first look for dataroot. | params file
param_file = dataroot + '.params'
if not os.path.isfile(param_file):
# see if it's params.txt
param_file = os.path.join(os.path.dirname(dataroot), 'params.txt')
if not os.path.isfile(param_file):
raise IOError(
'No params file found in '+str(dataroot)+
'. Params files must be in the same directory ' +
'as the EEG data and must be named \".params\" ' +
| 'or \"params.txt\".')
# we have a file, so open and process it
for line in open(param_file,'r').readlines():
# get the columns by splitting
cols = line.strip().split()
# set the params
params[cols[0]] = eval(string.join(cols[1:]))
if (not params.has_key('samplerate')) or (not params.has_key('gain')):
raise ValueError(
'Params file must contain samplerate and gain!\n' +
'The following fields were supplied:\n' + str(params.keys()))
# return the params dict
return params
def _load_data(self,channels,event_offsets,dur_samp,offset_samp):
"""
"""
# allocate for data
eventdata = np.empty((len(channels),len(event_offsets),dur_samp),
dtype=np.float)*np.nan
# loop over channels
for c, channel in enumerate(channels):
# determine the file
eegfname = self._dataroot+'.'+self._channel_info['name'][channel]
# eegfname = '{}.{:0>3}'.format(self._dataroot,channel)
if os.path.isfile(eegfname):
efile = open(eegfname,'rb')
else:
raise IOError(
'EEG file not found: '+eegfname)
# 'EEG file not found for channel {:0>3} '.format(channel) +
# 'and file root {}\n'.format(self._dataroot))
# loop over events
for e, ev_offset in enumerate(event_offsets):
# seek to the position in the file
thetime = offset_samp + ev_offset
efile.seek(self._nbytes * thetime,0)
# read the data
data = efile.read(int(self._nbytes * dur_samp))
# convert from string to array based on the format
# hard codes little endian
data = np.array(struct.unpack(
'<' + str(len(data) / self._nbytes) +
self._fmt_str, data))
# make sure we got some data
if len(data) < dur_samp:
raise IOError(
'Event with offset ' + str(ev_offset) +
' is outside the bounds of file ' + str(eegfname))
# append it to the events
eventdata[c, e, :] = data
# multiply by the gain
eventdata *= self._gain
return eventdata
dataroot = property(lambda self: self._get_dataroot())
# # This doesn't seem to work and also doesn't seem to belong in this file:
# def createEventsFromMatFile(matfile):
# """Create an events data array with data wrapper information from
# an events structure saved in a Matlab mat file."""
# # load the mat file
# mat = loadmat(matfile)
# if 'events' not in mat.keys():
# raise "\nError processing the Matlab file: %s\n" + \
# "This file must contain an events structure" + \
# "with the name \"events\" (case sensitive)!\n" +\
# "(All other content of the file is ignored.)" % matfile
# # get the events
# events = mat['events'][0]
# # get num events
# numEvents = len(events)
# # determine the fieldnames and formats
# fields = events[0]._fieldnames
# def loadfield(events,field,dtype=None):
# data = []
# for x in events:
# dat = getattr(x,field)
# if len(dat) == 0:
# data.append(None)
# else:
# |
dergraaf/xpcc | tools/device_files/parameters.py | Python | bsd-3-clause | 3,905 | 0.037132 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All Rights Reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
class ParameterDB:
""" Parameter Data Base
Manages Parameters
"""
def __init__(self, userParameters=None, logger=None):
if logger == None:
self.log = Logger()
else:
self.log = logger
self._parameters = []
self.addUserParameters(userParameters)
def addUserParameters(self, item | s):
"""
Parses and adds items form [parameters] section in `project.cfg`
"""
if items is None:
return
for item in items:
p = UserParameter.fromUserConfigItem(item, self.log)
if p is not None:
self._parameters.append(p)
def addDriverParameter(self, param):
"""
"""
self.log.error("Ple | ase implement ParameterDB.addDriverParameter")
def getParametersForDriver(self, driver):
parameters = []
for param in self._parameters:
if param.driver_type == driver.type and param.driver_name == driver.name:
parameters.append({'name': param.name, 'value': param.value, 'instance': param.driver_instance})
return parameters
@classmethod
def fromDictionary(self, dictionary, logger=None):
"""fromDictionary
"""
p = ParameterDB(userParameters=None, logger=logger)
for param in dictionary['_parameters']:
p._parameters.append(UserParameter.fromDictionary(param, p.log))
return p
def toDictionary(self):
""" Serializes this object into a dictionary that can be used for
dependency tracking with scons.
"""
param_dicts = []
for p in self._parameters:
param_dicts.append(p.toDictionary())
return { '_parameters': param_dicts }
class UserParameter:
""" UserParameter
Represents a parameter declared by the user...
"""
def __init__(self, logger=None):
if logger == None:
self.log = Logger()
else:
self.log = logger
# initialize all members to None
self.driver_type = None
self.driver_name = None
self.driver_instance = None
self.level = None
self.name = None
self.value = None
@classmethod
def fromUserConfigItem(self, user_item, logger=None):
"""fromUserConfigString
"""
incorrect_string_msg = ("Incorrect parameter config line!\n"
"Valid inputs are:\n"
"\tdriver_type.parameter_name = value\n"
"\tdriver_type.driver_instance.parameter_name = value\n"
"\tdriver_type.driver_name.parameter_name = value\n"
"\tdriver_type.driver_name.driver_instance.parameter_name = value")
# Create Parameter
p = UserParameter(logger)
# Sanity check user input
parts = user_item[0].split('.')
if len(parts) not in [2,3,4]:
p.log.error(incorrect_string_msg)
return None
p.driver_type = parts[0]
p.name = parts[-1:][0]
if len(parts) == 3:
if parts[1].isdigit():
p.driver_instance = int(parts[1])
else:
p.driver_name = parts[1]
elif len(parts) == 4:
p.driver_name = parts[1]
p.driver_instance = int(parts[2])
p.value = user_item[1]
p.level = 'user' # this parameter comes directly from the user
return p
@classmethod
def fromDictionary(self, dictionary, logger=None):
"""fromDictionary
"""
p = UserParameter(logger)
p.driver_type = dictionary['driver_type']
p.driver_name = dictionary['driver_name']
p.driver_instance = dictionary['driver_instance']
p.level = dictionary['level']
p.name = dictionary['name']
p.value = dictionary['value']
return p
def toDictionary(self):
""" Serializes this object into a dictionary that can be used for
dependency tracking with scons.
"""
return {'driver_type': self.driver_type,
'driver_name': self.driver_name,
'driver_instance': self.driver_instance,
'level': self.level,
'name': self.name,
'value': self.value}
|
pli3/enigma2-pli | RecordTimer.py | Python | gpl-2.0 | 32,081 | 0.032511 | import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, duration, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def chechForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime()
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not chechForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
# send fake event.. to check if another recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, always_zap = False):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.log_entries = []
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay)
| def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
print "begin_date: ", begin_date
print "service_name: ", service_name
print "name:", self.name
print | "description: ", self.description
filename = begin_date + " - " + service_name
if self.name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + self.name
elif config.recording.filename_composition.value == "long":
filename += " - " + self.name + " - " + self.description
else:
filename += " - " + self.name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
return False
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
self.description = evt.getShortDescription()
if self.description == "":
self.description = evt.getExtendedDescription()
event_id = evt.getEventId()
else:
event_id = |
jiafengwu0301/App_BackEnd | api/views.py | Python | apache-2.0 | 1,474 | 0.003392 | from rest_framework import generics, permissions, views, response,status
from .models import Account
from .serializers import AccountCreateSerializer, AccountSerializer, AuthenticateSerializer, \
UpdateAccountSerializ | er, AccountRetrieveSerializer
# Create your views here.
class AccountCreateView(generics.CreateAPIView):
queryset = Account.objects.all()
serializer_class = AccountCreateSerializer
permission_classes = [permissions.AllowAny]
class AccountListView(generics.ListAPIView):
queryset = Account.objects.all()
serializer_class = AccountSerializer
| permission_classes = [permissions.IsAuthenticated]
class AccountRetrieveView(generics.RetrieveAPIView):
queryset = Account.objects.all()
serializer_class = AccountRetrieveSerializer
class UpdateAccountView(generics.UpdateAPIView):
queryset = Account.objects.all()
serializer_class = UpdateAccountSerializer
# permission_classes = [permissions.IsAuthenticated]
class AccountAuthenticationView(views.APIView):
queryset = Account.objects.all()
serializer_class = AuthenticateSerializer
def post(self, request):
data = request.data
serializer = AuthenticateSerializer(data=data)
if serializer.is_valid(raise_exception=True):
new_date = serializer.data
return response.Response(new_date,status=status.HTTP_200_OK)
return response.Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) |
shirtsgroup/InterMol | intermol/molecule.py | Python | mit | 823 | 0 | class Molecule(object):
"""An abstract molecule object. """
def __init__(self, name=None):
"""Initialize the molecule
Args:
name (str): name of the molecule
"""
if not name:
name = "MOL"
self.name = name
self._atoms = list()
def add_atom(self, atom):
"""Add an atom
Args:
| atom (Atom): the atom to add into the molecule
"""
self._atoms.append(atom)
@property
def atoms(self):
"""Return an orderedset of atoms. """
return self. | _atoms
def __repr__(self):
return "Molecule '{}' with {} atoms".format(self.name, len(self.atoms))
def __str__(self):
return "Molecule{} '{}' with {} atoms".format(
id(self), self.name, len(self.atoms))
|
ratoaq2/Flexget | flexget/plugins/exit/__init__.py | Python | mit | 37 | 0 | """Plugins for "exit" task phas | e."""
| |
ktbyers/pynet-ons-jan17 | napalm_example/test_ios_cfg.py | Python | apache-2.0 | 1,143 | 0.000875 | #!/usr/bin/env python
from getpass import getpass
from pprint import pprint as pp
from napalm_base import get_network_driver
host = '184.105.247.70'
username = 'pyclass'
password = getpass()
optional_args = {}
driver = get_network_driver('ios')
device = driver(host, username, password, optional_args=optional_args)
print
print "\n\n>>>Test device open"
device.open()
print
print ">>>Load config change (merge) - no commit"
device.load_merge_candidate(filename='ios_merge.conf')
print device.compare_config()
print
raw_input("Hit any key to continue: ")
print
print ">>>Discard config change (merge)"
device.discard_config()
print device.compare_config()
print
raw_input("Hit any key to continue: ")
print
print ">>>Load config change (merge) - commit"
device.load_merge_candidate(filename='ios_merge.conf')
print device.compare_config()
device.commit_config()
print
raw_input("Hit any key to continue: ")
print
print ">> | >Load config change (replace) - commit"
device.load_replace_candidate(filename='pynet_rtr1. | txt')
print device.compare_config()
device.commit_config()
print
raw_input("Hit any key to continue: ")
#device.rollback()
|
lsst-ts/ts_wep | python/lsst/ts/wep/task/RefCatalogInterface.py | Python | gpl-3.0 | 5,449 | 0.000184 | # This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import lsst.geom
from lsst.obs.base import createInitialSkyWcsFromBoresight
from lsst.meas.algorithms.htmIndexer import HtmIndexer
class RefCatalogInterface(object):
def __init__(self, boresightRa, boresightDec, boresightRotAng):
"""
Class to provide tools to interact with reference catalog
in Butler repository and select pieces of the catalog
that cover the sky area of a pointing.
Parameters
----------
boresightRa : float
Boresight RA in degrees.
boresightDec : float
Boresight Dec in degrees.
boresightRotAng : float
Boresight rotation angle in degreees.
"""
# Set the pointing information
self.boresightRa = boresightRa
self.boresightDec = boresightDec
self.boresightRotAng = boresightRotAng
def getHtmIds(self, radius=1.8):
"""
Get the htmIds for the pieces of the reference catalog
that overlap the circular area within `radius` (in degrees)
of the boresight pointing. HtmIds are the spatial indices
identifying hierarchical triangular mesh (HTM) shards that
are used to store the reference catalogs in gen3 butler
repositories in more easily accessible pieces.
Parameters
----------
radius : float, optional
Radius in degrees of the pointing footprint.
(the default is 1.8 degrees, enough to cover one LSST pointing.)
Returns
-------
numpy.ndarray
Array of htmIds in the butler for t | he pieces of the
reference catalog that overlap the pointing.
"""
# HTM depth specifies the resolution of HTM grid that covers the sky.
# DM G | en3 ingests refernce catalogs with an HTM depth of 7.
htmIdx = HtmIndexer(depth=7)
centerPt = lsst.geom.SpherePoint(
self.boresightRa, self.boresightDec, lsst.geom.degrees
)
htmIds = htmIdx.getShardIds(
centerPt, lsst.geom.Angle(radius, lsst.geom.degrees)
)
return htmIds[0]
def getDataRefs(self, htmIds, butler, catalogName, collections):
"""
Get the butler references and dataIds
for the reference catalog shards specified.
Parameters
----------
htmIds : array
HtmIds for the shards of the reference catalog we need.
butler : lsst.daf.butler.Butler
Butler instance pointing at the repo with the reference catalog.
catalogName : str
Name of the reference catalog in the repository.
collections : str or list of str
Collections in the repository with the reference catalog.
Returns
-------
list
List of the deferred dataset references pointing to the pieces
of the reference catalog we want in the butler.
list
List of the dataIds for the reference catalog shards.
"""
registry = butler.registry
deferredList = []
dataIds = []
for htmId in htmIds:
# Shards of the reference catalog in the Gen3 butler
# are identified with a dataId with the key labelled "htm7".
htmDataId = {"htm7": htmId}
dataRef = list(
registry.queryDatasets(
catalogName, dataId=htmDataId, collections=collections
).expanded()
)
if len(dataRef) == 0:
continue
deferredList.append(butler.getDeferred(dataRef[0], collections=collections))
dataIds.append(dataRef[0].dataId)
return deferredList, dataIds
def getDetectorWcs(self, detector):
"""
Create a WCS for the detector with the initialized
pointing information.
Parameters
----------
detector : lsst.afw.cameraGeom.Detector
Detector for which we want to generate a source catalog.
Returns
-------
lsst.afw.geom.SkyWcs
Wcs object defining the pixel to sky (and inverse) transform for
the supplied detector.
"""
boresightPointing = lsst.geom.SpherePoint(
self.boresightRa, self.boresightDec, lsst.geom.degrees
)
return createInitialSkyWcsFromBoresight(
boresightPointing,
self.boresightRotAng * lsst.geom.degrees,
detector,
flipX=False,
)
|
cassinius/right-to-forget-data | src/plots/plotting.py | Python | apache-2.0 | 13,050 | 0.020307 | import os, csv
import numpy as np
import matplotlib.pyplot as plt
from src.plots.plots_blur import gradient_fill
MODE = 'anonymization'
# MODE = 'perturbation'
# MODE = 'outliers'
# OUTLIER_TARGET = ''
OUTLIER_TARGET = 'outliers/'
# OUTLIER_TARGET = 'random_comparison/'
# OUTLIER_TARGET = 'original/'
# OUTLIER_TARGET = 'outliers_removed/'
OUTLIER_PREFIX = 'adults_outliers_removed_'
# OUTLIER_PREFIX = 'adults_random_deletion_'
# TARGET = 'education_num/'
# TARGET = 'marital_status/'
TARGET = 'income/'
# Input files
ALGORITHMS = {
'gradient_boost': '../../output/' + MODE + '/adults_target_' + TARGET + OUTLIER_TARGET + '/results_gradient_boosting.csv',
'logistic_regression': '../../output/' + MODE + '/adults_target_' + TARGET + OUTLIER_TARGET + '/results_logistic_regression.csv',
'onevsrest_bagging': '../../output/' + MODE + '/adults_target_' + TARGET + OUTLIER_TARGET + '/results_onevsrest_bagging.csv',
'random_forest': '../../output/' + MODE + '/adults_target_' + TARGET + OUTLIER_TARGET + '/results_random_forest.csv',
'linear_svc': '../../output/' + MODE + '/adults_target_' + TARGET + OUTLIER_TARGET + '/results_linear_svc.csv'
}
ALGO = ALGORITHMS['logistic_regression']
OUTLIERS_DIRECTORY = '../../output/outliers/adults_target_' + TARGET + '/' + OUTLIER_TARGET
OUTLIERS_ALGORITHMS = ['gradient_boosting', 'logistic_regression', 'random_forest', 'linear_svc']
PERTURBATION_FILES = {
'education_num': [
'age_0',
'marital-status_Divorced',
'marital-status_Married-civ-spouse',
'occupation_Tech-support',
'relationship_Husband',
'workclass_Federal-gov'
],
'marital_status': [
'age_0',
'relationship_Husband',
'relationship_Not-in-family',
'relationship_Unmarried',
'relationship_Own-child',
'relationship_Wife'
]
}
markers = ['o', '^', 'D', 'x', 'v', 'p', 'H']
linestyles = ['-', '--', '-.', '-.-', '.-.', ':']
colors = ['g', 'saddlebrown', 'r', 'b', 'm', 'k', 'g']
def readOutlierResultsIntoHash():
results_file_list = [f for f in sorted(os.listdir(OUTLIERS_DIRECTORY)) if f.startswith("results") and f.endswith(".csv")]
results = {}
for input_file in results_file_list:
# print input_file
results[input_file] = {}
results_file = open(OUTLIERS_DIRECTORY + "/" + input_file, 'r')
results_csv = csv.reader(results_file, delimiter=',')
# ignore the headers
next(results_csv, None)
for line in results_csv:
results[input_file][line[0]] = {}
results[input_file][line[0]]['precision'] = line[1]
results[input_file][line[0]]['recall'] = line[2]
results[input_file][line[0]]['f1'] = line[3]
# print results[input_file]
results_file.close()
return results
def readResultsIntoHash(file_name):
results_file = open(file_name, 'r')
results_csv = csv.reader(results_file, delimiter=',')
# ignore the headers
next(results_csv, None)
# create the dict we need
results = {}
for line in results_csv:
results[line[0]] = {}
results[line[0]]['precision'] = line[1]
results[line[0]]['recall'] = line[2]
results[line[0]]['f1'] = line[3]
results_file.close()
return results
def plotOutlierResults(results):
lines = {}
out_factors = np.linspace(0.1, 0.95, 18)
### Collect Classification Results ###
linear_svc_line_f1 = [results["results_linear_svc.csv"]["adults_original_dataset.csv"]["f1"]]
logistic_regression_line_f1 = [results["results_logistic_regression.csv"]["adults_original_dataset.csv"]["f1"]]
random_forest_line_f1 = [results["results_random_forest.csv"]["adults_original_dataset.csv"]["f1"]]
gradient_boosting_line_f1 = [results["results_gradient_boosting.csv"]["adults_original_dataset.csv"]["f1"]]
lines["Linear SVC"] = linear_svc_line_f1
lines["Logistic Regression"] = logistic_regression_line_f1
lines["Random Forest"] = random_forest_line_f1
lines["Gradient Boosting"] = gradient_boosting_line_f1
for o in out_factors:
linear_svc_line_f1.append(results["results_linear_svc.csv"][OUTLIER_PREFIX + str(o) + ".csv"]["f1"])
logistic_regression_line_f1.append(results["results_logistic_regression.csv"][OUTLIER_PREFIX + str(o) + ".csv"]["f1"])
random_forest_line_f1.append(results["results_random_forest.csv"][OUTLIER_PREFIX + str(o) + ".csv"]["f1"])
gradient_boosting_line_f1.append(results["results_gradient_boosting.csv"][OUTLIER_PREFIX + str(o) + ".csv"]["f1"])
min_score = min(min(linear_svc_line_f1), min(logistic_regression_line_f1), min(random_forest_line_f1), min(gradient_boosting_line_f1))
max_score = max(max(linear_svc_line_f1), max(logistic_regression_line_f1), max(random_forest_line_f1), max(gradient_boosting_line_f1))
print( "Min score: " + min_score )
print( "Max score: " + max_score )
x = range(0, len(out_factors) + 1)
x_labels = [0]
for o in out_factors:
x_labels.append(o)
print( "x: " + str(x))
print( "Labels: " + str( x_labels ) )
### Collect Std. Deviation from data_stats
### HACK - refactor out into own function !!!
sizes = []
std_devs = []
with open(OUTLIERS_DIRECTORY + "/data_stats.csv", 'r') as f:
next(f)
stat_lines = [line.split(',') for line in f]
for idx, line in enumerate(stat_lines):
sizes.append(float(line[1]))
std_devs.append(float(line[2]))
# print( "line{0} = {1}".format(idx, line) )
### HACK
std_devs = [23067.4001036, 22639.1158206, 22571.5070026, 22620.6618975, 22589.9337135, 22059.7985866, 22424.0912575, 21930.8899104, 21970.3662728, 21994.9221648, 21593.5026667, 21800.1532851, 21585.9000612, 21449.0850529, 21210.0238979, 20915.885174, 20901.7194893, 19527.862345, 19057.6693942]
print( "Std.Dev.: " + str(std_devs) )
min_std = min(std_devs)
max_std = max(std_devs)
print( "Min Std: %s" %(min_std) )
print( "Max Std: %s" %(max_std) )
print( "Sizes: " + str(sizes) )
min_size = min(sizes)
max_size = max(sizes)
print( "Min Size: %s" %(min_size) )
print( "Max Size: %s" %(max_size) )
### START PLOTTING ###
fig, (ax_top, ax_bottom_std) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1]})
fig.patch.set_facecolor('white')
# ax.set_axis_bgcolor((116/256.0, 139/256.0, 197/256.0))
# ax.set_axis_bgcolor((255/256.0, 199/256.0, 0/256.0))
# ax.set_axis_bgcolor((50/256.0, 50/256.0, 50/256.0))
if (OUTLIER_TARGET == 'outliers/'):
target_label = 'outliers'
else:
target_label = 'random data points'
plt.suptitle("F1 score dependent on " + target_label + " removed")
for idx, key in enumerate(lines):
line = lines[key]
gradient_fill( np.array(x),
np.array(list(map(float, line))),
y_min=float(min_score),
y_max=float(max_score),
# zfunc=plots_blur.zfunc,
ax=ax_top,
marker=markers[idx],
color=colors[idx],
label=key )
# ax_top.plot(line, marker=markers[idx], color=colors[idx], label=key)
# Create a legend (Matplotlib madness...!!!)
handles, labels = ax_top.get_legend_handles_labels()
ax_top.legend(handles, labels)
ax_top.axis([0, max(x), float(min_score), float(max_score)])
ax_top.locator_params(nbins=18, axis='x')
ax_top.set_xticklabels(x_labels)
ax_top.set_xlabel('% of ' + target_label + ' removed')
ax_top.set_ylabel('F1 score')
gradient_fill(np.array(x),
np.array(list(map(float, std_devs))),
y_min=float(min_std),
y_max=float(max_std),
# zfunc=plots_blur.zfunc,
| ax=ax_bottom_std,
# marker=markers[idx],
color='green',
label='standard deviation')
# ax_bottom.plot(std_devs)
ax_bottom_std.axis([0, max(x), 19000, 24000])
ax_bottom_std.locator_params(nbins=18, axis='x')
ax_bottom_std.set_xticklabels(x_labels)
ax_bottom_std.set_xlabel('% of ' + target_label + ' removed')
ax_bottom_st | d.set_ylabel('Std.Dev.', color="g")
ax_bottom_size = ax_bottom_std.twinx()
gradient_fill(np.array(x),
np.array(list(map(float, sizes))),
y_min=float(min_size),
y_max=float(max_size),
|
czervenka/gap | gap/templates/tests/test_webapp.py | Python | apache-2.0 | 300 | 0 | from gap.utils.tests | import WebAppTestBase
class TestApp(WebAppTestBase):
def test_welcome(self):
resp = self.get('/')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.content_type, 'text/html')
self.assertTrue('<b>Example project</b>' in re | sp,)
|
2ndQuadrant/ansible | lib/ansible/module_utils/vmware_rest_client.py | Python | gpl-3.0 | 13,483 | 0.001483 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
REQUESTS_IMP_ERR = None
try:
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
PYVMOMI_IMP_ERR = None
try:
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
PYVMOMI_IMP_ERR = traceback.format_exc()
HAS_PYVMOMI = False
VSPHERE_IMP_ERR = None
try:
from com.vmware.vapi.std_client import DynamicID
from vmware.vapi.vsphere.client import create_vsphere_client
from com.vmware.vapi.std.errors_client import Unauthorized
from com.vmware.content.library_client import Item
from com.vmware.vcenter_client import (Folder,
Datacenter,
ResourcePool,
Datastore,
Cluster,
Host)
HAS_VSPHERE = True
except ImportError:
VSPHERE_IMP_ERR = traceback.format_exc()
HAS_VSPHERE = False
from ansible.module_utils.basic import env_fallback, missing_required_lib
class VmwareRestClient(object):
def __init__(self, module):
"""
Constructor
"""
self.module = module
self.params = module.params
self.check_required_library()
self.api_client = self.connect_to_vsphere_client()
# Helper function
def get_error_message(self, error):
"""
Helper function to show human readable error messages.
"""
err_msg = []
if not error.messages:
if isinstance(error, Unauthorized):
return "Authorization required."
return "Generic error occurred."
for err in error.messages:
err_msg.append(err.default_message % err.args)
return " ,".join(err_msg)
def check_required_library(self):
"""
Check required libraries
"""
if not HAS_REQUESTS:
self.module.fail_json(msg=missing_required_lib('requests'),
| exception=REQUESTS_IMP_ERR)
if not HAS_PYVMOMI:
self.module.fail_json(msg=missing_required_lib('PyVmomi'),
exception=PYVMOMI_IMP_ERR)
if not HAS_VSPHERE:
self.module.fail_json(
msg=missing_required_lib('vSphere Automation SDK',
url='https://code.vmware.com/web/sdk/65/vsphere-automation-python'),
| exception=VSPHERE_IMP_ERR)
@staticmethod
def vmware_client_argument_spec():
return dict(
hostname=dict(type='str',
fallback=(env_fallback, ['VMWARE_HOST'])),
username=dict(type='str',
fallback=(env_fallback, ['VMWARE_USER']),
aliases=['user', 'admin']),
password=dict(type='str',
fallback=(env_fallback, ['VMWARE_PASSWORD']),
aliases=['pass', 'pwd'],
no_log=True),
protocol=dict(type='str',
default='https',
choices=['https', 'http']),
validate_certs=dict(type='bool',
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']),
default=True),
)
def connect_to_vsphere_client(self):
"""
Connect to vSphere API Client with Username and Password
"""
username = self.params.get('username')
password = self.params.get('password')
hostname = self.params.get('hostname')
session = requests.Session()
session.verify = self.params.get('validate_certs')
if not all([hostname, username, password]):
self.module.fail_json(msg="Missing one of the following : hostname, username, password."
" Please read the documentation for more information.")
client = create_vsphere_client(
server=hostname,
username=username,
password=password,
session=session)
if client is None:
self.module.fail_json(msg="Failed to login to %s" % hostname)
return client
def get_tags_for_object(self, tag_service=None, tag_assoc_svc=None, dobj=None):
"""
Return list of tag objects associated with an object
Args:
dobj: Dynamic object
tag_service: Tag service object
tag_assoc_svc: Tag Association object
Returns: List of tag objects associated with the given object
"""
# This method returns list of tag objects only,
# Please use get_tags_for_dynamic_obj for more object details
tags = []
if not dobj:
return tags
if not tag_service:
tag_service = self.api_client.tagging.Tag
if not tag_assoc_svc:
tag_assoc_svc = self.api_client.tagging.TagAssociation
tag_ids = tag_assoc_svc.list_attached_tags(dobj)
for tag_id in tag_ids:
tags.append(tag_service.get(tag_id))
return tags
def get_tags_for_dynamic_obj(self, mid=None, type=None):
"""
Return list of tag object details associated with object
Args:
mid: Dynamic object for specified object
type: Type of DynamicID to lookup
Returns: List of tag object details associated with the given object
"""
tags = []
if mid is None:
return tags
dynamic_managed_object = DynamicID(type=type, id=mid)
temp_tags_model = self.get_tags_for_object(dobj=dynamic_managed_object)
category_service = self.api_client.tagging.Category
for tag_obj in temp_tags_model:
tags.append({
'id': tag_obj.id,
'category_name': category_service.get(tag_obj.category_id).name,
'name': tag_obj.name,
'description': tag_obj.description,
'category_id': tag_obj.category_id,
})
return tags
def get_tags_for_cluster(self, cluster_mid=None):
"""
Return list of tag object associated with cluster
Args:
cluster_mid: Dynamic object for cluster
Returns: List of tag object associated with the given cluster
"""
return self.get_tags_for_dynamic_obj(mid=cluster_mid, type='ClusterComputeResource')
def get_tags_for_hostsystem(self, hostsystem_mid=None):
"""
Return list of tag object associated with host system
Args:
hostsystem_mid: Dynamic object for host system
Returns: List of tag object associated with the given host system
"""
return self.get_tags_for_dynamic_obj(mid=hostsystem_mid, type='HostSystem')
def get_tags_for_vm(self, vm_mid=None):
"""
Return list of tag object associated with virtual machine
Args:
vm_mid: Dynamic object for virtual machine
Returns: List of tag object associated with the given virtual machine
"""
return self.get_tags_for_dynamic_obj(mid=vm_mid, type='VirtualMachine')
def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None):
"""
Return list of tag name associated with virtual machine
Args:
tag_service: Tag service object
tag_association_svc: Tag association object
vm_mid: Dynamic object for virtual machine
Returns: List of tag names associated with the given virtual machine
"""
# This API returns just names of tags
# Please u |
ifwe/wxpy | src/tests/wxPythonTests/testTopLevelWindow.py | Python | mit | 4,148 | 0.008679 | """Unit tests for wx.TopLevelWindow.
Methods yet to test for wx.TopLevelWindow:
__init__, CenterOnScreen, CentreOnScreen, EnableCloseButton, GetDefaultItem, GetIcon,
GetTmpDefaultItem, Iconize, IsActive, IsAlwaysMaximized, IsIconized,
MacGetMetalAppearance, MacSetMetalAppearance, RequestUserAttention, Restore, SetDefaultItem,
SetIcon, SetIcons, SetShape, SetTmpDefaultItem"""
import unittest
import wx
import testWindow
import testSize
import wxtest
class TopLevelWindowTest(unittest.TestCase):
def setUp(self):
self.app = wx.PySimpleApp()
def tearDown(self):
self.app.Destroy()
def testConstructorFails(self):
"""__init__"""
self.assertRaises(AttributeError, wx.TopLevelWindow)
# -----------------------------------------------------------
class TopLevelWindowBase(testWindow.WindowTest):
def __init__(self, arg):
# superclass setup
super(TopLevelWindowBase,self).__init__(arg)
self.title = "Lorem Ipsum"
def setUp(self):
self.app = wx.PySimpleApp()
self.frame = wx.Frame(parent=None, id=wx.ID_ANY)
self.testControl = None
def tearDown(self):
self.frame.Hide()
self.frame.Destroy()
self.testControl.Hide()
self.testControl.Destroy()
self.app.Destroy()
def testFullScreen(self):
"""ShowFullScreen, IsFullScreen"""
self.testControl.ShowFullScreen(True)
self.assert_(self.testControl.IsFullScreen())
self.testControl.ShowFullScreen(False)
self.assert_(not self.testControl.IsFullScreen())
'''
# TODO: determine expected behavior of ShowFullScreen's return value, and update test
def testShowFullScreen(self):
"""ShowFullScreen"""
self.assert_(self. | testControl.ShowFullScreen(True))
self.assert_(not self.testControl.ShowFullScreen(True))
self.assert_(self.testControl.ShowFullScreen(False))
self.assert_(not self.testControl.ShowFullScreen(False))
'''
def testMaximize(s | elf):
"""Maximize, IsMaximized"""
self.testControl.Maximize()
self.assert_(self.testControl.IsMaximized())
self.testControl.Maximize(False)
self.assert_(not self.testControl.IsMaximized())
self.testControl.Maximize(True)
self.assert_(self.testControl.IsMaximized())
# TODO: test title with newlines and special characters
def testTitle(self):
"""SetTitle, GetTitle"""
self.testControl.SetTitle(self.title)
self.assertEquals(self.title, self.testControl.GetTitle())
def testTitleConstructor(self):
"""__init__, GetTitle"""
self.testControl = type(self.testControl)(self.frame, title=self.title)
self.assertEquals(self.title, self.testControl.GetTitle())
def testTopLevel(self):
"""IsTopLevel"""
self.assert_(self.testControl.IsTopLevel())
# Although SizeHints are a method of wx.Window, they are basically deprecated
# for all but TopLevelWindows, so we should test them here.
def testSizeHints(self):
"""SetSizeHints, GetMinWidth, GetMinHeight, GetMaxWidth, GetMaxHeight"""
data = testSize.getSizeData()
for (minW,minH),(maxW,maxH) in zip(data,data):
maxW += 1
maxH += 1 # maxes greater than mins
self.testControl.SetSizeHints(minW, minH, maxW, maxH)
self.assertEquals(minW, self.testControl.GetMinWidth())
self.assertEquals(minH, self.testControl.GetMinHeight())
self.assertEquals(maxW, self.testControl.GetMaxWidth())
self.assertEquals(maxH, self.testControl.GetMaxHeight())
# TODO: make the whole thing more robust
def testInvalidSizeHints(self):
"""SetSizeHints"""
# max can't be less than min (except on Ubuntu?)
if wxtest.PlatformIsNotGtk():
self.assertRaises(wx.PyAssertionError, self.testControl.SetSizeHints, 100,100,10,10)
if __name__ == '__main__':
unittest.main(defaultTest='TopLevelWindowTest') |
UK992/servo | tests/wpt/web-platform-tests/service-workers/service-worker/resources/navigation-redirect-body.py | Python | mpl-2.0 | 246 | 0.004065 | import os
filename = os.path.basename(__file__)
|
def main(request, response):
if request.method == 'POST':
return 302, [('Location', './%s?red | irect' % filename)], ''
return [('Content-Type', 'text/plain')], request.request_path
|
kaaustubh/pjsip | tests/pjsua/scripts-pesq/200_codec_speex_8000.py | Python | gpl-2.0 | 537 | 0.01676 | # $Id: 200_codec_speex_8000.py 20 | 63 2008-06-26 18:52:16Z nanang $
#
from inc_cfg import *
ADD_PARAM = ""
if (HAS_SND_DEV == 0):
ADD_PARAM += "--null-audio"
# Call with Speex/8000 codec
test_param = TestParam(
"PESQ codec Speex NB",
[
InstanceParam("UA1", ADD_PARAM + " --max-calls=1 --add-codec speex/8000 --clock-rate 8000 --play-file wavs/input.8.wav"),
InstanceParam("UA2", "--null-aud | io --max-calls=1 --add-codec speex/8000 --clock-rate 8000 --rec-file wavs/tmp.8.wav --auto-answer 200")
]
)
pesq_threshold = 3.0
|
tkzeng/molecular-design-toolkit | moldesign/exceptions.py | Python | apache-2.0 | 1,118 | 0.002683 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache Li | cense, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law | or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ConvergenceFailure(Exception):
""" Raised when an iterative calculation fails to converge """
pass
class NotCalculatedError(Exception):
""" Raised when a molecular property is requested that hasn't been calculated """
pass
class UnhandledValenceError(Exception):
def __init__(self, atom):
self.message = 'Atom %s has unhandled valence: %d' % (atom, atom.valence)
class QMConvergenceError(Exception):
""" Raised when an iterative QM calculation (typically SCF) fails to converge
"""
pass
|
ThinkboxSoftware/Deadline | Custom/events/Zabbix/API/pyzabbix/__init__.py | Python | apache-2.0 | 5,406 | 0.00148 | import httpretty
import logging
import requests
import json
class _NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(_NullHandler())
class ZabbixAPIException(Exception):
""" generic zabbix api exception
code list:
-32602 - Invalid params (eg already exists)
-32500 - no permissions
"""
pass
class ZabbixAPI(object):
def __init__(self,
server='http://localhost/zabbix',
session=None,
use_authenticate=False,
timeout=None):
"""
Parameters:
server: Base URI for zabbix web interface (omitting /api_jsonrpc.php)
session: optional pre-configured requests.Session instance
use_authenticate: Use old (Zabbix 1.8) style authentication
timeout: optional connect and read timeout in seconds, default: None (if you're using Reques | ts >= 2.4 you can set it as tuple: "(connect, read)" which is used to set individual connect and read timeouts.)
"""
| if session:
self.session = session
else:
self.session = requests.Session()
# Default headers for all requests
self.session.headers.update({
'Content-Type': 'application/json-rpc',
'User-Agent': 'python/pyzabbix'
})
self.use_authenticate = use_authenticate
self.auth = ''
self.id = 0
self.timeout = timeout
self.url = server + '/api_jsonrpc.php'
logger.info("JSON-RPC Server Endpoint: %s", self.url)
def login(self, user='', password=''):
"""Convenience method for calling user.authenticate and storing the resulting auth token
for further commands.
If use_authenticate is set, it uses the older (Zabbix 1.8) authentication command"""
# If we have an invalid auth token, we are not allowed to send a login
# request. Clear it before trying.
self.auth = ''
if self.use_authenticate:
self.auth = self.user.authenticate(user=user, password=password)
else:
self.auth = self.user.login(user=user, password=password)
def confimport(self, format='', source='', rules=''):
"""Alias for configuration.import because it clashes with
Python's import reserved keyword"""
return self.do_request(
method="configuration.import",
params={"format": format, "source": source, "rules": rules}
)['result']
def api_version(self):
return self.apiinfo.version()
def do_request(self, method, params=None):
request_json = {
'jsonrpc': '2.0',
'method': method,
'params': params or {},
'id': self.id,
}
# We don't have to pass the auth token if asking for the apiinfo.version
if self.auth and method != 'apiinfo.version':
request_json['auth'] = self.auth
logger.debug("Sending: %s", json.dumps(request_json,
indent=4,
separators=(',', ': ')))
response = self.session.post(
self.url,
data=json.dumps(request_json),
timeout=self.timeout
)
logger.debug("Response Code: %s", str(response.status_code))
# NOTE: Getting a 412 response code means the headers are not in the
# list of allowed headers.
response.raise_for_status()
if not len(response.text):
raise ZabbixAPIException("Received empty response")
try:
response_json = json.loads(response.text)
except ValueError:
raise ZabbixAPIException(
"Unable to parse json: %s" % response.text
)
logger.debug("Response Body: %s", json.dumps(response_json,
indent=4,
separators=(',', ': ')))
self.id += 1
if 'error' in response_json: # some exception
msg = "Error {code}: {message}, {data} while sending {json}".format(
code=response_json['error']['code'],
message=response_json['error']['message'],
data=response_json['error']['data'],
json=str(request_json)
)
raise ZabbixAPIException(msg, response_json['error']['code'])
return response_json
def __getattr__(self, attr):
"""Dynamically create an object class (ie: host)"""
return ZabbixAPIObjectClass(attr, self)
class ZabbixAPIObjectClass(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __getattr__(self, attr):
"""Dynamically create a method (ie: get)"""
def fn(*args, **kwargs):
if args and kwargs:
raise TypeError("Found both args and kwargs")
return self.parent.do_request(
'{0}.{1}'.format(self.name, attr),
args or kwargs
)['result']
return fn
|
cr33dog/pyxfce | xfconf/xfconf.py | Python | bsd-3-clause | 46 | 0.021739 | #!/usr/bin/env | python
from _xf | conf import *
|
manashmndl/dfvfs | tests/vfs/compressed_stream_file_system.py | Python | apache-2.0 | 2,758 | 0.002538 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the compressed stream file system implementation."""
import os
import unittest
from dfvfs.lib import definitions
from dfvfs.path import compressed_stream_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import compressed_stream_file_system
class CompressedStreamFileSystemTest(unittest.TestCase):
"""The unit test for the compressed stream file system object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = os.path.join(u'test_data', u'syslog.bz2')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._compressed_stream_path_spec = (
compressed_stream_path_spec.CompressedStreamPathSpec(
compression_method=definitions.COMPRESSION_METHOD_BZIP2,
parent=path_spec))
def testOpenAndClose(self):
"""Test the open and close functionality."""
file_system = compressed_stream_file_system.CompressedStreamFileSystem(
self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._compressed_stream_path_spec)
file_system.Close()
def testFileEntryExistsByPathSpec(self):
"""Test the file entry exists by p | ath spe | cification functionality."""
file_system = compressed_stream_file_system.CompressedStreamFileSystem(
self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._compressed_stream_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(
self._compressed_stream_path_spec))
file_system.Close()
def testGetFileEntryByPathSpec(self):
"""Test the get entry by path specification functionality."""
file_system = compressed_stream_file_system.CompressedStreamFileSystem(
self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._compressed_stream_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(
self._compressed_stream_path_spec)
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.name, u'')
file_system.Close()
def testGetRootFileEntry(self):
"""Test the get root file entry functionality."""
file_system = compressed_stream_file_system.CompressedStreamFileSystem(
self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._compressed_stream_path_spec)
file_entry = file_system.GetRootFileEntry()
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.name, u'')
file_system.Close()
if __name__ == '__main__':
unittest.main()
|
imron/scalyr-agent-2 | tests/unit/builtin_monitors/mysql_monitor_test.py | Python | apache-2.0 | 2,104 | 0.000951 | # Copyright 2019 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| # See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Imron Alston <imron@scalyr.com>
from __future__ import absolute_import
from __future__ import print_function
__author__ = "imron@scalyr.com"
import sys
from scalyr_agent import UnsupportedSystem
from scalyr_agent.test_base import ScalyrTestCase
class MySqlMonitorTest(ScalyrTes | tCase):
def _import_mysql_monitor(self):
import scalyr_agent.builtin_monitors.mysql_monitor # NOQA
self.assertTrue(True)
def test_min_python_version(self):
if sys.version_info[:2] < (2, 7):
self.assertRaises(UnsupportedSystem, lambda: self._import_mysql_monitor())
else:
self._import_mysql_monitor()
def test_missing_qcache_hits(self):
if sys.version_info[:2] < (2, 7):
print(
"Skipping test 'test_missing_qcache_hits'.\n"
"This test is non-critical for pre-2.7 testing.\n"
)
return
from scalyr_agent.builtin_monitors.mysql_monitor import MysqlDB
class TestMysqlDB(MysqlDB):
def __init__(self):
# do nothing, because we don't actually want to connect to a DB
# for this test
pass
db = TestMysqlDB()
globalVars = {}
globalStatusMap = {"global.com_select": 10}
expected = 0
actual = db._derived_stat_query_cache_efficiency(globalVars, globalStatusMap)
self.assertEqual(expected, actual)
|
gregvonkuster/tools-iuc | tools/ena_upload/dump_yaml.py | Python | mit | 1,252 | 0 | import sys
import yaml
def fetch_table_data(table_path):
data_dict = {}
with open(table_path) as table_to_load:
# load headers
headers = table_to_load.readline().strip('\n').split('\t')
row_id = 0
for line in table_to_load. | readlines():
# print(line)
line_data = line.strip('\n').split('\t')
row_dict | = {}
for col_num in range(len(headers)):
col_name = headers[col_num]
row_dict[col_name] = line_data[col_num]
data_dict[row_id] = row_dict
row_id += 1
return data_dict
all_data_dict = {}
print('YAML -------------')
studies_table_path = sys.argv[1]
table_data = fetch_table_data(studies_table_path)
all_data_dict['ENA_study'] = table_data
samples_table_path = sys.argv[2]
table_data = fetch_table_data(samples_table_path)
all_data_dict['ENA_sample'] = table_data
experiments_table_path = sys.argv[3]
table_data = fetch_table_data(experiments_table_path)
all_data_dict['ENA_experiment'] = table_data
runs_table_path = sys.argv[4]
table_data = fetch_table_data(runs_table_path)
all_data_dict['ENA_run'] = table_data
# print(all_data_dict)
print(yaml.dump(all_data_dict))
print('YAML -------------')
|
yumingfei/virt-manager | virtManager/details.py | Python | gpl-2.0 | 134,336 | 0.001399 | #
# Copyright (C) 2006-2008, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import logging
import traceback
# pylint: disable=E0611
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
# pylint: enable=E0611
import libvirt
import virtManager.uihelpers as uihelpers
from virtManager.storagebrowse import vmmStorageBrowser
from virtManager.baseclass import vmmGObjectUI
from virtManager.addhardware import vmmAddHardware
from virtManager.choosecd import vmmChooseCD
from virtManager.console import vmmConsolePages
from virtManager.serialcon import vmmSerialConsole
from virtManager.graphwidgets import Sparkline
from virtManager import util as util
import virtinst
# Parameters that can be editted in the details window
EDIT_TOTAL = 37
(EDI | T_NAME,
EDIT_ACPI,
EDIT_APIC,
EDIT_CLOCK,
EDIT_MACHTYPE,
EDIT_SECURITY,
EDIT_DESC,
EDIT_VCPUS,
EDIT_CPUSET,
EDIT_CPU,
EDIT_TOPOLOGY,
EDIT_MEM,
EDIT_AUTOSTART,
EDIT_BOOTORDER,
EDIT_BOOTMENU,
EDIT_KERNEL,
EDIT_INIT,
EDIT_DISK_R | O,
EDIT_DISK_SHARE,
EDIT_DISK_CACHE,
EDIT_DISK_IO,
EDIT_DISK_BUS,
EDIT_DISK_SERIAL,
EDIT_DISK_FORMAT,
EDIT_DISK_IOTUNE,
EDIT_SOUND_MODEL,
EDIT_SMARTCARD_MODE,
EDIT_NET_MODEL,
EDIT_NET_VPORT,
EDIT_NET_SOURCE,
EDIT_GFX_PASSWD,
EDIT_GFX_TYPE,
EDIT_GFX_KEYMAP,
EDIT_VIDEO_MODEL,
EDIT_WATCHDOG_MODEL,
EDIT_WATCHDOG_ACTION,
EDIT_CONTROLLER_MODEL
) = range(EDIT_TOTAL)
# Columns in hw list model
HW_LIST_COL_LABEL = 0
HW_LIST_COL_ICON_NAME = 1
HW_LIST_COL_ICON_SIZE = 2
HW_LIST_COL_TYPE = 3
HW_LIST_COL_DEVICE = 4
# Types for the hw list model: numbers specify what order they will be listed
HW_LIST_TYPE_GENERAL = 0
HW_LIST_TYPE_STATS = 1
HW_LIST_TYPE_CPU = 2
HW_LIST_TYPE_MEMORY = 3
HW_LIST_TYPE_BOOT = 4
HW_LIST_TYPE_DISK = 5
HW_LIST_TYPE_NIC = 6
HW_LIST_TYPE_INPUT = 7
HW_LIST_TYPE_GRAPHICS = 8
HW_LIST_TYPE_SOUND = 9
HW_LIST_TYPE_CHAR = 10
HW_LIST_TYPE_HOSTDEV = 11
HW_LIST_TYPE_VIDEO = 12
HW_LIST_TYPE_WATCHDOG = 13
HW_LIST_TYPE_CONTROLLER = 14
HW_LIST_TYPE_FILESYSTEM = 15
HW_LIST_TYPE_SMARTCARD = 16
HW_LIST_TYPE_REDIRDEV = 17
remove_pages = [HW_LIST_TYPE_NIC, HW_LIST_TYPE_INPUT,
HW_LIST_TYPE_GRAPHICS, HW_LIST_TYPE_SOUND, HW_LIST_TYPE_CHAR,
HW_LIST_TYPE_HOSTDEV, HW_LIST_TYPE_DISK, HW_LIST_TYPE_VIDEO,
HW_LIST_TYPE_WATCHDOG, HW_LIST_TYPE_CONTROLLER,
HW_LIST_TYPE_FILESYSTEM, HW_LIST_TYPE_SMARTCARD,
HW_LIST_TYPE_REDIRDEV]
# Boot device columns
BOOT_DEV_TYPE = 0
BOOT_LABEL = 1
BOOT_ICON = 2
BOOT_ACTIVE = 3
# Main tab pages
PAGE_CONSOLE = 0
PAGE_DETAILS = 1
PAGE_DYNAMIC_OFFSET = 2
def prettyify_disk_bus(bus):
if bus in ["ide", "sata", "scsi", "usb"]:
return bus.upper()
if bus in ["xen"]:
return bus.capitalize()
if bus == "virtio":
return "VirtIO"
if bus == "spapr-vscsi":
return "vSCSI"
return bus
def prettyify_disk(devtype, bus, idx):
busstr = prettyify_disk_bus(bus) or ""
if devtype == "floppy":
devstr = "Floppy"
busstr = ""
elif devtype == "cdrom":
devstr = "CDROM"
else:
devstr = devtype.capitalize()
if busstr:
ret = "%s %s" % (busstr, devstr)
else:
ret = devstr
return "%s %s" % (ret, idx)
def safeint(val, fmt="%.3d"):
try:
int(val)
except:
return str(val)
return fmt % int(val)
def prettyify_bytes(val):
if val > (1024 * 1024 * 1024):
return "%2.2f GB" % (val / (1024.0 * 1024.0 * 1024.0))
else:
return "%2.2f MB" % (val / (1024.0 * 1024.0))
def build_redir_label(redirdev):
# String shown in the devices details section
addrlabel = ""
# String shown in the VMs hardware list
hwlabel = ""
if redirdev.type == 'spicevmc':
addrlabel = None
elif redirdev.type == 'tcp':
addrlabel += _("%s:%s") % (redirdev.host, redirdev.service)
else:
raise RuntimeError("unhandled redirection kind: %s" % redirdev.type)
hwlabel = _("Redirected %s") % redirdev.bus.upper()
return addrlabel, hwlabel
def build_hostdev_label(hostdev):
# String shown in the devices details section
srclabel = ""
# String shown in the VMs hardware list
hwlabel = ""
typ = hostdev.type
vendor = hostdev.vendor
product = hostdev.product
addrbus = hostdev.bus
addrdev = hostdev.device
addrslt = hostdev.slot
addrfun = hostdev.function
addrdom = hostdev.domain
def dehex(val):
if val.startswith("0x"):
val = val[2:]
return val
hwlabel = typ.upper()
srclabel = typ.upper()
if vendor and product:
# USB by vendor + product
devstr = " %s:%s" % (dehex(vendor), dehex(product))
srclabel += devstr
hwlabel += devstr
elif addrbus and addrdev:
# USB by bus + dev
srclabel += (" Bus %s Device %s" %
(safeint(addrbus), safeint(addrdev)))
hwlabel += " %s:%s" % (safeint(addrbus), safeint(addrdev))
elif addrbus and addrslt and addrfun and addrdom:
# PCI by bus:slot:function
devstr = (" %s:%s:%s.%s" %
(dehex(addrdom), dehex(addrbus),
dehex(addrslt), dehex(addrfun)))
srclabel += devstr
hwlabel += devstr
return srclabel, hwlabel
def lookup_nodedev(vmmconn, hostdev):
def intify(val, do_hex=False):
try:
if do_hex:
return int(val or '0x00', 16)
else:
return int(val)
except:
return -1
def attrVal(node, attr):
if not hasattr(node, attr):
return None
return getattr(node, attr)
devtype = hostdev.type
found_dev = None
vendor_id = product_id = bus = device = \
domain = slot = func = None
# For USB we want a device, not a bus
if devtype == 'usb':
devtype = 'usb_device'
vendor_id = hostdev.vendor or -1
product_id = hostdev.product or -1
bus = intify(hostdev.bus)
device = intify(hostdev.device)
elif devtype == 'pci':
domain = intify(hostdev.domain, True)
bus = intify(hostdev.bus, True)
slot = intify(hostdev.slot, True)
func = intify(hostdev.function, True)
devs = vmmconn.get_nodedevs(devtype, None)
for dev in devs:
# Try to match with product_id|vendor_id|bus|device
if (attrVal(dev, "product_id") == product_id and
attrVal(dev, "vendor_id") == vendor_id and
attrVal(dev, "bus") == bus and
attrVal(dev, "device") == device):
found_dev = dev
break
else:
# Try to get info from bus/addr
dev_id = intify(attrVal(dev, "device"))
bus_id = intify(attrVal(dev, "bus"))
dom_id = intify(attrVal(dev, "domain"))
func_id = intify(attrVal(dev, "function"))
slot_id = intify(attrVal(dev, "slot"))
if ((dev_id == device and bus_id == bus) or
(dom_id == domain and func_id == func and
bus_id == bus and slot_id == slot)):
found_dev = dev
break
return found_dev
class vmmDetails(vmmGObjectUI):
__gsignals__ = {
"action-save-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-destroy-domain": (GObject |
SAOImageDS9/SAOImageDS9 | ttkthemes/setup.py | Python | gpl-3.0 | 1,699 | 0.001177 | """
Author: RedFantom
License: GNU GPLv3
Copyright (c) 2017-2018 RedFantom
"""
import os
from tkinter import TkVersion
from setuptools import setup
if TkVersion <= 8.5:
message = "This version of ttkthemes does not support Tk 8.5 and earlier. Please install a later version."
raise Runt | imeError(message)
def read(fname):
return open(os.path.join(os | .path.dirname(__file__), fname)).read()
setup(
name="ttkthemes",
packages=["ttkthemes"],
package_data={"ttkthemes": ["themes/*", "png/*", "gif/*", "advanced/*"]},
version="3.2.2",
description="A group of themes for the ttk extensions of Tkinter with a Tkinter.Tk wrapper",
author="The ttkthemes authors",
author_email="redfantom@outlook.com",
url="https://github.com/RedFantom/ttkthemes",
download_url="https://github.com/RedFantom/ttkthemes/releases",
include_package_data=True,
keywords=["tkinter", "ttk", "gui", "tcl", "theme"],
license="GPLv3",
long_description=read("README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Environment :: Win32 (MS Windows)",
"Environment :: X11 Applications",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Topic :: Software Development :: Libraries :: Tcl Extensions",
"Topic :: Software Development :: Libraries :: Python Modules"
],
zip_safe=False,
install_requires=["pillow"],
has_ext_modules=lambda: True,
python_requires=">=3.5"
)
|
slogan621/tscharts | tschartslib/medications/medications.py | Python | apache-2.0 | 8,744 | 0.01075 | #(C) Copyright Syd Logan 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Unit tests for medications application. Assumes django server is up
and running on the specified host and port
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
class CreateMedications(ServiceAPI):
def __init__(self, host, port, token, payload):
super(CreateMedications, self).__init__()
self.setHttpMethod("POST")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setPayload(payload)
self.setURL("tscharts/v1/medications/")
class GetMedications(ServiceAPI):
def makeURL(self):
hasQArgs = False
if not self._id == None:
base = "tscharts/v1/medications/{}/".format(self._id)
else:
base = "tscharts/v1/medications/"
if not self._name == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "name={}".format(self._name)
hasQArgs = True
self.setURL(base)
def __init__(self, host, port, token):
super(GetMedications, self).__init__()
self.setHttpMethod("GET")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._name = None
self._id = None
self.makeURL();
def setId(self, id):
self._id = id;
self.makeURL()
def setName(self,val):
self._name = val
self.makeURL()
class DeleteMedications(ServiceAPI):
def __init__(self, host, port, token, id):
super(DeleteMedications, self).__init__()
self.setHttpMethod("DELETE")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setURL("tscharts/v1/medications/{}/".format(id))
class TestTSMedications(unittest.TestCase):
def setUp(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("token" in ret[1])
global token
token = ret[1]["token"]
def testCreateMedications(self):
data = {}
data["name"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
self.assertEqual(ret['name'], "AAAAA")
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request test uniqueness
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404) # not found
data = {}
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request
data["names"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request
data = {}
data["name"] = ""
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400)
data = {}
data["name"] = 123
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400)
def testDeleteMedications(self):
data = {}
data["name"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
id = int(ret[1]["id"])
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
self.assertEqual(ret["name"], "AAAAA")
self.assertEqual(ret["id"], id)
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
def testGetMedications(self):
data = {}
data["name"] = "AAAAA"
x = CreateMedications(host, port, token, | data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
x = GetMedications(host, port, token); #test get a medication by its id
x.setId(int(ret[1]["id"]))
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
id = int(ret["id"])
self.assertTrue(ret["name"] == "A | AAAA")
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404)
data = {}
data["name"] = "CCCCCC"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
id = ret[1]["id"]
x = GetMedications(host, port, token) #test get a medication by its name
x.setName("CCCCCC")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue(ret[1]["name"] == "CCCCCC")
x = GetMedications(host, port, token)
x.setName("aaaa")
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404) #not found
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
namelist = ['CCCCC','AAAAA','BBBBB']
copynamelist = ['CCCCC','AAAAA','BBBBB']
idlist = []
for x in namelist:
data = {}
data["name"] = x
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
idlist.append(ret[1]["id"])
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token) #test get a list of medications
ret = x.send(timeout = 30)
for name in namelist:
self.assertTrue(name in ret[1])
copynamelist.remove(name)
self.assertEqual(copynamelist, [])
for id in idlist:
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
for id in idlist:
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) #not found
def usage():
print("medications [-h host] [-p port] [-u username] [-w password]")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:")
except getopt.GetoptError as err: |
flav-io/flavio | flavio/statistics/test_likelihood.py | Python | mit | 13,195 | 0.00773 | import unittest
import flavio
from .likelihood import *
from flavio.classes import *
from flavio.statistics.probability import *
import numpy.testing as npt
import voluptuous as vol
import os
import tempfile
class TestMeasurementLikelihood(unittest.TestCase):
def test_class(self):
o = Observable( 'test_obs' )
def f(wc_obj, par_dict):
return par_dict['m_b']*2
Prediction('test_obs', f)
d = NormalDistribution(4.2, 0.2)
m = Measurement( 'measurement of test_obs' )
m.add_constraint(['test_obs'], d)
with self.assertRaises(ValueError):
# specify include_measurements and exclude_measurements simultaneously
MeasurementLikelihood(['test_obs'],
include_measurements=['measurement of test_obs'],
exclude_measurements=['measurement of test_obs'])
ml = MeasurementLikelihood(['test_obs'])
pred = ml.get_predictions_par({'m_b': 4}, None)
self.assertDictEqual(pred, {'test_obs': 8})
self.assertEqual(ml.get_measurements, ['measurement of test_obs'])
Observable.del_instance('test_obs')
Measurement.del_instance('measurement of test_obs')
def test_correlation_warning(self):
o1 = Observable( 'test_obs 1' )
o2 = Observable( 'test_obs 2' )
d1 = MultivariateNormalDistribution([1,2],[[1,0],[0,2]])
d2 = MultivariateNormalDistribution([1,2],[[1,0],[0,2]])
par = flavio.default_parameters
m1 = Measurement( '1st measurement of test_obs 1 and 2' )
m1.add_constraint(['test_obs 1', 'test_obs 2'], d1)
# this should not prompt a warning
MeasurementLikelihood(observables=['test_obs 1'])
m2 = Measurement( '2nd measurement of test_obs 1 and 2' )
m2.add_constraint(['test_obs 1', 'test_obs 2'], d2)
# this should now prompt a warning
with self.assertWarnsRegex(UserWarning,
".*test_obs 2.*test_obs 1.*"):
MeasurementLikelihood(observables=['test_obs 1'])
Observable.del_instance('test_obs 1')
Observable.del_instance('test_obs 2')
Measurement.del_instance('1st measurement of test_obs 1 and 2')
Measurement.del_instance('2nd measurement of test_obs 1 and 2')
def test_load(self):
d = {}
o = Observable( 'test_obs' )
def f(wc_obj, par_dict):
return par_dict['m_b']*2
Prediction('test_obs', f)
d = NormalDistribution(4.2, 0.2)
m = Measurement( 'measurement of test_obs' )
m.add_constraint(['test_obs'], d)
with self.assertRaises(vol.error.Error):
# string instead of list
ml = MeasurementLikelihood.load_dict({'observables' : 'test_obs'})
with self.assertRaises(TypeError):
# compulsory argument missing
ml = MeasurementLikelihood.load_dict({})
# should work
ml = MeasurementLikelihood.load_dict({'observables' : ['test_obs']})
pred = ml.get_predictions_par({'m_b': 4}, None)
self.assertDictEqual(pred, {'test_obs': 8})
self.assertEqual(ml.get_measurements, ['measurement of test_obs'])
self.assertEqual(ml.get_number_observations(), 1)
m = Measurement( 'measurement 2 of test_obs' )
m.add_constraint(['test_obs'], d)
self.assertEqual(ml.get_number_observations(), 2)
Observable.del_instance('test_obs')
Measurement.del_instance('measurement of test_obs')
Measurement.del_instance('measurement 2 of test_obs')
class TestParameterLikelihood(unittest.TestCase):
    """Tests for ParameterLikelihood (Gaussian priors on fit parameters)."""

    def test_parameter_likelihood(self):
        par = copy.deepcopy(flavio.parameters.default_parameters)
        par.set_constraint('m_b', '4.2+-0.2')
        par.set_constraint('m_c', '1.2+-0.1')
        pl = ParameterLikelihood(par, ['m_b', 'm_c'])
        self.assertListEqual(pl.parameters, ['m_b', 'm_c'])
        npt.assert_array_equal(pl.get_central, [4.2, 1.2])
        self.assertEqual(len(pl.get_random), 2)
        # Moving each parameter by 2 sigma adds 4 to the chi2; both together: 8.
        chi2_central = -2 * pl.log_likelihood_par({'m_b': 4.2, 'm_c': 1.2})
        chi2_2s = -2 * pl.log_likelihood_par({'m_b': 4.6, 'm_c': 1.0})
        self.assertAlmostEqual(chi2_2s - chi2_central, 4 + 4)

    def test_load(self):
        # Build the likelihood purely from the dict; the previous version also
        # prepared an unused deep copy of the default parameters (dead code).
        pl = ParameterLikelihood.load_dict({'par_obj': [{'m_b': '4.2+-0.2'},
                                                        {'m_c': '1.2+-0.1'}],
                                            'parameters': ['m_b', 'm_c']})
        self.assertListEqual(pl.parameters, ['m_b', 'm_c'])
        npt.assert_array_equal(pl.get_central, [4.2, 1.2])
        self.assertEqual(len(pl.get_random), 2)
        # Same 2-sigma shift in both parameters as above: chi2 grows by 8.
        chi2_central = -2 * pl.log_likelihood_par({'m_b': 4.2, 'm_c': 1.2})
        chi2_2s = -2 * pl.log_likelihood_par({'m_b': 4.6, 'm_c': 1.0})
        self.assertAlmostEqual(chi2_2s - chi2_central, 4 + 4)
class TestLikelihood(unittest.TestCase):
    """Tests for the combined Likelihood (parameter priors + measurements)."""

    def test_likelihood(self):
        o = Observable( 'test_obs' )
        def f(wc_obj, par_dict):
            # Dummy prediction: twice the b-quark mass parameter.
            return par_dict['m_b']*2
        Prediction('test_obs', f)
        d = NormalDistribution(4.2, 0.2)
        m = Measurement( 'measurement of test_obs' )
        m.add_constraint(['test_obs'], d)
        par = copy.deepcopy(flavio.parameters.default_parameters)
        par.set_constraint('m_b', '4.2+-0.2')
        par.set_constraint('m_c', '1.2+-0.1')
        # NOTE: fixed a garbled line here ("pl | = Likelihood(pa | r, ...").
        pl = Likelihood(par, ['m_b', 'm_c'], ['test_obs'])
        # A 2-sigma shift in m_b alone adds 4 to the prior chi2 ...
        chi2_central = -2 * pl.log_prior_fit_parameters({'m_b': 4.2, 'm_c': 1.2})
        chi2_2s = -2 * pl.log_prior_fit_parameters({'m_b': 4.6, 'm_c': 1.2})
        self.assertAlmostEqual(chi2_2s - chi2_central, 4)
        # ... and likewise for m_c alone.
        chi2_central = -2 * pl.log_prior_fit_parameters({'m_b': 4.2, 'm_c': 1.2})
        chi2_2s = -2 * pl.log_prior_fit_parameters({'m_b': 4.2, 'm_c': 1.0})
        self.assertAlmostEqual(chi2_2s - chi2_central, 4)
        # Clean up the global registries.
        Observable.del_instance('test_obs')
        Measurement.del_instance('measurement of test_obs')

    def test_load(self):
        o = Observable( 'test_obs' )
        def f(wc_obj, par_dict):
            return par_dict['m_b']*2
        Prediction('test_obs', f)
        d = NormalDistribution(4.2, 0.2)
        m = Measurement( 'measurement of test_obs' )
        m.add_constraint(['test_obs'], d)
        # Exercise the object built by load_dict. Previously the loaded
        # likelihood was immediately overwritten by a directly constructed
        # one, so load_dict was never actually tested here.
        pl = Likelihood.load_dict({'par_obj': [{'m_b': '4.2+-0.2'},
                                               {'m_c': '1.2+-0.1'}],
                                   'fit_parameters': ['m_b', 'm_c'],
                                   'observables': ['test_obs']})
        chi2_central = -2 * pl.log_prior_fit_parameters({'m_b': 4.2, 'm_c': 1.2})
        chi2_2s = -2 * pl.log_prior_fit_parameters({'m_b': 4.6, 'm_c': 1.2})
        self.assertAlmostEqual(chi2_2s - chi2_central, 4)
        chi2_central = -2 * pl.log_prior_fit_parameters({'m_b': 4.2, 'm_c': 1.2})
        chi2_2s = -2 * pl.log_prior_fit_parameters({'m_b': 4.2, 'm_c': 1.0})
        self.assertAlmostEqual(chi2_2s - chi2_central, 4)
        Observable.del_instance('test_obs')
        Measurement.del_instance('measurement of test_obs')
class TestCovariances(unittest.TestCase):
def test_sm_covariance(self):
# dummy observables
o1 = Observable( 'test_obs 1' )
o2 = Observable( 'test_obs 2' )
# dummy predictions
def f1(wc_obj, par_dict):
return par_dict['m_b']
def f2(wc_obj, par_dict):
return 2.5
Prediction( 'test_ |
jrbourbeau/cr-composition | comptools/__init__.py | Python | mit | 1,516 | 0.00066 |
"""Top-level ``comptools`` package: re-exports the public API of its submodules."""
__version__ = '0.0.1'
import os
# NOTE: fixed two garbled import lines ("dataf | unctions",
# "get_detector_eff | iciencies") below.
# NOTE(review): 'ComputingEnvironemtError' is misspelled upstream in .base;
# the name is kept as-is so the import keeps working.
from .base import (get_config_paths, check_output_dir, ComputingEnvironemtError,
                   get_training_features, partition)
from .cluster import localized
from . import simfunctions
from .simfunctions import level3_sim_file_batches, level3_sim_GCD_file
from . import datafunctions
from .datafunctions import level3_data_file_batches, level3_data_GCD_file
from .io import (load_data, load_sim, apply_quality_cuts,
                 dataframe_to_X_y, load_trained_model)
from .composition_encoding import (get_comp_list, comp_to_label, label_to_comp,
                                   decode_composition_groups)
from .livetime import get_livetime_file, get_detector_livetime
from .efficiencies import get_efficiencies_file, get_detector_efficiencies
from .plotting import get_color_dict, plot_steps, get_colormap, get_color
from .pipelines import get_pipeline
from .model_selection import (get_CV_frac_correct, cross_validate_comp,
                              get_param_grid, gridsearch_optimize)
from .spectrumfunctions import (get_flux, model_flux, counts_to_flux,
                                broken_power_law_flux)
from .data_functions import ratio_error
from .unfolding import unfolded_counts_dist, response_matrix
from .mapfunctions import (equatorial_to_healpy, smooth_map, mask_map,
                           plot_skymap)
from .binning import get_energybins, get_comp_bins, get_zenith_bins, get_bins
# Convenience module-level objects computed once at import time.
paths = get_config_paths()
color_dict = get_color_dict()
|
butterscotchstallion/limnoria-plugins | IMDB/config.py | Python | mit | 1,389 | 0.00576 | ###
# Copyright (c) 2015, butterscotchstallion
# All rights reserved.
#
#
###
import supybot.conf as conf
import supybot.registry as registry
try:
    from supybot.i18n import PluginInternationalization
    _ = PluginInternationalization('IMDB')
# Catch only the missing-module case instead of a bare except, which
# would also hide unrelated errors raised during initialization.
except ImportError:
    # Placeholder that allows to run the plugin on a bot
    # without the i18n module
    _ = lambda x: x
def configure(advanced):
    """Supybot setup hook: register the IMDB plugin in the config registry."""
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified themself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    # NOTE(review): the questions helpers below are imported but unused here.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('IMDB', True)
# Register the plugin and its two configurable strings.
# NOTE: fixed two garbled string literals ("out | put", "qu | ery") below.
IMDB = conf.registerPlugin('IMDB')
conf.registerGlobalValue(IMDB, 'template',
    registry.String("$title ($year, $country) - Rating: $imdbRating :: $plot :: http://imdb.com/title/$imdbID", _("""Template for the output of a search query.""")))
# alternative template:
# $title ($year - $director) :: [i:$imdbRating r:$tomatoMeter m:$metascore] $plot :: http://imdb.com/title/$imdbID
conf.registerGlobalValue(IMDB, 'noResultsMessage',
    registry.String("No results for that query.", _("""This message is sent when there are no results""")))
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
shackra/thomas-aquinas | notestno/test_openwindow.py | Python | bsd-3-clause | 3,333 | 0.003302 | # coding: utf-8
# This file is part of Thomas Aquinas.
#
# Thomas Aquinas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Thomas Aquinas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Thomas Aquinas. If not, see <http://www.gnu.org/licenses/>.
#
# veni, Sancte Spiritus.
import nose
from nose.tools import eq_, ok_
import logging
import sfml
from time import sleep
from lib.scenefactory import AbstractScene
from lib.scenemanager import Director
class Scene(AbstractScene):
    """Minimal scene: draws itself and quits the game after 10 seconds.

    NOTE: fixed two garbled lines ("AbstractSc | ene", "wi | ndow.draw").
    """
    def __init__(self, scenemanager, initialmapfile):
        AbstractScene.__init__(self, scenemanager, initialmapfile)
        self.clock = sfml.Clock()
        self.timelapsed = 0

    def on_draw(self, window):
        """Draw the scene and exit the game once 10 000 ms have elapsed."""
        window.draw(self)
        self.timelapsed += self.clock.restart().milliseconds
        if self.timelapsed >= 10000.0:
            self.scenemanager.exitgame()

    def on_event(self, event):
        # This scene ignores input events.
        pass

    def reset(self):
        """Restart the elapsed-time counter."""
        self.timelapsed = 0

    def __str__(self):
        return "<Scene: Vacía>"
class SceneMove(AbstractScene):
    """Scene that starts a single camera pan on its first frame and exits
    the game after 20 seconds."""
    def __init__(self, scenemanager, initialmapfile):
        AbstractScene.__init__(self, scenemanager, initialmapfile)
        self.moving = False
        self.timelapsed = 0
        self.clock = sfml.Clock()

    def on_draw(self, window):
        """Kick off the camera pan once, then draw and track elapsed time."""
        if not self.moving:
            self.moving = True
            self.startmoving()
        window.draw(self)
        elapsed = self.clock.restart().milliseconds
        self.timelapsed += elapsed
        if self.timelapsed >= 20000.0:
            self.scenemanager.exitgame()

    def on_event(self, event):
        # Input events are ignored by this scene.
        pass

    def startmoving(self):
        """Pan the camera across the stage (18x12 tiles of 32 px, 16 px steps)."""
        self.scenemanager.movecamera(18 * 32, 12 * 32, False, 16)

    def __str__(self):
        return "<Scene: Vacía>"
def setup_func():
    """Bind a fresh Director and a plain Scene to the module globals."""
    global director
    global scene
    director = Director()
    scene = Scene(director, None)
def setup_func2():
    """Like setup_func, but with the camera-panning SceneMove scene."""
    global director
    global scene
    director = Director()
    scene = SceneMove(director, None)
def teardown_func():
    """Hand the scene to the director, run its loop, then wait a second."""
    director.changescene(scene)
    director.loop()
    sleep(1)
def teardown_func2():
    # NOTE(review): not referenced by any test in this chunk.
    sleep(1)
@nose.with_setup(setup_func, teardown_func)
def test_openwindow():
    """Opening a window: just hand the scene over; teardown runs the loop."""
    director.changescene(scene)
@nose.with_setup(setup_func, teardown_func)
def test_minimapa():
    """Load and fully populate a small test map."""
    scene.loadanothermap("/uniteststuff/minimap.tmx")
    scene.loadmaptiles()
    scene.loadmapimages()
    scene.loadmapobjects()
@nose.with_setup(setup_func, teardown_func)
def test_bigmapa():
    """Load and fully populate a large test map."""
    scene.loadanothermap("/uniteststuff/bigmap.tmx")
    scene.loadmaptiles()
    scene.loadmapimages()
    scene.loadmapobjects()
@nose.with_setup(setup_func2, teardown_func)
def test_movebigmapa():
    """Big map again, driven by the camera-panning SceneMove scene."""
    scene.loadanothermap("/uniteststuff/bigmap.tmx")
    scene.loadmaptiles()
    scene.loadmapimages()
    scene.loadmapobjects()
|
LLNL/spack | var/spack/repos/builtin/packages/snap/package.py | Python | lgpl-2.1 | 1,642 | 0.000609 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Snap(MakefilePackage):
    """SNAP serves as a proxy application to model
    the performance of a modern discrete ordinates
    neutral particle transport application.
    SNAP may be considered an update to Sweep3D,
    intended for hybrid computing architectures.
    It is modeled off the Los Alamos National Laboratory code PARTISN."""

    homepage = "https://github.com/lanl/SNAP"
    git      = "https://github.com/lanl/SNAP.git"

    tags = ['proxy-app']

    version('master')

    # NOTE: fixed a garbled variant name ("mp | i") below.
    variant('openmp', default=False, description='Build with OpenMP support')
    # NOTE(review): '+opt' produces an optimized (OPT=yes) build; the
    # description text looks inverted - confirm against upstream intent.
    variant('opt', default=True, description='Build with debugging')
    variant('mpi', default=True, description='Build with MPI support')

    depends_on('mpi', when='+mpi')

    build_directory = 'src'

    def edit(self, spec, prefix):
        """Toggle the Makefile's OPT/MPI/OPENMP switches to match the spec."""
        with working_dir(self.build_directory):
            makefile = FileFilter('Makefile')
            if '~opt' in spec:
                makefile.filter('OPT = yes', 'OPT = no')
            if '~mpi' in spec:
                makefile.filter('MPI = yes', 'MPI = no')
            if '~openmp' in spec:
                makefile.filter('OPENMP = yes', 'OPENMP = no')
            # Drop hard-coded Fortran flags; Spack injects its own.
            makefile.filter('FFLAGS =.*', 'FFLAGS =')

    def install(self, spec, prefix):
        """Install the gsnap binary, QA inputs and the README."""
        mkdirp(prefix.bin)
        install('src/gsnap', prefix.bin)
        install_tree('qasnap', prefix.qasnap)
        install('README.md', prefix)
|
MikkCZ/kitsune | kitsune/wiki/events.py | Python | bsd-3-clause | 13,208 | 0.000151 | import difflib
import logging
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as django_reverse
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from bleach import clean
from tidings.events import InstanceEvent, Event, EventUnion
from tidings.utils import hash_to_unsigned
from wikimarkup.parser import ALLOWED_TAGS, ALLOWED_ATTRIBUTES
from kitsune.sumo import email_utils
from kitsune.sumo.templatetags.jinja_helpers import add_utm
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.models import Document
log = logging.getLogger('k.wiki.events')
def get_diff_for(doc, old_rev, new_rev):
    """Return a bleach-cleaned unified diff between two revisions of *doc*."""
    fromfile = u'[%s] %s #%s' % (doc.locale, doc.title, old_rev.id)
    tofile = u'[%s] %s #%s' % (doc.locale, doc.title, new_rev.id)
    # difflib expects these to be bytes, not unicode
    fromfile = fromfile.encode('utf8')
    tofile = tofile.encode('utf8')
    # Get diff
    diff_parts = difflib.unified_diff(
        old_rev.content.splitlines(1),
        new_rev.content.splitlines(1),
        fromfile=fromfile, tofile=tofile)
    # Join in a single linear pass instead of repeated ``+`` concatenation
    # (which is quadratic); the parts may be a mix of str and unicode here.
    acc = u''.join(
        d if isinstance(d, unicode) else d.decode('utf8')
        for d in diff_parts)
    # Clean output
    return clean(acc, ALLOWED_TAGS, ALLOWED_ATTRIBUTES)
def context_dict(revision, ready_for_l10n=False, revision_approved=False):
    """Return a dict that fills in the blanks in KB notification templates."""
    diff = ''
    l10n = revision.document.revisions.filter(is_ready_for_localization=True)
    approved = revision.document.revisions.filter(is_approved=True)
    # Pick what to diff against: the previous ready-for-l10n revision, the
    # previously approved revision, or the document's current revision.
    # Index [1] is the second-newest, i.e. the one before *revision*.
    if ready_for_l10n and l10n.count() > 1:
        old_rev = l10n.order_by('-created')[1]
        diff = get_diff_for(revision.document, old_rev, revision)
    elif revision_approved and approved.count() > 1:
        old_rev = approved.order_by('-created')[1]
        diff = get_diff_for(revision.document, old_rev, revision)
    elif revision.document.current_revision is not None:
        old_rev = revision.document.current_revision
        diff = get_diff_for(revision.document, old_rev, revision)
    # Summary and full text are sanitized with the wiki's allowed tag set.
    return {
        'document_title': revision.document.title,
        'creator': revision.creator,
        'host': Site.objects.get_current().domain,
        'diff': diff,
        'summary': clean(revision.summary, ALLOWED_TAGS, ALLOWED_ATTRIBUTES),
        'fulltext': clean(revision.content, ALLOWED_TAGS, ALLOWED_ATTRIBUTES),
    }
class EditDocumentEvent(InstanceEvent):
    """Event fired when a certain document is edited"""
    event_type = 'wiki edit document'
    content_type = Document

    def __init__(self, revision):
        # Watchers are keyed on the document the revision belongs to.
        super(EditDocumentEvent, self).__init__(revision.document)
        self.revision = revision

    def _mails(self, users_and_watches):
        """Build the "document was edited" emails for every watcher."""
        revision = self.revision
        document = revision.document
        log.debug('Sending edited notification email for document (id=%s)' %
                  document.id)
        subject = _lazy(u'{title} was edited by {creator}')
        url = reverse('wiki.document_revisions', locale=document.locale,
                      args=[document.slug])
        # Template context: diff/summary/fulltext from context_dict plus
        # the link and metadata fields used in the subject and body.
        context = context_dict(revision)
        context['revisions_url'] = add_utm(url, 'wiki-edit')
        context['locale'] = document.locale
        context['title'] = document.title
        context['creator'] = revision.creator
        context['comment'] = revision.comment
        return email_utils.emails_with_users_and_watches(
            subject=subject,
            text_template='wiki/email/edited.ltxt',
            html_template='wiki/email/edited.html',
            context_vars=context,
            users_and_watches=users_and_watches,
            default_locale=document.locale)
class _RevisionConstructor(object):
"""An event that receives a revision when constructed"""
def __init__(self, revision):
super(_RevisionConstructor, self).__init__()
self.revision = revision
class _BaseProductFilter(object):
    """A base class for product filters.

    It adds a _filter_by_product method that filters down a list of
    (user, watches) to only the users watching the products for the
    revision.
    """
    def _filter_by_product(self, all_watchers):
        # Hash the slugs of the revision's document's products; watch
        # filter values are stored as these hashes.
        products = self.revision.document.get_products()
        product_hashes = [hash_to_unsigned(s.slug) for s in products]
        watchers_and_watches = []
        # Weed out the users that have a product filter that isn't one of the
        # document's products.
        for user, watches in all_watchers:
            for watch in watches:
                # Get the product filters for the watch, if any.
                prods = watch.filters.filter(
                    name='product').values_list('value', flat=True)
                # If there are no product filters, they are watching them all.
                if len(prods) == 0:
                    watchers_and_watches.append((user, watches))
                    break
                # Otherwise, check if they are watching any of the document's
                # products.
                for prod in prods:
                    if prod in product_hashes:
                        watchers_and_watches.append((user, watches))
                        # NOTE(review): this break only leaves the inner
                        # prod loop; a user with several matching watches
                        # looks like they could be appended more than once.
                        # Confirm whether duplicates are possible/intended.
                        break
        return watchers_and_watches
class _ProductFilter(_BaseProductFilter):
    """An event that receives a revision when constructed and filters according
    to that revision's document's products"""
    filters = set(['product'])

    # notify(), stop_notifying(), and is_notifying() take...
    # (user_or_email, product=optional_product)

    def _users_watching(self, **kwargs):
        """Return (user, watches) pairs limited to this document's products."""
        # Get the users watching any or all products.
        users = list(self._users_watching_by_filter(**kwargs))
        # Weed out the users that have a product filter that isn't one of the
        # document's products.
        return self._filter_by_product(users)
class _LocaleAndProductFilter(_BaseProductFilter):
    """An event that receives a revision when constructed and filters according
    to that revision's document's locale and products."""
    filters = set(['locale', 'product'])

    # notify(), stop_notifying(), and is_notifying() take...
    # (user_or_email, locale=some_locale, product=optional_product)

    def _users_watching(self, **kwargs):
        """Return (user, watches) pairs for this document's locale/products."""
        locale = self.revision.document.locale
        # Get the users just subscribed to the locale (any and all products).
        users = list(self._users_watching_by_filter(locale=locale, **kwargs))
        # Weed out the users that have a product filter that isn't one of the
        # document's products.
        return self._filter_by_product(users)
class ReviewableRevisionInLocaleEvent(_RevisionConstructor,
_LocaleAndProductFilter,
| Event):
"""Event fired when any revision in a certain locale is ready for review"""
# Our event_type suffices to limit our scope, so we don't bother
# setting content_type.
event_type = 'reviewable wiki in locale'
def _mails(self, users_and_watches):
revision = self.revision
document = revision.document
log.debug('Sending ready for review email for revision (id=%s)' %
revision.id)
subject = _lazy( | u'{title} is ready for review ({creator})')
url = reverse('wiki.review_revision',
locale=document.locale,
args=[document.slug, revision.id])
context = context_dict(revision)
context['revision_url'] = add_utm(url, 'wiki-ready-review')
context['locale'] = document.locale
context['title'] = document.title
context['creator'] = revision.creator
context['comment'] = revision.comment
users = []
for u, w in users_and_watches:
if document.allows(u, 'review_revision'):
users.append((u, w |
johntauber/MITx6.00.2x | Unit3/Lecture8Exercise1.py | Python | mit | 532 | 0.005639 |
def clt():
    """Flips a coin to generate a sample.

    For each size in the module-level ``sampleSizes``, draws 20 samples and
    appends the mean and standard deviation of the 20 sample means to the
    module-level ``meanOfMeans`` and ``stdOfMeans``.
    Does not return anything.
    """
    # NOTE: fixed two garbled lines in this function's original text.
    for sampleSize in sampleSizes:
        sampleMeans = []
        for t in range(20):
            sample = flipCoin(sampleSize)
            # getMeanAndStd returns (mean, std); keep only the mean here.
            sampleMeans.append(getMeanAndStd(sample)[0])
        meanOfMeans.append(getMeanAndStd(sampleMeans)[0])
        stdOfMeans.append(getMeanAndStd(sampleMeans)[1])
ONSdigital/eq-survey-runner | app/views/flush.py | Python | mit | 2,659 | 0.002256 | from flask import Blueprint, Response, request, session, current_app
from sdc.crypto.encrypter import encrypt
from sdc.crypto.decrypter import decrypt
from app.authentication.user import User
from app.globals import (get_answer_store, get_metadata, get_questionnaire_store,
get_completed_blocks, get_collection_metadata)
from app.questionnaire.path_finder import PathFinder
from app.keys import KEY_PURPOSE_AUTHENTICATION, KEY_PURPOSE_SUBMISSION
from app.submitter.converter import convert_answers
from app.submitter.submission_failed import SubmissionFailedException
from app.utilities.schema import load_schema_from_metadata
flush_blueprint = Blueprint('flush', __name__)
@flush_blueprint.route('/flush', methods=['POST'])
def flush_data():
    """Submit ("flush") a respondent's stored answers on behalf of an operator.

    Returns 403 without a token or without the 'flusher' role, 404 when
    there is nothing to flush, and 200 on success.
    """
    if session:
        session.clear()
    encrypted_token = request.args.get('token')
    # ``get`` returns None when absent; falsiness also covers the empty
    # string, so the previous extra ``or encrypted_token is None`` check
    # was redundant.
    if not encrypted_token:
        return Response(status=403)
    decrypted_token = decrypt(token=encrypted_token,
                              key_store=current_app.eq['key_store'],
                              key_purpose=KEY_PURPOSE_AUTHENTICATION,
                              leeway=current_app.config['EQ_JWT_LEEWAY_IN_SECONDS'])
    roles = decrypted_token.get('roles')
    if roles and 'flusher' in roles:
        user = _get_user(decrypted_token)
        if _submit_data(user):
            return Response(status=200)
        return Response(status=404)
    return Response(status=403)
def _submit_data(user):
    """Convert, encrypt and submit the user's answers.

    Returns True when data was flushed (the questionnaire store is then
    deleted), False when there was nothing to flush. Raises
    SubmissionFailedException if the message could not be sent.
    """
    answer_store = get_answer_store(user)
    # Guard clause: nothing stored means nothing to flush.
    if not answer_store:
        return False
    metadata = get_metadata(user)
    collection_metadata = get_collection_metadata(user)
    schema = load_schema_from_metadata(metadata)
    completed_blocks = get_completed_blocks(user)
    routing_path = PathFinder(schema, answer_store, metadata, completed_blocks).get_full_routing_path()
    # flushed=True marks the downstream payload as operator-initiated.
    message = convert_answers(metadata, collection_metadata, schema, answer_store, routing_path, flushed=True)
    encrypted_message = encrypt(message, current_app.eq['key_store'], KEY_PURPOSE_SUBMISSION)
    sent = current_app.eq['submitter'].send_message(encrypted_message, current_app.config['EQ_RABBITMQ_QUEUE_NAME'], metadata['tx_id'])
    if not sent:
        raise SubmissionFailedException()
    # Only delete the respondent's stored data after a confirmed send.
    get_questionnaire_store(user.user_id, user.user_ik).delete()
    return True
def _get_user(decrypted_token):
    """Build a User whose id and encryption key derive from the JWT claims."""
    generator = current_app.eq['id_generator']
    return User(generator.generate_id(decrypted_token),
                generator.generate_ik(decrypted_token))
|
jmesteve/saas3 | openerp/addons_extra/l10n_es_account_invoice_sequence/__openerp__.py | Python | agpl-3.0 | 2,298 | 0.00743 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 NaN Projectes de Programari Lliure, S.L. All Rights Reserved.
# http://www.NaN-tic.com
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro Manuel Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name" : "Secuencia para facturas separada de la secuencia de asientos",
    "version" : "1.1",
    "author" : "Localización española de OpenERP",
    "website" : "https://launchpad.net/openerp-spain",
    "category" : "Accounting",
    "license" : "AGPL-3",
    # NOTE: removed two stray " | " artifacts that had corrupted this
    # manifest (before the "description" key and inside its text).
    "description": """
Este módulo separa los números de las facturas de los de los asientos. Para
ello, convierte el campo number de 'related' a campo de texto normal, y le
asigna un valor según una nueva secuencia definida en el diario
correspondiente.
Su uso es obligatorio para España, ya que legalmente las facturas deben llevar
una numeración única y continua, lo que no es compatible con el sistema que
utiliza OpenERP por defecto.
**AVISO**: Hay que configurar las secuencias correspondientes para todos los
diarios de ventas, compras, abono de ventas y abono de compras utilizados
después de instalar este módulo.
""",
    "depends" : [
        'account',
    ],
    "data" : [
        'account_view.xml',
    ],
    "demo" : [],
    "active": False,
    "installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jespino/urwintranet | urwintranet/ui/__init__.py | Python | apache-2.0 | 62 | 0.016129 | # -*- coding: u | tf-8 -*-
"""
urwintranet.ui
~~~~~~~~~~~~
"""
|
blairg23/Apocalypse-Defense | src/apocalypsedefense/python-prototype/driver.py | Python | mit | 2,354 | 0.016143 | '''
Author: Blair Gemmer
Purpose: Runs the main game loop, creates characters, upgrades characters
'''
from GameEngine import *
#Game variables:
GameRunning = True
height = 500
width = 500
#Create survivors and zombies via small factory helpers instead of the
#previous copy-pasted six-line stanzas per character.
def make_survivor(weapon, x, y, name):
    '''Build a Survivor holding weapon, placed at (x, y) with the given name.'''
    s = Survivor(weapon)
    s.position = Point(x, y)
    s.name = name
    return s

def make_zombie(sight_range):
    '''Build a Zombie at a random on-screen position with the given sight range.'''
    z = Zombie()
    z.position = Point(randint(0, width - 1), randint(0, height - 1))
    z.sightRange = sight_range
    return z

#Create survivors:
w = Gun()
w.range = width
s1 = make_survivor(w, 50, 100, 'Fred')
s2 = make_survivor(w, 100, 50, 'Bob')
s3 = make_survivor(w, 150, 50, 'Frank')
s4 = make_survivor(w, 50, 150, 'Petey')
s5 = make_survivor(w, 150, 150, 'Jill')
survivors = [s1, s2, s3, s4, s5]
#Create zombies:
z1 = make_zombie(width/2)
z2 = make_zombie(width/2)
zombies = [z1, z2]
#Creates a game with window size height by width:
g = Game(height, width, survivors=survivors, zombies=zombies)
#Starts a new game:
g.startNew()
#Lists of coordinates for our plot demo:
survivorPlot = []
zombiePlot = []
#While we're in our game loop:
while GameRunning:
    #for x in range(0,500):
    if not g.isPaused and g.isRunning: #If the game isn't paused
        g.update() #Update the game objects
        #Append the plot coords:
        # Copy the position lists so later updates don't mutate the history.
        survivorPlot.append([position for position in g.sPositions])
        zombiePlot.append([position for position in g.zPositions])
    elif g.isRunning == False:
        GameRunning = False
    # NOTE(review): this second check repeats the stop condition above;
    # looks redundant but is kept - confirm before simplifying.
    if not g.isRunning: #If the game stops
        GameRunning = False #Stop the game loop
    elif g.isPaused:
        print 'Game is paused.'
numZombies = 2
numSurvivors = 5
plot(0,0,'r', label='zombies (n=' + str(numZombies)+')')
plot(1,1,'b', label='survivors (n=' + str(numSurvivors)+')')
survivorPlot = array(survivorPlot)
zombiePlot = array(zombiePlot)
for steps in zombiePlot:
| for positions in steps:
# print 'position: ' + str(positions)
plot(positions[0],positions[1], 'ro-')
for steps in survivorPlot:
for positions in step | s:
# print 'position: ' + str(positions)
plot(positions[0],positions[1], 'bo-')
title('With RandomWalk() and DirectedWalk() without Zombie SightRange')
legend()
show()
print 'we made it'
|
dbracewell/pyHermes | tweet_analyze.py | Python | apache-2.0 | 1,469 | 0.003404 | import csv
import regex as re
import gensim
import pandas as pd
# Hashtag = '#' followed by chars that are neither whitespace nor punctuation
# (\p{P} needs the third-party `regex` module imported as `re` above).
# Raw string avoids invalid-escape warnings for \s and \p.
hashtag_pattern = re.compile(r'#[^\s\p{P}]+')
dictionary = gensim.corpora.Dictionary()
texts = []
with open('/home/dbb/PycharmProjects/twitter_crawler/music.csv') as rdr:
    # Named `reader` so the `csv` module is not shadowed (it previously was).
    reader = csv.reader(rdr)
    for row in reader:
        if row:
            text = row[0]
            tags = [t.lower() for t in hashtag_pattern.findall(text)]
            if tags:
                # Build the bag-of-words corpus, growing the dictionary as we go.
                texts.append(dictionary.doc2bow(tags, allow_update=True))
lda_model = gensim.models.LdaModel(corpus=texts, id2word=dictionary, alpha='auto', num_topics=50, iterations=500)
for i in range(lda_model.num_topics):
    print([x[0] for x in lda_model.show_topic(i)])
def topic_prob_extractor(gensim_hdp):
    """Return a DataFrame with one row per HDP topic: its id and the sum of
    its word weights."""
    topics = gensim_hdp.show_topics(num_topics=-1, formatted=False)
    ids = [topic_id for topic_id, _ in topics]
    # NOTE: indexes `topics` by topic id, which assumes ids equal positions
    # in the returned list (same as the original implementation).
    totals = [sum(weight for _, weight in topics[tid][1]) for tid in ids]
    return pd.DataFrame({'topic_id': ids, 'weight': totals})
# Fit a Hierarchical Dirichlet Process model (T topics max) on the same
# corpus and print each topic's total weight.
lda_model = gensim.models.HdpModel(corpus=texts, id2word=dictionary, T=20)
df = topic_prob_extractor(lda_model)
for row in df.iterrows():
    # iterrows yields (index, Series); [1] is the row's data.
    print(row[1]['topic_id'], row[1]['weight'])
# for topic in lda_model.show_topics(num_topics=-1, num_words=10):
# id = topic[0]
# words = topic[1]
# wout = []
# for w in words.split(' '):
# if '*' in w:
# wout.append(w.split('*')[1])
# print(id, wout)
|
dimagi/commcare-hq | corehq/apps/app_manager/migrations/0017_migrate_case_search_relevant.py | Python | bsd-3-clause | 745 | 0 | from django.core.management import call_command
from django.db import migrations
from corehq.toggles import SYNC_SEARCH_CASE_CLAIM
from corehq.util.django_migrations import skip_on_fresh_install
@skip_on_fresh_install
def _migrate_case_search_relevant(apps, schema_editor):
    """Run the `migrate_case_search_relevant` management command for every
    domain with the SYNC_SEARCH_CASE_CLAIM toggle enabled (sorted for a
    deterministic processing order)."""
    # NOTE: fixed a garbled function name in this definition's original text.
    for domain in sorted(SYNC_SEARCH_CASE_CLAIM.get_enabled_domains()):
        call_command('migrate_case_search_relevant', domain=domain)
class Migration(migrations.Migration):
    """Data migration: forward runs the per-domain command; reverse is a no-op."""
    # NOTE: fixed a garbled class header in this definition's original text.

    dependencies = [
        ('app_manager', '0016_alter_exchangeapplication'),
    ]

    operations = [
        migrations.RunPython(_migrate_case_search_relevant,
                             reverse_code=migrations.RunPython.noop,
                             elidable=True),
    ]
|
tmaiwald/OSIM | OSIM/Modeling/Testbenches/VBIC_Kennlinien_TBs/VBIC_NPN_TB.py | Python | bsd-2-clause | 2,279 | 0.017113 | import matplotlib.pyplot as plt
from OSIM.Modeling.Components.NPN_Vertical_Bipolar_Intercompany_Model.VBIC_Currents.IRCI import *
from OSIM.Modeling.Components.Resistor import Resistor
from OSIM.Modeling.Components.VoltageSource import VoltageSource
from OSIM.Modeling.CircuitSystemEquations import CircuitSystemEquations
from OSIM.Modeling.Components.NPN_Vertical_Bipolar_Intercompany_Model.NPN_VBIC import NPN_VBIC
from OSIM.Simulation.CircuitAnalysis.CircuitAnalysis import CircuitAnalysis
from OSIM.Simulation.NRConvergenceException import NRConvergenceException
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
# Circuit node labels; presumably b/c/e are base/collector/emitter and
# m1/m2 are intermediate measurement nodes - confirm against NPN_VBIC.
nb = 'b'
nc = 'c'
ne = 'e'
m1 = 'm1'
m2 = 'm2'
# Display parameters (sweep step and voltage ranges).
raster = 0.05
BFIX = 0.9
CMIN = -0.5
CMAX = 0.5
EMIN = -0.5
EMAX = 0.5
# Small series resistors connect the sources (via m1/m2) to nodes b and c.
r1 = Resistor([m1,nb],"R1",0.001,None)
r2 = Resistor([m2,nc],"R2",0.001,None)
v1 = VoltageSource([m1,'0'],"V1",0,None)
v2 = VoltageSource([m2,'0'],"V2",0,None)
v3 = VoltageSource([ne,'0'],"V3",0,None)
npn = NPN_VBIC([nc, nb, ne, '0'], "Q", 0, None, pParams="../../__Parameter/NPN_VBIC_npn13G2.comp")
TBSys = CircuitSystemEquations([npn,r1,r2,v3,v1,v2])
print(TBSys.compDict)
ca = CircuitAnalysis(TBSys)
TBSys.atype = CircuitSystemEquations.ATYPE_DC
# Sweep grids: xE over EMIN..EMAX, yC over CMIN..CMAX; I holds the result.
xE = np.arange(EMIN, EMAX, raster)
yC = np.arange(CMIN, CMAX, raster)
B, C = np.meshgrid(xE, yC)
I = np.zeros((len(yC),len(xE)))
# Hold v1 fixed and sweep v2 (c axis) and v3 (e axis), recording the
# transport current at each grid point. Non-converging points are logged
# and stored as 0. (Fixed two garbled lines in this loop's original text.)
v1.changeMyVoltageInSys(BFIX)
for cidx, c in enumerate(yC):
    for eidx, e in enumerate(xE):
        v3.changeMyVoltageInSys(e)
        v2.changeMyVoltageInSys(c)
        try:
            ca.newtonRaphson(TBSys)
            sol = npn.getTransportCurrent()
        except NRConvergenceException:
            print("Convergence problem at: ")
            print("E: %G"%(e))
            print("C: %G"%(c))
            npn.IT.debugPrint()
            #x = raw_input()
            sol = 0
        #print(TBSys.curNewtonIteration)
        #a = raw_input()
        I[cidx][eidx] = sol
# ax.plot_surface(B, C, I, rstride=8, cstride=8, alpha=0.3)
ax.plot_wireframe(B, C, I, rstride=5, cstride=5, alpha=0.3)
#cset = ax.contour(B, C, I, zdir='x', offset=BMAX, cmap=cm.coolwarm)
ax.set_xlabel('E')
ax.set_xlim(EMIN, EMAX)
ax.set_ylabel('C')
ax.set_ylim(CMIN, CMAX)
ax.set_zlabel('I')
ax.set_zlim(np.amin(I), np.amax(I))
plt.show()
|
Nefry/taurus | tests/mocks.py | Python | apache-2.0 | 5,148 | 0.000389 | """ test """
import logging
import os
import tempfile
import sys
import random
from bzt.engine import Engine, Configuration, FileLister
from bzt.utils import load_class
from bzt.engine import Provisioning, ScenarioExecutor, Reporter, AggregatorListener
from bzt.modules.aggregator import ResultsReader
from tests import random_sample
try:
from exceptions import KeyboardInterrupt
except ImportError:
from builtins import KeyboardInterrupt
class EngineEmul(Engine):
    """
    emulating engine
    """

    def __init__(self):
        Engine.__init__(self, logging.getLogger(''))
        self.artifacts_base_dir = os.path.dirname(__file__) + "/../build/test"
        self._create_artifacts_dir()
        self.finalize_exc = None
        self.was_finalize = False

    def _shutdown(self):
        return super(EngineEmul, self)._shutdown()

    def dump_config(self):
        """Dump the engine config to a temp file and log it as JSON."""
        # mkstemp returns (fd, path); close the descriptor so it doesn't
        # leak - the previous version discarded it unclosed.
        fd, fname = tempfile.mkstemp()
        os.close(fd)
        self.config.dump(fname, Configuration.JSON)
        with open(fname) as fh:
            logging.debug("JSON:\n%s", fh.read())
class ModuleMock(ScenarioExecutor, Provisioning, Reporter, FileLister):
    """Mock module implementing every engine role at once; each lifecycle
    stage records that it ran and can be made to raise via settings."""
    def __init__(self):
        super(ModuleMock, self).__init__()
        # Exceptions to raise per stage; populated from settings in prepare().
        self.postproc_exc = None
        self.check_exc = None
        self.prepare_exc = None
        self.startup_exc = None
        self.shutdown_exc = None
        self.check_iterations = sys.maxsize
        # Flags recording which lifecycle stages have executed.
        self.was_shutdown = False
        self.was_startup = False
        self.was_prepare = False
        self.was_check = False
        self.was_postproc = False

    def prepare(self):
        """
        :raise self.prepare_exc:
        """
        self.log.info("Preparing mock")
        self.was_prepare = True
        self.check_iterations = int(self.settings.get('check_iterations', "2"))
        self.postproc_exc = self.get_exc("postproc")
        self.check_exc = self.get_exc("check")
        self.prepare_exc = self.get_exc("prepare")
        self.startup_exc = self.get_exc("startup")
        self.shutdown_exc = self.get_exc("shutdown")
        if isinstance(self.engine.aggregator, ResultsReader):
            # Feed the aggregator a reader pre-loaded with 0-9 random
            # samples per check iteration.
            reader = MockReader()
            for num in range(0, self.check_iterations):
                for quan in range(0, int(random.random() * 10)):
                    reader.data.append(random_sample(num))
            self.engine.aggregator.add_reader(reader)
        if self.prepare_exc:
            raise self.prepare_exc

    def startup(self):
        """
        :raise self.startup_exc:
        """
        self.log.info("Startup mock")
        self.was_startup = True
        if self.startup_exc:
            raise self.startup_exc

    def check(self):
        """Count down the remaining iterations; True (or check_exc) at zero.

        :return: :raise self.check_exc:
        """
        self.was_check = True
        self.log.info("Checks remaining: %s", self.check_iterations)
        self.check_iterations -= 1
        if not self.check_iterations:
            if self.check_exc:
                raise self.check_exc
            else:
                return True
        return False

    def shutdown(self):
        """
        :raise self.shutdown_exc:
        """
        self.log.info("Shutdown mock")
        self.was_shutdown = True
        if self.shutdown_exc:
            raise self.shutdown_exc

    def post_process(self):
        """
        :raise self.postproc_exc:
        """
        self.log.info("Postproc mock")
        self.was_postproc = True
        if self.postproc_exc:
            raise self.postproc_exc

    def get_exc(self, param):
        """Instantiate the exception class named in settings[param], if any.

        :type param: str
        :return:
        """
        name = self.settings.get(param, "")
        if name:
            cls = load_class(name)
            return cls()
        return None

    def resource_files(self):
        """
        :return:
        """
        return [__file__]
class MockReader(ResultsReader, AggregatorListener):
    """
    Results reader fed from a pre-filled ``data`` list; it also listens to
    its own aggregated output to assert timestamps arrive strictly ordered.
    """

    def __init__(self):
        super(MockReader, self).__init__()
        self.results = []
        self.data = []
        self.add_listener(self)
        self.track_percentiles = [0, 50, 90, 99, 99.5, 100]

    def _read(self, final_pass=False):
        """
        Emulate reading samples by draining the pre-filled list.

        :type final_pass: bool
        :return: generator of samples
        """
        while self.data:
            yield self.data.pop(0)

    def aggregated_second(self, data):
        """
        Store and assert the aggregate sequence.

        :type data: dict
        :raise AssertionError: when timestamps are not strictly increasing
        """
        # NOTE: lines below were garbled by dataset field separators in the
        # dump; reconstructed from the surrounding comparison and message.
        if self.results:
            if self.results[-1]["ts"] >= data["ts"]:
                raise AssertionError("TS sequence wrong: %s>=%s" % (self.results[-1]["ts"], data["ts"]))
        logging.info("Data: %s", data)
        self.results.append(data)
def download_progress_mock(blocknum, blocksize, totalsize):
    """No-op urllib-style download progress hook; all arguments are ignored."""
    return None
class ResultChecker(AggregatorListener):
    """
    Aggregator listener that delegates every aggregated data point to the
    callable supplied at construction time.
    """

    def __init__(self, callback):
        super(ResultChecker, self).__init__()
        self.callback = callback

    def aggregated_second(self, data):
        """
        Forward one aggregated second to the callback.

        :type data: dict
        """
        self.callback(data)
|
mpkato/mobileclick | setup.py | Python | mit | 1,565 | 0.017891 | # -*- coding:utf-8 -*-
from setuptools import setup

# NOTE: the two lang_model summarization console-script entries were split by
# dataset field separators in the dump; rejoined below.
setup(
    name = "mobileclick",
    description = "mobileclick provides baseline methods and utility scripts for the NTCIR-12 MobileClick-2 task",
    author = "Makoto P. Kato",
    author_email = "kato@dl.kuis.kyoto-u.ac.jp",
    license = "MIT License",
    url = "https://github.com/mpkato/mobileclick",
    version='0.2.0',
    packages=[
        'mobileclick',
        'mobileclick.nlp',
        'mobileclick.methods',
        'mobileclick.scripts'
    ],
    install_requires = [
        'BeautifulSoup',
        'nltk>=3.1',
        'numpy'],
    entry_points = {
        'console_scripts': [
            'mobileclick_download_training_data=mobileclick.scripts.mobileclick_download_training_data:main',
            'mobileclick_download_test_data=mobileclick.scripts.mobileclick_download_test_data:main',
            'mobileclick_random_ranking_method=mobileclick.scripts.mobileclick_random_ranking_method:main',
            'mobileclick_lang_model_ranking_method=mobileclick.scripts.mobileclick_lang_model_ranking_method:main',
            'mobileclick_random_summarization_method=mobileclick.scripts.mobileclick_random_summarization_method:main',
            'mobileclick_lang_model_summarization_method=mobileclick.scripts.mobileclick_lang_model_summarization_method:main',
            'mobileclick_lang_model_two_layer_summarization_method=mobileclick.scripts.mobileclick_lang_model_two_layer_summarization_method:main',
        ],
    },
    tests_require=['nose']
)
|
hk4n/milk | tests/resources/test_plugins/testplugin3.py | Python | mit | 177 | 0 | from milk | .plugin import Plugin
class testplugin3(Plugin):
    """Test plugin that simply echoes its configuration key/value pairs."""

    def __init__(self, config):
        for key, value in config.items():
            # FIX: format expression was garbled by a dataset field separator
            print("%s: %s" % (key, value))
|
rahulunair/nova | nova/scheduler/weights/disk.py | Python | apache-2.0 | 1,534 | 0 | # Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Disk Weigher. Weigh hosts by their disk usage.
The default is to spread instances across all hosts evenly. If you prefer
stacking, you can set the 'disk_weight_multiplier' option (by configuration
or aggregate metadata) to a negative number and the weighing has the opposite
effect of the default.
"""
import nova.conf
from nova.scheduler import utils
from nova.scheduler import weights
CONF = nova.conf.CONF
class DiskWeigher(weights.BaseHostWeigher):
    # Floor for normalized weights: hosts are never weighed below zero.
    minval = 0

    def weight_multiplier(self, host_state):
        """Override the weight multiplier.

        Resolves 'disk_weight_multiplier' per host (aggregate metadata may
        override it), falling back to the filter_scheduler config option.
        """
        return utils.get_weight_multiplier(
            host_state, 'disk_weight_multiplier',
            CONF.filter_scheduler.disk_weight_multiplier)

    def _weigh_object(self, host_state, weight_properties):
        """Higher weights win. We want spreading to be the default."""
        return host_state.free_disk_mb
|
Ghini/ghini.desktop | bauble/plugins/garden/test_import_pocket_log.py | Python | gpl-2.0 | 22,436 | 0.001649 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Mario Frasca <mario@anche.no>.
# Copyright 2018 Tanager Botanical Garden <tanagertourism@gmail.com>
#
# This file is part of ghini.desktop.
#
# ghini.desktop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ghini.desktop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ghini.desktop. If not, see <http://www.gnu.org/licenses/>.
#
import os
import datetime
from unittest import TestCase
from gi.repository import Gtk
import logging
logger = logging.getLogger(__name__)
from nose import SkipTest
import bauble.db as db
from bauble.test import BaubleTestCase
from bauble import utils
from bauble.plugins.garden.accession import Accession, Verification
from bauble.plugins.garden.plant import Plant, PlantNote, PlantChange
from bauble.plugins.garden.location import Location
from bauble.plugins.plants import Family, Genus, Species
from .import_pocket_log import process_line, lookup
class ImportNewPlant(BaubleTestCase):
def test_importing_nothing(self):
# prepare T0
# test T0
self.assertEquals(self.session.query(Accession).first(), None)
self.assertEquals(self.session.query(Plant).first(), None)
# action
line = '20180905_170619 :PENDING_EDIT: : Eugenia stipitata : 1 : (@;@)'
process_line(self.session, line, 1536845535)
# T1
self.assertEquals(self.session.query(Accession).first(), None)
self.assertEquals(self.session.query(Plant).first(), None)
def test_comple | tely_identified_existing_species(self):
# prepare T0
fam = Family(epithet='Myrtaceae')
gen = Genus(epithet='Eugenia', family=fam)
s | pe = Species(epithet='stipitata', genus=gen)
self.session.add_all([fam, gen, spe])
self.session.commit()
# test T0
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertEquals(a, None)
eugenia = self.session.query(Genus).filter_by(epithet='Eugenia').first()
self.assertNotEquals(eugenia, None)
s = self.session.query(Species).filter_by(genus=eugenia, epithet='stipitata').first()
self.assertNotEquals(s, None)
# action
line = '20180905_170619 :PENDING_EDIT: 2018.0001.1 : Eugenia stipitata : 1 : (@;@)'
process_line(self.session, line, 1536845535)
# T1
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertNotEquals(a, None)
self.assertEquals(a.species.genus.epithet, 'Eugenia')
self.assertEquals(a.species.epithet, 'stipitata')
self.assertEquals(a.quantity_recvd, 1)
self.assertEquals(len(a.plants), 1)
self.assertEquals(a.plants[0].quantity, 1)
def test_completely_identified_new_species(self):
# prepare T0
fam = Family(epithet='Myrtaceae')
gen = Genus(epithet='Eugenia', family=fam)
self.session.add_all([fam, gen])
self.session.commit()
# test T0
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertEquals(a, None)
eugenia = self.session.query(Genus).filter_by(epithet='Eugenia').first()
self.assertNotEquals(eugenia, None)
s = self.session.query(Species).filter_by(genus=eugenia, epithet='stipitata').first()
self.assertEquals(s, None)
# action
db.current_user.override('Pasquale')
line = '20180905_170619 :PENDING_EDIT: 2018.0001.1 : Eugenia stipitata : 1 : (@;@)'
process_line(self.session, line, 1536845535)
db.current_user.override()
# T1
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertNotEquals(a, None)
self.assertEquals(a.species.genus.epithet, 'Eugenia')
self.assertEquals(a.species.epithet, 'stipitata')
self.assertEquals(a.quantity_recvd, 1)
self.assertEquals(len(a.plants), 1)
self.assertEquals(a.plants[0].quantity, 1)
self.assertEquals(len(a.verifications), 1)
self.assertEquals(a.verifications[0].verifier, 'Pasquale')
def test_genus_identified(self):
# prepare T0
fam = Family(epithet='Myrtaceae')
gen = Genus(epithet='Eugenia', family=fam)
self.session.add_all([fam, gen])
self.session.commit()
# test T0
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertEquals(a, None)
eugenia = self.session.query(Genus).filter_by(epithet='Eugenia').first()
self.assertNotEquals(eugenia, None)
s = self.session.query(Species).filter_by(genus=eugenia, infrasp1='sp', infrasp1_rank=None).first()
self.assertEquals(s, None)
# action
line = '20180905_170619 :PENDING_EDIT: 2018.0001.1 : Eugenia : 1 : (@;@)'
process_line(self.session, line, 1536845535)
# T1
eugenia_sp = self.session.query(Species).filter_by(genus=eugenia, infrasp1='sp', infrasp1_rank=None).first()
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertNotEquals(a, None)
self.assertEquals(a.species.genus, eugenia)
self.assertEquals(a.species, eugenia_sp)
self.assertEquals(a.quantity_recvd, 1)
self.assertEquals(len(a.plants), 1)
self.assertEquals(a.plants[0].quantity, 1)
self.assertEquals(len(a.verifications), 0)
def test_not_identified(self):
# prepare T0
# test T0
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertEquals(a, None)
s = self.session.query(Species).first()
self.assertEquals(s, None)
# action
line = '20180905_170619 :PENDING_EDIT: 2018.0001.1 : : 1 : (@;@)'
process_line(self.session, line, 1536845535)
self.session.commit()
# T1
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertNotEquals(a, None)
self.assertEquals(a.species.infrasp1, 'sp')
self.assertEquals(a.species.genus.epithet, 'Zzd-Plantae')
self.assertEquals(a.species.genus.family.epithet, 'Zz-Plantae')
self.assertEquals(a.quantity_recvd, 1)
self.assertEquals(len(a.plants), 1)
self.assertEquals(a.plants[0].quantity, 1)
def test_not_identified_no_quantity_defaults_to_one(self):
# prepare T0
# test T0
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertEquals(a, None)
s = self.session.query(Species).first()
self.assertEquals(s, None)
# action
line = '20180905_170619 :PENDING_EDIT: 2018.0001.1 : : : (@;@)'
process_line(self.session, line, 1536845535)
self.session.commit()
# T1
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertNotEquals(a, None)
self.assertEquals(a.species.infrasp1, 'sp')
self.assertEquals(a.species.genus.epithet, 'Zzd-Plantae')
self.assertEquals(a.species.genus.family.epithet, 'Zz-Plantae')
self.assertEquals(a.quantity_recvd, 1)
self.assertEquals(len(a.plants), 1)
self.assertEquals(a.plants[0].quantity, 1)
def test_not_identified_some_quantity_not_one(self):
# prepare T0
# test T0
a = self.session.query(Accession).filter_by(code='2018.0001').first()
self.assertEquals(a, None)
s = self.session.query(Species).first()
self.assertEquals(s, None)
# action
line = '20180905_170619 :PENDING_EDIT: 2018.0001.1 : : 3 : (@;@)'
process_line(self.session, line, 1536845535)
|
kobotoolbox/formpack | tests/fixtures/grouped_translated/__init__.py | Python | gpl-3.0 | 297 | 0 | # | coding: utf-8
'''
grouped_translated
'''
from ..load_fixture_json import load_fixture_json

# Fixture description for the 'grouped_translated' survey.
DATA = {
    'title': 'Animal identification survey with translations and groups',
    'id_string': 'grouped_translated',
    'versions': [
        # FIX: call was garbled by a dataset field separator; rejoined
        load_fixture_json('grouped_translated/v1'),
    ],
}
|
mperignon/anuga-sedtransport | file_conversion/generic_dem2pts.py | Python | gpl-2.0 | 6,790 | 0.008542 | # external modules
import numpy as num
# ANUGA modules
import anuga.utilities.log as log
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
from generic_asc2dem import generic_asc2dem
def generic_dem2pts(name_in, name_out=None, quantity_name=None,
                    easting_min=None, easting_max=None,
                    northing_min=None, northing_max=None,
                    use_cache=False, verbose=False,):
    """Read raster file from the following NetCDF format (.dem)

    Generic function, created from dem2pts

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    name_in may be a .asc or .dem file to be converted.

    Convert to NetCDF pts format which is

    points:  (Nx2) float array
    elevation: N float array
    """

    kwargs = {'name_out': name_out,
              'quantity_name': quantity_name,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_generic_dem2pts, name_in, kwargs,
                       dependencies=[name_in],
                       verbose=verbose)
    else:
        # FIX: the builtin apply() is Python-2-only (removed in Python 3);
        # a direct call with keyword expansion is equivalent.
        result = _generic_dem2pts(name_in, **kwargs)

    return result
def _generic_dem2pts(name_in, name_out=None, quantity_name=None, verbose=False,
                     easting_min=None, easting_max=None,
                     northing_min=None, northing_max=None):
    """Read raster from the following NetCDF format (.dem)

    Internal function. See public function generic_dem2pts for details.
    """

    # FIXME: Can this be written feasibly using write_pts?

    import os
    from anuga.file.netcdf import NetCDFFile

    root = name_in[:-4]

    if name_in[-4:] == '.asc':
        intermediate = root + '.dem'
        if verbose:
            log.critical('Preconvert %s from asc to %s' % \
                         (name_in, intermediate))
        # FIX: this module imports generic_asc2dem; the bare name asc2dem was
        # undefined here and raised NameError on the .asc path.
        generic_asc2dem(name_in)
        name_in = intermediate
    elif name_in[-4:] != '.dem':
        raise IOError('Input file %s should be of type .asc or .dem.' % name_in)

    # FIX: 'basename_out' was undefined in this scope; validate name_out itself.
    if name_out is not None and name_out[-4:] != '.pts':
        raise IOError('Input file %s should be of type .pts.' % name_out)

    # Get NetCDF
    infile = NetCDFFile(name_in, netcdf_mode_r)

    if verbose: log.critical('Reading raster from %s' % (name_in))

    ncols = int(infile.ncols)
    nrows = int(infile.nrows)
    xllcorner = float(infile.xllcorner)  # Easting of lower left corner
    yllcorner = float(infile.yllcorner)  # Northing of lower left corner
    cellsize = float(infile.cellsize)
    NODATA_value = float(infile.NODATA_value)

    dem_elevation = infile.variables[quantity_name]

    zone = int(infile.zone)
    false_easting = float(infile.false_easting)
    false_northing = float(infile.false_northing)

    # Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units

    # Get output file
    if name_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = name_out

    if verbose: log.critical('Store to NetCDF file %s' % ptsname)

    # NetCDF file definition
    outfile = NetCDFFile(ptsname, netcdf_mode_w)

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF pts format for compact and portable ' \
                          'storage of spatial point data'

    # Assign default values
    if easting_min is None: easting_min = xllcorner
    if easting_max is None: easting_max = xllcorner + ncols*cellsize
    if northing_min is None: northing_min = yllcorner
    if northing_max is None: northing_max = yllcorner + nrows*cellsize

    # Compute offsets to update georeferencing
    easting_offset = xllcorner - easting_min
    northing_offset = yllcorner - northing_min

    # Georeferencing
    outfile.zone = zone
    outfile.xllcorner = easting_min  # Easting of lower left corner
    outfile.yllcorner = northing_min  # Northing of lower left corner
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units

    # Grid info (FIXME: probably not going to be used, but heck)
    outfile.ncols = ncols
    outfile.nrows = nrows

    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
    totalnopoints = nrows*ncols

    #========================================
    # Do the processing with numpy
    #========================================
    # Cell-centre coordinates: row 0 of the grid is the northernmost row.
    y = num.arange(nrows, dtype=num.float)
    y = yllcorner + (nrows-1)*cellsize - y*cellsize

    x = num.arange(ncols, dtype=num.float)
    x = xllcorner + x*cellsize

    xx, yy = num.meshgrid(x, y)

    xx = xx.flatten()
    yy = yy.flatten()

    # Keep only points inside the requested easting/northing window
    flag = num.logical_and(num.logical_and((xx <= easting_max), (xx >= easting_min)),
                           num.logical_and((yy <= northing_max), (yy >= northing_min)))

    dem = dem_elevation[:].flatten()

    id = num.where(flag)[0]

    xx = xx[id]
    yy = yy[id]
    dem = dem[id]

    clippednopoints = len(dem)

    # Drop NODATA cells
    data_flag = dem != NODATA_value
    data_id = num.where(data_flag)

    xx = xx[data_id]
    yy = yy[data_id]
    dem = dem[data_id]

    nn = clippednopoints - len(dem)
    # FIX: line rejoined (garbled by a dataset field separator in the dump)
    nopoints = len(dem)

    if verbose:
        log.critical('There are %d values in the raster' % totalnopoints)
        log.critical('There are %d values in the clipped raster'
                     % clippednopoints)
        log.critical('There are %d NODATA_values in the clipped raster' % nn)

    # FIX: line rejoined (garbled by a dataset field separator in the dump)
    outfile.createDimension('number_of_points', nopoints)
    outfile.createDimension('number_of_dimensions', 2)  # This is 2d data

    # Variable definitions
    outfile.createVariable('points', netcdf_float, ('number_of_points',
                                                    'number_of_dimensions'))
    outfile.createVariable(quantity_name, netcdf_float, ('number_of_points',))

    # Get handles to the variables
    points = outfile.variables['points']
    elevation = outfile.variables[quantity_name]

    # Store points relative to the (clipped) lower-left corner
    points[:, 0] = xx - easting_min
    points[:, 1] = yy - northing_min
    elevation[:] = dem

    infile.close()
    outfile.close()
|
rholy/dnf | dnf/package.py | Python | gpl-2.0 | 6,052 | 0.002148 | # package.py
# Module defining the dnf.Package class.
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
""" Contains the dnf.Package class. """
from __future__ import absolute_import
from __future__ import unicode_literals
import binascii
import dnf.rpm
import dnf.yum.misc
import hawkey
import logging
import os
logger = logging.getLogger("dnf")
class Package(hawkey.Package):
    """ Represents a package. #:api """

    def __init__(self, initobject, base):
        super(Package, self).__init__(initobject)
        self.base = base
        self._chksum = None
        self._repo = None
        self._size = None

    @property
    def chksum(self):
        # explicit override wins; command-line packages are checksummed on disk
        if self._chksum:
            return self._chksum
        if self.from_cmdline:
            chksum_type = dnf.yum.misc.get_default_chksum_type()
            chksum_val = dnf.yum.misc.checksum(chksum_type, self.location)
            return (hawkey.chksum_type(chksum_type),
                    binascii.unhexlify(chksum_val))
        return super(Package, self).chksum

    @chksum.setter
    def chksum(self, val):
        self._chksum = val

    @property
    def from_cmdline(self):
        return self.reponame == hawkey.CMDLINE_REPO_NAME

    @property
    def from_system(self):
        return self.reponame == hawkey.SYSTEM_REPO_NAME

    @property
    def from_repo(self):
        yumdb_info = self.base.yumdb.get_package(self) if self.from_system else {}
        if 'from_repo' in yumdb_info:
            return '@'+yumdb_info.from_repo
        return self.reponame

    @property
    def header(self):
        return dnf.rpm.header(self.localPkg())

    @property
    def size(self):
        if self._size:
            return self._size
        return super(Package, self).size

    @size.setter
    def size(self, val):
        self._size = val

    @property
    def pkgid(self):
        try:
            (_, chksum) = self.hdr_chksum
            return binascii.hexlify(chksum)
        except AttributeError:
            return None

    @property # yum compatibility attribute
    def idx(self):
        """ Always type it to int, rpm bindings expect it like that. """
        return int(self.rpmdbid)

    @property # yum compatibility attribute
    def repoid(self):
        return self.reponame

    @property # yum compatibility attribute
    def pkgtup(self):
        return (self.name, self.arch, str(self.e), self.v, self.r)

    @property # yum compatibility attribute
    def repo(self):
        if self._repo:
            return self._repo
        return self.base.repos[self.reponame]

    @repo.setter
    def repo(self, val):
        self._repo = val

    @property # yum compatibility attribute
    def relativepath(self):
        return self.location

    @property # yum compatibility attribute
    def a(self):
        return self.arch

    @property # yum compatibility attribute
    def e(self):
        return self.epoch

    @property # yum compatibility attribute
    def v(self):
        return self.version

    @property # yum compatibility attribute
    def r(self):
        return self.release

    @property # yum compatibility attribute
    def ui_from_repo(self):
        return self.reponame

    # yum compatibility method
    def evr_eq(self, pkg):
        return self.evr_cmp(pkg) == 0

    # yum compatibility method
    def evr_gt(self, pkg):
        return self.evr_cmp(pkg) > 0

    # yum compatibility method
    def evr_lt(self, pkg):
        return self.evr_cmp(pkg) < 0

    # yum compatibility method
    def getDiscNum(self):
        return self.medianr

    # yum compatibility method
    def localPkg(self):
        """ Package's location in the filesystem.

            For packages in remote repo returns where the package will be/has
            been downloaded.
        """
        # FIX: the cmdline branch below was garbled by a dataset field
        # separator in the dump; reconstructed.
        if self.from_cmdline:
            return self.location
        if self.baseurl:
            path = os.path.join(self.baseurl, self.location)
            if path.startswith("file://"):
                path = path[7:]
            return path
        loc = self.location
        if not self.repo.local:
            loc = os.path.basename(loc)
        return os.path.join(self.repo.pkgdir, loc)

    # yum compatibility method
    def returnIdSum(self):
        """ Return the chksum type and chksum string how the legacy yum expects
            it.
        """
        (chksum_type, chksum) = self.chksum
        return (hawkey.chksum_name(chksum_type), binascii.hexlify(chksum).decode())

    # yum compatibility method
    def verifyLocalPkg(self):
        if self.from_system:
            raise ValueError("Can not verify an installed package.")
        if self.from_cmdline:
            return True # local package always verifies against itself
        (chksum_type, chksum) = self.returnIdSum()
        real_sum = dnf.yum.misc.checksum(chksum_type, self.localPkg(),
                                         datasize=self.size)
        if real_sum != chksum:
            logger.debug('%s: %s check failed: %s vs %s' %
                         (self, chksum_type, real_sum, chksum))
            return False
        return True
|
Pugsworth/ScratchPad | ScratchPad.py | Python | mit | 3,907 | 0.041976 | import sublime, sublime_plugin, tempfile, os, re;
global g_current_file;
global g_last_view;
g_current_file = None;
g_last_view = None;
# Two methods that could be used here:
# get language name
# check if .sublime-build exists for language name
# if it doesn't, somehow get the file extension
# check for .sublime-build using file extension
# wouldn't work if it's a scratch buffer
# create temp file
# change extension (just incase) of temp file to extension of running file
# quickly switch to that file and run_command("build")
# immediately after, run_command("close")
# path = sublime.packages_path().split("\\");
# path.pop();
# path.append(view.settings().get('syntax'));
# open("/".join(path).replace("tmLanguage", "sublime-build"));
# re.search("<string>(\w+)</string>", open(os.path.join("\\".join(sublime.packages_path().split("\\")[:-1]), view.settings().get('syntax'))).read()).group(1)
class ScratchpadFile: # class to delegate the data to
    """Holds a temp file object and remembers its original path for cleanup."""

    def __init__(self, file):
        self.file = file
        self.file_name = file.name

    def set_file(self, file):
        self.file = file

    def unlink(self):
        try:
            os.unlink(self.file_name)
        # FIX: 'except OSError, e' was Python-2-only syntax; 'as' works on 2.6+
        except OSError as e:
            # FIX: OSError has no 'errorno' attribute (was a typo); the real
            # attributes are errno and strerror.
            print("Couldn't remove file %s, %i, %s" % (self.file_name, e.errno, e.strerror))
class ScratchpadCommand(sublime_plugin.TextCommand):
    """Builds the current selection (or whole buffer) by writing it to a
    temporary file with the right extension and triggering a build on it."""

    def __get_filetype(self):
        # obtain the absolute path to the syntax file; the last 3 path entries
        # are: packages / syntax folder / syntax.tmLanguage
        syntaxpath = os.path.join(os.path.split(os.path.normcase(sublime.packages_path()))[0], os.path.normcase(self.view.settings().get('syntax')))
        text = None
        with open(syntaxpath, "rt") as f:
            # fileTypes array location is not fixed, so read the entire file
            text = f.read()
        if text is not None:
            # hacky regex to find the first fileTypes entry
            filetype = re.search("<key>.*(\n?).*<array>.*(\n?).*<string>(\w+)<\/string>", text).group(3)
            return filetype

    def __get_selection(self):
        selection = self.view.sel()[0]  # only the first selection, for now...
        if selection.empty():
            selectedText = self.view.substr(sublime.Region(0, self.view.size()))  # grab entire file
        else:
            selectedText = self.view.substr(selection)  # grab just the selected text
        return selectedText

    def run(self, edit):
        # a saved, unmodified buffer with no selection builds directly
        if self.view.sel()[0].empty() and not(self.view.is_dirty() or self.view.is_scratch()) and self.view.file_name() is not None:
            self.view.window().run_command("build")
            return

        global g_current_file
        settings = sublime.load_settings("ScratchPad.sublime-settings")  # NOTE(review): loaded but currently unused
        filetype = "." + self.__get_filetype()
        selectedText = self.__get_selection()

        new_view = None
        with tempfile.NamedTemporaryFile(mode='w+t', delete=False, prefix="scratchpad", suffix=filetype) as f:
            f.write(selectedText)
            g_current_file = ScratchpadFile(f)
            # FIX: line rejoined (garbled by a dataset field separator)
            new_view = self.view.window().open_file(f.name)

        global g_last_view
        g_last_view = self.view
class ScratchpadEvent(sublime_plugin.EventListener):
    """Triggers the build as soon as the scratchpad temp file is loaded."""

    def on_load(self, view):
        global g_current_file
        global g_last_view

        if g_current_file is not None and os.path.normcase(g_current_file.file_name) == os.path.normcase(view.file_name()):
            window = view.window()
            window.run_command("build")
            window.run_command("close")
            # g_current_file.unlink()  # build is an asynchronous call

            # FIX: condition rejoined (garbled by a dataset field separator);
            # restore focus to the view that started the scratch build
            if g_last_view is not None and window.active_view() != g_last_view:
                window.focus_view(g_last_view)
            g_last_view = None
            g_current_file = None
|
gallifrey17/eden | modules/tests/inv/send_item.py | Python | mit | 3,148 | 0.0054 | """ Sahana Eden Module Automated Tests - INV001 Send Items
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from helper import InvTestFunctions
class SendItem(InvTestFunctions):
    """ Inventory Test - Send Workflow (Send items)

        @param items: This test sends a specific item to another party.

        This test assume that test/inv-mngt has been added to prepop
        - e.g. via demo/IFRC_Train

        @Case: INV001
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
    """

    # -------------------------------------------------------------------------
    def test_inv001_send_items(self):
        """ Tests for Send Workflow """
        user = "admin"
        self.login(account="admin", nexturl="inv/send/create")
        # (field name, value) pairs for the shipment header form
        send_data = [("site_id",
                      "Timor-Leste Red Cross Society (CVTL) National Warehouse (Warehouse)",
                      ),
                     ("type",
                      "Internal Shipment",
                      ),
                     ("to_site_id",
                      "Lospalos Warehouse (Warehouse)",
                      ),
                     ("sender_id",
                      "Beatriz de Carvalho",
                      ),
                     ("recipient_id",
                      "Liliana Otilia",
                      )
                     ]
        # one inner list per shipment line item
        item_data = [
                     [("send_inv_item_id",
                       "Blankets - Australian Red Cross",
                       "inv_widget",
                       ),
                      ("quantity",
                       "3",
                       ),
                      ],
                     ]

        result = self.send(user, send_data)
        send_id = self.send_get_id(result)
        for data in item_data:
            result = self.track_send_item(user, send_id, data)

        # Send the shipment
        self.send_shipment(user, send_id)
|
dundeemt/SoCo | examples/commandline/discover.py | Python | mit | 175 | 0 | from __future__ import print_fun | ction
import soco
""" Prints the name of each discovered player in the network. """
# soco.discover() returns the set of Sonos zones found on the local network.
for zone in soco.discover():
    print(zone.player_name)
|
kaarolch/ansible | lib/ansible/inventory/yaml.py | Python | gpl-3.0 | 6,401 | 0.002656 | # Copyright 2016 RedHat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible import constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible.parsing.utils.addresses import parse_address
from ansible.compat.six import string_types
class InventoryParser(object):
"""
Takes a YAML-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
def __init__(self, loader, groups, filename=C.DEFAULT_HOST_LIST):
self._loader = loader
self.filename = filename
# Start with an empty host list and whatever groups we're passed in
# (which should include the default 'all' and 'ungrouped' groups).
self.hosts = {}
self.patterns = {}
self.groups = groups
# Read in the hosts, groups, and variables defined in the
# inventory file.
data = loader.load_from_file(filename)
self._parse(data)
def _parse(self, data):
'''
Populates self.groups from the given array of lines. Raises an error on
any parse failure.
'''
self._compile_patterns()
# We expect top level ke | ys to correspond to groups, iterate over them
# to get host, vars and subgroups (which we iterate over recursivelly | )
for group_name in data.keys():
self._parse_groups(group_name, data[group_name])
# Finally, add all top-level groups as children of 'all'.
# We exclude ungrouped here because it was already added as a child of
# 'all' at the time it was created.
for group in self.groups.values():
if group.depth == 0 and group.name not in ('all', 'ungrouped'):
self.groups['all'].add_child_group(Group(group_name))
def _parse_groups(self, group, group_data):
if group not in self.groups:
self.groups[group] = Group(name=group)
if isinstance(group_data, dict):
#make sure they are dicts
for section in ['vars', 'children', 'hosts']:
if section in group_data and isinstance(group_data[section], string_types):
group_data[section] = { group_data[section]: None}
if 'vars' in group_data:
for var in group_data['vars']:
if var != 'ansible_group_priority':
self.groups[group].set_variable(var, group_data['vars'][var])
else:
self.groups[group].set_priority(group_data['vars'][var])
if 'children' in group_data:
for subgroup in group_data['children']:
self._parse_groups(subgroup, group_data['children'][subgroup])
self.groups[group].add_child_group(self.groups[subgroup])
if 'hosts' in group_data:
for host_pattern in group_data['hosts']:
hosts = self._parse_host(host_pattern, group_data['hosts'][host_pattern])
for h in hosts:
self.groups[group].add_host(h)
def _parse_host(self, host_pattern, host_data):
'''
Each host key can be a pattern, try to process it and add variables as needed
'''
(hostnames, port) = self._expand_hostpattern(host_pattern)
hosts = self._Hosts(hostnames, port)
if isinstance(host_data, dict):
for k in host_data:
for h in hosts:
h.set_variable(k, host_data[k])
if k in ['ansible_host', 'ansible_ssh_host']:
h.address = host_data[k]
return hosts
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
def _Hosts(self, hostnames, port):
'''
Takes a list of hostnames and a port (which may be None) and returns a
list of Hosts (without recreating anything in self.hosts).
'''
hosts = []
# Note that we decide whether or not to create a Host based solely on
# the (non-)existence of its hostname in self.hosts. This means that one
# cannot add both "foo:22" and "foo:23" to the inventory.
for hn in hostnames:
if hn not in self.hosts:
self.hosts[hn] = Host(name=hn, port=port)
hosts.append(self.hosts[hn])
return hosts
def get_host_variables(self, host):
return {}
def _compile_patterns(self):
'''
Compiles the regular expressions required to parse the inventory and stores them in self.patterns.
'''
self.patterns['groupname'] = re.compile( r'''^[A-Za-z_][A-Za-z0-9_]*$''')
|
cdoremus/udacity-python_web_development-cs253 | src/unit5/blog_datastore_factory.py | Python | apache-2.0 | 683 | 0.010249 | '''
Created on Apr 30, 2012
@author: h87966
'''
from unit5.blog_datastore_memory import BlogMemory | DataStore
from unit5.blog_datastore_appengine import BlogAppengineDataStore
class BlogDataStoreFactory():
'''
classdocs
'''
storage_implementations = {'memory':BlogMemoryDataStore(),
'appengine':BlogAppengineDataStore()}
def __init__(se | lf, storage_impl='appengine'):
'''
Constructor
'''
self.storage = self.storage_implementations[storage_impl]
def set_storage(self, blog_storage):
self.storage = blog_storage
def get_storage(self):
return self.storage
|
ppruitt/site | site/paulpruitt_net/settings.py | Python | mit | 2,273 | 0.00396 | " | ""
Django settings for paulpruitt_net project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from secrets import SECRET_KEY, DB_USER, DB_PASSWORD
BASE_DIR = | os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'taggit',
'pblog'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'paulpruitt_net.urls'
WSGI_APPLICATION = 'paulpruitt_net.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE' : 'django.db.backends.postgresql_psycopg2',
'NAME' : 'site',
'USER' : DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST' : '127.0.0.1',
'PORT' : '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/srv/www/site/static'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Do not allow IFrames
X_FRAME_OPTIONS = 'DENY'
|
tsukinowasha/ansible-lint-rules | rules/ShellAltService.py | Python | mit | 567 | 0.001764 | from ansiblelint import AnsibleLintRule
class ShellAltService(AnsibleLintRule):
| id = 'E507'
shortdesc = 'Use service module'
description = ''
tags = ['shell']
def matchtask(self, file, task):
if task['action']['__ansible_module__'] not in ['shell', 'command']:
return False
args = task['action']['__ansible_arguments__']
if 'service' in args:
return True
if 'systemctl' in args:
return True
if '/etc/rc.d/init.d/' in args:
return True
| return False
|
malongge/selenium-pom | tests/test_index_page.py | Python | apache-2.0 | 3,269 | 0.002478 | import time
import pytest
from pages.index_page import IndexPage
from .base import BaseTest
class TestIndexPage(BaseTest):
@pytest.fixture(autouse=True)
def _go_to_index_page(self, request, base_url, selenium, login):
_, pg = login
# self.home_pg = pg
index_pg = IndexPage(base_url, selenium)
index_pg.switch_to_index_page(pg)
self.index_pg = index_pg
# yield
@pytest.mark.flaky(reruns=1)
def test_index_focus_pic_auto_run(self):
# pg = self.home_pg
index_pg = self.index_pg
# index_pg.switch_to_index_page(pg)
# 根据按钮的数量来确定轮播图的数量
btns = index_pg.get_focus_pic_hover_buttons()
assert len(btns) > 1
# 轮播图每 4s 进行切换, 注意在执 | 行的时候,鼠标不要放到轮播图上, 否则会影响测试结果
links = []
for _ in btns:
link = index_pg.get_focus_pic_img_link()
links.append(link)
time.sleep(4)
| for index, value in enumerate(links[:-1]):
assert links[index] != links[index + 1], '轮播第 {} 张链接地址为 {} 应该在 4s 后轮播, 但未检测到有轮播'.format(index, value)
# @pytest.mark.flaky(reruns=1)
# def test_index_focus_pic_auto_run(self, base_url, selenium, login):
# _, pg = login
# index_pg = IndexPage(base_url, selenium)
# index_pg.switch_to_index_page(pg)
# # 根据按钮的数量来确定轮播图的数量
# btns = index_pg.get_focus_pic_hover_buttons()
# assert len(btns) > 1
#
# # 轮播图每 4s 进行切换, 注意在执行的时候,鼠标不要放到轮播图上, 否则会影响测试结果
# links = []
# for _ in btns:
# link = index_pg.get_focus_pic_img_link()
# links.append(link)
# time.sleep(4)
#
# for index, value in enumerate(links[:-1]):
# assert links[index] != links[index + 1], '轮播第 {} 张链接地址为 {} 应该在 4s 后轮播, 但未检测到有轮播'.format(index, value)
@pytest.mark.flaky(reruns=1)
def test_index_hover_focus_pic_btn(self):
# pg = self.home_pg
index_pg = self.index_pg
# index_pg.switch_to_index_page(pg)
# 根据按钮的数量来确定轮播图的数量
btns = index_pg.get_focus_pic_hover_buttons()
assert len(btns) > 1
img_links = []
for index, btn in enumerate(btns):
index_pg.hover(btn)
img = index_pg.get_focus_pic_img_link()
assert self._check_link_request_code(img, index_pg), '第 {} 张轮播图:{} 不显示'.format(index, img)
link = index_pg.get_focus_pic_link()
assert self._check_link_request_code(link, index_pg), '第 {} 张轮播图的链接 {} 请求状态码非 200'.format(index, img)
img_links.append(img)
for index in range(0, len(img_links) - 1):
assert img_links[index] != img_links[index + 1], '轮播图应该在鼠标放到轮播 {} 按钮时进行切换, 但未检测到切换'.format(index + 1)
|
ivanamihalek/tcga | tcga/01_somatic_mutations/old_tcga_tools/007_mutation_type_cleanup.py | Python | gpl-3.0 | 5,786 | 0.01158 | #!/usr/bin/python
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: ivana.mihalek@gmail.com
#
import os.path
import re
from old_tcga_tools.tcga_utils.utils import make_named_fields, is_informative
from old_tcga_tools.tcga_utils.ucsc import *
from time import time
verbose = True
#########################################
def store_fixed_row (cursor, fixed_row):
return
mutation_annot_pattern = re.compile('(\D+)(\-*\d+)(\D+)')
#########################################
def parse_mutation (mutation):
if not mutation or len(mutation)==0: return ["", "",""]
match_return = re.match(mutation_annot_pattern, mutation)
mut_from = match_return.group(1)
mut_to = match_return.group(3)
mut_position = int (match_return.group(2))
return [mut_position, mut_from, mut_to]
#########################################
def check_aa_type (cursor, assembly_dict, fields):
checks = True
fixed_row = {}
conflict = fields['conflict']
aa_change = fields['aa_change']
variant_classification = fields['variant_classification']
# I'll fix the absolute minimum that I can scrape by with
if not conflict and (variant_classification!="missense_mutation" or i | s_informative(aa_change)):
return [checks, fixed_row]
id = fields['id']
hugo_symbol = fields ['hugo_symbol']
start_ | position = fields['start_position']
end_position = fields['end_position']
tumor1 = fields['tumor_seq_allele1']
tumor2 = fields['tumor_seq_allele2']
norm1 = fields['match_norm_seq_allele1']
norm2 = fields['match_norm_seq_allele2']
reference = fields['reference_allele']
aa_change = fields['aa_change']
cdna_change = fields['cdna_change']
meta_info_index = fields['meta_info_index']
assembly = assembly_dict[meta_info_index]
chromosome = fields['chromosome']
ucsd_segment = segment_from_das(assembly, chromosome, start_position, end_position)
print id, hugo_symbol
print assembly, chromosome, start_position, end_position, ucsd_segment
print reference, norm1, norm2, tumor1, tumor2, cdna_change, aa_change
print parse_mutation (cdna_change)
print parse_mutation (aa_change)
print "conflict: ", conflict
print
switch_to_db(cursor, 'ucsc')
qry = "select * from canonical_transcripts_%s where hugo_name='%s'" % (assembly, hugo_symbol)
rows = search_db(cursor, qry)
print rows
exit(1)
return [checks, fixed_row]
#########################################
def get_assemblies (cursor):
assembly = {}
qry = "select id, assembly from mutations_meta"
rows = search_db(cursor, qry)
if not rows:
print "assembly not found"
exit(1) # db not found
for row in rows:
assembly[row[0]] = row[1]
return assembly
#########################################
def main():
db = connect_to_mysql()
cursor = db.cursor()
sample_type = "metastatic"
if sample_type == "primary":
table = 'somatic_mutations'
elif sample_type == "metastatic":
table = 'metastatic_mutations'
else:
print "I don't know how to hadndle ", sample_type, " sample types"
exit(1) # unknown sample type
db_names = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD", "DLBC", "ESCA", "GBM", "HNSC", "KICH" ,"KIRC",
"KIRP", "LAML", "LGG", "LIHC", "LUAD", "LUSC", "MESO", "OV", "PAAD", "PCPG", "PRAD", "REA",
"SARC", "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM"]
#db_names = ["LUAD"]
chunk = 10 # we process rows 10 by 10+
offset = -chunk
for db_name in db_names:
qry = "show databases like '%s'" % db_name
rows = search_db(cursor, qry)
if not rows:
print db_name, "not found"
exit(1) # db not found
print " ** ", db_name
switch_to_db (cursor, db_name)
if ( check_table_exists (cursor, db_name, table)):
print table, "table found in ", db_name
else:
print table, "table not found in ", db_name
header_fields = get_column_names (cursor, db_name, table)
if not header_fields:
print "\t no columnn names (?)"
continue
assembly = get_assemblies (cursor)
done = False
while not done:
offset += chunk
if offset and not offset%1000: print "offset: ", offset
switch_to_db(cursor, db_name) # check_aa_type will switch to ucsc db
qry = "select * from %s limit %d, %d" % (table, offset, chunk)
rows = search_db(cursor, qry)
if not rows:
done = True
continue
for row in rows:
[checks, fixed_row] = check_aa_type (cursor, assembly, make_named_fields (header_fields, row) )
if checks: continue
store_fixed_row (cursor, fixed_row)
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
|
cernops/nova | nova/virt/libvirt/imagebackend.py | Python | apache-2.0 | 45,765 | 0.000284 | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import base64
import contextlib
import functools
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import strutils
from oslo_utils import units
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE, _LI, _LW
from nova import image
from nova import keymgr
from nova import utils
from nova.virt.disk import api as disk
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
__imagebackend_opts = [
cfg.StrOpt('images_type',
default='default',
choices=('raw', 'qcow2', 'lvm', 'rbd', 'ploop', 'default'),
help='VM Images format. If default is specified, then'
' use_cow_images flag is used instead of this one.'),
cfg.StrOpt('images_volume_group',
help='LVM Volume Group that is used for VM images, when you'
' specify images_type=lvm.'),
cfg.BoolOpt('sparse_logical_volumes',
default=False,
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.StrOpt('images_rbd_pool',
default='rbd',
help='The RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('images_rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file to use'),
cfg.StrOpt('hw_disk_discard',
choices=('ignore', 'unmap'),
help='Discard option for nova managed disks. Need'
' Libvirt(1.0.6) Qemu1.5 (raw format) Qemu1.6(qcow2'
' format)'),
]
CONF = nova.conf.CONF
CONF.register_opts(__imagebackend_opts, 'libvirt')
CONF.import_opt('rbd_user', 'nova.virt.libvirt.volume.net', group='libvirt')
CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume.net',
group='libvirt')
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
@six.add_metaclass(abc.ABCMeta)
class Image(object):
SUPPORTS_CLONE = False
def __init__(self, source_type, driver_format, is_block_dev=False):
"""Image initialization.
:source_type: block or file
:driver_format: raw or qcow2
:is_block_dev:
"""
if (CONF.ephemeral_storage_encryption.enabled and
not self._supports_encryption()):
raise exception.NovaException(_('Incompatible settings: '
'ephemeral storage encryption is supported '
'only for LVM images.'))
self.source_type = source_type
self.driver_format = driver_format
self.driver_io = None
self.discard_mode = CONF.libvirt.hw_disk_discard
self.is_block_dev = is_block_dev
self.preallocate = False
# NOTE(dripton): We store lines of json (path, disk_format) in this
# file, for some image types, to prevent attacks based on changing the
# disk_format.
self.disk_info_path = None
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
self.lock_path = os.path.join(CONF.instances_path, 'locks')
def _supports_encryption(self):
"""Used to test that the backend supports encryption.
Override in the subclass if backend supports encryption.
"""
return False
@abc.abstractmethod
def create_image(self, prepare_template, base, size, *args, **kwargs):
"""Create image from template.
Contains specific behavior for each image type.
:prepare_template: function, that creates template.
Should accept `target` argument.
:base: Template name
:size: Size of created image in bytes
"""
pass
@abc.abstractmethod
def resize_image(self, size):
"""Resize image to size (in bytes).
:size: Desired size of image in bytes
"""
pass
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
extra_specs, hypervisor_version):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
:disk_bus: Disk bus type
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:hypervisor_version: the hypervisor version
"""
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
info.source_device = device_type
info.target_bus = disk_bus
info.target_dev = disk_dev
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.driver_io = self.driver_io
info.driver_format = self.driver_format
driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version,
self.is_block_dev)
info.driver_name = driver_name
info.source_path = self.path
self.disk_qos(info, extra_specs)
return info
def disk_qos(self, info, extra_specs):
tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
'disk_write_bytes_sec', 'disk_write_iops_sec',
'disk_total_bytes_sec', 'disk_total_iops_sec']
for key, value in six.iteritems(extra_specs):
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in tune_items:
setattr(info, scope[1], value)
def libvirt_fs_info(self, target, driver_type=None):
"""Get `LibvirtConfigGuestFilesys` filled for this image.
:target: target directory inside a container.
:driver_type: filesystem driver type, can be loop
nbd or ploop.
"""
info = vconfig.LibvirtConfigGuestFilesys()
info.target_dir = target
if self.is_block_dev:
info | .source_type = "b | lock"
info.source_dev = self.path
else:
info.source_type = "file"
info.source_file = self.path
info.driver_format = self.driver_format
if driver_type:
info.driver_type = driver_type
else:
if self.driver_format == "raw":
info.driver_type = "loop"
else:
info.driver_type = "nbd"
return info
def check_image_exists(self):
return os.path.exists(self.path)
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
"""Creates image from template.
Ensures that template and image not already exists.
Ensures that base directory exists.
Synchronizes on template fetching.
:fetch_func: Function that creates the base image
Should accept `target` argument.
:filename: Name of the file in th |
joh12041/route-externalities | preprocessing/grid_creation.py | Python | mit | 3,570 | 0.003641 | import os
import json
import argparse
from math import ceil, floor
from geojson import Polygon, Feature, FeatureCollection, dump
from shapely.geometry import shape, Point
"""
Code adapted from answer to question here:
http://gis.stackexchange.com/questions/54119/creating-square-grid-polygon-shapefile-with-python
"""
# Output directory for the county geojson grids
GRIDS_DIR = "data/grids/"
SCALE = 3
def grid(outputGridfn, xmin, xmax, ymin, ymax, gridHeight, gridWidth, boundary):
# check all floats
xmin = float(xmin)
xmax = float(xmax)
ymin = float(ymin)
ymax = float(ymax)
gridWidth = float(gridWidth)
gridHeight = float(gridHeight)
# get rows
rows = ceil((ymax - ymin) / gridHeight)
# get columns
cols = ceil((xmax - xmin) / gridWidth)
# create grid cells
countcols = 0
features = []
while countcols < cols:
# set x coordinate for this column
grid_x_left = xmin + (countcols * gridWidth)
countcols += 1
# reset count for rows
countrows = 0
while countrows < rows:
# update y coordinate for this row
grid_y_bottom = ymin + (countrows * gridHeight)
countrows += 1
# check if grid centroid contained in county boundary
bottomleftcorner = (grid_x_left, grid_y_bottom)
coords = [bottomleftcorner]
# add other three corners of gridcell before closing grid with starting point again
for i in [(0.001, 0), (0.001, 0.001), (0, 0.001), (0, 0)]:
coords.append((bottomleftcorner[0] + i[1], bottomleftcorner[1] + i[0]))
intersects = False
for corner in coords[1:]:
if boundary.contains(Point(corner)):
intersects = True
break
if intersects:
properties = {'rid': round(grid_y_bottom * 10**SCALE), 'cid': round(grid_x_left * 10**SCALE)}
features.append(Feature(geometry=Polygon([coords]), properties=properties))
with open(outputGridfn, 'w') as fout:
dump(FeatureCollection(features), fout)
def main():
"""Generate grids for a list of counties."""
parser = argparse.ArgumentParser()
parser.add_argument("features_geojson", help="Path to GeoJSON with features to be gridded.")
parser.add_argument("output_folder", help="Folder to contain output grid GeoJSONs.")
args = parser.parse_args()
with open(args.features_geojson, 'r') as fin:
features_gj = json.load(fin)
if not os.path.isdir(GRIDS_DIR):
os.mkdir(GRIDS_DIR)
count = 0
for feature in features_gj['features']:
try:
feature['properties']['FIPS'] = "{0}{1}".format(feature['properties']['STATE'], feature['properties' | ]['COUNTY'])
except:
pass
count += 1
boundary = shape(feature['geometry'])
bb = boundary.bounds
xmin = bb[0] # most western point
xmax = bb[2] # most eastern poin | t
ymin = bb[1] # most southern point
ymax = bb[3] # most northern point
gridHeight = 0.001
gridWidth = 0.001
xmin = floor(xmin * 10**SCALE) / 10**SCALE
ymax = ceil(ymax * 10**SCALE) / 10**SCALE
grid("{0}.geojson".format(os.path.join(args.output_folder, feature['properties']['FIPS'])),
xmin, xmax, ymin, ymax, gridHeight, gridWidth, boundary)
if count % 150 == 0:
print("{0} counties complete.".format(count))
if __name__ == "__main__":
main() |
SiLab-Bonn/monopix_daq | monopix_daq/scans/en_tune.py | Python | gpl-2.0 | 7,608 | 0.024842 | #!/usr/bin/env python
import os,sys,time
import numpy as np
import bitarray
import tables as tb
import logging
import yaml
import matplotlib.pyplot as plt
import monopix_daq.scan_base as scan_base
import monopix_daq.analysis.interpreter as interpreter
local_configuration={"exp_time": 1.0,
"cnt_th": 1,
"n_pix": 512,
"th_start": 0.85,
"th_stop": 0.5,
"th_step":[-0.01,-0.002,-0.0005]
}
class EnTune(scan_base.ScanBase):
scan_id = "en_tune"
def scan(self,**kwargs):
th=kwargs.pop("th_start",0.85)
th_stop=kwargs.pop("th_stop",0.5)
th_step=kwargs.pop("th_step",[-0.01,-0.002,-0.0005])
cnt_th=kwargs.pop("cnt_th",1)
exp_time=kwargs.pop("exp_time",1.0)
n_pix=kwargs.pop("n_pix",512)
####################
## create a table for scan_params
param_dtype=[("scan_param_id","<i4"),("th","<f2")]
description=np.zeros((1,),dtype=param_dtype).dtype
self.scan_param_table = self.h5_file.create_table(self.h5_file.root,
name='scan_parameters', title='scan_parameters',
description=description, filters=self.filter_tables)
scan_param_id=0
en_org=np.copy(self.dut.PIXEL_CONF["PREAMP_EN"][:,:])
th_step_i=0
fig,ax=plt.subplots(2,2)
plt.ion()
while th > th_stop or th_step_i==len(th_step):
self.monopix.set_th(th)
en=np.copy(self.dut.PIXEL_CONF["PREAMP_EN"][:,:])
self.monopix.set_monoread()
with self.readout(scan_param_id=scan_param_id,fill_buffer=True,clear_buffer=True,
readout_interval=0.005):
time.sleep(exp_time)
self.monopix.stop_monoread()
scan_param_id=scan_param_id+1
##########################
### get data from buffer
buf = self.fifo_readout.data
if len(buf)==0:
self.logger.info("en_tune:th=%.4f pix=%d, no data"%(th,len(np.argwhere(en))))
th=th+th_step[th_step_i]
continue
elif th_step_i!=(len(th_step)-1):
self.logger.info("en_tune:th=%.4f step=%.4f "%(th,th_step[th_step_i]))
th=th-th_step[th_step_i]
th_step_i=th_step_i+1
continue
data = np.concatenate([buf.popleft()[0] for i in range(len(buf))])
img=interpreter.raw2img(data,delete_noise=False)
##########################
## showing status
self.logger.info("en_tune:==== %.4f===data %d=====cnt %d======en %d====="%(
th,len(data),np.sum(img), len(en[en])))
ax[0,0].cla()
ax[0,0].imshow(np.transpose(img),vmax=min(np.max(img),100),origin="low",aspect="auto")
ax[0,0].set_title("th=%.4f"%th)
ax[1,0].cla()
ax[1,0].imshow(np.transpose(self.monopix.get_tdac_memory()),vmax=16,vmin=0,origin="low",aspect="auto")
ax[0,1].cla()
ax[0,1].imshow(np.transpose(en),vmax=1,vmin=0,origin="low",aspect="auto")
ax[0,1].set_title("en=%d"%len(np.where(en)))
fig.tight_layout()
fig.savefig(os.path.join(self.working_dir,"last_scan.png"),format="png")
plt.pause(0.003)
##########################
### find noisy
arg=np.argwhere(img>cnt_th)
s="en_tune:noisy pixel %d"%len(arg)
for a in arg:
s="[%d,%d]=%d"%(a[0],a[1],img[a[0],a[1]]),
self.logger.info(s)
self.logger.info("en_tune:th=%.4f en=%d"%(th,len(np.argwhere(en))))
en=np.bitwise_and(en,img<=cnt_th)
if n_pix >= len(np.argwhere(en)):
self.monopix.set_th(th-th_step[th_step_i])
break
else:
th=th+th_step[th_step_i]
self.monopix.set_preamp_en(en)
self.logger.info("en_tune:th=%.4f en=%d"%(
self.dut.SET_VALUE["TH"],
len(np.argwhere(self.dut.PIXEL_CONF["PREAMP_EN"][:,:]))
))
def analyze(self):
pass
def plot(self):
fraw = self.output_filename +'.h5'
fpdf = self.output_filename +'.pdf'
import monopix_daq.analysis.plotting_base as plotting_base
with plotting_base.PlottingBase(fpdf,save_png=True) as plotting:
with tb.open_file(fraw) as f:
firmware=yaml.load(f.root.meta_data.a | ttrs.firmware)
## DAC Configuration page
dat=yaml.load(f.root.meta_data.at | trs.dac_status)
dat.update(yaml.load(f.root.meta_data.attrs.power_status))
plotting.table_1value(dat,page_title="Chip configuration")
## Pixel Configuration page (before tuning)
dat=yaml.load(f.root.meta_data.attrs.pixel_conf_before)
plotting.plot_2d_pixel_4(
[dat["PREAMP_EN"],dat["INJECT_EN"],dat["MONITOR_EN"],dat["TRIM_EN"]],
page_title="Pixel configuration before tuninig",
title=["Preamp","Inj","Mon","TDAC"],
z_min=[0,0,0,0], z_max=[1,1,1,15])
## Preamp Configuration
dat=yaml.load(f.root.meta_data.attrs.pixel_conf)
plotting.plot_2d_pixel_hist(np.array(dat["PREAMP_EN"]),
title="Enabled preamp",
z_max=1)
if __name__ == "__main__":
from monopix_daq import monopix
import argparse
parser = argparse.ArgumentParser(usage="python en_tune.py",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--config_file", type=str, default=None)
parser.add_argument('-e',"--exp_time", type=float, default=local_configuration["exp_time"])
parser.add_argument('-npix',"--n_pix", type=float, default=local_configuration["n_pix"])
parser.add_argument('-t',"--th_start", type=float, default=local_configuration["th_start"])
parser.add_argument("-f","--flavor", type=str, default="28:32")
parser.add_argument("--tdac", type=int, default=None)
parser.add_argument("--LSBdacL", type=int, default=None)
parser.add_argument("-p","--power_reset", action='store_const', const=1, default=0) ## defualt=True: skip power reset
parser.add_argument("-fout","--output_file", type=str, default=None)
args=parser.parse_args()
local_configuration["exp_time"]=args.exp_time
local_configuration["n_pix"]=args.n_pix
local_configuration["th_start"]=args.th_start
m=monopix.Monopix(no_power_reset=not bool(args.power_reset))
scan = EnTune(m, fout=args.output_file, online_monitor_addr="tcp://127.0.0.1:6500")
if args.config_file is not None:
m.load_config(args.config_file)
if args.flavor is not None:
m.set_preamp_en("none")
if args.flavor=="all":
collist=np.arange(0,36,1)
else:
tmp=args.flavor.split(":")
collist=np.arange(int(tmp[0]),int(tmp[1]),1)
en=np.copy(m.dut.PIXEL_CONF["PREAMP_EN"][:,:])
for c in collist:
en[c,:]=True
m.set_preamp_en(en)
if args.tdac is not None:
m.set_tdac(args.tdac)
if args.LSBdacL is not None:
m.set_global(LSBdacL=args.LSBdacL)
scan.start(**local_configuration)
#scan.analyze()
scan.plot()
|
mick-d/nipype | nipype/interfaces/c3.py | Python | bsd-3-clause | 1,785 | 0.00112 | # -*- coding: utf-8 -*-
"""The ants module provides basic functions for interfacing with ants functions.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from .base import (CommandLineInputSpec, traits, TraitedSpec,
File, SEMLikeCommandLine)
class C3dAffineToolInputSpec(CommandLineInputSpec):
reference_file = File(exists=True, argstr="-ref %s", position=1)
source_file = File(exists=True, argstr='-src %s', position=2)
transform_file = File(exists=True, argstr='%s', position=3)
itk_transform = traits.Either(traits.Bool, File(), hash_files=False,
desc="Export ITK transform.",
argstr="-oi | tk %s", position=5)
fsl2ras = traits.Bool(argstr='-fsl2ras', position=4)
class C3dAffineToolOutputSpec(TraitedSpec):
itk_transform = File(exists=True)
class C3dAffineTool(SEMLikeCommandLine):
"""Converts fsl-style Affine registration into ANTS compatible itk format
Example
=======
>>> from nipype.interfaces.c3 import C3dAffineTool
>>> c3 = C3dAffineTool()
>>> c3.inputs.source_file = 'cmatrix.mat'
>>> c3.inpu | ts.itk_transform = 'affine.txt'
>>> c3.inputs.fsl2ras = True
>>> c3.cmdline # doctest: +ALLOW_UNICODE
'c3d_affine_tool -src cmatrix.mat -fsl2ras -oitk affine.txt'
"""
input_spec = C3dAffineToolInputSpec
output_spec = C3dAffineToolOutputSpec
_cmd = 'c3d_affine_tool'
_outputs_filenames = {'itk_transform': 'affine.txt'}
|
lrks/python-escpos | escpos/printer.py | Python | gpl-3.0 | 5,425 | 0.005346 | #!/usr/bin/python
"""
@author: Manuel F Martinez <manpaz@bashlinux.com>
@organization: Bashlinux
@copyright: Copyright (c) 2012 Bashlinux
@license: GNU GPL v3
"""
import usb.core
import usb.util
import serial
import socket
from .escpos import *
from .constants import *
from .exceptions import *
class Usb(Escpos):
    """ Define USB printer """

    def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
        """
        @param idVendor  : Vendor ID
        @param idProduct : Product ID
        @param interface : USB device interface
        @param in_ep     : Input end point
        @param out_ep    : Output end point
        """
        self.idVendor = idVendor
        self.idProduct = idProduct
        self.interface = interface
        self.in_ep = in_ep
        self.out_ep = out_ep
        self.open()

    def open(self):
        """ Search device on USB tree and set is as escpos device """
        self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
        if self.device is None:
            # NOTE(review): only warns; the following calls will fail on a
            # None device -- confirm whether raising would be preferable.
            print("Cable isn't plugged in")
        check_driver = None
        try:
            check_driver = self.device.is_kernel_driver_active(0)
        except NotImplementedError:
            # Some platforms (e.g. Windows) cannot report kernel-driver state.
            pass
        # Detach the kernel driver so libusb can claim the interface.
        if check_driver is None or check_driver:
            try:
                self.device.detach_kernel_driver(0)
            except usb.core.USBError as e:
                if check_driver is not None:
                    print("Could not detatch kernel driver: %s" % str(e))
        try:
            self.device.set_configuration()
            self.device.reset()
        except usb.core.USBError as e:
            print("Could not set configuration: %s" % str(e))

    def _raw(self, msg):
        """ Print any command sent in raw format """
        self.device.write(self.out_ep, msg, self.interface)

    def __del__(self):
        """ Release USB interface """
        if self.device:
            usb.util.dispose_resources(self.device)
            self.device = None
class Serial(Escpos):
    """ Define Serial printer """

    def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1,
                 parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
                 xonxoff=False, dsrdtr=True):
        """
        @param devfile  : Device file under dev filesystem
        @param baudrate : Baud rate for serial transmission
        @param bytesize : Serial buffer size
        @param timeout  : Read/Write timeout
        @param parity   : Parity checking
        @param stopbits : Number of stop bits
        @param xonxoff  : Software flow control
        @param dsrdtr   : Hardware flow control (False to enable RTS/CTS)
        """
        self.devfile = devfile
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.timeout = timeout
        self.parity = parity
        self.stopbits = stopbits
        self.xonxoff = xonxoff
        self.dsrdtr = dsrdtr
        self.open()

    def open(self):
        """ Setup serial port and set is as escpos device """
        self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate,
                                    bytesize=self.bytesize, parity=self.parity,
                                    stopbits=self.stopbits, timeout=self.timeout,
                                    xonxoff=self.xonxoff, dsrdtr=self.dsrdtr)
        # pyserial raises SerialException on failure, so the None branch is
        # effectively unreachable; kept for parity with the other backends.
        if self.device is not None:
            print("Serial printer enabled")
        else:
            print("Unable to open serial printer on: %s" % self.devfile)

    def _raw(self, msg):
        """ Print any command sent in raw format """
        self.device.write(msg)

    def __del__(self):
        """ Close Serial interface """
        if self.device is not None:
            self.device.close()
class Network(Escpos):
    """ Define Network printer """

    def __init__(self, host, port=9100):
        """
        @param host : Printer's hostname or IP address
        @param port : Port to write to (9100 is the usual raw-print port)
        """
        self.host = host
        self.port = port
        self.open()

    def open(self):
        """ Open TCP socket and set it as escpos device """
        self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.device.connect((self.host, self.port))
        # NOTE: socket()/connect() raise on failure, so this branch never
        # fires; kept only for parity with the other printer backends.
        if self.device is None:
            print("Could not open socket for %s" % self.host)

    def _raw(self, msg):
        """ Print any command sent in raw format; str is UTF-8 encoded. """
        if isinstance(msg, str):
            self.device.send(msg.encode())
        else:
            self.device.send(msg)

    def close(self):
        """ Close TCP connection (safe to call more than once). """
        # BUG FIX: close() used to delegate to __del__(), and __del__ raised
        # AttributeError when socket creation itself had failed (self.device
        # never assigned).  Guard with getattr and make the close idempotent.
        device = getattr(self, 'device', None)
        if device is not None:
            device.close()
            self.device = None

    def __del__(self):
        """ Close TCP connection """
        self.close()
class File(Escpos):
    """ Define Generic file printer """

    def __init__(self, devfile="/dev/usb/lp0"):
        """
        @param devfile : Device file under dev filesystem
        """
        self.devfile = devfile
        self.open()

    def open(self):
        """ Open system file """
        # open() raises IOError/OSError on failure, so the old "device is
        # None" message could never be printed and has been dropped.
        self.device = open(self.devfile, "wb")

    def _raw(self, msg):
        """ Print any command sent in raw format """
        self.device.write(msg)

    def __del__(self):
        """ Close system file """
        # BUG FIX: if open() failed in __init__, self.device was never set
        # and __del__ raised AttributeError during garbage collection.
        if getattr(self, 'device', None) is not None:
            self.device.close()
|
AwesomeTTS/awesometts-anki-addon | awesometts/service/naverclovapremium.py | Python | gpl-3.0 | 5,322 | 0.003946 | # -*- coding: utf-8 -*-
# AwesomeTTS text-to-speech add-on for Anki
# Copyright (C) 2010-Present Anki AwesomeTTS Development Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Service implementation for the Naver Clova Premium voice service
https://apidocs.ncloud.com/en/ai-naver/clova_premium_voice/tts/
"""
import time
import datetime
import requests
import urllib
from .base import Service
from .languages import Language
from .languages import Gender
from .languages import StandardVoice
from .voicelist import VOICE_LIST
from typing import List
__all__ = ['NaverClovaPremium']
class NaverClovaPremium(Service):
    """
    Provides a Service-compliant implementation for Naver Clova Premium Text To Speech.
    """

    __slots__ = [
    ]

    NAME = "Naver Clova Premium"

    # Although Naver Clova is an Internet service, we do not mark it with
    # Trait.INTERNET, as it is a paid-for-by-the-user API, and we do not want
    # to rate-limit it or trigger error caching behavior
    TRAITS = []

    def desc(self):
        """Returns name with a voice count."""
        return "Naver Clova Premium TTS API (%d voices)" % len(VOICE_LIST)

    def extras(self):
        """The Naver Clova API requires a client id/secret (not needed in plus mode)."""
        if self.languagetools.use_plus_mode():
            # plus mode, no need for an API key
            return []
        return [dict(key='clientid', label="API Client Id", required=True),
                dict(key='clientsecret', label="API Client Secret", required=True)]

    def get_voices(self) -> List[StandardVoice]:
        """Return all Naver voices, sorted by human-readable description."""
        naver_voices = [x for x in VOICE_LIST if x['service'] == 'Naver']
        naver_voices = sorted(naver_voices, key=lambda x: x['voice_description'])
        # data-dump garble fixed: "StandardVoice( | voice_data)"
        return [StandardVoice(voice_data) for voice_data in naver_voices]

    def get_voice_list(self):
        """Return (key, description) pairs for the options dropdown."""
        voice_list = [(voice.get_key(), voice.get_description()) for voice in self.get_voices()]
        voice_list.sort(key=lambda x: x[1])
        return voice_list

    def get_voice_for_key(self, key) -> StandardVoice:
        """Look up a single voice by its key; exactly one match is expected."""
        voice = [voice for voice in self.get_voices() if voice.get_key() == key]
        assert(len(voice) == 1)
        return voice[0]

    def options(self):
        """Options shown in the GUI (data-dump garble fixed: 'def opti | ons')."""
        return [
            dict(key='voice',
                 label="Voice",
                 values=self.get_voice_list(),
                 transform=lambda value: value),
            dict(
                key='speed',
                label="Speed",
                values=(-5, 5),
                transform=int,
                default=0,
            ),
            dict(
                key='pitch',
                label="Pitch",
                values=(-5, 5),
                transform=int,
                default=0,
            )
        ]

    def run(self, text, options, path):
        """Downloads from Naver Clova API directly to an MP3 at *path*."""
        voice_key = options['voice']
        voice = self.get_voice_for_key(voice_key)
        speed = options['speed']
        pitch = options['pitch']

        if self.languagetools.use_plus_mode():
            # Delegate to the Language Tools backend (no API key needed).
            self._logger.info(f'using language tools API')
            service = 'Naver'
            voice_key = voice.get_voice_key()
            language = voice.get_language_code()
            options = {
                'pitch': pitch,
                'speed': speed
            }
            self.languagetools.generate_audio_v2(text, service, 'batch', language, 'n/a', voice_key, options, path)
        else:
            # Direct call to the NCloud TTS endpoint.
            # NOTE(review): this relies on urllib.request / urllib.parse being
            # importable; the module only does `import urllib` -- confirm.
            client_id = options['clientid']
            client_secret = options['clientsecret']
            encText = urllib.parse.quote(text)
            voice_name = voice.get_key()
            data = f"speaker={voice_name}&speed={speed}&pitch={pitch}&text={encText}"
            url = 'https://naveropenapi.apigw.ntruss.com/tts-premium/v1/tts'
            self._logger.debug(f"url: {url}, data: {data}")
            request = urllib.request.Request(url)
            request.add_header("X-NCP-APIGW-API-KEY-ID", client_id)
            request.add_header("X-NCP-APIGW-API-KEY", client_secret)
            response = urllib.request.urlopen(request, data=data.encode('utf-8'))
            rescode = response.getcode()
            if rescode == 200:
                self._logger.debug("successful response")
                response_body = response.read()
                with open(path, 'wb') as f:
                    f.write(response_body)
            else:
                error_message = f"Status code: {rescode}"
                self._logger.error(error_message)
                raise ValueError(error_message)
|
apruden/genwiki | genwiki/model.py | Python | lgpl-3.0 | 5,404 | 0.002036 | import codecs
import os
import re
import json
from . import WIKI_DIR
from collections import defaultdict
def _get_filename(slug):
    """Return the path of the markdown file backing the page *slug*."""
    return os.path.join(WIKI_DIR, '{0}.md'.format(slug))
class Index(object):
    """Naive in-memory inverted index over tokenized document texts.

    ``texts`` maps doc_id -> token list; ``finvindex`` maps each token to a
    set of ``(doc_id, first_occurrence_position)`` pairs.  Note that only the
    FIRST position of each token per document is recorded.
    """

    def __init__(self):
        self.texts, self.words = {}, set()
        self.finvindex = defaultdict(set)

    def update_index(self, doc_id, words):
        """Record each word's first position in the document's token list."""
        for w in words:
            self.finvindex[w].add((doc_id, self.texts[doc_id].index(w)))

    def put(self, doc_id, content):
        """(Re)index *content* under *doc_id*.

        Tokens shorter than 4 characters are dropped; the rest are lowercased
        and stripped of non-alphanumeric characters.
        """
        self.remove(doc_id)
        # List comprehension (not filter/map) so the result is a real list on
        # both Python 2 and 3 -- update_index() relies on list.index().
        txt = [t for t in (re.sub('[^a-z0-9]', '', w.lower())
                           for w in content.split() if len(w) > 3) if t]
        self.texts[doc_id] = txt
        self.update_index(doc_id, set(txt))

    def remove(self, doc_id):
        """Drop every posting that refers to *doc_id*."""
        for postings in self.finvindex.values():
            stale = [p for p in postings if p[0] == doc_id]
            for p in stale:
                postings.remove(p)

    def term_search(self, terms):
        """Return the set of doc_ids containing ALL of *terms*."""
        if not set(terms).issubset(set(self.finvindex.keys())):
            return set()
        # Explicit intersection loop instead of the Python-2-only builtin
        # reduce(); behavior is identical.
        result = set(self.texts.keys())
        for term in set(terms):
            result &= set(p[0] for p in self.finvindex[term])
        return result

    def search(self, phrase):
        """Phrase search: return doc ids where the (fuzzy-corrected) words of
        *phrase* appear at consecutive first-occurrence positions."""
        import difflib
        # Map each query word onto the closest indexed term, if any.
        wordsinphrase = []
        for w in phrase.strip().split():
            matches = difflib.get_close_matches(w, self.finvindex.keys(), cutoff=0.8)
            wordsinphrase.append(matches[0] if matches else w)
        # Guard: an empty phrase used to raise IndexError below.
        if not wordsinphrase:
            return set()
        if not set(wordsinphrase).issubset(set(self.finvindex.keys())):
            return set()
        # BUG FIX: the original branch was inverted and silently dropped every
        # word after the first whenever the phrase had two or more words.
        firstword, otherwords = wordsinphrase[0], wordsinphrase[1:]
        found = []
        for txt in self.term_search(wordsinphrase):
            for firstindx in (indx for t, indx in self.finvindex[firstword] if t == txt):
                if all((txt, firstindx + 1 + otherindx) in self.finvindex[otherword]
                       for otherindx, otherword in enumerate(otherwords)):
                    found.append(txt)
        return found
class Post(object):
    """A wiki page: title, markdown body and metadata (tags, timestamps)."""

    def __init__(self, title, body, created=None, modified=None, tags=None, **kwargs):
        self.title = str(title).strip()
        # Data-dump garble fixed here ("e | lse"); body is None when empty.
        self.body = str(body.strip()) if body else None
        self.slug = str(Post.build_slug(self.title))
        # Accept tags as a comma-separated string or as an iterable; drop
        # empty entries.  hasattr('split') covers both str and unicode
        # without relying on the Python-2-only ``basestring``.
        if hasattr(tags, 'split'):
            tag_list = tags.split(',')
        else:
            tag_list = tags if tags else []
        self.tags = [t for t in tag_list if t]
        self.created = str(created) if created else None
        self.modified = str(modified) if modified else None

    def __cmp__(self, other):
        # Python-2 ordering hook: compares posts by creation time.
        if not other:
            return -1
        return (int(self.created > other.created) or -1) if self.created != other.created else 0

    def serialize(self):
        """Render the post as an HTML-comment metadata header plus the body."""
        buf = ['<!---']
        for k, v in self.__dict__.items():
            if k not in ['body', 'slug', 'tags']:
                buf.append('='.join((str(k), str(v))))
            elif k == 'tags':
                buf.append('%s=%s' % (k, ','.join(self.tags)))
        buf.append('--->')
        # NOTE(review): body may be None, which would break '\n'.join --
        # confirm callers only serialize posts with a body.
        buf.append(self.body)
        return '\n'.join(buf)

    @staticmethod
    def build_slug(title):
        """Turn a title into a URL slug (punctuation runs become dashes)."""
        return re.sub(r'[\.!,;/\?#\ ]+', '-', title).strip().lower()

    @staticmethod
    def build(data, title=None):
        """Parse serialized page text back into a Post.

        Header lines between '<!---' and '--->' are 'key = value' pairs; every
        line (markers included) is also kept as part of the body.
        """
        tmp = {}
        body = []
        header = False
        for line in data.split('\n'):
            if line == '<!---':
                header = True
            elif line == '--->':
                header = False
            elif header:
                # BUG FIX: split on the first '=' only so header values that
                # themselves contain '=' no longer raise ValueError.
                (k, v) = [part.strip() for part in line.split('=', 1)]
                tmp[k] = v
            body.append(line)
        tmp['body'] = '\n'.join(body)
        if not tmp.get('title'):
            tmp['title'] = ' '.join(title.replace('.md', '').split('-'))
        return Post(**tmp)
class PostProxy(object):
    """Lazy stand-in for a Post: the backing file is only read on first
    attribute access (used by Wiki.find_all to avoid loading every page)."""

    def __init__(self, slug):
        self.slug = slug
        self.post = None

    def __getattr__(self, name):
        # Only invoked for attributes missing on the proxy itself (anything
        # other than ``slug``/``post``): load and cache the page on demand.
        if not self.post:
            with codecs.open(_get_filename(self.slug), 'r', 'utf8') as f:
                self.post = Post.build(f.read())
        if name == 'body' and not getattr(self.post, 'body', None):
            # NOTE(review): re-reads the same file to recover a missing body;
            # looks redundant with Post.build -- confirm before removing.
            with codecs.open(os.path.join(WIKI_DIR, '%s.md' % (self.slug,)), 'r', 'utf8') as f:
                self.post.body = f.read()
        return getattr(self.post, name)
class Wiki(object):
    """File-system backed store: one markdown file per post in WIKI_DIR."""

    def add_post(self, post):
        """Persist *post* (creates or overwrites its file)."""
        self._save_post(post)

    def del_post(self, post):
        """Delete the file backing *post*."""
        os.remove(_get_filename(post.slug))

    def get_post(self, slug):
        """Load and return the Post for *slug*, or None if it does not exist."""
        if os.path.exists(_get_filename(slug)):
            with codecs.open(_get_filename(slug), 'r', 'utf8') as f:
                return Post.build(f.read())

    def find_all(self):
        """Return lazy proxies for every post file in WIKI_DIR."""
        return [PostProxy(f.replace('.md', '')) for f in os.listdir(WIKI_DIR)]

    def _save_post(self, post):
        """Write *post* as a metadata header comment followed by the body."""
        with codecs.open(_get_filename(post.slug), 'w', 'utf8') as f:
            # BUG FIX: the original did ``tmp = post.__dict__.items()`` and
            # then ``tmp.pop('body', '')`` -- items() is not a dict, so every
            # save raised TypeError.  Copy the dict, split off the body.
            meta = dict(post.__dict__)
            body = meta.pop('body', '') or ''
            f.write('<!---\n%s\n--->\n' % '\n'.join(
                ['%s = %s' % (k, v) for k, v in meta.items()]))
            f.write(body)
|
upgradeadvice/fofix-grisly-virtualenv | FoFiX/Menu.py | Python | gpl-2.0 | 21,146 | 0.02199 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt #
# 2008 evilynux <evilynux@gmail.com> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import math
import os
import Data
import Dialogs
from Input import KeyListener
import Log
from OpenGL.GL import glRotate
from OpenGL.raw.GL import glBegin
from OpenGL.raw.GL import glBlendFunc
from OpenGL.raw.GL import glColor3f
from OpenGL.raw.GL import glEnable
from OpenGL.raw.GL import glEnd
from OpenGL.raw.GL import glPopMatrix
from OpenGL.raw.GL import glPushMatrix
from OpenGL.raw.GL import glTranslatef
from OpenGL.raw.GL import glVertex2f
from OpenGL.raw.GL.constants import GL_BLEND
from OpenGL.raw.GL.constants import GL_COLOR_MATERIAL
from OpenGL.raw.GL.constants import GL_ONE_MINUS_SRC_ALPHA
from OpenGL.raw.GL.constants import GL_SRC_ALPHA
from OpenGL.raw.GL.constants import GL_TRIANGLES
import Player
from View import Layer
import pygame
class Choice:
    """One selectable menu entry.

    ``callback`` may be a plain callable, a Menu instance, or a nested list
    of choices; ``values`` (optional) turns the entry into a value cycler.
    """

    def __init__(self, text, callback, name = None, values = None, valueIndex = 0, append_submenu_char = True, tipText = None):
        #Log.debug("Choice class init (Menu.py)...")
        self.text = unicode(text)  # Python-2 unicode coercion of the label
        self.callback = callback
        self.name = name
        self.values = values
        self.valueIndex = valueIndex
        self.append_submenu_char = append_submenu_char
        self.tipText = tipText
        # A trailing " >" on the label explicitly marks the entry as a submenu.
        if self.text.endswith(" >"):
            self.text = text[:-2]
            self.isSubMenu = True
        else:
            self.isSubMenu = isinstance(self.callback, Menu) or isinstance(self.callback, list)

    #MFH - add support for passing position values to the callback "next menu"
    def trigger(self, engine = None):
        """Activate the entry; push the resulting Menu (if any) on the view.

        NOTE(review): the final pushLayer dereferences *engine*; callers that
        pass engine=None are assumed never to produce a Menu here -- confirm.
        """
        if engine and isinstance(self.callback, list):
            #MFH
            if self.values:
                nextMenu = Menu(engine, self.callback, name = self.name, pos = self.values, selectedIndex = self.valueIndex )
            else:
                nextMenu = Menu(engine, self.callback, name = self.name)
        elif engine and isinstance(self.callback, Menu):
            nextMenu = self.callback
        elif self.values:
            nextMenu = self.callback(self.values[self.valueIndex])
        else:
            nextMenu = self.callback()
        if isinstance(nextMenu, Menu):
            engine.view.pushLayer(nextMenu)

    def selectNextValue(self):
        """Cycle to the next value (wraps around) and re-trigger."""
        if self.values:
            self.valueIndex = (self.valueIndex + 1) % len(self.values)
            self.trigger()

    def selectPreviousValue(self):
        """Cycle to the previous value (wraps around) and re-trigger."""
        if self.values:
            self.valueIndex = (self.valueIndex - 1) % len(self.values)
            self.trigger()

    def getText(self, selected):
        """Return the display label; a selected value entry gets arrow glyphs."""
        if not self.values:
            if self.isSubMenu and self.append_submenu_char:
                return "%s >" % self.text
            return self.text
        if selected:
            return "%s: %s%s%s" % (self.text, Data.LEFT, self.values[self.valueIndex], Data.RIGHT)
        else:
            return "%s: %s" % (self.text, self.values[self.valueIndex])
class Menu(Layer, KeyListener):
def __init__(self, engine, choices, name = None, onClose = None, onCancel = None, pos = (.2, .66 - .35), viewSize = 6, fadeScreen = False, font = "font", mainMenu = None, textColor = None, selectedColor = None, append_submenu_char = True, selectedIndex = None, showTips = True, selectedBox = False):
self.engine = engine
self.logClassInits = self.engine.config.get("game", "log_class_inits")
if self.logClassInits == 1:
Log.debug("Menu class init (Menu.py)...")
#Get theme
self.themename = self.engine.data.themeLabel
self.theme = self.engine.data.theme
self.choices = []
self.currentIndex = 0
#MFH
if selectedIndex:
self.currentIndex = selectedIndex
self.time = 0
self.onClose = onClose
self.onCancel = onCancel
self.viewOffset = 0
self.name = name # akedrou - for graphical support
self.mainMenu = False
self.graphicMenu = False
self.useSelectedBox = selectedBox
self.useGraphics = self.engine.config.get("game", "use_graphical_submenu")
self.gfxText = None
self.scrolling = 0
self.delay = 0
self.rate = 0
self.scroller = [0, self.scrollUp, self.scrollDown, self.scrollLeft, self.scrollRight]
self.textColor = textColor
self.selectedColor = selectedColor
self.tipColor = self.engine.theme.menuTipTextColor
#self.sfxVolume = self.engine.config.get("audio", "SFX_volume")
self.drumNav = self.engine.config.get("game", "drum_navigation") #MFH
if self.name and self.useGraphics > 0:
try:
if self.engine.loadImgDrawing(self, "menuBackground", os.path.join("themes",self.themename,"menu","%s.png" % self.name)):
if self.menuBackground.height1() == 1:
raise KeyError
else:
raise KeyError
self.gfxText = "%stext%d" % (self.name, len(choices))
if not self.engine.loadImgDrawing(self, "menuText", os.path.join("themes",self.themename,"menu","%s.png" % self.gfxText)):
raise KeyError
self.graphicMenu = True
self.menux = self.engine.theme.submenuX[self.gfxText]
self.menuy = self.engine.theme.submenuY[self.gfxText]
self.menuScale = self.engine.theme.submenuScale[self.gfxText]
self.vSpace = self.engine.theme.submenuVSpace[self.gfxText]
if str(self.menux) != "None" and str(self.menuy) != "None":
self.menux = float(self.menux)
self.menuy = float(self.menuy)
else:
self.menux = .4
self.menuy = .4
if str(self.menuScale) != "None":
self.menuScale = float(self.menuScale)
else:
self.menuScale = .5
if str(self.vSpace) != "None":
self.vSpace = float(self.vSpace)
else:
self.vSpace = .08
Log.debug("Graphic menu enabled for submenu: %s" % self.name)
except KeyError:
Log.warn("Your theme does not appear to properly support the %s graphical submenu. Check to be sure you have the latest version of your theme." % self.name)
self.menuBackground = None
self.menuText = None
if pos == (.2, .66 - .35): #MFH - default position, not called with a special one - this is a submenu:
self.sub_menu_x = self.engine.theme.sub_menu_xVar
self.sub_menu_y = self.engine.theme.sub_menu_yVar
if engine.data.theme == 0:
if self.sub_menu_x == None:
| self.sub_menu_x = .44
if self.sub_menu_y == None:
self.sub_m | enu_y = .14
elif engine.data.theme == 1:
if self.sub_menu_x == None:
self.sub_menu_x = .38
if self.sub_menu_y == None:
self.sub_menu_y = .15
elif engine.data.theme == 2:
if self.sub_menu_x == None:
self.sub_menu_x = .25
if sel |
jlopezbi/rhinoUnfolder | rhino_unwrapper/weight_functions.py | Python | gpl-3.0 | 536 | 0.009328 | import rhinoscriptsy | ntax as rs
import random as rand
def edgeAngle(myMesh, edgeIndex):
    """Return the angle between the normals of the two faces sharing
    *edgeIndex*, or None for boundary/non-manifold edges.

    NOTE(review): rs.VectorAngle returns degrees in rhinoscriptsyntax --
    confirm callers expect degrees rather than radians.
    """
    # TODO: make myMesh function which finds the angle between the two faces
    # of a given edge.
    # Data-dump garble fixed: argument was mangled to "edgeI | ndex".
    faceIdxs = myMesh.getFacesForEdge(edgeIndex)
    if len(faceIdxs) == 2:
        faceNorm0 = myMesh.face_normal(faceIdxs[0])
        faceNorm1 = myMesh.face_normal(faceIdxs[1])
        return rs.VectorAngle(faceNorm0, faceNorm1)
    else:
        return None
def uniform(mesh, edgeIndex):
    """Constant edge-weight function: every edge costs the same (1)."""
    return 1
def random(mesh, edgeIndex):
    """Random edge-weight function: uniform draw from [0, 1)."""
    return rand.random()
|
gjbex/vsc-tools-lib | vsc/pbs/qstat.py | Python | lgpl-3.0 | 4,075 | 0.000491 | # | !/usr/bin/env python
'''Utilities to deal with PBS torque qstat'''
from vsc.utils import walltime2seconds
from vsc.pbs.job import PbsJob
class QstatParser(object):
    '''Parser for full PBS torque qstat output'''

    def __init__(self, config):
        '''constructor; *config* is passed through to each PbsJob'''
        self._config = config
        self._jobs = {}

    def _get_value(self, line):
        '''extract value from a "key = value" line (splits on first = only)'''
        _, value = line.split('=', 1)
        return value.strip()

    def parse_file(self, qstat_file):
        '''parse a file that contains qstat -f output
        (data-dump garble fixed: parameter was mangled to "q | stat_file")'''
        qstat_output = ''.join(qstat_file.readlines())
        return self.parse(qstat_output)

    def parse_record(self, record):
        '''parse an individual job record into a PbsJob'''
        job = None
        resource_specs = {}
        resources_used = {}
        # state/host_str implement a tiny state machine for exec_host values
        # that wrap onto continuation lines (terminated by an exec_port line).
        state = None
        host_str = None
        for line in record.split('\n'):
            line = line.strip()
            if state == 'exec_host':
                if not line.startswith('exec_port'):
                    host_str += line
                    continue
                else:
                    # exec_port ends the wrapped exec_host value; decode it.
                    hosts = {}
                    for host in host_str.split('+'):
                        node, core = host.split('/')
                        if node not in hosts:
                            hosts[node] = []
                        hosts[node].append(core)
                    job.exec_host = hosts
                    state = None
                    host_str = None
            if line.startswith('Job Id:'):
                _, job_id = line.split(':', 1)
                job = PbsJob(self._config, job_id.strip())
            elif line.startswith('Job_Name ='):
                job.name = self._get_value(line)
            elif line.startswith('euser ='):
                job.user = self._get_value(line)
            elif line.startswith('job_state = '):
                job.state = self._get_value(line)
            elif line.startswith('queue ='):
                job.queue = self._get_value(line)
            elif line.startswith('Account_Name ='):
                job.project = self._get_value(line)
            elif line.startswith('resources_used.walltime ='):
                walltime = self._get_value(line)
                resources_used['walltime'] = walltime2seconds(walltime)
            elif line.startswith('Resource_List.walltime ='):
                walltime = self._get_value(line)
                resource_specs['walltime'] = walltime2seconds(walltime)
            elif line.startswith('Resource_List.nodect = '):
                nodect = int(self._get_value(line))
                resource_specs['nodect'] = nodect
            elif line.startswith('exec_host ='):
                # Single-line exec_host: "node/cores+node/cores+..."
                host_strs = self._get_value(line).split('+')
                exec_host = dict()
                for host_str in host_strs:
                    if '/' in host_str:
                        host, cores = host_str.split('/')
                        exec_host[host] = cores
                    else:
                        exec_host[host_str] = None
                job.exec_host = exec_host
            elif line.startswith('Resource_List.partition ='):
                job.partition = self._get_value(line)
            elif line.startswith('qtime = '):
                job.queue_time = self._get_value(line)
            elif line.startswith('start_time = '):
                job.start_time = self._get_value(line)
        job.add_resource_specs(resource_specs)
        job.add_resources_used(resources_used)
        return job

    def parse(self, qstat_str):
        '''parse PBS torque qstat full output, and return list of jobs'''
        jobs = []
        job_str = None
        for line in qstat_str.split('\n'):
            if line.startswith('Job Id:'):
                if job_str:
                    jobs.append(self.parse_record(job_str))
                job_str = line
            elif line.strip():
                job_str += '\n' + line
        if job_str:
            jobs.append(self.parse_record(job_str))
        return jobs
|
kingvuplus/boom | lib/python/Screens/Rc.py | Python | gpl-2.0 | 3,561 | 0.001966 | from Components.Pixmap import MovingPixmap, MultiPixmap
from Tools.Directories import resolveFilename, SCOPE_SKIN
from xml.etree.ElementTree import ElementTree
from Components.config import config, ConfigInteger
from Compon | ents.RcModel import rc_model
from boxbranding import getBoxType
config.misc.rcused = ConfigInteger(default=1)
class Rc:
def __init__(self):
self['rc'] = MultiPixmap()
self['arrowdown'] = MovingPixmap()
self['arrowdown2'] = MovingPixmap()
self['arrowu | p'] = MovingPixmap()
self['arrowup2'] = MovingPixmap()
config.misc.rcused = ConfigInteger(default=1)
self.isDefaultRc = rc_model.rcIsDefault()
self.rcheight = 500
self.rcheighthalf = 250
self.selectpics = []
self.selectpics.append((self.rcheighthalf, ['arrowdown', 'arrowdown2'], (-18, -70)))
self.selectpics.append((self.rcheight, ['arrowup', 'arrowup2'], (-18, 0)))
self.readPositions()
self.clearSelectedKeys()
self.onShown.append(self.initRc)
def initRc(self):
if getBoxType() in ('uniboxhd1', 'uniboxhd2', 'uniboxhd3', 'sezam5000hd', 'mbtwin', 'beyonwizt3'):
self['rc'].setPixmapNum(config.misc.rcused.value)
elif self.isDefaultRc:
self['rc'].setPixmapNum(config.misc.rcused.value)
else:
self['rc'].setPixmapNum(0)
def readPositions(self):
if self.isDefaultRc:
target = resolveFilename(SCOPE_SKIN, 'rcpositions.xml')
else:
target = rc_model.getRcLocation() + 'rcpositions.xml'
tree = ElementTree(file=target)
rcs = tree.getroot()
self.rcs = {}
for rc in rcs:
id = int(rc.attrib['id'])
self.rcs[id] = {}
for key in rc:
name = key.attrib['name']
pos = key.attrib['pos'].split(',')
self.rcs[id][name] = (int(pos[0]), int(pos[1]))
def getSelectPic(self, pos):
for selectPic in self.selectpics:
if pos[1] <= selectPic[0]:
return (selectPic[1], selectPic[2])
return None
def hideRc(self):
self['rc'].hide()
self.hideSelectPics()
def showRc(self):
self['rc'].show()
def selectKey(self, key):
if self.isDefaultRc:
rc = self.rcs[config.misc.rcused.value]
else:
try:
rc = self.rcs[2]
except:
rc = self.rcs[config.misc.rcused.value]
if rc.has_key(key):
rcpos = self['rc'].getPosition()
pos = rc[key]
selectPics = self.getSelectPic(pos)
selectPic = None
for x in selectPics[0]:
if x not in self.selectedKeys:
selectPic = x
break
if selectPic is not None:
print 'selectPic:', selectPic
self[selectPic].moveTo(rcpos[0] + pos[0] + selectPics[1][0], rcpos[1] + pos[1] + selectPics[1][1], 1)
self[selectPic].startMoving()
self[selectPic].show()
self.selectedKeys.append(selectPic)
return
def clearSelectedKeys(self):
self.showRc()
self.selectedKeys = []
self.hideSelectPics()
def hideSelectPics(self):
for selectPic in self.selectpics:
for pic in selectPic[1]:
self[pic].hide()
|
dandxy89/rf_helicopter | pytests.py | Python | mit | 5,028 | 0.000199 | # Purpose: Test Script to Ensure that as the complexity of the Scripts Grows functionality can be checked
#
# Info: Uses py.test to test each of the track building functions are working
#
# Running the Test from the COMMAND LINE: py.test test.py
#
# Developed as part of the Software Agents Course at City University
#
# Dev: Dan Dixey and Enrico Lopedoto
#
#
import os
import logging
import numpy as np
from Model.Wind_Generation import Obstacle_Tracks
from Model.Defaults import *
from Model.World import helicopter_world
# Logging Controls Level of Printing
logging.basicConfig(format='[%(asctime)s] : [%(levelname)s] : [%(message)s]',
level=logging.INFO)
def test_creating_obstacles_details():
    """Obstacle metrics come back as a non-empty list of tuples."""
    track_args = dict(MAX_OBS_HEIGHT=MAX_OBS_HEIGHT,
                      MAX_OBS_WIDTH=MAX_OBS_WIDTH,
                      WINDOW_HEIGHT=WINDOW_HEIGHT,
                      WINDOW_WIDTH=WINDOW_WIDTH,
                      N_OBSTABLE_GEN=N_OBSTABLE_GEN,
                      MIN_GAP=MIN_GAP,
                      N_TRACKS_GEN=N_TRACKS_GEN)
    routes = Obstacle_Tracks(**track_args)
    # Get Obstacles Irregularities
    metrics = routes.get_obstable_metrics
    assert isinstance(metrics, list) and isinstance(
        metrics[0], tuple), 'Types Not as Expected in Output1'
def test_creating_obstacles():
    """Generated obstacles are a list of numpy arrays."""
    track_args = dict(MAX_OBS_HEIGHT=MAX_OBS_HEIGHT,
                      MAX_OBS_WIDTH=MAX_OBS_WIDTH,
                      WINDOW_HEIGHT=WINDOW_HEIGHT,
                      WINDOW_WIDTH=WINDOW_WIDTH,
                      N_OBSTABLE_GEN=N_OBSTABLE_GEN,
                      MIN_GAP=MIN_GAP,
                      N_TRACKS_GEN=N_TRACKS_GEN)
    routes = Obstacle_Tracks(**track_args)
    # Generate Obstacles
    obstacles = routes.generate_obstacles
    assert isinstance(obstacles, list) and isinstance(
        obstacles[0], np.ndarray), 'Types Not as Expected in Output2'
def test_creating_tracks():
    """Exactly N_TRACKS_GEN tracks are produced, each a numpy array."""
    track_args = dict(MAX_OBS_HEIGHT=MAX_OBS_HEIGHT,
                      MAX_OBS_WIDTH=MAX_OBS_WIDTH,
                      WINDOW_HEIGHT=WINDOW_HEIGHT,
                      WINDOW_WIDTH=WINDOW_WIDTH,
                      N_OBSTABLE_GEN=N_OBSTABLE_GEN,
                      MIN_GAP=MIN_GAP,
                      N_TRACKS_GEN=N_TRACKS_GEN)
    routes = Obstacle_Tracks(**track_args)
    # Generate Tracks / Paths
    tracks = routes.generate_tracks
    assert isinstance(tracks, list) and isinstance(
        tracks[0], np.ndarray) and len(tracks) == N_TRACKS_GEN, \
        'Types Not as Expected in Output3'
def test_saving_obstacles():
    """An obstacle survives an np.save/np.load round trip unchanged in shape."""
    routes = Obstacle_Tracks(MAX_OBS_HEIGHT=MAX_OBS_HEIGHT,
                             MAX_OBS_WIDTH=MAX_OBS_WIDTH,
                             WINDOW_HEIGHT=WINDOW_HEIGHT,
                             WINDOW_WIDTH=WINDOW_WIDTH,
                             N_OBSTABLE_GEN=N_OBSTABLE_GEN,
                             MIN_GAP=MIN_GAP,
                             N_TRACKS_GEN=N_TRACKS_GEN)
    # Generate Obstacles and keep the first one
    first_obstacle = routes.generate_obstacles[0]
    # Save then reload it (np.save appends the .npy suffix itself)
    target = os.path.join(os.getcwd(), 'Tests', 'Test_Obstacle')
    np.save(target, first_obstacle)
    reloaded = np.load(target + '.npy')
    assert first_obstacle.shape == reloaded.shape, 'Dimensions Incorrect'
def test_saving_tracks():
    """A track survives an np.save/np.load round trip unchanged in shape."""
    routes = Obstacle_Tracks(MAX_OBS_HEIGHT=MAX_OBS_HEIGHT,
                             MAX_OBS_WIDTH=MAX_OBS_WIDTH,
                             WINDOW_HEIGHT=WINDOW_HEIGHT,
                             WINDOW_WIDTH=WINDOW_WIDTH,
                             N_OBSTABLE_GEN=N_OBSTABLE_GEN,
                             MIN_GAP=MIN_GAP,
                             N_TRACKS_GEN=N_TRACKS_GEN)
    # Generate tracks and keep the first one
    first_track = routes.generate_tracks[0]
    # Save then reload it (np.save appends the .npy suffix itself)
    target = os.path.join(os.getcwd(), 'Tests', 'Test_Track')
    np.save(target, first_track)
    reloaded = np.load(target + '.npy')
    assert first_track.shape == reloaded.shape, 'Dimensions Incorrect'
def test_world_load_defaults():
    """With no file given, the world falls back to its built-in track."""
    assert isinstance(helicopter_world().track, np.ndarray), \
        'Loading Default Failed'
def test_world_loading():
    """A track file loads into the world with the same shape as on disk.

    Data-dump garbles fixed on the path argument and the assert line.
    """
    track_path = os.path.join(os.getcwd(), "Tests", "Test_Track.npy")
    world = helicopter_world(file_name=track_path)
    loaded_track = np.load(track_path)
    assert loaded_track.shape == world.track.shape, \
        "Loading Track into World Failed"
|
khushboo9293/postorius | src/postorius/tests/test_auth_decorators.py | Python | gpl-3.0 | 8,684 | 0.000345 | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2015 by the Free Software Foundation, Inc.
#
# This file is part of Postorius.
#
# Postorius is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
# Postorius is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Postorius. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import PermissionDenied
from django.test.client import RequestFactory
from django.utils import unittest
from mock import patch
from postorius.auth.decorators import (list_owner_required,
list_moderator_required,
basic_auth_login)
from postorius.models import (Domain, List, Member, MailmanUser,
MailmanApiError, Mailman404Error)
from mailmanclient import Client
@list_owner_required
def dummy_function(request, list_id):
    """Minimal view stub used to exercise the list_owner_required decorator."""
    return True
@list_moderator_required
def dummy_function_mod_req(request, list_id):
    """Minimal view stub used to exercise the list_moderator_required decorator."""
    return True
class ListOwnerRequiredTest(unittest.TestCase):
    """Tests the list_owner_required auth decorator."""

    def setUp(self):
        from django.test.client import RequestFactory
        from postorius.tests.utils import create_mock_list
        self.request_factory = RequestFactory()
        # create a mock list with members
        list_name = 'foolist.example.org'
        list_id = 'foolist.example.org'
        self.mock_list = create_mock_list(dict(
            fqdn_listname=list_name,
            list_id=list_id))

    @patch.object(Client, 'get_list')
    def test_not_authenticated(self, mock_get_list):
        """Should raise PermissionDenied if user is not authenticated."""
        mock_get_list.return_value = self.mock_list
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = AnonymousUser()
        self.assertRaises(PermissionDenied, dummy_function, request,
                          list_id='foolist.example.org')

    @patch.object(Client, 'get_list')
    def test_superuser(self, mock_get_list):
        """Should call the dummy method, if user is superuser."""
        mock_get_list.return_value = self.mock_list
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = User.objects.create_superuser('su1', 'su@sodo.org',
                                                     'pwd')
        return_value = dummy_function(request,
                                      list_id='foolist.example.org')
        self.assertEqual(return_value, True)

    @patch.object(Client, 'get_list')
    def test_non_list_owner(self, mock_get_list):
        """Should raise PermissionDenied if user is not a list owner."""
        # prepare mock list object
        self.mock_list.owners = ['geddy@rush.it']
        mock_get_list.return_value = self.mock_list
        # prepare request
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = User.objects.create_user('les c', 'les@primus.org',
                                                'pwd')
        self.assertRaises(PermissionDenied, dummy_function, request,
                          list_id='foolist.example.org')

    @patch.object(Client, 'get_list')
    def test_list_owner(self, mock_get_list):
        """Should return fn return value if user is the list owner."""
        # prepare mock list object
        self.mock_list.owners = ['les@primus.org']
        # data-dump garble fixed here ("mock_get_ | list")
        mock_get_list.return_value = self.mock_list
        # prepare request (second garble fixed on the 'pwd' argument line)
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = User.objects.create_user('les cl', 'les@primus.org',
                                                'pwd')
        return_value = dummy_function(request,
                                      list_id='foolist.example.org')
        self.assertEqual(return_value, True)
class ListModeratorRequiredTest(unittest.TestCase):
    """Tests the list_moderator_required auth decorator."""
    def setUp(self):
        from django.test.client import RequestFactory
        from postorius.tests.utils import create_mock_list
        self.request_factory = RequestFactory()
        # create a mock list with members
        list_name = 'foolist.example.org'
        list_id = 'foolist.example.org'
        self.mock_list = create_mock_list(dict(
            fqdn_listname=list_name,
            list_id=list_id))
    # Client.get_list is patched in every test so no live Mailman REST API is
    # contacted; the mock list built in setUp is returned instead.
    @patch.object(Client, 'get_list')
    def test_not_authenticated(self, mock_get_list):
        """Should raise PermissionDenied if user is not authenticated."""
        mock_get_list.return_value = self.mock_list
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = AnonymousUser()
        self.assertRaises(PermissionDenied, dummy_function_mod_req, request,
                          list_id='foolist.example.org')
    @patch.object(Client, 'get_list')
    def test_superuser(self, mock_get_list):
        """Should call the dummy method, if user is superuser."""
        mock_get_list.return_value = self.mock_list
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = User.objects.create_superuser('su2', 'su@sodo.org',
                                                     'pwd')
        return_value = dummy_function_mod_req(request,
                                              list_id=
                                              'foolist.example.org')
        self.assertEqual(return_value, True)
    @patch.object(Client, 'get_list')
    def test_non_list_moderator(self, mock_get_list):
        """Should raise PermissionDenied if user is not a list moderator."""
        # prepare mock list object: moderator is someone else entirely
        self.mock_list.moderators = ['geddy@rush.it']
        mock_get_list.return_value = self.mock_list
        # prepare request
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = User.objects.create_user('les cl2', 'les@primus.org',
                                                'pwd')
        self.assertRaises(PermissionDenied, dummy_function_mod_req, request,
                          list_id='foolist.example.org')
    @patch.object(Client, 'get_list')
    def test_list_owner(self, mock_get_list):
        """Should return fn return value if user is the list owner."""
        # prepare mock list object; the assertion below expects list *owners*
        # to be accepted by the moderator check as well.
        self.mock_list.owners = ['les@primus.org']
        mock_get_list.return_value = self.mock_list
        # prepare request
        request = self.request_factory.get('/lists/foolist.example.org/'
                                           'settings/')
        request.user = User.objects.create_user('les cl3', 'les@primus.org',
                                                'pwd')
        return_value = dummy_function_mod_req(request,
                                              list_id=
                                              'foolist.example.org')
        self.assertEqual(return_value, True)
    @patch.object(Client, 'get_list')
    def test_list_moderator(self, mock_get_list):
        """Should return fn return value if user is the list moderator."""
        # prepare mock list object
        self.mock_list.moderators = ['les@primus.org']
        mock_get_list.return_value = self.mock_list
|
sionide21/HackerTracker | tests/event_tests.py | Python | mit | 4,529 | 0.000883 | import csv
import unittest
from datetime import datetime, timedelta
from hackertracker import event
from hackertracker.database import Model, Session
from sqlalchemy import create_engine
class TestEvents(unittest.TestCase):
    """Unit tests for hackertracker.event, backed by an in-memory SQLite DB."""
    def setUp(self):
        # Fresh in-memory database per test; the scoped Session bound here is
        # discarded again in tearDown.
        engine = create_engine('sqlite:///:memory:', echo=True)
        Model.metadata.create_all(engine)
        Session.configure(bind=engine)
        event.Event.for_name("Drink glass of water", create=True)
    def tearDown(self):
        Session.remove()
    def assertDatetimesEqual(self, w1, w2):
        "Assert datetimes are equal to the second"
        self.assertEqual(w1.replace(microsecond=0), w2.replace(microsecond=0))
    def test_get_event(self):
        # create=True creates on first lookup; a plain lookup then finds it,
        # and an unknown name raises EventNotFound.
        e = event.Event.for_name("Drink pint of water", create=True)
        self.assertEqual(e.name, "Drink pint of water")
        e = event.Event.for_name("Drink pint of water")
        self.assertEqual(e.name, "Drink pint of water")
        self.assertRaises(event.EventNotFound, event.Event.for_name, "You'll never find me")
    def test_basic_track(self):
        e = event.Event.for_name("Drink glass of water")
        o = e.track()
        self.assertEqual(list(e.entries()), [o])
    def test_events_persist(self):
        e = event.Event.for_name("Drink glass of water")
        o = e.track(attrs=dict(size="16", location="office"))
        when = o.when
        attrs = dict(o.attrs)
        # Reload from db
        Session.commit()
        Session.remove()
        e = event.Event.for_name("Drink glass of water")
        o1 = e.entries()[0]
        self.assertDatetimesEqual(when, o1.when)
        self.assertEqual(attrs, o1.attrs)
    def test_entry_count(self):
        e = event.Event.for_name("Drink glass of water")
        e.track()
        e.track()
        e.track()
        Session.commit()
        self.assertEqual(e.entry_count(), 3)
    def test_latest_entry(self):
        e = event.Event.for_name("Drink glass of water")
        e.track(when=earlier(seconds=3))
        e.track(when=earlier(seconds=2))
        # f is the most recent entry (only one second in the past)
        f = e.track(when=earlier(seconds=1))
        Session.commit()
        self.assertEqual(e.latest_entry().id, f.id)
    def test_display_entry(self):
        e = event.Event.for_name("Drink glass of water")
        o = e.track(when=datetime(2014, 1, 1, 16, 6, 20, 216238))
        self.assertEqual(str(o), "Jan 01, 2014 04:06PM")
        o = e.track(when=datetime(2015, 3, 2, 0, 34, 53, 327128))
        self.assertEqual(str(o), "Mar 02, 2015 12:34AM")
    def test_list_events(self):
        # Event.all() is expected alphabetically: "Clean..." before "Drink..."
        e1 = event.Event.for_name("Drink glass of water")
        e2 = event.Event.for_name("Clean litter box", create=True)
        self.assertEqual(event.Event.all(), [e2, e1])
    def test_alternate_time(self):
        e = event.Event.for_name("Drink glass of water")
        o = e.track()
        self.assertDatetimesEqual(o.when, datetime.utcnow())
        when = earlier(hours=10)
        o = e.track(when)
        self.assertDatetimesEqual(o.when, when)
    def test_attributes(self):
        e = event.Event.for_name("Drink glass of water")
        o = e.track(attrs=dict(size="16", location="office"))
        self.assertEqual(o.attrs, {
            "size": "16",
            "location": "office"
        })
    def test_list_attributes(self):
        # attributes() should be the union of attr keys, sorted, and scoped to
        # this event only (the "Fire ze missile" attrs must not leak in).
        e = event.Event.for_name("Drink glass of water")
        e.track(attrs=dict(size="16", location="office"))
        e.track(attrs=dict(hello="world"))
        e.track(attrs=dict(hello="goodbye", location="office"))
        event.Event.for_name("Fire ze missile", create=True).track(attrs=dict(le_tired="true"))
        Session.commit()
        self.assertEqual(e.attributes(), ["hello", "location", "size"])
    def test_slug(self):
        e = event.Event.for_name("Drink glass of water")
        self.assertEqual(e.slug, "Drink_glass_of_water")
    def test_exports_csv(self):
        e = event.Event.for_name("Drink glass of water")
        # when=earlier(seconds=-1) puts this entry slightly in the future so it
        # sorts first; a "when" *attribute* must not clash with the When column.
        o = e.track(when=earlier(seconds=-1), attrs=dict(size="16", location="office"))
        e.track(attrs=dict(hello="world", when="now"))
        e.track(attrs=dict(hello="goodbye", location="office"))
        Session.commit()
        csv_file = list(csv.reader(e.export_csv().splitlines()))
        self.assertEqual(csv_file[0], ["When", "hello", "location", "size", "when"])
        self.assertEqual(csv_file[1], [str(o.when), "", "office", "16", ""])
        self.assertEqual(len(csv_file), 4)
def earlier(**kwargs):
    """Return a naive UTC datetime lying timedelta(**kwargs) in the past.

    Negative components (e.g. seconds=-1) yield a time in the future.
    """
    offset = timedelta(**kwargs)
    return datetime.utcnow() - offset
|
fredericklussier/ObservablePy | .vscode/.ropeproject/config.py | Python | mit | 4,037 | 0 | # The default ``config.py``
# flake8: noqa
def set_prefs(prefs):
    """This function is called before opening the project"""
    # Resources matching these patterns are invisible to rope: changes to them
    # are not recorded in history/VCS and they are omitted from
    # `Project.get_files()`.  `?` and `*` match any character except slashes;
    # a double slash ('build//*.o') matches recursively.
    prefs['ignored_resources'] = [
        '*.pyc', '*~', '.ropeproject', '.hg', '.svn', '_svn', '.git', '.tox',
    ]
    # All remaining preferences are simple key/value settings; other options
    # (python_files, source_folders, python_path, ...) keep rope's defaults.
    scalar_prefs = {
        # Object-information database persistence.
        'save_objectdb': True,
        'compress_objectdb': False,
        # Analyze each module whenever it is saved.
        'automatic_soa': True,
        # Call depth followed during static object analysis.
        'soa_followed_calls': 0,
        # Dynamic object analysis while running modules/tests (slower if on).
        'perform_doa': True,
        # Validate the object DB while rope is running.
        'validate_objectdb': True,
        # Undo history: size and whether it persists across sessions.
        'max_history_items': 32,
        'save_history': True,
        'compress_history': False,
        # Spaces per indent level (PEP 8).
        'indent_size': 4,
        # Builtin and c-extension modules rope may import and inspect;
        # import_dynload_stdmods adds the standard c-extensions automatically.
        'extension_modules': [],
        'import_dynload_stdmods': True,
        # Treat modules with syntax errors as empty instead of raising
        # ModuleSyntaxError; whether unresolvable imports are ignored.
        'ignore_syntax_errors': False,
        'ignore_bad_imports': False,
        # Import-organization behaviour.
        'prefer_module_from_imports': False,
        'split_imports': False,
        'sort_imports_alphabetically': False,
    }
    for pref_name, pref_value in scalar_prefs.items():
        prefs[pref_name] = pref_value
def project_opened(project):
    """Hook invoked after the project is opened.

    Add any per-project startup behaviour here; the default implementation
    intentionally does nothing.
    """
|
msincenselee/vnpy | vnpy/trader/gateway.py | Python | mit | 29,138 | 0.002194 | """
"""
import sys
from abc import ABC, abstractmethod
from typing import Any, Sequence, Dict, List, Optional, Callable
from copy import copy,deepcopy
from logging import INFO, DEBUG, ERROR
from datetime import datetime
from vnpy.event import Event, EventEngine
from .event import (
EVENT_TICK,
EVENT_BAR,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG,
)
from .object import (
TickData,
BarData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
LogData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest,
Exchange,
Status
)
from vnpy.trader.utility import get_folder_path, round_to, get_underlying_symbol, get_real_symbol_by_exchange
from vnpy.trader.util_logger import setup_logger
class BaseGateway(ABC):
    """
    Abstract gateway class for creating gateways connection
    to different trading systems.

    # How to implement a gateway:
    ---
    ## Basics
    A gateway should satisfies:
    * this class should be thread-safe:
        * all methods should be thread-safe
        * no mutable shared properties between objects.
    * all methods should be non-blocked
    * satisfies all requirements written in docstring for every method and callbacks.
    * automatically reconnect if connection lost.
    ---
    ## methods must implements:
    all @abstractmethod
    ---
    ## callbacks must response manually:
    * on_tick
    * on_bar
    * on_trade
    * on_order
    * on_position
    * on_account
    * on_contract
    All the XxxData passed to callback should be constant, which means that
    the object should not be modified after passing to on_xxxx.
    So if you use a cache to store reference of data, use copy.copy to create a new object
    before passing that data into on_xxxx
    """
    # Fields required in setting dict for connect function.
    default_setting: Dict[str, Any] = {}
    # Exchanges supported in the gateway.
    exchanges: List[Exchange] = []
    def __init__(self, event_engine: EventEngine, gateway_name: str):
        """Create the gateway, bind it to the event engine and set up its logger."""
        self.event_engine: EventEngine = event_engine
        self.gateway_name: str = gateway_name
        self.logger = None
        self.accountid = ""
        self.create_logger()
        # Kline generators keyed by vt_symbol; every on_bar subscription is added here.
        self.klines = {}
        self.status = {'name': gateway_name, 'con': False}
        self.prices: Dict[str, float] = {}  # vt_symbol, last_price
        self.query_functions = []
        self.rejected_orders = {}  # today's rejected orders, vt_symbol_direction_offset: [orders]
    def create_logger(self):
        """
        Create a log file dedicated to this gateway instance
        (one file per gateway name under the "log" folder).
        :return: None
        """
        log_path = get_folder_path("log")
        log_filename = str(log_path.joinpath(self.gateway_name))
        print(u'create logger:{}'.format(log_filename))
        from vnpy.trader.setting import SETTINGS
        self.logger = setup_logger(file_name=log_filename, name=self.gateway_name,
                                   log_level=SETTINGS.get('log.level', DEBUG))
    def on_event(self, type: str, data: Any = None) -> None:
        """
        General event push.
        """
        event = Event(type, data)
        self.event_engine.put(event)
    def on_tick(self, tick: TickData) -> None:
        """
        Tick event push.
        Tick event of a specific vt_symbol is also pushed.
        """
        self.prices.update({tick.vt_symbol: tick.last_price})
        self.on_event(EVENT_TICK, tick)
        # self.on_event(EVENT_TICK + tick.vt_symbol, tick)
        # Feed the tick into this symbol's kline generator (if subscribed) to build bars.
        kline = self.klines.get(tick.vt_symbol, None)
        if kline:
            kline.update_tick(tick)
    def on_bar(self, bar: BarData) -> None:
        """Bar (market data) event push."""
        # payload may be a single bar or a dict of bars
        self.on_event(EVENT_BAR, bar)
        #self.write_log(f'on_bar Event:{bar.__dict__}')
    def on_trade(self, trade: TradeData) -> None:
        """
        Trade event push.
        Trade event of a specific vt_symbol is also pushed.
        """
        self.on_event(EVENT_TRADE, trade)
        # self.on_event(EVENT_TRADE + trade.vt_symbol, trade)
    def on_order(self, order: OrderData) -> None:
        """
        Order event push.
        Order event of a specific vt_orderid is also pushed.
        """
        # If the order was rejected, record a copy for later inspection,
        # keyed by vt_symbol_direction_offset.
        if order.status == Status.REJECTED:
            k = f'{order.vt_symbol}_{order.direction.value}_{order.offset.value}'
            orders = self.rejected_orders.get(k,[])
            orders.append(deepcopy(order))
            self.rejected_orders.update({k:orders})
        self.on_event(EVENT_ORDER, order)
        # self.on_event(EVENT_ORDER + order.vt_orderid, order)
    def on_position(self, position: PositionData) -> None:
        """
        Position event push.
        Position event of a specific vt_symbol is also pushed.
        """
        self.on_event(EVENT_POSITION, position)
        # self.on_event(EVENT_POSITION + position.vt_symbol, position)
    def on_account(self, account: AccountData) -> None:
        """
        Account event push.
        Account event of a specific vt_accountid is also pushed.
        """
        self.on_event(EVENT_ACCOUNT, account)
        # self.on_event(EVENT_ACCOUNT + account.vt_accountid, account)
    def on_log(self, log: LogData) -> None:
        """
        Log event push.
        """
        self.on_event(EVENT_LOG, log)
    def on_contract(self, contract: ContractData) -> None:
        """
        Contract event push.
        """
        self.on_event(EVENT_CONTRACT, contract)
    def write_log(self, msg: str, level: int = INFO, on_log: bool = False) -> None:
        """
        Write a log event from gateway.
        Always goes to this gateway's file logger; additionally pushed as an
        EVENT_LOG event when on_log is True.
        """
        if self.logger:
            self.logger.log(level, msg)
        if on_log:
            log = LogData(msg=msg, level=level, gateway_name=self.gateway_name)
            self.on_log(log)
def write_error(self, msg: str, error: dict = {}):
"""
write error log
:param msg:
:return:
"""
if len(error) > 0:
error_id = error.get("ErrorID", '')
error_msg = error.get("ErrorMsg", '')
msg = f"{msg},代码:{error_id},信息:{error_msg}"
self.write_log(msg, level=ERROR, on_log=True)
print(msg, file=sys.stderr)
    def check_status(self) -> bool:
        """
        check gateway connection or market data status.
        The base implementation always reports False; concrete gateways
        should override it with a real health check.
        """
        return False
    @abstractmethod
    def connect(self, setting: dict) -> None:
        """
        Start gateway connection.
        to implement this method, you must:
        * connect to server if necessary
        * log connected if all necessary connection is established
        * do the following query and response corresponding on_xxxx and write_log
            * contracts : on_contract
            * account asset : on_account
            * account holding: on_position
            * orders of account: on_order
            * trades of account: on_trade
        * if any of query above is failed,  write log.
        future plan:
        response callback/change status instead of write_log
        """
        pass
    @abstractmethod
    def close(self) -> None:
        """
        Close gateway connection.
        """
        pass
    @abstractmethod
    def subscribe(self, req: SubscribeRequest) -> None:
        """
        Subscribe tick data update.
        """
        pass
    @abstractmethod
    def send_order(self, req: OrderRequest) -> str:
        """
        Send a new order to server.
        implementation should finish the tasks blow:
        * create an OrderData from req using OrderRequest.create_order_data
        * assign a unique(gateway instance scope) id to OrderData.orderid
        * send request to server
            * if request is sent, OrderData.status should be set to Status.SUBMITTING
            * if request is failed to sent, OrderData.status should be set to Status.REJECTED
        * response on_order:
        * return vt_orderid
        :return str vt_orderid for created OrderData
        """
        pass
|
qedsoftware/commcare-hq | corehq/apps/locations/tests/test_location_fixtures.py | Python | bsd-3-clause | 14,654 | 0.002115 | import mock
import os
from xml.etree import ElementTree
from corehq.util.test_utils import flag_enabled
from datetime import datetime, timedelta
from django.test import TestCase
from casexml.apps.phone.models import SyncLog
from casexml.apps.phone.tests.utils import create_restore_user
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.domain.models import Domain
from corehq.apps.commtrack.tests.util import bootstrap_domain
from corehq.apps.users.models import CommCareUser
from corehq.apps.app_manager.tests.util import TestXmlMixin
from casexml.apps.case.xml import V2
from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users
from .util import (
LocationHierarchyPerTest,
setup_location_types_with_structure,
setup_locations_with_structure,
LocationStructure,
LocationTypeStructure,
)
from ..fixtures import _location_to_fixture, LocationSet, should_sync_locations, location_fixture_generator, \
flat_location_fixture_generator
from ..models import SQLLocation, LocationType, Location
class FixtureHasLocationsMixin(TestXmlMixin):
    """Mixin asserting that a generated location fixture matches an XML file."""
    root = os.path.dirname(__file__)
    file_path = ['data']

    def _assert_fixture_has_locations(self, xml_name, desired_locations, flat=False):
        """Generate the fixture for self.user and compare it against xml_name."""
        # Build e.g. "New York City" -> ids["new_york_city_id"] = <location_id>
        # so the expected-XML template can reference each location by name.
        ids = {}
        for location_name in desired_locations:
            key = "{}_id".format(location_name.lower().replace(" ", "_"))
            ids[key] = self.locations[location_name].location_id
        if flat:
            generator = flat_location_fixture_generator
        else:
            generator = location_fixture_generator
        actual_fixture = ElementTree.tostring(generator(self.user, V2)[0])
        expected_fixture = self.get_xml(xml_name).format(
            user_id=self.user.user_id,
            **ids
        )
        self.assertXmlEqual(expected_fixture, actual_fixture)
@mock.patch.object(Domain, 'uses_locations', return_value=True) # removes dependency on accounting
class LocationFixturesTest(LocationHierarchyPerTest, FixtureHasLocationsMixin):
    """Location-fixture generation over a two-state sample hierarchy.

    The class-level patch above injects `uses_locations` as the extra mock
    argument every test method receives.
    """
    location_type_names = ['state', 'county', 'city']
    # state -> county -> city tree built fresh per test by LocationHierarchyPerTest
    location_structure = [
        ('Massachusetts', [
            ('Middlesex', [
                ('Cambridge', []),
                ('Somerville', []),
            ]),
            ('Suffolk', [
                ('Boston', []),
                ('Revere', []),
            ])
        ]),
        ('New York', [
            ('New York City', [
                ('Manhattan', []),
                ('Brooklyn', []),
                ('Queens', []),
            ]),
        ]),
    ]
    def setUp(self):
        super(LocationFixturesTest, self).setUp()
        self.user = create_restore_user(self.domain, 'user', '123')
    def test_no_user_locations_returns_empty(self, uses_locations):
        # A user with no assigned locations gets an empty fixture element.
        empty_fixture = "<fixture id='commtrack:locations' user_id='{}' />".format(self.user.user_id)
        fixture = ElementTree.tostring(location_fixture_generator(self.user, V2)[0])
        self.assertXmlEqual(empty_fixture, fixture)
    def test_simple_location_fixture(self, uses_locations):
        self.user.set_location(self.locations['Suffolk'].couch_location)
        self._assert_fixture_has_locations(
            'simple_fixture',
            ['Massachusetts', 'Suffolk', 'Boston', 'Revere']
        )
    def test_multiple_locations(self, uses_locations):
        self.user.add_to_assigned_locations(self.locations['Suffolk'].couch_location)
        self.user.add_to_assigned_locations(self.locations['New York City'].couch_location)
        self._assert_fixture_has_locations(
            'multiple_locations',
            ['Massachusetts', 'Suffolk', 'Boston', 'Revere', 'New York',
             'New York City', 'Manhattan', 'Queens', 'Brooklyn']
        )
    def test_all_locations_flag_returns_all_locations(self, uses_locations):
        with flag_enabled('SYNC_ALL_LOCATIONS'):
            self._assert_fixture_has_locations(
                'expand_from_root',
                ['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
                 'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn']
            )
    @mock.patch.object(CommCareUser, 'locations')
    @mock.patch.object(Domain, 'supports_multiple_locations_per_user')
    def test_multiple_locations_returns_multiple_trees(
            self,
            supports_multiple_locations,
            user_locations,
            uses_locations
    ):
        multiple_locations_different_states = [
            self.locations['Suffolk'].couch_location,
            self.locations['New York City'].couch_location
        ]
        # Patch the property descriptors so attribute access on instances
        # returns the mocked values.
        supports_multiple_locations.__get__ = mock.Mock(return_value=True)
        user_locations.__get__ = mock.Mock(return_value=multiple_locations_different_states)
        self._assert_fixture_has_locations(
            'multiple_locations',
            ['Massachusetts', 'Suffolk', 'Boston', 'Revere', 'New York',
             'New York City', 'Manhattan', 'Queens', 'Brooklyn']
        )
    def test_expand_to_county(self, uses_locations):
        """
        expand to "county"
        should return:
        Mass
        - Suffolk
        """
        self.user.set_location(self.locations['Suffolk'].couch_location)
        location_type = self.locations['Suffolk'].location_type
        location_type.expand_to = location_type
        location_type.save()
        self._assert_fixture_has_locations(
            'expand_to_county',
            ['Massachusetts', 'Suffolk']
        )
    def test_expand_to_county_from_state(self, uses_locations):
        self.user.set_location(self.locations['Massachusetts'].couch_location)
        location_type = self.locations['Massachusetts'].location_type
        location_type.expand_to = self.locations['Suffolk'].location_type
        location_type.save()
        self._assert_fixture_has_locations(
            'expand_to_county_from_state',
            ['Massachusetts', 'Suffolk', 'Middlesex']
        )
    def test_expand_from_county_at_city(self, uses_locations):
        self.user.set_location(self.locations['Boston'].couch_location)
        location_type = self.locations['Boston'].location_type
        location_type.expand_from = self.locations['Suffolk'].location_type
        location_type.save()
        self._assert_fixture_has_locations(
            'expand_from_county_at_city',
            ['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere']
        )
    def test_expand_from_root_at_city(self, uses_locations):
        self.user.set_location(self.locations['Boston'].couch_location)
        location_type = self.locations['Boston'].location_type
        location_type.expand_from_root = True
        location_type.save()
        self._assert_fixture_has_locations(
            'expand_from_root',
            ['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
             'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn']
        )
    def test_expand_from_root_to_county(self, uses_locations):
        self.user.set_location(self.locations['Massachusetts'].couch_location)
        location_type = self.locations['Massachusetts'].location_type
        location_type.expand_from_root = True
        location_type.expand_to = self.locations['Suffolk'].location_type
        location_type.save()
        self._assert_fixture_has_locations(
            'expand_from_root_to_county',
            ['Massachusetts', 'Suffolk', 'Middlesex', 'New York', 'New York City']
        )
    def test_flat_sync_format(self, uses_locations):
        with flag_enabled('SYNC_ALL_LOCATIONS'):
            with flag_enabled('FLAT_LOCATION_FIXTURE'):
                self._assert_fixture_has_locations(
                    'expand_from_root_flat',
                    ['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
                     'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn'],
                    flat=True,
                )
def test_include_without_expanding(self, uses_locations):
|
coderanger/brix | templates/legacy_region.py | Python | apache-2.0 | 3,676 | 0.000272 | #
# Author:: Noah Kantrowitz <noah@coderanger.net>
#
# Copyright 2014, Balanced, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from stratosphere import Ref
from .balanced_region import BalancedRegionBase, FindInRegionMap
from .base import Stack
class AppStack(Stack):
    """Application sub-stack wired into the legacy VPC.

    Supplies the network parameters every app template needs and delays stack
    creation until both subnet route-table associations exist.
    """
    # NOTE: the previous no-op __init__ that only forwarded to
    # super().__init__(*args, **kwargs) was removed; inheritance already
    # provides exactly that behaviour.

    def DependsOn(self):
        """Create app stacks only after both route associations are in place."""
        return [self.template.srta_RouteAssocA(), self.template.srta_RouteAssocB()]

    def Parameters(self):
        """Base CloudFormation parameters; per-stack overrides win via update()."""
        # NOTE(review): the security-group and public-subnet IDs are hard-coded
        # to the legacy region's pre-existing resources — confirm before reuse.
        params = {
            'VpcId': self.template.vpc(),
            'KeyName': Ref(self.template.param_KeyName()),
            'AmiId': FindInRegionMap(self.template.map_RegionMap(), 'AmiId'),
            'SubnetA': Ref(self.template.subnet_SubnetA()),
            'SubnetB': Ref(self.template.subnet_SubnetB()),
            'GatewaySecurityGroupA': 'sg-cdbdafa1',
            'GatewaySecurityGroupB': 'sg-cdbdafa1',
            'PublicSubnetA': 'subnet-ae832dc7',
            'PublicSubnetB': 'subnet-5a812f33',
        }
        # self._parameters is populated by the Stack machinery (see Stack).
        params.update(self._parameters)
        return params
class LegacyRegionTemplate(BalancedRegionBase):
    """Template our legacy VPC region."""
    @classmethod
    def STRATOSPHERE_TYPES(cls):
        # Extend the base type registry so 'app' entries map to AppStack.
        types = BalancedRegionBase.STRATOSPHERE_TYPES()
        types.update({
            'app': AppStack,
        })
        return types
    # NOTE(review): vpc/rtb methods return hard-coded IDs, i.e. they reference
    # resources that presumably already exist in AWS rather than creating them.
    def vpc(self):
        return 'vpc-d6832dbf'
    def rtb_RouteTableA(self):
        return 'rtb-ac832dc5'
    def rtb_RouteTableB(self):
        return 'rtb-5c1c9b35'
    def subnet_SubnetA(self):
        """AZ A network subnet."""
        return {
            'VpcId': self.vpc(),
            'AvailabilityZone': 'us-west-1a',
            'CidrBlock': '10.3.200.0/20',
        }
    def srta_RouteAssocA(self):
        """Association between the AZ A subnet and the route table."""
        return {
            'RouteTableId': self.rtb_RouteTableA(),
            'SubnetId': Ref(self.subnet_SubnetA()),
        }
    def subnet_SubnetB(self):
        """AZ B network subnet."""
        return {
            'VpcId': self.vpc(),
            'AvailabilityZone': 'us-west-1b',
            'CidrBlock': '10.3.216.0/20',
        }
    def srta_RouteAssocB(self):
        """Association between the AZ B subnet and the route table."""
        return {
            'RouteTableId': self.rtb_RouteTableB(),
            'SubnetId': Ref(self.subnet_SubnetB()),
        }
    def app_BalancedDocs(self):
        """Balanced documentation stack."""
        return {'TemplateName': 'balanced_docs'}
    def app_BalancedApiProduction(self):
        """Balanced API production stack."""
        return {
            'TemplateName': 'balanced_api',
            'Parameters': {
                'Env': 'production',
                'ChefEnv': 'production',
                'Capacity': 4,
            },
        }
    def app_BalancedApiTest(self):
        """Balanced API test stack."""
        return {
            'TemplateName': 'balanced_api',
            'Parameters': {
                'Env': 'test',
                'ChefEnv': 'test',
                'Capacity': 2,
            },
        }
|
xodus7/tensorflow | tensorflow/python/estimator/training_test.py | Python | apache-2.0 | 85,718 | 0.003955 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import random
import shutil
import tempfile
import time
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimato | r import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_ | config as run_config_lib
from tensorflow.python.estimator import training
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
_DEFAULT_EVAL_STEPS = 100
_DEFAULT_EVAL_DELAY_SECS = 120
_DEFAULT_EVAL_THROTTLE_SECS = 600
_DELAY_SECS_PER_WORKER = 5
_GLOBAL_STEP_KEY = ops.GraphKeys.GLOBAL_STEP
_INVALID_INPUT_FN_MSG = '`input_fn` must be callable'
_INVALID_HOOK_MSG = 'All hooks must be `SessionRunHook` instances'
_INVALID_MAX_STEPS_MSG = 'Must specify max_steps > 0'
_INVALID_STEPS_MSG = 'Must specify steps > 0'
_INVALID_NAME_MSG = '`name` must be string'
_INVALID_EVAL_DELAY_SECS_MSG = 'Must specify start_delay_secs >= 0'
_INVALID_EVAL_THROTTLE_SECS_MSG = 'Must specify throttle_secs >= 0'
_INVALID_ESTIMATOR_MSG = '`estimator` must have type `tf.estimator.Estimator`'
_STALE_CHECKPOINT_MSG = 'There was no new checkpoint after the training.'
_INVALID_EXPORTER_MSG = '`exporters` must be an Exporter'
_INVALID_EXPORTER_NAME_TYPE_MSG = 'An Exporter must have a string name'
_DUPLICATE_EXPORTER_NAMES_MSG = '`exporters` must have unique names.'
_NONE_EXPORTER_NAME_MSG = (
'An Exporter cannot have a name that is `None` or empty.')
_INVALID_TRAIN_SPEC_MSG = '`train_spec` must have type `tf.estimator.TrainSpec`'
_INVALID_EVAL_SPEC_MSG = '`eval_spec` must have type `tf.estimator.EvalSpec`'
_EVAL_SPEC_OR_NONE_MSG = (
'`eval_spec` must be either `None` or have type `tf.estimator.EvalSpec`')
_INVALID_EVAL_LISTENER_MSG = 'must have type `_ContinuousEvalListener`'
_INVALID_CONFIG_FOR_STD_SERVER_MSG = 'Could not start server; .*TF_CONFIG'
_INVALID_LOCAL_TASK_WITH_CLUSTER = '`task.type` in TF_CONFIG cannot be `local`'
_INVALID_TASK_TYPE = '`estimator.config` must have task_type set.'
_INPROPER_THROTTL_SECS = (
'EvalSpec.throttle_secs is set as 0.*Please consider to increase')
# The message should NOT have 'local' word as part of it. As (?!word) is looking
# ahead, so, the $ (ending) check is required; otherwise, it will match
# partially and return successuful.
_INVALID_TASK_TO_RUN = (
'Task type .* is not supported. Supported task types are ((?!local).)*$')
_INVALID_EMPTY_EVAL_RESULT_ERR = (
'Internal error: `Estimator.evaluate` should never return empty metrics')
_INVALID_EVAL_RESULT_TYPE_ERR = '`Estimator.evaluate` should return dict.'
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR = (
'Internal error: `Estimator.evaluate` result should have `global_step`')
_INVALID_EVAL_TASK_ID_ERR = (
'there can only be one `evaluator` task .*with task id 0')
_TF_CONFIG_FOR_CHIEF = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
_TF_CONFIG_FOR_MASTER = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 0
}
}
_TF_CONFIG_FOR_WORKER = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
_TF_CONFIG_FOR_PS = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
_TF_CONFIG_FOR_EVALUATOR = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 0
}
}
_TF_CONFIG_FOR_GOOGLE = {'environment': 'google'}
class _FakeHook(session_run_hook.SessionRunHook):
  """Fake implementation of `SessionRunHook`.

  Inherits every callback as a no-op; tests use it only to verify that hook
  lists are validated and passed through unchanged (see `TrainSpecTest`).
  """
class _InvalidHook(object):
  """Invalid hook (not a subclass of `SessionRunHook`).

  Passed to spec constructors in tests to trigger the hook type check.
  """
def _create_exporter(name):
  """Return a minimal no-op `Exporter` whose `name` property is *name*."""

  class _NoOpExporter(exporter_lib.Exporter):
    """Exporter stub: remembers its name and exports nothing."""

    def __init__(self, exporter_name):
      self._exporter_name = exporter_name

    @property
    def name(self):
      return self._exporter_name

    def export(self, *args, **kwargs):
      # Intentionally a no-op; callers only inspect the name.
      del args, kwargs

  return _NoOpExporter(name)
def _create_run_config_with_cluster_spec(tf_config):
  """Return a `RunConfig` built as if `TF_CONFIG` were set to *tf_config*."""
  fake_env = {'TF_CONFIG': json.dumps(tf_config)}
  with test.mock.patch.dict('os.environ', fake_env):
    return run_config_lib.RunConfig()
class TrainSpecTest(test.TestCase):
  """Unit tests for `training.TrainSpec` defaults and argument validation."""

  def testRequiredArgumentsSet(self):
    """Only `input_fn` is required; the other fields get defaults."""
    train_spec = training.TrainSpec(input_fn=lambda: 1)
    self.assertEqual(1, train_spec.input_fn())
    self.assertIsNone(train_spec.max_steps)
    self.assertEqual(0, len(train_spec.hooks))

  def testAllArgumentsSet(self):
    """Every constructor argument is stored as given."""
    fake_hooks = [_FakeHook()]
    train_spec = training.TrainSpec(
        input_fn=lambda: 1, max_steps=2, hooks=fake_hooks)
    self.assertEqual(1, train_spec.input_fn())
    self.assertEqual(2, train_spec.max_steps)
    self.assertEqual(tuple(fake_hooks), train_spec.hooks)

  def testInvalidInputFn(self):
    """A non-callable `input_fn` is rejected with a TypeError."""
    with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
      training.TrainSpec(input_fn='invalid')

  def testInvalidMaxStep(self):
    """`max_steps` must be strictly positive."""
    with self.assertRaisesRegexp(ValueError, _INVALID_MAX_STEPS_MSG):
      training.TrainSpec(input_fn=lambda: 1, max_steps=0)

  def testInvalidHook(self):
    """Hooks must be `SessionRunHook` instances."""
    with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
      training.TrainSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
class Eva |
SasView/sasmodels | example/model.py | Python | bsd-3-clause | 1,240 | 0.012097 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Bumps fit script: fits an ellipsoid model to a radial SANS data set.
import sys
from bumps.names import *
from sasmodels.core import load_model
from sasmodels.bumps_model import Model, Experiment
from sasmodels.data impor | t load_data, set_beam_stop, set_top
""" IMPORT THE DATA USED """
# Mask the beam stop region and the top edge of the detector before fitting.
radial_data = load_data('DEC07267.DAT')
set_b | eam_stop(radial_data, 0.00669, outer=0.025)
set_top(radial_data, -.0185)
# Ellipsoid form factor; polydispersity is applied to the polar radius and
# to the orientation angles theta/phi via the *_pd parameters.
kernel = load_model("ellipsoid")
model = Model(kernel,
    scale=0.08,
    radius_polar=15, radius_equatorial=800,
    sld=.291, sld_solvent=7.105,
    background=0,
    theta=90, phi=0,
    theta_pd=15, theta_pd_n=40, theta_pd_nsigma=3,
    radius_polar_pd=0.222296, radius_polar_pd_n=1, radius_polar_pd_nsigma=0,
    radius_equatorial_pd=.000128, radius_equatorial_pd_n=1, radius_equatorial_pd_nsigma=0,
    phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3,
    )
# SET THE FITTING PARAMETERS
# Only the parameters given a .range() below are varied by the optimizer.
model.radius_polar.range(15, 1000)
model.radius_equatorial.range(15, 1000)
model.theta_pd.range(0, 360)
model.background.range(0,1000)
model.scale.range(0, 10)
#cutoff = 0 # no cutoff on polydisperisity loops
#cutoff = 1e-5 # default cutoff
cutoff = 1e-3 # low precision cutoff
M = Experiment(data=radial_data, model=model, cutoff=cutoff)
problem = FitProblem(M)
froyobin/ironic | ironic/drivers/modules/ilo/deploy.py | Python | apache-2.0 | 25,377 | 0.000355 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iLO Deploy Driver(s) and supporting methods.
"""
import tempfile
from oslo.config import cfg
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import agent
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# driver_info fields that operators must supply for this driver to work.
REQUIRED_PROPERTIES = {
    'ilo_deploy_iso': _("UUID (from Glance) of the deployment ISO. "
                        "Required.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
# Pull in config options owned by other driver modules that this file reads.
CONF.import_opt('pxe_append_params', 'ironic.drivers.modules.iscsi_deploy',
                group='pxe')
CONF.import_opt('swift_ilo_container', 'ironic.drivers.modules.ilo.common',
                group='ilo')
# Maps ironic boot device names to the identifiers the iLO API expects.
BOOT_DEVICE_MAPPING_TO_ILO = {'pxe': 'NETWORK', 'disk': 'HDD',
                              'cdrom': 'CDROM', 'bios': 'BIOS', 'safe': 'SAFE'}
def _update_ipmi_properties(task):
"""Update ipmi properties to node driver_info
:param task: a task from TaskManager.
"""
node = task.node
info = node.driver_info
#updating ipmi credentials
info['ipmi_address'] = info['ilo_address']
info['ipmi_username'] = info['ilo_username']
info['ipmi_password'] = info['ilo_password']
if 'console_port' in info:
info['ipmi_terminal_port'] = info['console_port']
#saving ipmi credentials to task object
task.node.driver_info = info
def _get_boot_iso_object_name(node):
"""Returns the floppy image name for a given node.
:param node: the node for which image name is to be provided.
"""
return "boot-%s" % node.uuid
def _get_boot_iso(task, root_uuid):
    """This method returns a boot ISO to boot the node.
    It chooses one of the two options in the order as below:
    1. Image deployed has a meta-property 'boot_iso' in Glance. This should
        refer to the UUID of the boot_iso which exists in Glance.
    2. Generates a boot ISO on the fly using kernel and ramdisk mentioned in
        the image deployed. It uploads the generated boot ISO to Swift.
    :param task: a TaskManager instance containing the node to act on.
    :param root_uuid: the uuid of the root partition.
    :returns: the information about the boot ISO. Returns the information in
        the format 'glance:<glance-boot-iso-uuid>' or
        'swift:<swift-boot_iso-object-name>'. In case of Swift, it is assumed
        that the object exists in CONF.ilo.swift_ilo_container.
        On error finding the boot iso, it returns None.
    :raises: MissingParameterValue, if any of the required parameters are
        missing in the node's driver_info or instance_info.
    :raises: InvalidParameterValue, if any of the parameters have invalid
        value in the node's driver_info or instance_info.
    :raises: SwiftOperationError, if operation with Swift fails.
    :raises: ImageCreationFailed, if creation of boot ISO failed.
    """
    # Option 1 - Check if user has provided a boot_iso in Glance.
    LOG.debug("Trying to get a boot ISO to boot the baremetal node")
    deploy_info = _parse_deploy_info(task.node)
    image_uuid = deploy_info['image_source']
    boot_iso_uuid = images.get_glance_image_property(task.context,
        image_uuid, 'boot_iso')
    if boot_iso_uuid:
        LOG.debug("Found boot_iso %s in Glance", boot_iso_uuid)
        return 'glance:%s' % boot_iso_uuid
    # NOTE(faizan) For uefi boot_mode, operator should provide efi capable
    # boot-iso in glance
    if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
        LOG.error(_LE("Unable to find boot_iso in Glance, required to deploy "
                      "node %(node)s in UEFI boot mode."),
                  {'node': task.node.uuid})
        return
    # Both a kernel and a ramdisk are required to assemble an ISO locally;
    # bail out (returning None) when either is missing from the image.
    kernel_uuid = images.get_glance_image_property(task.context,
        image_uuid, 'kernel_id')
    ramdisk_uuid = images.get_glance_image_property(task.context,
        image_uuid, 'ramdisk_id')
    if not kernel_uuid or not ramdisk_uuid:
        LOG.error(_LE("Unable to find 'kernel_id' and 'ramdisk_id' in Glance "
| "image %(image)s for generating boot ISO for % | (node)s"),
                  {'image': image_uuid, 'node': task.node.uuid})
        return
    # NOTE(rameshg87): Functionality to share the boot ISOs created for
    # similar instances (instances with same deployed image) is
    # not implemented as of now. Creation/Deletion of such a shared boot ISO
    # will require synchronisation across conductor nodes for the shared boot
    # ISO. Such a synchronisation mechanism doesn't exist in ironic as of now.
    # Option 2 - Create boot_iso from kernel/ramdisk, upload to Swift
    # and provide its name.
    boot_iso_object_name = _get_boot_iso_object_name(task.node)
    kernel_params = CONF.pxe.pxe_append_params
    container = CONF.ilo.swift_ilo_container
    # The ISO is built in a temp file which is removed automatically when
    # the context exits; only the Swift copy survives.
    with tempfile.NamedTemporaryFile() as fileobj:
        boot_iso_tmp_file = fileobj.name
        images.create_boot_iso(task.context, boot_iso_tmp_file,
            kernel_uuid, ramdisk_uuid, root_uuid, kernel_params)
        swift_api = swift.SwiftAPI()
        swift_api.create_object(container, boot_iso_object_name,
            boot_iso_tmp_file)
        LOG.debug("Created boot_iso %s in Swift", boot_iso_object_name)
    return 'swift:%s' % boot_iso_object_name
def _clean_up_boot_iso_for_instance(node):
    """Deletes the boot ISO created in Swift for the instance.

    Best-effort cleanup: a failure to delete is logged but not raised so
    that tear-down of the instance can continue.

    :param node: an ironic node object.
    """
    swift_api = swift.SwiftAPI()
    container = CONF.ilo.swift_ilo_container
    boot_iso_object_name = _get_boot_iso_object_name(node)
    try:
        swift_api.delete_object(container, boot_iso_object_name)
    except exception.SwiftOperationError as e:
        # Fix: the adjacent string literals are concatenated, so the first
        # one needs a trailing space to avoid logging "...%(node)s.Error:".
        LOG.exception(_LE("Failed to clean up boot ISO for %(node)s. "
                          "Error: %(error)s."),
                      {'node': node.uuid, 'error': e})
def _get_single_nic_with_vif_port_id(task):
"""Returns the MAC address of a port which has a VIF port id.
:param task: a TaskManager instance containing the ports to act on.
:returns: MAC address of the port connected to deployment network.
None if it cannot find any port with vif id.
"""
for port in task.ports:
if port.extra.get('vif_port_id'):
return port.address
def _parse_driver_info(node):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the driver_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
"""
info = node.driver_info
d_info = {}
d_info['ilo_deploy_iso'] = info.get('ilo_deploy_iso')
error_msg = _("Error validating iLO virtual media deploy")
deploy_utils.c |
CatalansMB/War1714 | src/ID_strings.py | Python | gpl-2.0 | 30,810 | 0.000032 | str_no_string = 0
str_empty_string = 1
str_yes = 2
str_no = 3 |
str_blank_string = 4
str_error_string = 5
str_s0 = 6
str_blank_s1 = 7
str_reg1 = 8
str_s50_comma_s51 = 9
str_s50_and_s51 = 10
str_s52_comma_s51 = 11
str_s52_and_s51 = 12
str_msg_battle_won = 13
str_charge = 14
str_color = 15
str_hold_fire = 16
str_blunt_hold_fire = 17
str_finished = 18
str_delivered_damage = 19
str_archery_target_hit = 20
str_cant_use_inventory_now = 21
str_give_up_fight = 22
str_battle_won = 23
s | tr_battle_lost = 24
str_kingdom_1_adjective = 25
str_kingdom_2_adjective = 26
str_kingdom_3_adjective = 27
str_kingdom_4_adjective = 28
str_kingdom_5_adjective = 29
str_kingdom_6_adjective = 30
str_credits_1 = 31
str_credits_2 = 32
str_credits_3 = 33
str_credits_4 = 34
str_credits_5 = 35
str_credits_6 = 36
str_credits_7 = 37
str_credits_8 = 38
str_credits_9 = 39
str_credits_10 = 40
str_credits_11 = 41
str_credits_12 = 42
str_credits_13 = 43
str_credits_14 = 44
str_credits_15 = 45
str_mp_ambush = 46
str_mp_ambush_fog = 47
str_mp_arabian_harbour = 48
str_mp_arabian_harbour_night = 49
str_mp_arabian_village = 50
str_mp_arabian_village_morning = 51
str_mp_arabian_village_conq = 52
str_mp_arabian_village_conq_morning = 53
str_mp_ardennes = 54
str_mp_ardennes_morning = 55
str_mp_avignon = 56
str_mp_avignon_morning = 57
str_mp_bavarian_river = 58
str_mp_bavarian_river_cloudy = 59
str_mp_beach = 60
str_mp_beach_morning = 61
str_mp_borodino = 62
str_mp_borodino_morn = 63
str_mp_champs_elysees = 64
str_mp_champs_elysees_rain = 65
str_mp_charge_to_the_rhine = 66
str_mp_charge_to_the_rhine_cloudy = 67
str_mp_citadelle_napoleon = 68
str_mp_citadelle_napoleon_morning = 69
str_mp_columbia_hill_farm = 70
str_mp_columbia_farm_morning = 71
str_mp_countryside = 72
str_mp_countryside = 73
str_mp_dust = 74
str_mp_dust_morning = 75
str_mp_european_city_summer = 76
str_mp_european_city_winter = 77
str_mp_floodplain = 78
str_mp_floodplain_storm = 79
str_mp_forest_pallisade = 80
str_mp_forest_pallisade_fog = 81
str_mp_fort_al_hafya = 82
str_mp_fort_al_hafya_night = 83
str_mp_fort_bashir = 84
str_mp_fort_bashir_morning = 85
str_mp_fort_beaver = 86
str_mp_fort_beaver_morning = 87
str_mp_fort_boyd = 88
str_mp_fort_boyd_raining = 89
str_mp_fort_brochet = 90
str_mp_fort_brochet_raining = 91
str_mp_fort_de_chartres = 92
str_mp_fort_de_chartres_raining = 93
str_mp_fort_fleetwood = 94
str_mp_fort_fleetwood_storm = 95
str_mp_fort_george = 96
str_mp_fort_george_raining = 97
str_mp_fort_hohenfels = 98
str_mp_fort_hohenfels_night = 99
str_mp_fort_lyon = 100
str_mp_fort_lyon_night = 101
str_mp_fort_mackinaw = 102
str_mp_fort_mackinaw_raining = 103
str_mp_fort_nylas = 104
str_mp_fort_nylas_raining = 105
str_mp_fort_refleax = 106
str_mp_fort_refleax_night = 107
str_mp_fort_vincey = 108
str_mp_fort_vincey_storm = 109
str_mp_french_farm = 110
str_mp_french_farm_storm = 111
str_mp_german_village = 112
str_mp_german_village_rain = 113
str_mp_hougoumont = 114
str_mp_hougoumont_night = 115
str_mp_hungarian_plains = 116
str_mp_hungarian_plains_cloud = 117
str_mp_theisland = 118
str_mp_la_haye_sainte = 119
str_mp_la_haye_sainte_night = 120
str_mp_landshut = 121
str_mp_landshut_night = 122
str_mp_minden = 123
str_mp_minden_night = 124
str_mp_naval = 125
str_mp_oaksfield_day = 126
str_mp_oaksfield_storm = 127
str_mp_outlaws_den = 128
str_mp_outlaws_den_night = 129
str_mp_quatre_bras = 130
str_mp_quatre_bras_night = 131
str_mp_river_crossing = 132
str_mp_river_crossing_morning = 133
str_mp_roxburgh = 134
str_mp_roxburgh_raining = 135
str_mp_russian_river_day = 136
str_mp_russian_river_cloudy = 137
str_mp_russian_village = 138
str_mp_russian_village_fog = 139
str_mp_russian_village_conq = 140
str_mp_russian_village_conq_night = 141
str_mp_saints_isle = 142
str_mp_saints_isle_rain = 143
str_mp_schemmerbach = 144
str_mp_schemmerbach_storm = 145
str_mp_siege_of_toulon = 146
str_mp_siege_of_toulon_night = 147
str_mp_sjotofta = 148
str_mp_sjotofta_night = 149
str_mp_slovenian_village = 150
str_mp_slovenian_village_raining = 151
str_mp_spanish_farm = 152
str_mp_spanish_farm_rain = 153
str_mp_spanish_mountain_pass = 154
str_mp_spanish_mountain_pass_evening = 155
str_mp_spanish_village = 156
str_mp_spanish_village_evening = 157
str_mp_strangefields = 158
str_mp_strangefields_storm = 159
str_mp_swamp = 160
str_mp_venice = 161
str_mp_venice_morning = 162
str_mp_walloon_farm = 163
str_mp_walloon_farm_night = 164
str_mp_testing_map = 165
str_random_multi_plain_medium = 166
str_random_multi_plain_large = 167
str_random_multi_plain_medium_rain = 168
str_random_multi_plain_large_rain = 169
str_random_multi_steppe_medium = 170
str_random_multi_steppe_large = 171
str_random_multi_steppe_forest_medium = 172
str_random_multi_steppe_forest_large = 173
str_random_multi_snow_medium = 174
str_random_multi_snow_medium_snow = 175
str_random_multi_snow_large = 176
str_random_multi_snow_large_snow = 177
str_random_multi_snow_forest_medium = 178
str_random_multi_snow_forest_medium_snow = 179
str_random_multi_snow_forest_large = 180
str_random_multi_snow_forest_large_snow = 181
str_random_multi_desert_medium = 182
str_random_multi_desert_large = 183
str_random_multi_desert_forest_medium = 184
str_random_multi_desert_forest_large = 185
str_random_multi_forest_medium = 186
str_random_multi_forest_medium_rain = 187
str_random_multi_forest_large = 188
str_random_multi_forest_large_rain = 189
str_mp_custom_map_1 = 190
str_mp_custom_map_2 = 191
str_mp_custom_map_3 = 192
str_mp_custom_map_4 = 193
str_mp_custom_map_5 = 194
str_mp_custom_map_6 = 195
str_mp_custom_map_7 = 196
str_mp_custom_map_8 = 197
str_mp_custom_map_9 = 198
str_mp_custom_map_10 = 199
str_mp_custom_map_11 = 200
str_mp_custom_map_12 = 201
str_mp_custom_map_13 = 202
str_mp_custom_map_14 = 203
str_mp_custom_map_15 = 204
str_mp_custom_map_16 = 205
str_mp_custom_map_17 = 206
str_mp_custom_map_18 = 207
str_mp_custom_map_19 = 208
str_mp_custom_map_20 = 209
str_multi_scene_end = 210
str_multi_game_type_1 = 211
str_multi_game_type_2 = 212
str_multi_game_type_3 = 213
str_multi_game_type_5 = 214
str_multi_game_type_6 = 215
str_multi_game_type_7 = 216
str_multi_game_type_8 = 217
str_multi_game_type_9 = 218
str_multi_game_type_11 = 219
str_multi_game_types_end = 220
str_multi_game_type_10 = 221
str_poll_kick_player_s1_by_s0 = 222
str_poll_ban_player_s1_by_s0 = 223
str_poll_change_map_to_s1_by_s0 = 224
str_poll_change_map_to_s1_and_factions_to_s2_and_s3_by_s0 = 225
str_poll_change_number_of_bots_to_reg0_and_reg1_by_s0 = 226
str_poll_kick_player = 227
str_poll_ban_player = 228
str_poll_change_map = 229
str_poll_change_map_with_faction = 230
str_poll_change_number_of_bots = 231
str_poll_time_left = 232
str_poll_result_yes = 233
str_poll_result_no = 234
str_server_name = 235
str_game_password = 236
str_map = 237
str_game_type = 238
str_max_number_of_players = 239
str_number_of_bots_in_team_reg1 = 240
str_team_reg1_faction = 241
str_enable_valve_anti_cheat = 242
str_allow_friendly_fire = 243
str_allow_melee_friendly_fire = 244
str_friendly_fire_damage_self_ratio = 245
str_friendly_fire_damage_friend_ratio = 246
str_spectator_camera = 247
str_control_block_direction = 248
str_map_time_limit = 249
str_round_time_limit = 250
str_players_take_control_of_a_bot_after_death = 251
str_team_points_limit = 252
str_point_gained_from_flags = 253
str_point_gained_from_capturing_flag = 254
str_respawn_period = 255
str_add_to_official_game_servers_list = 256
str_combat_speed = 257
str_combat_speed_0 = 258
str_combat_speed_1 = 259
str_combat_speed_2 = 260
str_combat_speed_3 = 261
str_combat_speed_4 = 262
str_off = 263
str_on = 264
str_defender_spawn_count_limit = 265
str_unlimited = 266
str_automatic = 267
str_by_mouse_movement = 268
str_free = 269
str_stick_to_any_player = 270
str_stick_to_team_members = 271
str_stick_to_team_members_view = 272
str_make_factions_voteable = 273
str_make_kick_voteable = 274
str_make_ban_voteable = 275
str_bots_upper_limit_for_votes = 276
str_make_maps_voteable = 277
str_valid_vote_ratio = 278
str_auto_team_balance_limit = 279
str_welcome_message = 280
str_initial_gold_multiplier = 281
str_battle_earnings_multiplier = 282
str_round_earnings_multiplier = 283
str_allow_player_ban |
openaid-IATI/OIPA | OIPA/solr/activity_sector/tasks.py | Python | agpl-3.0 | 883 | 0 | # If on Python 2.X
from __future__ import print_function
import pysolr
from django.conf import settings
from solr.activity_sector.indexing import ActivitySectorIndexing
from solr.tasks import BaseTaskIndexing
# Solr client bound to the 'activity-sector' core from Django settings.
# always_commit makes each update immediately visible; the generous timeout
# accommodates large indexing batches.
solr = pysolr.Solr(
    '{url}/{core}'.format(
        url=settings.SOLR.get('url'),
        core=settings.SOLR.get(' | cores').get('activity-sector')
    ), always_commit=True, timeout=180
)
class ActivitySectorTaskIndexing(BaseTaskIndexing):
    """Indexing task that pushes activity-sector relations into Solr."""
    indexing = ActivitySectorIndexing
    solr = solr

    def run_from_activity(self, activity):
        # Index every sector relation attached to the given activity,
        # one run per relation.
        for sectors in activity.activitysector_set.all():
            self | .instance = sectors
            self.run()

    def delete(self):
        # Remove all documents for this activity's IATI identifier, but only
        # when indexing is enabled in settings.
        if settings.SOLR.get('indexing'):
            self.solr.delete(q='iati_identifier:{iati_identifier}'.format(
                iati_identifier=self.instance.activity.iati_identifier))
|
esteve/rosidl | rosidl_parser/test/test_field.py | Python | apache-2.0 | 2,122 | 0 | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import assert_raises
from rosidl_parser import Field
from rosidl_parser import InvalidValue
from rosidl_parser import Type
def test_field_constructor():
    """Construction, defaults and validation of `Field`."""
    # A field with no default value stores None.
    type_ = Type('bool')
    field = Field(type_, 'foo')
    assert field.type == type_
    assert field.name == 'foo'
    assert field.default_value is None
    # A provided default is parsed according to the field type.
    field = Field(type_, 'foo', '1')
    assert field.default_value
    # The type argument must be a Type instance and the name a valid
    # identifier (no spaces).
    with a | ssert_raises(TypeError):
        Field('type', 'foo') |
    with assert_raises(NameError):
        Field(type_, 'foo bar')
    # Fixed-size arrays parse bracketed defaults of the exact length.
    type_ = Type('bool[2]')
    field = Field(type_, 'foo', '[false, true]')
    assert field.default_value == [False, True]
    # Unbounded arrays accept any length.
    type_ = Type('bool[]')
    field = Field(type_, 'foo', '[false, true, false]')
    assert field.default_value == [False, True, False]
    # A default whose length mismatches the fixed array size is rejected.
    type_ = Type('bool[3]')
    with assert_raises(InvalidValue):
        Field(type_, 'foo', '[false, true]')
def test_field_methods():
    """Equality, inequality and string conversion of `Field`."""
    bool_true = Field(Type('bool'), 'foo', '1')
    # Comparison with a non-Field object never matches.
    assert Field(Type('bool'), 'foo') != 23
    # Equal when type, name and (parsed) default value all agree, even if
    # the default was spelled differently.
    assert bool_true == Field(Type('bool'), 'foo', 'true')
    # A differing default value, name or type breaks equality.
    assert bool_true != Field(Type('bool'), 'foo', 'false')
    assert bool_true != Field(Type('bool'), 'bar', '1')
    assert bool_true != Field(Type('byte'), 'foo', '1')
    # String form is '<type> <name> <default>'.
    assert str(bool_true) == 'bool foo True'
    assert str(Field(Type('string<=5'), 'foo', 'value')) == \
        "string<=5 foo 'value'"
NcLang/vimrc | sources_non_forked/YouCompleteMe/python/ycm/diagnostic_interface.py | Python | mit | 9,546 | 0.036141 | # Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from future.utils import itervalues, iteritems
from collections import defaultdict, namedtuple
from ycm import vimsupport
import vim
class DiagnosticInterface( object ):
  """Keeps Vim's signs, highlight matches, echoed messages and location list
  in sync with the latest diagnostics received from the server."""

  def __init__( self, user_options ):
    self._user_options = user_options
    # Line and column numbers are 1-based
    self._buffer_number_to_line_to_diags = defaultdict(
        lambda: defaultdict( list ) )
    # Vim sign ids must be unique; this counter hands them out.
    self._next_sign_id = 1
    # -1 guarantees the first OnCursorMoved sees a "changed" line.
    self._previous_line_number = -1
    # True while an echoed diagnostic message is showing and must be
    # cleared when the cursor leaves the diagnostic's line.
    self._diag_message_needs_clearing = False
    self._placed_signs = []

  def OnCursorMoved( self ):
    """Echo the diagnostic for the new cursor line, if the line changed."""
    line, _ = vimsupport.CurrentLineAndColumn()
    line += 1  # Convert to 1-based
    if line != self._previous_line_number:
      self._previous_line_number = line

      if self._user_options[ 'echo_current_diagnostic' ]:
        self._EchoDiagnosticForLine( line )

  def GetErrorCount( self ):
    """Number of error diagnostics in the current buffer."""
    return len( self._FilterDiagnostics( _DiagnosticIsError ) )

  def GetWarningCount( self ):
    """Number of warning diagnostics in the current buffer."""
    return len( self._FilterDiagnostics( _DiagnosticIsWarning ) )

  def PopulateLocationList( self, diags ):
    """Fill Vim's location list with the given diagnostics."""
    vimsupport.SetLocationList(
      vimsupport.ConvertDiagnosticsToQfList( diags ) )

  def UpdateWithNewDiagnostics( self, diags ):
    """Replace all stored diagnostics and refresh signs/squiggles/loclist
    according to the user's options."""
    normalized_diags = [ _NormalizeDiagnostic( x ) for x in diags ]
    self._buffer_number_to_line_to_diags = _ConvertDiagListToDict(
        normalized_diags )

    if self._user_options[ 'enable_diagnostic_signs' ]:
      self._placed_signs, self._next_sign_id = _UpdateSigns(
        self._placed_signs,
        self._buffer_number_to_line_to_diags,
        self._next_sign_id )

    if self._user_options[ 'enable_diagnostic_highlighting' ]:
      _UpdateSquiggles( self._buffer_number_to_line_to_diags )

    if self._user_options[ 'always_populate_location_list' ]:
      self.PopulateLocationList( normalized_diags )

  def _EchoDiagnosticForLine( self, line_num ):
    """Echo the first diagnostic on line_num, or clear a stale message."""
    buffer_num = vim.current.buffer.number
    diags = self._buffer_number_to_line_to_diags[ buffer_num ][ line_num ]
    if not diags:
      if self._diag_message_needs_clearing:
        # Clear any previous diag echo
        vimsupport.PostVimMessage( '', warning = False )
        self._diag_message_needs_clearing = False
      return

    text = diags[ 0 ][ 'text' ]
    if diags[ 0 ].get( 'fixit_available', False ):
      text += ' (FixIt)'

    vimsupport.PostVimMessage( text, warning = False, truncate = True )
    self._diag_message_needs_clearing = True

  def _FilterDiagnostics( self, predicate ):
    """Return the current buffer's diagnostics that satisfy predicate."""
    matched_diags = []
    line_to_diags = self._buffer_number_to_line_to_diags[
        vim.current.buffer.number ]

    for diags in itervalues( line_to_diags ):
      matched_diags.extend( list( filter( predicate, diags ) ) )
    return matched_diags
def _UpdateSquiggles( buffer_number_to_line_to_diags ):
  """Repaint diagnostic highlight matches for the current buffer."""
  vimsupport.ClearYcmSyntaxMatches()
  line_to_diags = buffer_number_to_line_to_diags[ vim.current.buffer.number ]

  def add_match_for_extent( extent, is_error ):
    vimsupport.AddDiagnosticSyntaxMatch(
      extent[ 'start' ][ 'line_num' ],
      extent[ 'start' ][ 'column_num' ],
      extent[ 'end' ][ 'line_num' ],
      extent[ 'end' ][ 'column_num' ],
      is_error = is_error )

  for diags_on_line in itervalues( line_to_diags ):
    for diagnostic in diags_on_line:
      extent = diagnostic[ 'location_extent' ]
      is_error = _DiagnosticIsError( diagnostic )

      if extent[ 'start' ][ 'line_num' ] < 0:
        # No usable extent; highlight just the single diagnostic location.
        point = diagnostic[ 'location' ]
        vimsupport.AddDiagnosticSyntaxMatch( point[ 'line_num' ],
                                             point[ 'column_num' ] )
      else:
        add_match_for_extent( extent, is_error )

      # Also highlight any additional ranges attached to the diagnostic.
      for diag_range in diagnostic[ 'ranges' ]:
        add_match_for_extent( diag_range, is_error )
def _UpdateSigns( placed_signs, buffer_number_to_line_to_diags, next_sign_id ):
  """Incrementally sync placed Vim signs with the current diagnostics.

  Returns the updated list of placed signs and the next free sign id.
  """
  new_signs, kept_signs, next_sign_id = _GetKeptAndNewSigns(
    placed_signs, buffer_number_to_line_to_diags, next_sign_id
  )
  # Dummy sign used to prevent "flickering" in Vim when last mark gets
  # deleted from buffer. Dummy sign prevents Vim to collapsing the sign column
  # in that case.
  # There's also a vim bug which causes the whole window to redraw in some
  # conditions (vim redraw logic is very complex). But, somehow, if we place a
  # dummy sign before placing other "real" signs, it will not redraw the
  # buffer (patch to vim pending).
  dummy_sign_needed = not kept_signs and new_signs
  if dummy_sign_needed:
    vimsupport.PlaceDummySign( next_sign_id + 1,
                               vim.current.buffer.number,
                               new_signs[ 0 ].line )
  # We place only those signs that haven't been placed yet.
  new_placed_signs = _PlaceNewSigns( kept_signs, new_signs )
  # We use incremental placement, so signs that already placed on the correct
  # lines will not be deleted and placed again, which should improve performance
  # in case of many diags. Signs which don't exist in the current diag should be
  # deleted.
  _UnplaceObsoleteSigns( kept_signs, placed_signs )
  if dummy_sign_needed:
    vimsupport.UnPlaceDummySign( next_sign_id + 1, vim.current.buffer.number )
  return new_placed_signs, next_sign_id
def _GetKeptAndNewSigns( placed_signs, buffer_number_to_line_to_diags,
                         next_sign_id ):
  """Partition the wanted signs into already-placed (kept) and new ones.

  Returns ( new_signs, kept_signs, next_sign_id ); only visible buffers are
  considered.
  """
  new_signs = []
  kept_signs = []
  for buffer_number, line_to_diags in iteritems(
      buffer_number_to_line_to_diags ):
    if not vimsupport.BufferIsVisible( buffer_number ):
      continue
    for line, diags in iteritems( line_to_diags ):
      for diag in diags:
        # Sign equality ignores the id, so this candidate can be matched
        # against previously placed signs on the same line/buffer/severity.
        sign = _DiagSignPlacement( next_sign_id,
                                   line,
                                   buffer_number,
                                   _DiagnosticIsError( diag ) )
        if sign not in placed_signs:
          new_signs += [ sign ]
          next_sign_id += 1
        else:
          # We use .index here because `sign` contains a new id, but
          # we need the sign with the old id to unplace it later on.
          # We won't be placing the new sign.
| kept_signs += [ placed_signs[ placed_signs.index( sign ) ] ]
  return new_signs, kept_signs, next_sign_id
def _PlaceNewSigns( kept_signs, new_signs ):
  """Place the signs in new_signs and return the full list of placed signs.

  Starts from the kept (already-placed) signs and appends each newly placed
  one.
  """
  placed_signs = kept_signs[:]
  for sign in new_signs:
    # Do not set two signs on the same line, it will screw up storing sign
    # locations.
    if sign in placed_signs:
| continue
    vimsupport.PlaceSign( sign.id, sign.line, sign.buffer, sign.is_error )
    placed_signs.append(sign)
  return placed_signs
def _UnplaceObsoleteSigns( kept_signs, placed_signs ):
  """Unplace every previously placed sign that is no longer being kept."""
  obsolete = ( sign for sign in placed_signs if sign not in kept_signs )
  for sign in obsolete:
    vimsupport.UnplaceSignInBuffer( sign.buffer, sign.id )
def _ConvertDiagListToDict( diag_list ):
buffer_to_line_to_diags = defaultdict( lambda: defaultdict( list ) )
for diag in diag_list:
location = diag[ 'location' ]
buffer_number = vimsupport.GetBufferNumberForFilename(
location[ 'filepath' ] )
line_number = location[ 'li |
chippey/gaffer | python/GafferRenderMan/__init__.py | Python | bsd-3-clause | 2,499 | 0.014406 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Desig | n Inc. All rights reserved.
# Copyright (c) 2013, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of condition | s and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
__import__( "GafferScene" )
def __setupEnvironment() :
	"""Prepend Gaffer's RenderMan display-driver and procedural directories
	to the 3delight search-path environment variables, expanding
	$GAFFER_ROOT (and any other variables) in the result."""

	import os

	def _prepend( envVar, prefix ) :
		# Keep any pre-existing value, ':'-separated, after the new prefix.
		current = os.environ.get( envVar, "" )
		combined = prefix + ( ":" + current if current else "" )
		os.environ[envVar] = os.path.expandvars( combined )

	_prepend( "DL_DISPLAYS_PATH", "$GAFFER_ROOT/renderMan/displayDrivers" )
	_prepend( "DL_PROCEDURALS_PATH", "$GAFFER_ROOT/renderMan/procedurals" )
__setupEnvironment()
from _GafferRenderMan import *
from RenderManRender import RenderManRender
from RenderManShaderBall import RenderManShaderBall
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", {}, subdirectory = "GafferRenderMan" )
|
tracer9/Applied_Math | Homework_4.py | Python | apache-2.0 | 3,155 | 0.032013 | # -*- coding: utf-8 -*-
"""
Applied Mathematics for Computer Science.
Homework4 -- L-M Algorithm.
@author: LiBin 11531041
@date: 2016 - 5 - 23.
"""
#%% Objective: Assuming given type of the certain function
# " fun(x) = a*exp(-b*t) ", input data "x1,...x10", and output data "y1,..y10",
# using the Levenberg-Marquardt algorithm to find out the optimial value of
# "a" and "b". Naturally, the objective function is f(x) = = 1/2 * sum( ( fun(x_i)-y_i) * 2)
#%%
#0.1 compute the F_x, where F_i(x) = a*exp(-b *x_i) - y_i
def F_x(x, y, a, b):
    """Residual column vector of the model fit.

    For each sample x_i the residual is F_i = a*exp(-b*x_i) - y_i; x and y
    are 1xN row matrices, and the result is returned transposed (Nx1).
    """
    residuals = a * np.exp(-b * x) - y
    return residuals.T
#0.2 compute the jacobian matrix
def J_x(x, a, b):
    """Jacobian of the residual vector F(x) with respect to (a, b).

    x is a 1xN row matrix of sample points; the result is an Nx2 matrix with
    dF_i/da = exp(-b*x_i) in column 0 and dF_i/db = -a*x_i*exp(-b*x_i) in
    column 1.
    """
    # Generalized: derive the row count from x instead of hard-coding 10,
    # so the routine works for any number of samples (unchanged for N == 10).
    n_samples = x.shape[1]
    result = np.matrix(np.zeros((n_samples, 2)))
    decay = np.exp(-b * x)           # hoisted: used by both columns
    result[:, 0] = decay.T
    result[:, 1] = np.multiply(-(a * x), decay).T
    return result
#0.3 compu | te the f_x, where f(x) = 1/2 * sum( F_x .* 2)
def f_x(x, y, a, b):
    """Least-squares objective: 0.5 * sum_i (a*exp(-b*x_i) - y_i)^2."""
    squared_residuals = np.power(a * np.exp(-b * x) - y, 2)
    return np.sum(squared_residuals) / 2
#%%
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
x = np.matrix([0.25, 0.5, 1, 1.5, 2, 3, 4, 6, 8, 10])
y = np.matrix([19.21, | 18.15, 15.36, 14.10, 12.89, 9.32, 7.45, 5.24, 3.01, 1.85])
mu = 0.01
epsilon = 1e-6
max_iter = 50
a=10
b=0.5
#%%
a_trend = []
b_trend = []
f_trend = []
for loop in range(max_iter):
J = J_x( x, a, b )
F = F_x( x, y, a, b )
## step - 2
g = J.T * F
G = J.T * J
## step - 3
norm_g = np.sqrt( sum( np.power( g, 2) ) )
if norm_g < epsilon:
break
## step - 4
key = 0
while key == 0:
G_mu = G + mu * np.eye(2)
if np.all( np.linalg.eigvals(G_mu)>0 ):
key = 1
else:
mu = 4 * mu
key = 0
## step - 5
s = np.linalg.solve( G_mu, -g )
## step - 6
a_new = a + s[0,0]
b_new = b + s[1,0]
diff_f = f_x( x, y, a_new, b_new ) - f_x( x, y, a, b )
diff_q = (J.T * F).T * s + (s.T*(J.T*J) *s) /2
r = diff_f / diff_q
## step - 7
if r < 0.25:
mu = mu * 4
elif r > 0.75:
mu = mu / 2
else:
pass
## step - 8
if r > 0:
a = a_new
b = b_new
else:
pass
#print mu
a_trend.append(a)
b_trend.append(b)
f_trend.append(np.log(f_x( x, y, a, b)) )
#%%
num_grid = 15
a_index,b_index = np.mgrid[5:25:num_grid*1j,0:0.5:num_grid*1j]
z = np.zeros((num_grid,num_grid))
for i in xrange(num_grid):
for j in xrange(num_grid):
z[i,j] = np.log( f_x( x, y, a_index[i,j], b_index[i,j] ) )
ax = plt.subplot(111,projection='3d')
ax.plot_surface(a_index,b_index,z,rstride=2,cstride=1,cmap=plt.cm.coolwarm,alpha=0.8)
ax.set_xlabel('a')
ax.set_ylabel('b')
ax.set_zlabel('log f(x)')
mpl.rcParams['legend.fontsize'] = 10
ax.plot(a_trend, b_trend, f_trend, color='blue',linestyle='solid',linewidth = 3,marker='o',markerfacecolor='red',markersize=9,label='optimization curve')
ax.legend(loc=3)
plt.title('L-M algorithm to evaluate the optimial value')
plt.show()
|
percyfal/snakemakelib-core | snakemakelib/sample/tests/test_sampleorganization.py | Python | mit | 154 | 0.012987 | # Copyright (C) 2015 by Per Unneberg
import | pytest
from snakemakelib.sample. | organization import sample_org
def test_sample_org():
    # Smoke test: echo the shared sample_org fixture (run pytest with -s to
    # see the layout). No assertions are made -- it only checks importability.
    print (sample_org)
|
project-icp/bee-pollinator-app | src/icp/apps/beekeepers/migrations/0014_update_hive_scale_id_help_text.py | Python | apache-2.0 | 579 | 0.001727 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beekeepers', '0013_update_queen_source_options'),
]
operations = [
migrations.AlterField(
model_name='monthlysurvey',
name='hive_scale | _id',
field=models.CharFi | eld(help_text='If you have an automated scale associated with this colony, please enter the hive scale brand and ID number here.', max_length=255, null=True, blank=True),
),
]
|
mlperf/training_results_v0.7 | Google/benchmarks/bert/implementations/bert-research-TF-tpu-v4-512/run_squad.py | Python | apache-2.0 | 46,112 | 0.008696 | """Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
from language.bert import modeling
from language.bert import optimization
from language.bert import tokenization
import six
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from REDACTED.tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
class SquadExample(object):
  """A single training/test example for simple sequence classification.

  For examples without an answer, the start and end position are -1.
  """

  def __init__(self,
               qas_id,
               question_text,
               doc_tokens,
               orig_answer_text=None,
               start_position=None,
               end_position=None,
               is_impossible=False):
    self.qas_id = qas_id
    self.question_text = question_text
    self.doc_tokens = doc_tokens
    self.orig_answer_text = orig_answer_text
    self.start_position = start_position
    self.end_position = end_position
    self.is_impossible = is_impossible

  def __str__(self):
    return self.__repr__()

  def __repr__(self):
    parts = [
        "qas_id: %s" % tokenization.printable_text(self.qas_id),
        ", question_text: %s" % tokenization.printable_text(self.question_text),
        ", doc_tokens: [%s]" % " ".join(self.doc_tokens),
    ]
    # NOTE(review): all three optional fields are gated on start_position
    # being truthy (so a 0 start also hides them) -- kept as in the original.
    if self.start_position:
      parts.append(", start_position: %d" % self.start_position)
    if self.start_position:
      parts.append(", end_position: %d" % self.end_position)
    if self.start_position:
      parts.append(", is_impossible: %r" % self.is_impossible)
    return "".join(parts)
class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self,
               unique_id,
               example_index,
               doc_span_index,
               tokens,
               token_to_orig_map,
               token_is_max_context,
               input_ids,
               input_mask,
               segment_ids,
               start_position=None,
               end_position=None,
               is_impossible=None):
    # Plain value-object: every constructor argument is stored verbatim.
    self.unique_id = unique_id
    self.example_index = example_index
    self.doc_span_index = doc_span_index
    self.tokens = tokens
    self.token_to_orig_map = token_to_orig_map
    self.token_is_max_context = token_is_max_context
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    # Label fields; left as None for prediction-only features.
    self.start_position = start_position
    self.end_position = end_position
    self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is |
myt00seven/svrg | para_gpu/train_vgg.py | Python | mit | 6,897 | 0.00232 | import sys
import time
from multiprocessing import Process, Queue
import yaml
import numpy as np
import zmq
import pycuda.driver as drv
#sys.path.append('./lib')
from tools import (save_weights, load_weights,
save_momentums, load_momentums)
from train_funcs import (unpack_configs, adjust_learning_rate,
get_val_error_loss, get_rand3d, train_model_wrap,
proc_configs)
def train_net(config):
# UNPACK CONFIGS
(flag_para_load, train_filenames, val_filenames,
train_labels, val_labels, img_mean) = unpack_configs(config)
# pycuda set up
drv.init()
dev = drv.Device(int(config['gpu'][-1]))
ctx = dev.make_context()
if flag_para_load:
# zmq set up
sock = zmq.Context().socket(zmq.PAIR)
sock.connect('tcp://localhost:{0}'.format(config['sock_data']))
load_send_queue = config['queue_t2l']
load_recv_queue = config['queue_l2t']
else:
load_send_queue = None
load_recv_queue = None
import theano.sandbox.cuda
theano.sandbox.cuda.use(config['gpu'])
import theano
theano.config.on_unused_input = 'warn'
from layers import DropoutLayer
#from alex_net import AlexNet, compile_models
from vgg_net import VggNet, compile_models
import theano.misc.pycuda_init
import theano.misc.pycuda_utils
## BUILD NETWORK ##
#model = AlexNet(config)
print '...Start building network model'
model = VggNet(config)
print '...Finish building network model'
layers = model.layers
batch_size = model.batch_size
## COMPILE FUNCTIONS ##
(train_model, validate_model, train_error, learning_rate,
shared_x, shared_y, rand_arr, vels) = compile_models(model, config)
######################### TRAIN MODEL ################################
print '... training'
if flag_para_load:
# pass ipc handle and related information
gpuarray_batch = theano.misc.pycuda_utils.to_gpuarray(shared_x.container.value)
h = drv.mem_get_ipc_handle(gpuarray_batch.ptr)
sock.send_pyobj((gpuarray_batch.shape, gpuarray_batch.dtype, h))
load_send_queue.put(img_mean)
n_train_batches = len(train_filenames)
minibatch_range = range(n_train_batches)
# Start Training Loop
epoch = 0
step_idx = 0
val_record = []
while epoch < config['n_epochs']:
epoch = epoch + 1
if config['shuffle']:
np.random.shuffle(minibatch_range)
if config['resume_train'] and epoch == 1:
load_epoch = config['load_epoch']
load_weights(layers, config['weights_dir'], load_epoch)
lr_to_load = np.load(
config['weights_dir'] + 'lr_' + str(load_epoch) + '.npy')
val_record = list(
np.load(config['weights_dir'] + 'val_record.npy'))
learning_rate.set_value(lr_to_load)
load_momentums(vels, config['weights_dir'], load_epoch)
epoch = load_epoch + 1
if flag_para_load:
# send the initial message to load data, before each epoch
load_send_queue.put(str(train_filenames[minibatch_range[0]]))
load_send_queue.put(get_rand3d())
# clear the sync before 1st calc
load_send_queue.put('calc_finished')
count = 0
for minibatch_index in minibatch_range:
num_iter = (epoch - 1) * n_train_batches + count
count = count + 1
if count == 1:
s = time.time()
if count == 20:
e = time.time()
print "time per 20 iter:", (e - s)
cost_ij = train_model_wrap(train_model, shared_x,
shared_y, rand_arr, img_mean,
count, minibatch_index,
minibatch_range, batch_size,
train_filenames, train_labels,
flag_para_load,
config['batch_crop_mirror'],
send_queue=load_send_queue,
recv_queue=load_recv_queue)
if num_iter % config['print_freq'] == 0:
print 'training @ iter = ', num_iter
print 'training cost:', cost_ij
if config['print_train_error']:
print 'training error rate:', train_error()
if flag_para_load and (count < len(minibatch_range)):
load_send_queue.put('calc_finished')
############### Test on Validation Set ##################
DropoutLayer.SetDropoutOff()
this_validation_error, this_validation_loss = get_val_error_loss(
rand_arr, shared_x, shared_y,
val_filenames, val_labels,
flag_para_load, img_mean,
batch_size, validate_model,
send_queue=load_send_queue, recv_queue=load_recv_queue)
print('epoch %i: validation loss %f ' %
(epoch, this_validation_loss))
print('epoch %i: validation error %f %%' %
(epoch, this_validation_error * 100.))
val_record.append([this_validation_error, this_validation_loss])
np.save(config['weights_dir'] + 'val_record.npy', val_record)
np.savetxt(config['weights_dir'] + 'val_record_txt.txt', val_record)
DropoutLayer.SetDropoutOn()
############################################
# Adapt Learning Rate
step_idx = adjust_learning_rate(config, epoch, step_idx,
val_record, learning_rate)
# Save weights
if epoch % config['sna | pshot_freq'] == 0:
save_weights(layers, config['weights_dir'], epoch)
np.save(config['weights_dir'] + 'lr_' + str(epoc | h) + '.npy',
learning_rate.get_value())
save_momentums(vels, config['weights_dir'], epoch)
print('Optimization complete.')
if __name__ == '__main__':
    # Base configuration, overlaid with the single-GPU specific settings.
    with open('config.yaml', 'r') as f:
        config = yaml.load(f)
    with open('spec_1gpu.yaml', 'r') as f:
        # NOTE(review): dict.items() + dict.items() concatenation only works
        # on Python 2, where items() returns a list -- confirm target runtime.
        config = dict(config.items() + yaml.load(f).items())
    config = proc_configs(config)
    if config['para_load']:
        # Parallel loading: a dedicated process feeds minibatches to the
        # trainer through a pair of one-slot queues (loader<->trainer).
        from proc_load import fun_load
        config['queue_l2t'] = Queue(1)
        config['queue_t2l'] = Queue(1)
        train_proc = Process(target=train_net, args=(config,))
        load_proc = Process(
            target=fun_load, args=(config, config['sock_data']))
        train_proc.start()
        load_proc.start()
        train_proc.join()
        load_proc.join()
    else:
        # Serial mode: the training process loads its own data.
        train_proc = Process(target=train_net, args=(config,))
        train_proc.start()
        train_proc.join()
|
zstackorg/zstack-woodpecker | integrationtest/vm/monitor/5min_alert_vm_mem_free.py | Python | apache-2.0 | 2,881 | 0.005207 | '''
Test about monitor trigger on vm memory free ratio in five minutes
@author: Songtao,Haochen
'''
import os
import test_stub
import random
import time
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.monitor_operations as mon_ops
def test():
global vm
global trigger
global media
global trigger_action
vm = test_stub.create_vm()
vm.check()
vm_ip = vm.get_vm().vmNics[0].ip
vm_uuid = vm.get_vm().uuid
vm_username = os.environ.get('Vm_Username')
vm_password = os.environ.get('Vm_Password')
vm_port = os.environ.get('Vm_Sshport')
test_item = "vm.mem.util"
resource_type = "VmInstanceVO"
vm_monitor_item = test_stub.get_monitor_item(resource_type)
if test_item not in vm_monitor_item:
test_util.test_fail('%s is not available for monitor' % test_item)
duration = 300
#expression = "vm.mem.util{} < 0.25"
expression = "vm.mem.util{} < 0.1"
monitor_trigger = mon_ops.create_monitor_trigger(vm_uuid, duration, expression)
send_email = test_stub.create_email_media()
media = send_email.uuid
trigger_action_name = "trigger_"+ ''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(8)))
trigger = monitor_trigger.uuid
receive_email = os.environ.get('receive_email')
monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(trigger_action_name, send_email.uuid, trigger.split(), receive_email)
trigger_action = monitor_trigger_action.uuid
ssh_cmd = test_stub.ssh_cmd_line(vm_ip, vm_username, vm_password, vm_port)
test_stub.yum_install_stress_tool(ssh_cmd)
test_stub.run_mem_load(ssh_cmd | ,360)
status_problem, status_ok = test_stub.query_trigger_in_loop(trigger,80)
test_util.action_logger('Trigger old status: %s triggered. Trigger new status: %s recovered' % (status_problem, status_ok ))
if status_problem != 1 or status_ok != 1:
|
test_util.test_fail('%s Monitor Test failed, expected Problem or OK status not triggered' % test_item)
mail_list = test_stub.receive_email()
keywords = "fired"
mail_flag = test_stub.check_email(mail_list, keywords, trigger, vm_uuid)
if mail_flag == 0:
test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' % (vm_uuid, test_item))
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media)
vm.destroy()
def error_cleanup():
    """Tear down the monitor trigger action, trigger, email media and test VM
    created by test() when the test aborts with an error."""
    global vm, trigger, media, trigger_action
    mon_ops.delete_monitor_trigger_action(trigger_action)
    mon_ops.delete_monitor_trigger(trigger)
    mon_ops.delete_email_media(media)
    vm.destroy()
|
Kshitij-Dhakal/programmingBasics | Project Euler/14.py | Python | gpl-3.0 | 390 | 0.010256 | import operator
def func(n):
    """Return the Collatz chain length of n, memoised in the module-level
    `dict` cache (seeded with {1: 1} by the driver below)."""
    # NOTE(review): the cache deliberately shadows the builtin `dict` and is
    # read back by the driver to find the longest chain, so it must stay a
    # module-level global.
    if n in dict:
        return dict[n]
    successor = n / 2 if n % 2 == 0 else 3 * n + 1
    dict[n] = 1 + func(successor)
    return dict[n]
if __name__ == '__main__':
dict = {1: 1}
for i in range(1, | 1000000):
| func(i)
print(max(dict.items(), key=operator.itemgetter(1))[0])
|
berquist/PyQuante | Tests/h2_sto3g.py | Python | bsd-3-clause | 404 | 0.039604 | #!/usr/bin/env python
"H2 using Gaussians"
from PyQuante.Ints import | getbasis,getints
from PyQuante.hartree_fock import rhf
from PyQuante.Molecule import M | olecule
energy = -1.082098
name = "H2"
def main():
    """Build H2 at its STO-3G equilibrium geometry and return the RHF energy."""
    # Half the H-H separation along z -- units per PyQuante's Molecule
    # default (TODO confirm: presumably Bohr).
    half_bond = 0.6921756113231793
    h2 = Molecule('h2', atomlist=[(1, (0, 0, half_bond)),
                                  (1, (0, 0, -half_bond))])
    total_energy, orbital_energies, orbitals = rhf(h2, basis="sto3g")
    return total_energy
if __name__ == '__main__':
print main()
|
willthames/ansible-lint | lib/ansiblelint/rules/MetaChangeFromDefaultRule.py | Python | mit | 1,254 | 0 | # Copyright (c) 2018, Ansible Project
from ansiblelint.rules import AnsibleLintRule
class MetaChangeFromDefaultRule(AnsibleLintRule):
    """Flag galaxy_info fields in meta/main.yml that still carry the
    placeholder values generated by the ansible-galaxy role skeleton."""

    id = '703'
    shortdesc = 'meta/main.yml default values should be changed'
    # (field name, skeleton placeholder value) pairs; 'license' appears twice
    # because different ansible-galaxy versions generate different text.
    field_defaults = [
        ('author', 'your name'),
        ('description', 'your description'),
        ('company', 'your company (optional)'),
        ('license', 'license (GPLv2, CC-BY, etc)'),
        ('license', 'license (GPL-2.0-or-later, MIT, etc)'),
    ]
    description = (
        'meta/main.yml default values should be changed for: ``{}``'.format(
            ', '.join(name for name, _ in field_defaults)
        )
    )
    severity = 'HIGH'
    tags = ['metadata']
    version_added = 'v4.0.0'

    def matchplay(self, file, data):
        # Only meta files are inspected; anything else can never match.
        if file['type'] != 'meta':
            return False
        galaxy_info = data.get('galaxy_info', None)
        if not galaxy_info:
            return False
        findings = []
        for field_name, placeholder in self.field_defaults:
            current = galaxy_info.get(field_name, None)
            if current and current == placeholder:
                findings.append(({'meta/main.yml': data},
                                 'Should change default metadata: %s' % field_name))
        return findings
|
fkie-cad/iva | tests/test_cpe_matching/test_cpe_sorter.py | Python | lgpl-3.0 | 6,349 | 0.007245 | import unittest
from matching.cpe_sorter import *
unsorted_cpes = [{'wfn': {'version': '4.0', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.0:beta:~~~android_marshmallow~~'},
{'wfn': {'version': '1.0.1.2', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:1.0.1.2:beta'},
{'wfn': {'version': '4.1.2', 'target_sw': 'ANY'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.1.2:beta'},
{'wfn': {'version': '4.6.3', 'target_sw': 'windows'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.6.3:beta:~~~windows~~'},
{'wfn': {'version': '4.7.1', 'target_sw': 'android'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.1:beta:~~~android~~'},
{'wfn': {'version': '4.7.2', 'target_sw': 'ANY'},
'uri_binding':'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.2:beta'},
{'wfn': {'version': '4.3.2', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.3.2:beta:~~~linux~~'},
{'wfn': {'version': '2.3.1', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2.3.1:beta'},
{'wfn': {'version': '4.7.3', 'target_sw': 'mac_os_x'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.3:beta:~~~mac_os_x~~'}
]
unsorted_cpes_year = [{'wfn': {'version': '2000', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2000:beta:~~~android_marshmallow~~'},
{'wfn': {'version': '2007', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2007:beta'},
{'wfn': {'version': '4.1.2', 'target_sw': 'ANY'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.1.2:beta'},
{'wfn': {'version': '2010', 'target_sw': 'windows'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2010:beta:~~~windows~~'},
{'wfn': {'version': '4.7.1', 'target_sw': 'android'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.1:beta:~~~android~~'},
{'wfn': {'version': '2001', 'target_sw': 'ANY'},
'uri_binding':'cpe:/a:string_value_with\:double_points:internet_explorer:2001:beta'},
{'wfn': {'version': '4.3.2', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_ | value_with\:double_points:internet_explorer:4.3.2:beta | :~~~linux~~'},
{'wfn': {'version': '2010', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2010:beta'},
{'wfn': {'version': '4.7.3', 'target_sw': 'mac_os_x'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.3:beta:~~~mac_os_x~~'},
{'wfn': {'version': '2010', 'target_sw': 'mac_os_x'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2010:beta:~~~mac_os_x~~'}]
version = '4.7.2'
version_without_points = '4_7-2'
version_year = '2010'
os_windows = 'windows_7'
os_linux = 'linux_ubuntu'
os_android = 'android'
os_mac = 'mac_os_x_10.11'
class TestCPESorter(unittest.TestCase):
    """Checks that CPE sorting ranks the best version/OS matches first."""

    def _assert_prefix_order(self, source, result, indices):
        # The sorter must return the same number of CPEs; `indices` names the
        # source entries expected at the front of the sorted list, in order.
        self.assertEqual(len(source), len(result))
        for position, source_index in enumerate(indices):
            self.assertEqual(source[source_index], result[position])

    def test_sort_cpes_by_software_version(self):
        result = sort_cpes_by_version(unsorted_cpes, version)
        # Exact 4.7.2 first, then the nearest 4.7.x, then other 4.x entries.
        self._assert_prefix_order(unsorted_cpes, result, [5, 4, 8, 0, 2, 3, 6])

    def test_cpes_and_sorted_cpes_are_equal_when_software_version_not_splitted_by_points(self):
        result = sort_cpes_by_version(unsorted_cpes, version_without_points)
        # A version string without '.' separators gives no sort signal.
        self.assertListEqual(unsorted_cpes, result)

    def test_sort_cpes_by_version_with_year(self):
        result = sort_cpes_by_version(unsorted_cpes_year, version_year)
        # All '2010' entries first, then the remaining year-style versions.
        self._assert_prefix_order(unsorted_cpes_year, result, [3, 7, 9, 0, 1, 5])

    def test_sort_cpes_by_operating_system_windows(self):
        result = sort_cpes_by_operating_system(unsorted_cpes, os_windows)
        self._assert_prefix_order(unsorted_cpes, result, [3])

    def test_sort_cpes_by_operating_system_linux(self):
        result = sort_cpes_by_operating_system(unsorted_cpes, os_linux)
        self._assert_prefix_order(unsorted_cpes, result, [6])

    def test_sort_cpes_by_operating_system_android(self):
        result = sort_cpes_by_operating_system(unsorted_cpes, os_android)
        # 'android' target_sw outranks the more specific android_marshmallow.
        self._assert_prefix_order(unsorted_cpes, result, [4, 0])
if __name__ == '__main__':
unittest.main()
|
Tisseo/navitia | source/jormungandr/jormungandr/scenarios/tests/journey_compare_tests.py | Python | agpl-3.0 | 42,334 | 0.001842 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.scenarios import journey_filter
from jormungandr.scenarios.utils import DepartureJourneySorter, ArrivalJourneySorter
from jormungandr.scenarios.journey_filter import to_be_deleted, get_qualified_journeys
import navitiacommon.response_pb2 as response_pb2
from navitiacommon import default_values
from jormungandr.scenarios.default import Scenario, are_equals
from jormungandr.utils import str_to_time_stamp
import random
import itertools
def empty_journeys_test():
    """Sorting a response that contains no journeys must be a no-op."""
    response = response_pb2.Response()
    Scenario().sort_journeys(response, 'arrival_time')
    assert not response.journeys
def different_arrival_times_test():
    """Journeys must come back ordered by ascending arrival time."""
    response = response_pb2.Response()

    def add_journey(arrival, duration):
        journey = response.journeys.add()
        journey.arrival_date_time = str_to_time_stamp(arrival)
        journey.duration = duration
        journey.nb_transfers = 0
        section = journey.sections.add()
        section.type = response_pb2.PUBLIC_TRANSPORT
        section.duration = duration

    add_journey("20140422T0800", 5 * 60)
    add_journey("20140422T0758", 2 * 60)
    Scenario().sort_journeys(response, 'arrival_time')
    # The later-added but earlier-arriving journey must be ranked first.
    assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0758")
    assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
def different_departure_times_test():
    """Sorting on 'departure_time' must order journeys by ascending departure."""
    response = response_pb2.Response()
    # Build two single-section PT journeys; the later-departing one first.
    for departure, minutes in (("20140422T0800", 5), ("20140422T0758", 2)):
        journey = response.journeys.add()
        journey.departure_date_time = str_to_time_stamp(departure)
        journey.duration = minutes * 60
        journey.nb_transfers = 0
        section = journey.sections.add()
        section.type = response_pb2.PUBLIC_TRANSPORT
        section.duration = minutes * 60

    Scenario().sort_journeys(response, 'departure_time')

    assert response.journeys[0].departure_date_time == str_to_time_stamp("20140422T0758")
    assert response.journeys[1].departure_date_time == str_to_time_stamp("20140422T0800")
def different_duration_test():
    """With identical arrival times, the shorter journey must sort first."""
    response = response_pb2.Response()
    # Same arrival timestamp for both; only the durations differ.
    for minutes in (5, 3):
        journey = response.journeys.add()
        journey.arrival_date_time = str_to_time_stamp("20140422T0800")
        journey.duration = minutes * 60
        journey.nb_transfers = 0
        section = journey.sections.add()
        section.type = response_pb2.PUBLIC_TRANSPORT
        section.duration = minutes * 60

    Scenario().sort_journeys(response, 'arrival_time')

    assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
    assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
    assert response.journeys[0].duration == 3 * 60
    assert response.journeys[1].duration == 5 * 60
def different_nb_transfers_test():
    """With equal arrival time and duration, fewer transfers must sort first.

    Note: lines restored from extraction-garbled text (stray ' | ' separator
    tokens split two statements in the scraped copy).
    """
    scenario = Scenario()
    response = response_pb2.Response()

    # Journey with one transfer: PT -> transfer -> waiting -> PT (25 min total).
    journey1 = response.journeys.add()
    journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
    journey1.duration = 25 * 60
    journey1.nb_transfers = 1
    journey1.sections.add()
    journey1.sections.add()
    journey1.sections.add()
    journey1.sections.add()
    journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
    journey1.sections[0].duration = 5 * 60
    journey1.sections[1].type = response_pb2.TRANSFER
    journey1.sections[1].duration = 3 * 60
    journey1.sections[2].type = response_pb2.WAITING
    journey1.sections[2].duration = 2 * 60
    journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
    journey1.sections[3].duration = 15 * 60

    # Direct journey: same arrival time and total duration, no transfer.
    journey2 = response.journeys.add()
    journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
    journey2.duration = 25 * 60
    journey2.nb_transfers = 0
    journey2.sections.add()
    journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
    journey2.sections[0].duration = 25 * 60

    scenario.sort_journeys(response, 'arrival_time')

    # The transfer count is the tie-breaker: journey2 (0 transfers) wins.
    assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
    assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
    assert response.journeys[0].duration == 25 * 60
    assert response.journeys[1].duration == 25 * 60
    assert response.journeys[0].nb_transfers == 0
    assert response.journeys[1].nb_transfers == 1
def different_duration_non_pt_test():
scenario = Scenario()
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey1.sections[4].type = response_pb2.STREET_NETWORK
journey1.sections[4].duration = 10 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 1
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 5 * 60
journey2.sections[1].type = response_pb2.TRANSFER
journey2.sections[1].duration = 3 * 60
journey2.sections[2].type = response_pb2.WAITING
journey2.sections[2].duration = 2 * 60
journey2.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[3].duration = 15 * 60
scenario.sort_journeys(response, 'arrival_time')
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0 |
googleapis/python-aiplatform | samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py | Python | apache-2.0 | 2,381 | 0.0021 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateHyperparameterTuningJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync]
from google.cloud import aiplatform_v1
def sample_create_hyperparameter_tuning_job():
    """Create a Vertex AI HyperparameterTuningJob via the JobService (sync).

    Identifiers restored from extraction-garbled text (stray ' | ' separator
    tokens split `display_name` and `study_spec` in the scraped copy).
    Placeholder "*_value" strings must be replaced with real values to run.
    """
    # Create a client
    client = aiplatform_v1.JobServiceClient()

    # Initialize request argument(s)
    hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob()
    hyperparameter_tuning_job.display_name = "display_name_value"
    hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value"
    hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE"
    hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
    hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
    hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
    hyperparameter_tuning_job.max_trial_count = 1609
    hyperparameter_tuning_job.parallel_trial_count = 2128
    hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"

    request = aiplatform_v1.CreateHyperparameterTuningJobRequest(
        parent="parent_value",
        hyperparameter_tuning_job=hyperparameter_tuning_job,
    )

    # Make the request
    response = client.create_hyperparameter_tuning_job(request=request)

    # Handle the response
    print(response)
# [END aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync]
|
efornal/pulmo | app/migrations/0031_add_field_url_to_servers.py | Python | gpl-3.0 | 1,551 | 0.003868 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``url`` field to both server models and re-declare the
    ``service`` fields on the connection models.

    Two operation entries restored from extraction-garbled text (stray ' | '
    separator tokens corrupted the scraped copy).
    """

    dependencies = [
        ('app', '0030_add_field_service_to_connections'),
    ]

    operations = [
        migrations.AddField(
            model_name='productionserver',
            name='url',
            field=models.CharField(max_length=200, null=True, verbose_name='url', blank=True),
        ),
        migrations.AddField(
            model_name='testserver',
            name='url',
            field=models.CharField(max_length=200, null=True, verbose_name='url', blank=True),
        ),
        migrations.AlterField(
            model_name='applicationconnectionsource',
            name='service',
            field=models.CharField(max_length=200, null=True, verbose_name='Servicio', blank=True),
        ),
        migrations.AlterField(
            model_name='applicationconnectiontarget',
            name='service',
            field=models.CharField(max_length=200, null=True, verbose_name='Servicio', blank=True),
        ),
        migrations.AlterField(
            model_name='productionconnectionsource',
            name='service',
            field=models.CharField(max_length=200, null=True, verbose_name='Servicio', blank=True),
        ),
        migrations.AlterField(
            model_name='productionconnectiontarget',
            name='service',
            field=models.CharField(max_length=200, null=True, verbose_name='Servicio', blank=True),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.