repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Adarnof/adarnauth-eveonline | eveonline/tasks.py | Python | gpl-3.0 | 2,156 | 0.000464 | from celery.task import periodic_task
from celery import shared_task
from eveonline.models import Character, Corporation, Alliance
from eveonline.providers import eve_provider_factory
from datetime import timedelta
@shared_task
def update_character(obj_id, provider=None):
"""
Updates a given character model asynchronously
:param obj_id: Alliance ID to update
:param provider: :class:`eveonline.provders.EveProvider`
"""
char = Character.objects.get(id=obj_id)
char.update(provider=provider)
@shared_task
def update_corporation(obj_id, provider=None):
"""
Updates a given corporation model asynchronously
:param obj_id: Alliance ID to update
:param provider: :class:`eveonline.provders.EveProvider`
"""
corp = Corporation.objects.get(id=obj_id)
corp.update(provider=provider)
@shared_task
def update_alliance(obj_id, provider=None):
"""
Updates a given alliance model asynchronously
:param obj_id: Alliance ID to update
:param provider: :class:`eveonline.provders.EveProvider`
"""
alliance = Alliance.objects.get(id=obj_id)
alliance.update(provider=provider)
@periodic_task(run_every=timede | lta(hours=3))
def update_all_characters():
"""
Triggers an update of all Character models
"""
char_ids = [c.id for c in Character.objects.all()]
provider = eve_provider_factory()
for obj_id in char_ids:
update_character.delay(obj_id, provider=provider)
@periodic_task(run_every=timedelta(hours=8))
def update_all_corps():
"""
Triggers an update of all Corporation models
"""
corp_ids = [c.id for c in Corporation.objects.all()]
| provider = eve_provider_factory()
for obj_id in corp_ids:
update_corporation.delay(obj_id, provider=provider)
@shared_task # data only changes very rarely on CCP intervention, don't queue periodically
def update_all_alliances():
"""
Triggers an update of all Alliance models
"""
alliance_ids = [a.id for a in Alliance.objects.all()]
provider = eve_provider_factory()
for obj_id in alliance_ids:
update_alliance.delay(obj_id, provider=provider)
|
armstrong/armstrong.core.arm_layout | tests/backends/_common.py | Python | apache-2.0 | 4,747 | 0.000421 | import abc
import random
import fudge
from contextlib import contextmanager
from ..support.models import *
class BackendTestCaseMixin(object):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty # pragma: no cover
def backend_class(self):
"""backend_class = TestThisBackend"""
def __init__(self, *args, **kwargs):
super(BackendTestCaseMixin, self).__init__(*args, **kwargs)
self.backend = self.backend_class()
self.name = "full_page"
basemodel = Foobar()
self.root_model_path = 'layout/%s/%s/' % (
basemodel._meta.app_label,
basemodel._meta.object_name.lower())
@staticmethod
@contextmanager
def model_meta_randomizer(model, attr):
original = getattr(model._meta, attr)
value = "random_%d" % random.randint(100, 200)
setattr(model._meta, attr, value)
yield value
setattr(model._meta, attr, original)
def test_requires_a_model_instance(self):
with self.assertRaises(TypeError):
self.backend.get_layout_template_name(Foobar, self.name)
def test_returns_proper_path(self):
expected = ['%s%s.html' % (self.root_model_path, self.name)]
result = self.backend.get_layout_template_name(Foobar(), self.name)
self.assertEqual(expected, result)
def test_renderer_can_specify_base_path(self):
model = Foobar()
with fudge.patched_context(self.backend, "base_layout_directory", "different"):
result = self.backend.get_layout_template_name(model, self.name)
expected = ['different/%s/%s/%s.html' % (
model._meta.app_label, model._meta.object_name.lower(), self.name)]
self.assertEqual(expected, result)
def test_missing_file_is_okay(self):
model = Foobar()
file_doesnt_exist = "fake_template"
expected = ['layout/%s/%s/%s.html' % (
model._meta.app_label,
model._meta.object_name.lower(),
file_doesnt_exist)]
result = self.backend.get_layout_template_name(model, file_doesnt_exist)
self.assertEqual(expected, result)
def test_uses_app_label_in_template_name(self):
model = Foobar()
with self.model_meta_randomizer(model, 'app_label') as app_label:
expected = ['layout/%s/%s/%s.html' % (
app_label, model._meta.object_name.lower(), self.name)]
result = self.backend.get_layout_template_name(model, self.name)
self.assertEqual(expected, result)
def test_uses_mod | el_name_in_template_name(self):
model = Foobar()
with self.model_meta_randomizer(model, 'object_name') as object_name:
expected = ['layout/%s/%s/%s.html' % (
model._meta.app_label, object_name, self.name)]
result = self.backend.get_layout_template_name(model, self.name)
self.assertEqual(expected, result)
def test_uses_name_in_template_name(self):
| name = "random_%d" % random.randint(100, 200)
expected = ['%s%s.html' % (self.root_model_path, name)]
result = self.backend.get_layout_template_name(Foobar(), name)
self.assertEqual(expected, result)
def test_proper_model_inheritance_order(self):
model = SubFoobar()
model_path = 'layout/%s/%s/' % \
(model._meta.app_label, model._meta.object_name.lower())
expected = [
'%s%s.html' % (model_path, self.name),
'%s%s.html' % (self.root_model_path, self.name)]
result = self.backend.get_layout_template_name(model, self.name)
self.assertEqual(expected, result)
def test_abstract_models_are_used(self):
concrete = ConcreteFoo()
abstract = AbstractFoo()
concrete_path = 'layout/%s/%s/' % \
(concrete._meta.app_label, concrete._meta.object_name.lower())
abstract_path = 'layout/%s/%s/' % \
(abstract._meta.app_label, abstract._meta.object_name.lower())
expected = [
'%s%s.html' % (concrete_path, self.name),
'%s%s.html' % (abstract_path, self.name),
'%s%s.html' % (self.root_model_path, self.name)]
result = self.backend.get_layout_template_name(concrete, self.name)
self.assertEqual(expected, result)
def test_proxy_models_are_used(self):
model = ProxyFoo()
model_path = 'layout/%s/%s/' % \
(model._meta.app_label, model._meta.object_name.lower())
expected = [
'%s%s.html' % (model_path, self.name),
'%s%s.html' % (self.root_model_path, self.name)]
result = self.backend.get_layout_template_name(model, self.name)
self.assertEqual(expected, result)
|
beomyeol/models | inception/inception/slim/ops.py | Python | apache-2.0 | 18,781 | 0.003408 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for exmaple to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally Ops that contain variables.variable have a trainable
parameter, which control if the ops variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.training import moving_averages
from inception.slim import losses
from inception.slim import scopes
from inception.slim import variables
# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
@scopes.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
moving_vars='moving_vars',
activation=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a Batch Normalization layer.
Args:
inputs: a tensor of size [batch_size, height, width, channels]
or [batch_size, channels].
decay: decay for the moving average.
center: If True, subtract beta. If False, beta is not created and ignored.
scale: If True, multiply by gamma. If False, gamma is
not used. When the next layer is linear (also e.g. ReLU), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
moving_vars: collection to store the moving_mean and moving_variance.
activation: activation function.
is_training: whether or not the | model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore | : whether or not the variables should be marked for restore.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
inputs_shape = inputs.get_shape()
with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse):
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = variables.variable('beta',
params_shape,
initializer=tf.zeros_initializer(),
trainable=trainable,
restore=restore)
if scale:
gamma = variables.variable('gamma',
params_shape,
initializer=tf.ones_initializer(),
trainable=trainable,
restore=restore)
# Create moving_mean and moving_variance add them to
# GraphKeys.MOVING_AVERAGE_VARIABLES collections.
moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
moving_mean = variables.variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer(),
trainable=False,
restore=restore,
collections=moving_collections)
moving_variance = variables.variable('moving_variance',
params_shape,
initializer=tf.ones_initializer(),
trainable=False,
restore=restore,
collections=moving_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(inputs, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
else:
# Just use the moving_mean and moving_variance.
mean = moving_mean
variance = moving_variance
# Normalize the activations.
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
This functions normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
ValueError: If `int_or_tuple` it not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the convolution
operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
kernel_size: a list of length 2: [kernel_height, kernel_width] of
of the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
stddev: standard deviation of the truncated guassian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for t |
freneticmonkey/epsilonc | resources/scripts/core/basesingleton.py | Python | mit | 263 | 0.015209 | '''
Created on Nov 19, 2011
@author: scottporter
'''
|
class BaseSingleton(object):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is No | ne:
cls._instance = cls()
return cls._instance |
bwmichael/jccc-cis142-python | old/roll-the-dice.py | Python | apache-2.0 | 871 | 0 | ##
# @author Brandon Michael
# Roll the dice based on the user's input. Track double rolls and display
# the double totals.
# import the random library
import random
# Set the start and end values the same as a dice
start = 1
end = 6
# Set the running total for doubles found
totalDoubles = 0
# Get th | e number of t | imes we need to roll the dice
rolls = int(input("Enter the number of dice rolls: "))
# Loop through the number of rolls
for num in range(0, rolls, 1):
# Capture the rolls to check for doubles
roll_1 = random.randint(start, end)
roll_2 = random.randint(start, end)
# Check if rolls equal each other, and track the double count
if roll_1 == roll_2:
totalDoubles = totalDoubles + 1
print(roll_1, roll_2, "Double !")
else:
print(roll_1, roll_2)
# Print the results
print(totalDoubles, "double(s) rolled.")
|
maxamillion/ansible | test/support/integration/plugins/modules/mongodb_user.py | Python | gpl-3.0 | 16,253 | 0.002953 | #!/usr/bin/python
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The MongoDB username used to authenticate with.
type: str
login_password:
description:
- The login user's password used to authenticate with.
type: str
login_host:
description:
- The host running the database.
default: localhost
type: str
login_port:
description:
- The MongoDB port to connect to.
default: '27017'
type: str
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored.
type: str
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes).
type: str
database:
description:
- The name of the database to add/remove the user from.
required: true
type: str
aliases: [db]
name:
description:
- The name of the user to add or remove.
required: true
aliases: [user]
type: str
password:
description:
- The password to use for the user.
type: str
aliases: [pass]
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database.
type: bool
ssl_cert_reqs:
version_added: "2.2"
description:
- Specifies whether a certificate is required from the other side of the connection,
and whether it will be validated if provided.
default: CERT_REQUIRED
choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED]
type: str
roles:
version_added: "1.3"
type: list
elements: raw
description:
- >
The database user roles valid values could either be one or more of the following strings:
'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
'dbAdminAnyDatabase'
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
state:
description:
- The database user state.
default: present
choices: [absent, present]
type: str
update_password:
default: always
choices: [always, on_create]
version_added: "2.1"
description:
- C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users.
type: str
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author:
- "Elliott Foster (@elliotttf)"
- "Julien Thebault (@Lujeni)"
'''
EXAMPLES = '''
- name: Create 'burgers' database user with name 'bob' and password '12345'.
mongodb_user:
database: burgers
name: bob
password: 12345
state: present
- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
mongodb_user:
databa | se: burgers
name: bob
password: 12345
state: present
ssl: True
- name: Delete 'burgers' database user with name 'bob'.
mongodb_user:
database: burgers
name: bob
state: absent
- name: Define more users with various specific roles (if not defined, no roles is assign | ed, and the user will be added via pre mongo 2.2 style)
mongodb_user:
database: burgers
name: ben
password: 12345
roles: read
state: present
- name: Define roles
mongodb_user:
database: burgers
name: jim
password: 12345
roles: readWrite,dbAdmin,userAdmin
state: present
- name: Define roles
mongodb_user:
database: burgers
name: joe
password: 12345
roles: readWriteAnyDatabase
state: present
- name: Add a user to database in a replica set, the primary server is automatically discovered and written to
mongodb_user:
database: burgers
name: bob
replica_set: belcher
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# please notice the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials
# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
- name: Roles as a dictionary
mongodb_user:
login_user: root
login_password: root_password
database: admin
user: oplog_reader
password: oplog_reader_password
state: present
replica_set: belcher
roles:
- db: local
role: read
'''
RETURN = '''
user:
description: The name of the user to add or remove.
returned: success
type: str
'''
import os
import ssl as ssl_lib
import traceback
from ansible.module_utils.compat.version import LooseVersion
from operator import itemgetter
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
# =========================================
# MongoDB module specific support methods.
#
def check_compatibility(module, client):
"""Check the compatibility between the driver and the database.
See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
Args:
module: Ansible module.
client (cursor): Mongodb cursor on admin database.
"""
loose_srv_version = LooseVersion(client.server_info()['version'])
loose_driver_version = LooseVersion(PyMongoVersion)
if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
def user_f |
ipedrazas/dotmarks-api | src/app.py | Python | apache-2.0 | 197 | 0.005076 | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __na | me__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
| |
gurkslask/hamcwebc | tests/test_models.py | Python | bsd-3-clause | 1,951 | 0 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from hamcwebc.user.models import Role, User
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
"""User tests."""
def test_get_by_id(self):
"""Get user by ID."""
user = | User('foo', 'foo@bar.com')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
"""Test creation date."""
user = User(username='foo', email='foo@bar.com')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
"""Test null password."""
use | r = User(username='foo', email='foo@bar.com')
user.save()
assert user.password is None
def test_factory(self, db):
"""Test user factory."""
user = UserFactory(password='myprecious')
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
"""Check password."""
user = User.create(username='foo', email='foo@bar.com',
password='foobarbaz123')
assert user.check_password('foobarbaz123') is True
assert user.check_password('barfoobaz') is False
def test_full_name(self):
"""User full name."""
user = UserFactory(first_name='Foo', last_name='Bar')
assert user.full_name == 'Foo Bar'
def test_roles(self):
"""Add a role to a user."""
role = Role(name='admin')
role.save()
user = UserFactory()
user.roles.append(role)
user.save()
assert role in user.roles
|
vertical-knowledge/flask-ripozo | flask_ripozo_tests/integration/dispatcher.py | Python | gpl-2.0 | 1,809 | 0.001106 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask import Flask, request
from flask_ripozo.dispatcher import get_request_query_body_args
import json
import unittest2
class TestDispatcherFlaskIntegration(unittest2.TestCase):
def test_get_request_body_args(self):
"""
Tests getting the request body args
from a flask request object.
"""
app = Flask('myapp')
body = dict(x=1)
with app.test_request_context('/', data=json.dumps(body), content_type='application/json'):
q, b, headers = get_request_query_body_args(request)
self.assertDictEqual(b, body)
with app.test_request_context('/', data=body): # Form encoded
q, b, headers = get_request_query_body_args(request)
self.assertDictEqual(b, dict(x=['1']))
def test_get_request_body_args_nested(self):
"""
Tests getting nested body args which seems to
be handled slightly differnetly.
"""
app = Flask('myapp')
body = dict(x=1, y=dict(x=1))
with app.test_request_context('/', data=json.dumps(body), content_type='application/json'):
q, b, headers = get_request_ | query_body_args(request)
self.assertDictEqual(b, body)
def test_headers_copyable(self):
"""
Tests that the headers returned from get_request_query_body_args
appropriately returns the headers as a dic | tionary that can be copied
"""
app = Flask('myapp')
with app.test_request_context('/'):
q, b, headers = get_request_query_body_args(request)
headers2 = headers.copy()
self.assertDictEqual(headers, headers2)
|
therewillbecode/ichnaea | ichnaea/data/internal.py | Python | apache-2.0 | 4,710 | 0 | from collections import defaultdict
from datetime import datetime
import pytz
import simplejson
from ichnaea.data.export import (
MetadataGroup,
ReportUploader,
)
class InternalTransform(object):
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a | two-tuple
position_id = ('position', None)
position_map = [
('latitude', 'lat'),
('longitude', 'lon'),
'accuracy',
'altitude',
('altitudeAccuracy', 'altitude_accuracy'),
'age',
'heading',
'pressure',
'speed',
'source',
]
cell_id = ('cellTowers', 'cell')
| cell_map = [
('radioType', 'radio'),
('mobileCountryCode', 'mcc'),
('mobileNetworkCode', 'mnc'),
('locationAreaCode', 'lac'),
('cellId', 'cid'),
'age',
'asu',
('primaryScramblingCode', 'psc'),
'serving',
('signalStrength', 'signal'),
('timingAdvance', 'ta'),
]
wifi_id = ('wifiAccessPoints', 'wifi')
wifi_map = [
('macAddress', 'key'),
('radioType', 'radio'),
'age',
'channel',
'frequency',
'signalToNoiseRatio',
('signalStrength', 'signal'),
]
def conditional_set(self, item, target, value):
if value is not None:
item[target] = value
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
self.conditional_set(value, target,
item_source.get(source))
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
if key_map[0] is None: # pragma: no cover
item_source = item
else:
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else: # pragma: no cover
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def _parse_cells(self, item, report, key_map, field_map):
cells = []
for cell_item in item.get(key_map[0], ()):
cell = self._map_dict(cell_item, field_map)
if cell:
cells.append(cell)
if cells:
report[key_map[1]] = cells
return cells
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
timestamp = item.get('timestamp')
if timestamp:
report['timestamp'] = timestamp
cells = self._parse_cells(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
if cells or wifis:
return report
return {}
class InternalUploader(ReportUploader):
transform = InternalTransform()
@staticmethod
def _task():
# avoiding import cycle problems, sigh!
from ichnaea.data.tasks import insert_reports
return insert_reports
def _format_report(self, item):
report = self.transform(item)
timestamp = report.pop('timestamp', None)
if timestamp:
dt = datetime.utcfromtimestamp(timestamp / 1000.0)
report['time'] = dt.replace(microsecond=0, tzinfo=pytz.UTC)
return report
def send(self, url, data):
groups = defaultdict(list)
for item in simplejson.loads(data):
group = MetadataGroup(**item['metadata'])
report = self._format_report(item['report'])
if report:
groups[group].append(report)
for group, reports in groups.items():
self._task().apply_async(
kwargs={
'api_key': group.api_key,
'email': group.email,
'ip': group.ip,
'nickname': group.nickname,
'reports': reports,
},
expires=21600)
|
AlexanderPease/IntroBot | lib/mongo.py | Python | gpl-3.0 | 653 | 0.02144 | import logging
import pymongo
import sett | ings
class Proxy(object):
_db = None
def __getattr__(self, name):
if Proxy._db == None:
# lazily connect to the db so we pickup the right environment settings
mongo_database = settings.get('mongo_database')
print mongo_database
logging.info("connecting to mongo at %s:%d/%s" % (mongo_database['h | ost'], mongo_database['port'], mongo_database['db']))
connection = pymongo.MongoClient(mongo_database['host'], mongo_database['port'], connectTimeoutMS=5000, max_pool_size=200)
Proxy._db = connection[mongo_database['db']]
return getattr(self._db, name)
db = Proxy() |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/installer/roles/image_build/files/settings.py | Python | apache-2.0 | 2,976 | 0.00168 | # AWX settings file
import os
def get_secret():
if os.path.exists("/etc/tower/SECRET_KEY"):
return open('/etc/tower/SECRET_KEY', 'rb').read().strip()
ADMINS = ()
STATIC_ROOT = '/var/lib/awx/public/static'
PROJECTS_ROOT = '/var/lib/awx/projects'
AWX_ANS | IBLE_COLLECTIONS_PATHS = '/var/lib/awx/vendor/awx_ansible_collections'
JOBOUTPUT_ROOT = '/var/lib/awx/job_status'
SECRET_KEY = get_secret()
ALLOWED_HOSTS = ['*']
# Container environments don't like chroots
AWX_PROOT_ENABLED = False
CLUSTER_HOST_ID = "awx"
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000'
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
######### | ######################################################################
# EMAIL SETTINGS
###############################################################################
SERVER_EMAIL = 'root@localhost'
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_SUBJECT_PREFIX = '[AWX] '
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
LOGGING['handlers']['console'] = {
'()': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple',
}
LOGGING['loggers']['django.request']['handlers'] = ['console']
LOGGING['loggers']['rest_framework.request']['handlers'] = ['console']
LOGGING['loggers']['awx']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.commands.run_callback_receiver']['handlers'] = ['console']
LOGGING['loggers']['awx.main.tasks']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.scheduler']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console']
LOGGING['loggers']['social']['handlers'] = ['console']
LOGGING['loggers']['system_tracking_migrations']['handlers'] = ['console']
LOGGING['loggers']['rbac_migrations']['handlers'] = ['console']
LOGGING['loggers']['awx.isolated.manager.playbooks']['handlers'] = ['console']
LOGGING['handlers']['callback_receiver'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['task_system'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['tower_warnings'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['rbac_migrations'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['system_tracking_migrations'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['management_playbooks'] = {'class': 'logging.NullHandler'}
DATABASES = {
'default': {
'ATOMIC_REQUESTS': True,
'ENGINE': 'awx.main.db.profiled_pg',
'NAME': os.getenv("DATABASE_NAME", None),
'USER': os.getenv("DATABASE_USER", None),
'PASSWORD': os.getenv("DATABASE_PASSWORD", None),
'HOST': os.getenv("DATABASE_HOST", None),
'PORT': os.getenv("DATABASE_PORT", None),
}
}
if os.getenv("DATABASE_SSLMODE", False):
DATABASES['default']['OPTIONS'] = {'sslmode': os.getenv("DATABASE_SSLMODE")}
USE_X_FORWARDED_HOST = True
USE_X_FORWARDED_PORT = True
|
gazoo74/linux | scripts/gdb/linux/config.py | Python | gpl-2.0 | 1,302 | 0 | # SPDX-License-Identifier: GPL-2.0
#
# Copyright 2019 Google LLC.
import gdb
import zlib
from linux import utils
class LxConfigDump(gdb.Command):
    """Output kernel config to the filename specified as the command
    argument. Equivalent to 'zcat /proc/config.gz > config.txt' on
    a running target"""

    def __init__(self):
        super(LxConfigDump, self).__init__("lx-configdump", gdb.COMMAND_DATA,
                                           gdb.COMPLETE_FILENAME)

    def invoke(self, arg, from_tty):
        # Default output file when no argument is given.
        if len(arg) == 0:
            filename = "config.txt"
        else:
            filename = arg

        try:
            # Offset past the leading 8-byte marker; the size excludes both
            # 8-byte markers plus one trailing byte.
            py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8")
            py_config_size = gdb.parse_and_eval(
                    "sizeof(kernel_config_data) - 1 - 8 * 2")
        except gdb.error:
            raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")

        inf = gdb.inferiors()[0]
        zconfig_buf = utils.read_memoryview(inf, py_config_ptr,
                                            py_config_size).tobytes()

        config_buf = zlib.decompress(zconfig_buf, 16)
        with open(filename, 'wb') as f:
            f.write(config_buf)

        gdb.write("Dumped config to " + filename + "\n")


LxConfigDump()
|
SteveDiamond/cvxpy | cvxpy/reductions/complex2real/atom_canonicalizers/constant_canon.py | Python | gpl-3.0 | 912 | 0 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by appl | icable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expre | ssions.constants import Constant
def constant_canon(expr, real_args, imag_args, real2imag):
    """Split the constant *expr* into its (real, imaginary) Constant parts.

    Returns (real, None) for purely real constants, (None, imag) for purely
    imaginary ones, and (real, imag) for general complex constants.
    """
    value = expr.value
    if expr.is_real():
        real_part, imag_part = Constant(value.real), None
    elif expr.is_imag():
        real_part, imag_part = None, Constant(value.imag)
    else:
        real_part, imag_part = Constant(value.real), Constant(value.imag)
    return real_part, imag_part
|
cathyyul/sumo | tools/build/checkSvnProps.py | Python | gpl-3.0 | 6,649 | 0.004061 | #!/usr/bin/env python
"""
@file checkSvnProps.py
@author Michael Behrisch
@date 2010
@version $Id: checkSvnProps.py 14493 2013-08-24 21:24:04Z behrisch $
Checks svn property settings for all files.
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2010-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, subprocess, sys, xml.sax
from optparse import OptionParser
_SOURCE_EXT = [".h", ".cpp", ".py", ".pl", ".java", ".am"]
_TESTDATA_EXT = [".xml", ".prog", ".csv",
".complex", ".dfrouter", ".duarouter", ".jtrrouter",
".astar", ".chrouter", ".internal", ".tcl", ".txt",
".netconvert", ".netgen", ".od2trips", ".polyconvert", ".sumo",
".meso", ".tools", ".traci", ".activitygen", ".scenario",
".sumocfg", ".netccfg", ".netgcfg"]
_VS_EXT = [".vsprops", ".sln", ".vcproj", ".bat", ".props", ".vcxproj", ".filters"]
_IGNORE = set(["binstate.sumo"])
_KEYWORDS = "HeadURL Id LastChangedBy LastChangedDate LastChangedRevision"
class PropertyReader(xml.sax.handler.ContentHandler):
"""Reads the svn properties of files as written by svn pl -v --xml"""
def __init__(self, doFix):
self._fix = doFix
self._file = ""
self._property = None
self._value = ""
self._hadEOL = False
self._hadKeywords = False
def startElement(self, name, attrs):
if name == 'target':
self._file = attrs['path']
seen.add(os.path.join(svnRoot, self._file))
if name == 'property':
self._property = attrs['name']
def characters(self, content):
if self._property:
self._value += content
def endElement(self, name):
ext = os.path.splitext(self._file)[1]
if name == 'property' and self._property == "svn:eol-style":
self._hadEOL = True
if name == 'property' and self._property == "svn:keywords":
self._hadKeywords = True
if os.path.basename(self._file) not in _IGNORE:
if ext in _SOURCE_EXT or ext in _TESTDATA_EXT or ext in _VS_EXT:
if name == 'property' and self._property == "svn:executable" and ext not in [".py", ".pl", ".bat"]:
print self._file, self._property, self._value
if self._fix:
subprocess.call(["svn", "pd", "svn:executable", self._file])
if name == 'property' and self._property == "svn:mime-type":
print self._file, self._property, self._value
if self._fix:
subprocess.call(["svn", "pd", "svn:mime-type", self._file])
if ext in _SOURCE_EXT or ext in _TESTDATA_EXT:
if name == 'property' and self._property == "svn:eol-style" and self._value != "LF"\
or name == "target" and not self._hadEOL:
print self._file, "svn:eol-style", self._value
if self._fix:
if os.name == "posix":
subprocess.call(["sed", "-i", r's/\r$//', self._file])
subprocess.call(["sed", "-i", r's/\r/\n/g', self._file])
subprocess.call(["svn", "ps", "svn:eol-style", "LF", self._ | file])
if ex | t in _SOURCE_EXT:
if name == 'property' and self._property == "svn:keywords" and self._value != _KEYWORDS\
or name == "target" and not self._hadKeywords:
print self._file, "svn:keywords", self._value
if self._fix:
subprocess.call(["svn", "ps", "svn:keywords", _KEYWORDS, self._file])
if ext in _VS_EXT:
if name == 'property' and self._property == "svn:eol-style" and self._value != "CRLF"\
or name == "target" and not self._hadEOL:
print self._file, "svn:eol-style", self._value
if self._fix:
subprocess.call(["svn", "ps", "svn:eol-style", "CRLF", self._file])
if name == 'property':
self._value = ""
self._property = None
if name == 'target':
self._hadEOL = False
self._hadKeywords = False
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true",
default=False, help="tell me what you are doing")
optParser.add_option("-f", "--fix", action="store_true",
default=False, help="fix invalid svn properties")
(options, args) = optParser.parse_args()
seen = set()
sumoRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
svnRoots = [sumoRoot]
if len(args) > 0:
svnRoots = [os.path.abspath(a) for a in args]
else:
upDir = os.path.dirname(sumoRoot)
for l in subprocess.Popen(["svn", "pg", "svn:externals", upDir], stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')).communicate()[0].splitlines():
if l[:5] == "sumo/":
svnRoots.append(os.path.join(upDir, l.split()[0]))
for svnRoot in svnRoots:
if options.verbose:
print "checking", svnRoot
output = subprocess.Popen(["svn", "pl", "-v", "-R", "--xml", svnRoot], stdout=subprocess.PIPE).communicate()[0]
xml.sax.parseString(output, PropertyReader(options.fix))
if options.verbose:
print "re-checking tree at", sumoRoot
for root, dirs, files in os.walk(sumoRoot):
for name in files:
fullName = os.path.join(root, name)
if fullName in seen or subprocess.call(["svn", "ls", fullName], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT):
continue
ext = os.path.splitext(name)[1]
if name not in _IGNORE:
if ext in _SOURCE_EXT or ext in _TESTDATA_EXT or ext in _VS_EXT:
print fullName, "svn:eol-style"
if options.fix:
if ext in _VS_EXT:
subprocess.call(["svn", "ps", "svn:eol-style", "CRLF", fullName])
else:
if os.name == "posix":
subprocess.call(["sed", "-i", 's/\r$//', fullName])
subprocess.call(["svn", "ps", "svn:eol-style", "LF", fullName])
if ext in _SOURCE_EXT:
print fullName, "svn:keywords"
if options.fix:
subprocess.call(["svn", "ps", "svn:keywords", _KEYWORDS, fullName])
for ignoreDir in ['.svn', 'foreign', 'contributed']:
if ignoreDir in dirs:
dirs.remove(ignoreDir)
|
neuropower/neuropower | neuropower/apps/designtoolbox/batch/neurodesign.py | Python | mit | 6,551 | 0.009312 | import django
import sys
import os
sys.path.append('/tmp/neuropower-web/neuropower')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.settings'
django.setup()
from neurodesign import design, experiment, population, generate, msequence, report
from sqlalchemy.exc import OperationalError, DatabaseError
from django.core.exceptions import ObjectDoesNotExist
from apps.designtoolbox.forms import DesignRunForm
from apps.designtoolbox.models import DesignModel
from django.conf import settings
from celery import task, Celery
from datetime import datetime
import numpy as np
class design(design):
def no_func():
return 0
class experiment(experiment):
def no_func():
return 0
class population(population):
def optimise(POP):
sid = os.environ['TASK_UID']
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
# send email
subject = "NeuroDesign: optimisation process started"
sender = "NeuroDesign"
sendermail = "joke.durnez@gmail.com"
message = "Your design optimisation has now started. You can follow the progress here:"+" http://www.neuropowertools.org/design/runGA/?retrieve="+str(desdata.shareID)+". Thank you for using NeuroDesign."
recipient = str(desdata.email)
key = settings.MAILGUN_KEY
command = "curl -s --user '" + key + "' https://api.mailgun.net/v3/neuropowertools.org/messages -F from='" + sender + \
" <" + sendermail + ">' -F to=" + recipient + " -F subject="+subject+" -F text='" + message + "'"
os.system(command)
sid = os.environ.get('TASK_UID')
'''
Function to run natural selection for design optimization
'''
if (POP.exp.FcMax == 1 and POP.exp.FfMax==1):
POP.max_eff()
if POP.weights[0] > 0:
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.running = 2
form.save()
# add new designs
POP.clear()
POP.add_new_designs(weights=[1,0,0,0])
# loop
for generation in range(POP.preruncycles):
print("prerun1 for sid "+str(sid)+": generation "+str(generation))
POP.to_next_generation(seed=POP.seed,weights=[1,0,0,0])
if generation % 10 == 10:
save_RDS(POP,sid,generation)
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.timestamp = str(datetime.now())
form.generation = generation
form.save()
if POP.finished:
continue
POP.exp.FeMax = np.max(POP.bestdesig | n.F)
if POP.wei | ghts[1] > 0:
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.running = 3
form.metrics = ""
form.bestdesign = ''
form.save()
POP.clear()
print("adding new designs...")
POP.add_new_designs(weights=[0,1,0,0])
# loop
for generation in range(POP.preruncycles):
print("prerun2 for sid "+str(sid)+": generation "+str(generation))
POP.to_next_generation(seed=POP.seed,weights=[0,1,0,0])
if generation % 10 == 0:
save_RDS(POP,sid,generation)
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.timestamp = str(datetime.now())
form.generation = generation
form.save()
if POP.finished:
continue
POP.exp.FdMax = np.max(POP.bestdesign.F)
# clear all attributes
POP.clear()
POP.add_new_designs()
# loop
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.running = 4
form.metrics = ""
form.bestdesign = ''
form.save()
for generation in range(POP.cycles):
POP.to_next_generation(seed=POP.seed)
print("optimisation for sid "+str(sid)+": generation "+str(generation))
if generation % 10 == 0:
save_RDS(POP,sid,generation)
desdata = DesignModel.objects.filter(SID=sid).first()
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.timestamp = str(datetime.now())
form.generation = generation
form.save()
if POP.finished:
continue
return POP
def save_RDS(POP,sid,generation):
desdata = None
tries = 0
while desdata == None or tries < 5:
tries += 1
try:
desdata = DesignModel.objects.filter(SID=sid).first()
except OperationalError or ObjectDoesNotExist or DatabaseError:
return None
# make metrics dictionary
if not desdata == None:
if not isinstance(desdata.metrics,dict):
Out = {"FBest": [], 'FeBest': [], 'FfBest': [],'FcBest': [], 'FdBest': [], 'Gen': []}
else:
Out = desdata.metrics
opt = [POP.bestdesign.F,POP.bestdesign.Fe,POP.bestdesign.Ff,POP.bestdesign.Fc,POP.bestdesign.Fd]
k = 0
for key in ['FBest','FeBest','FfBest','FcBest','FdBest']:
Out[key].append(opt[k])
k = k+1
Out['Gen'].append(generation)
# make bestdesign dictionary
keys = ["Stimulus_"+str(i) for i in range(POP.exp.n_stimuli)]
Seq = {}
for s in keys:
Seq.update({s:[]})
for stim in range(POP.exp.n_stimuli):
Seq["Stimulus_"+str(stim)]=POP.bestdesign.Xconv[:,stim].tolist()
Seq.update({"tps":POP.bestdesign.experiment.r_tp.tolist()})
runform = DesignRunForm(None, instance=desdata)
form = runform.save(commit=False)
form.metrics = Out
form.bestdesign = Seq
form.save()
|
Babtsov/Python-Scripts | emailExtract.py | Python | mit | 2,106 | 0.006648 | # This program extracts UF email addresses from a CSV file and prints them organized by class sections.
import os
import re
COURSE_NAME = "EEE3308C"
def extractEmailAddress(list):
    """Return the first UF email address (containing "@ufl.edu") in *list*.

    Raises RuntimeError if the row contains no UF address.
    (Parameter name kept for backward compatibility; it shadows the builtin.)
    """
    for item in list:
        if "@ufl.edu" in item:
            return item
    raise RuntimeError("no @ufl.edu address found in row: %r" % (list,))
def extractCVSInfo(namePattern):
    """Find the single course CSV file matching *namePattern* in the current
    directory and return its rows, each split on commas.

    Exits the program with a message if zero or multiple matching files are
    found, or if the file cannot be opened.
    """
    csvFiles = [fname for fname in os.listdir('.') if re.match(namePattern, fname)]
    if len(csvFiles) > 1:
        print("Multiple " + COURSE_NAME + " CSV files in the current directory: ", csvFiles)
        print("Please make sure there is only one " + COURSE_NAME + " CSV file and try again.")
        exit()
    elif len(csvFiles) == 0:
        print("No " + COURSE_NAME + " CSV file found in current directory. Please try again.")
        exit()
    try:
        with open(csvFiles[0], "r") as studentInfo:
            lines = [row.split(",") for row in studentInfo]
    except EnvironmentError:
        print("Failed to open the CSV file: ", csvFiles[0])
        print("Please check the file and try again.")
        exit()
    return lines
namePattern = r"\d+_\w+_\d+_\d+_Grades-" + COURSE_NAME + r"\.csv"
lines = extractCVSInfo(namePattern)
# Map each section identifier (e.g. "EEE3308C-1234") to its students' emails.
# (Renamed from `dict`, which shadowed the builtin.)
sections = {}
for row in lines:
    # Skip Canvas' built-in "Test Student" rows entirely.  The original code
    # removed the row from `lines` while iterating over it, which silently
    # skipped the row that followed.
    if any(" Test\"" in entry for entry in row):
        continue
    for entry in row:
        if COURSE_NAME + "-" in entry:
            try:
                email = extractEmailAddress(row)
            except RuntimeError:
                print("Warning: failed to parse UF email for: ", row)
                continue
            sections.setdefault(entry, []).append(email)
print(COURSE_NAME + " Student Email Summary:", end="\n\n")
print("All Emails", end=" ")
print("(%d students)" % sum(len(emails) for emails in sections.values()))
for emails in sections.values():
    for email in emails:
        print(email, end="; ")
print("\n\n")
for section, emails in sections.items():
    print("Section %s (%d students):" % (section, len(emails)))
    for email in emails:
        print(email, end='; ')
    print("\n\n")
|
flyapen/UgFlu | flumotion/admin/gtk/overlaystep.py | Python | gpl-2.0 | 5,429 | 0.000553 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gettext
from flumotion.common import messages
from flumotion.admin.assistant.models import VideoConverter
from flumotion.common.i18n import N_, gettexter, ngettext
from flumotion.admin.gtk.workerstep import WorkerWizardStep
__version__ = "$Rev: 6228 $"
T_ = gettexter()
_ = gettext.gettext
class Overlay(VideoConverter):
    """Model for the overlay-converter component: optional logo and text
    drawn on top of a video producer's output."""
    componentType = 'overlay-converter'

    def __init__(self, video_producer):
        super(Overlay, self).__init__()
        # Producer whose width/height the overlay must match.
        self._videoProducer = video_producer
        # Set to True once the worker check confirms PIL etc. are available.
        self.can_overlay = False
        self.show_logo = True
        self.properties.show_text = True
        self.properties.text = _("Flumotion")

    # Public API

    def hasOverlay(self):
        """Return True if overlaying is possible and anything (logo or
        text) is actually enabled."""
        if self.can_overlay:
            if self.show_logo or self.properties.show_text:
                return True
        return False

    # Component

    def getProperties(self):
        """Return component properties; drops 'text' when text is disabled
        and copies the video producer's dimensions."""
        p = super(Overlay, self).getProperties()
        if not self.properties.show_text:
            del p.text
        p.width = self._videoProducer.getWidth()
        p.height = self._videoProducer.getHeight()
        return p
class OverlayStep(WorkerWizardStep):
name = 'Overlay'
title = _('Overlay')
section = _('Production')
gladeFile = 'overlay-wizard.glade'
icon = 'overlay.png'
componentType = 'overlay'
docSection = 'help-configuration-assistant-overlay'
docAnchor = ''
docVersion = 'lo | cal'
def __init__(self, wizard, video_producer):
self.model = Overlay(video_producer)
WorkerWizardStep.__init__(self, wizard)
# Public API
def getOverlay(self):
if self.model.hasOverlay():
| return self.model
# Wizard Step
def setup(self):
self.text.data_type = str
self.add_proxy(self.model, ['show_logo'])
self.add_proxy(self.model.properties, ['show_text', 'text'])
def workerChanged(self, worker):
self.model.worker = worker
self._checkElements()
def getNext(self):
if self.wizard.getScenario().hasAudio(self.wizard):
return self.wizard.getStep('Production').getAudioStep()
return None
# Private API
def _setSensitive(self, sensitive):
self.show_text.set_sensitive(sensitive)
self.show_logo.set_sensitive(sensitive)
self.text.set_sensitive(sensitive)
def _checkElements(self):
self.model.can_overlay = False
def importError(error):
self.info('could not import PIL')
message = messages.Warning(
T_(N_("Worker '%s' cannot import module '%s'."),
self.worker, 'PIL'))
message.add(
T_(N_("\nThis module is part of '%s'."),
'Python Imaging Library'))
message.add(
T_(N_("\nThe project's homepage is %s"),
'http://www.pythonware.com/products/pil/'))
message.add(
T_(N_("\n\nClick \"Forward\" to proceed without overlay.")))
message.id = 'module-PIL'
self.wizard.add_msg(message)
self.wizard.taskFinished()
self._setSensitive(False)
def checkImport(unused):
self.wizard.taskFinished()
# taskFinished updates sensitivity
self.model.can_overlay = True
def checkElements(elements):
if elements:
f = ngettext("Worker '%s' is missing GStreamer element '%s'.",
"Worker '%s' is missing GStreamer elements '%s'.",
len(elements))
message = messages.Warning(
T_(f, self.worker, "', '".join(elements)), mid='overlay')
message.add(
T_(
N_("\n\nClick \"Forward\" to proceed without overlay.")))
self.wizard.add_msg(message)
self.wizard.taskFinished()
self._setSensitive(False)
return
else:
self.wizard.clear_msg('overlay')
# now check import
d = self.wizard.checkImport(self.worker, 'PIL')
d.addCallback(checkImport)
d.addErrback(importError)
self.wizard.waitForTask('overlay')
# first check elements
d = self.wizard.checkElements(
self.worker, 'ffmpegcolorspace', 'videomixer')
d.addCallback(checkElements)
# Callbacks
def on_show_text__toggled(self, button):
self.text.set_sensitive(button.get_active())
|
ActiveState/code | recipes/Python/535129_Groupby_hierarchy_tree/recipe-535129.py | Python | mit | 396 | 0.007576 | from | operator import itemgetter
from itertools import groupby
def groupby2(cols, lst, lev=0):
    """Render *lst* as a nested group-by tree over the column indexes *cols*.

    Each level sorts and groups the rows by the next column index; the leaf
    level is the str() of the remaining rows.  *lev* controls indentation.
    """
    if not cols:
        return str(list(lst))
    keyfun = itemgetter(cols[0])
    rows = sorted(lst, key=keyfun)  # groupby requires sorted input
    parts = []
    for key, group in groupby(rows, key=keyfun):
        parts.append("\n" + " " * lev + "%10s:" % key)
        parts.append(groupby2(cols[1:], group, lev + 1))
    return "".join(parts)
|
sequana/sequana | test/test_phred.py | Python | bsd-3-clause | 1,451 | 0.002757 | from sequana import Quality
from sequana import phred
def test_quality():
q = Quality('ABC')
q.plot()
assert q.mean_quality == 33
q = phred.QualitySanger('ABC')
q = phred.QualitySolexa('ABC')
def test_ascii_to_quality():
assert phred.ascii_to_quality("!") == 0
assert phred.ascii_to_quality("~") == 93
def test_quality_to_ascii():
assert phred.quality_to_ascii(65) == "b"
assert phred.quality_to_ascii(32) == "A"
assert phred.quality_to_ascii(32, phred=64) == "`"
def test_quality_to_proba():
    # Phred/Sanger scale: Q0 corresponds to p=1, Q40 to p=1e-4.
    assert phred.quality_to_proba_sanger(0) == 1
    assert phred.quality_to_proba_sanger(40) == 0.0001
def test_others():
    # sanger proba -> quality (probabilities outside [0, 1] are clamped)
    assert phred.proba_to_quality_sanger(0) == 93
    assert phred.proba_to_quality_sanger(0.0001) == 40
    assert phred.proba_to_quality_sanger(1) == 0
    assert phred.proba_to_quality_sanger(2) == 0
    # solexa proba -> quality
    assert phred.proba_to_quality_solexa(0) == 62
    assert abs(phred.proba_to_quality_solexa(0.0001) - 40) < 1e-3
    assert phred.proba_to_quality_solexa(0.99) == -5
    assert phred.proba_to_quality_solexa(2) == -5
    # solexa and sanger quality are similar. In this example, the sanger one
    # is slightly larger
    assert phred.quality_solexa_to_quality_sanger(64) > 64.
    # inverse here
    assert phred.quality_sanger_to_quality_solexa(64) < 64
    assert phred.quality_sanger_to_quality_solexa(64) > 63.99
|
Just-D/chromium-1 | content/test/gpu/gpu_tests/gpu_test_expectations.py | Python | bsd-3-clause | 3,819 | 0.006546 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# mavericks, yosemite, linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Browser types:
# android-webview-shell, android-content-shell, debug
#
# ANGLE renderer:
# d3d9, d3d11, opengl
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
#
# Sample usage in SetExpectations in subclasses:
# self.Fail('gl-enable-vertex-attrib.html',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
ANGLE_MODIFIERS = ['d3d9', 'd3d11', 'opengl']
BROWSER_TYPE_MODIFIERS = [
'android-webview-shell', 'android-content-shell', 'debug' ]
class _FlakyExpectation(object):
    """Pairs a pass-expectation with the number of retries allowed for it."""
    def __init__(self, expectation, max_num_retries):
        # Underlying expectation object built via CreateExpectation().
        self.expectation = expectation
        # Maximum number of retries before the page counts as failed.
        self.max_num_retries = max_num_retries
class GpuTestExpectations(test_expectations.TestExpectations):
def __init__(self):
self._flaky_expectations = []
super(GpuTestExpectations, self).__init__()
def Flaky(self, url_pattern, conditions=None, bug=None, max_num_retries=2):
expectation = _FlakyExpectation(self.CreateExpectation(
'pass', url_pattern, conditions, bug), max_num_retries)
self._flaky_expectations.append(expectation)
def GetFlakyRetriesForPage(self, page, browser):
for fe in self._flaky_expectations:
e = fe.expectation
if self.ExpectationAppliesToPage(e, browser, page):
return fe.max_num_retries
return 0
def IsValidUserDefinedCondition(self, condition):
# Add support for d3d9, d3d11 and opengl-specific expectations.
if condition in ANGLE_MODIFIERS:
return True
# Add support for browser-type-specific expectations.
if condition in BROWSER_TYPE_MODIFIERS:
return True
return super(GpuTestExpectations,
self).IsValidUserDefinedCondition(condition)
def ModifiersApply(self, browser, expectation):
if not super(GpuTestExpectations, self).ModifiersApply(
browser, expectation):
return False
# We'll only get here if the OS and GPU matched the expectation.
# TODO(kbr): refactor _Expectation to be a public class so that
# the GPU-specific properties can be moved into a subclass, and
# run the unit tests from this directory on the CQ and the bots.
# crbug.com/495868 crbug.com/495870
# Check for presence of Android WebView.
browser_expectations = [x for x in expectation.user_def | ined_conditions
if x in BROWSER_TYPE_MODIFIERS]
browser_matches = ((not browser_expectations) or
browser.browser_type in browser_expectations)
if not browser_matches:
return False
angle_renderer = ''
gpu_info = None
if browser.supports_system_info:
gpu_info = browser.GetSystemInfo().gpu
if gpu_info and gpu_info.aux_attributes:
gl_renderer = gpu_info.aux_attribu | tes.get('gl_renderer')
if gl_renderer:
if 'Direct3D11' in gl_renderer:
angle_renderer = 'd3d11'
elif 'Direct3D9' in gl_renderer:
angle_renderer = 'd3d9'
elif 'OpenGL' in gl_renderer:
angle_renderer = 'opengl'
angle_expectations = [x for x in expectation.user_defined_conditions
if x in ANGLE_MODIFIERS]
angle_matches = ((not angle_expectations) or
angle_renderer in angle_expectations)
return angle_matches
|
jicksy/oneanddone_test | vendor-local/lib/python/caching/invalidation.py | Python | mpl-2.0 | 7,039 | 0.000142 | import collections
import functools
import hashlib
import logging
import socket
from django.conf import settings
from django.core.cache import cache as default_cache, get_cache, parse_backend_uri
from django.core.cache.backends.base import InvalidCacheBackendError
from django.utils import encoding, translation
try:
import redis as redislib
except ImportError:
redislib = None
# Look for an own cache first before falling back to the default cache
try:
cache = get_cache('cache_machine')
except (InvalidCacheBackendError, ValueError):
cache = default_cache
CACHE_PREFIX = getattr(settings, 'CACHE_PREFIX', '')
FETCH_BY_ID = getattr(settings, 'FETCH_BY_ID', False)
FLUSH = CACHE_PREFIX + ':flush:'
log = logging.getLogger('caching.invalidation')
def make_key(k, with_locale=True):
    """Generate the full key for ``k``, with a prefix."""
    # Prefix with CACHE_PREFIX; optionally append the active locale so
    # translated content is cached per-language.
    key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))
    if with_locale:
        key += encoding.smart_str(translation.get_language())
    # memcached keys must be < 250 bytes and w/o whitespace, but it's nice
    # to see the keys when using locmem.
    return hashlib.md5(key).hexdigest()
def flush_key(obj):
    """Return the flush-list key for *obj*, namespaced under ``flush:``."""
    if isinstance(obj, basestring):
        raw = obj
    else:
        raw = obj.cache_key
    return FLUSH + make_key(raw, with_locale=False)
def byid(obj):
    """Return the by-id cache key for *obj* (used by FETCH_BY_ID)."""
    if isinstance(obj, basestring):
        raw = obj
    else:
        raw = obj.cache_key
    return make_key('byid:' + raw)
def safe_redis(return_type):
    """
    Decorator to catch and log any redis errors.

    return_type (optionally a callable) will be returned if there is an error.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kw):
            try:
                return f(*args, **kw)
            # 'except X as e' works on Python 2.6+ and 3.x, unlike the
            # original 'except X, e' which is Python-2-only syntax.
            except (socket.error, redislib.RedisError) as e:
                log.error('redis error: %s' % e)
                if hasattr(return_type, '__call__'):
                    return return_type()
                else:
                    return return_type
        return wrapper
    return decorator
class Invalidator(object):
    """Default invalidation backend: flush lists are stored in the cache
    itself (see RedisInvalidator for the redis-backed variant)."""

    def invalidate_keys(self, keys):
        """Invalidate all the flush lists named by the list of ``keys``."""
        if not keys:
            return
        flush, flush_keys = self.find_flush_lists(keys)
        if flush:
            cache.delete_many(flush)
        if flush_keys:
            self.clear_flush_lists(flush_keys)

    def cache_objects(self, objects, query_key, query_flush):
        """Register ``objects`` and the query they came from so they can be
        invalidated together later."""
        # Add this query to the flush list of each object. We include
        # query_flush so that other things can be cached against the queryset
        # and still participate in invalidation.
        flush_keys = [o.flush_key() for o in objects]

        flush_lists = collections.defaultdict(set)
        for key in flush_keys:
            flush_lists[key].add(query_flush)
        flush_lists[query_flush].add(query_key)

        # Add each object to the flush lists of its foreign keys.
        for obj in objects:
            obj_flush = obj.flush_key()
            for key in map(flush_key, obj._cache_keys()):
                if key != obj_flush:
                    flush_lists[key].add(obj_flush)
                if FETCH_BY_ID:
                    flush_lists[key].add(byid(obj))
        self.add_to_flush_list(flush_lists)

    def find_flush_lists(self, keys):
        """
        Recursively search for flush lists and objects to invalidate.

        The search starts with the lists in `keys` and expands to any flush
        lists found therein.  Returns ({objects to flush}, {flush keys found}).
        """
        new_keys = keys = set(map(flush_key, keys))
        flush = set(keys)

        # Add other flush keys from the lists, which happens when a parent
        # object includes a foreign key.
        while 1:
            to_flush = self.get_flush_lists(new_keys)
            flush.update(to_flush)
            new_keys = set(k for k in to_flush if k.startswith(FLUSH))
            diff = new_keys.difference(keys)
            if diff:
                keys.update(new_keys)
            else:
                return flush, keys

    def add_to_flush_list(self, mapping):
        """Update flush lists with the {flush_key: [query_key,...]} map."""
        flush_lists = collections.defaultdict(set)
        flush_lists.update(cache.get_many(mapping.keys()))
        for key, list_ in mapping.items():
            if flush_lists[key] is None:
                flush_lists[key] = set(list_)
            else:
                flush_lists[key].update(list_)
        cache.set_many(flush_lists)

    def get_flush_lists(self, keys):
        """Return a set of object keys from the lists in `keys`."""
        return set(e for flush_list in
                   filter(None, cache.get_many(keys).values())
                   for e in flush_list)

    def clear_flush_lists(self, keys):
        """Remove the given keys from the database."""
        cache.delete_many(keys)
class RedisInvalidator(Invalidator):
def safe_key(self, key):
if ' ' in key or '\n' in key:
log.warning('BAD KEY: "%s"' % key)
return ''
return key
@safe_redis(None)
def add_to_flush_list(self, mapping):
"""Update flush lists with the {flush_key: [query_key,...]} map."""
pipe = redis.pipeline(transaction=False)
for key, list_ in mapping.items():
for query_key in list_:
pipe.sadd(self.safe_key(key), query_key)
pipe.execute()
@safe_redis(set)
def get_flush_lists(self, keys):
return redis.sunion(map(self.safe_key, keys))
@safe_redis(None)
def clear_flush_lists(self, keys):
redis.delete(*map(self.safe_key, keys))
class NullInvalidator(Invalidator):
    """Invalidator that records nothing; used when invalidation is disabled
    via CACHE_MACHINE_NO_INVALIDATION."""

    def add_to_flush_list(self, mapping):
        """Ignore *mapping*; flush lists are never stored."""
        return None
def get_redis_backend():
"""Connect to redis from a string like CACHE_BACKEND."""
# From django-redis-cache.
_, server, params = parse_backend_uri(settings.REDIS_BACKEND)
db = params.pop('db', 1)
try:
db = int(db)
except (ValueError, TypeError):
db = 1
try:
socket_timeout = float(params.pop('socket_timeout'))
except (KeyError, ValueError):
socket_timeout = None
password = params.pop('password', None)
if ':' in server:
host, port = server.split(':')
try:
port = int(port)
except (ValueError, TypeError):
port = 6379
else:
host = 'localhost'
port = 6379
return redislib.Redis(host=host, port=port, db=db, password=password,
socket_timeout=socket_timeout)
if getattr(settings, 'CACHE_MACHINE_NO_INVALIDATION', False):
invalidator = NullInvalidator()
elif getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
redis = get_redis_backend()
invalidator = RedisInvalidator()
else:
invalidator = Invalidator()
|
HarmonyEnterpriseSolutions/harmony-platform | src/gnue/common/utils/FileUtils.py | Python | gpl-2.0 | 1,224 | 0.001634 | # -*- coding: iso-8859-1 -*-
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHA | NTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# Lic | ense along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2001-2007 Free Software Foundation
#
# FILE:
# FileUtils.py
#
# DESCRIPTION:
# Common file/url/resource related utilities
#
# NOTES:
# TODO: Deprecate
import os
import urllib
import urlparse
import sys
import cStringIO
# For backwards compatability
from gnue.common.utils.importing import import_string as dyn_import
from gnue.common.utils.file import to_uri as urlize, \
open_uri as openResource, \
to_buffer as openBuffer
|
simpleenergy/epochdatetimefield | setup.py | Python | mit | 1,135 | 0.003524 | #!/usr/bin/env python
from setuptools import setup, find_packages
import subprocess
import os
__doc__ = """
App for Django to allow using datetime objects over integer fields.
"""
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly (the
    original left the handle open).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
        return fp.read()
STAGE = 'alpha'
version = (0, 1, 1, STAGE)
def get_version():
number = '.'.join(map(str, version[:3]))
stage = version[3]
if stage == 'final':
| return number
elif stage == 'alpha':
process = subprocess.Popen('git rev-parse HEAD'.split(), stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
return number + '-' + stdout.strip()[:8]
setup(
name='epochdatetimefield',
version=get_version(),
description=__doc__,
long_description=read('README.rst'),
url="https://epochdatetimefield.readthedocs.org/en/latest/",
author="SimpleEnergy",
author_email='nick@simpleenergy.com',
packages=[package for package in find_packages() if package.startswith('epochdatetimefield')],
install_requires=[
'Django>=1.4',
],
zip_safe=False,
include_package_data=True,
)
|
lucashanke/houseofdota | manage.py | Python | mit | 266 | 0.003759 | #!/usr/bin/env python3
import | os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "houseofdota.production_settings")
from django.core.management import execute_from_command_line
execute_from_command_li | ne(sys.argv)
|
rkuchan/Tax-Calculator | docs/source/conf.py | Python | mit | 11,210 | 0.00678 | # -*- coding: utf-8 -*-
#
# Tax Calculator documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 9 17:06:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('../..'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
]
# The napoleon extension supports both numpy and google style docstrings.
# For more information, including additional settings visit:
# http://sphinxcontrib-napoleon.readthedocs.org/en/latest/
napoleon_include_private_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tax Calculator'
copyright = u'2015, Open Source Policy Center'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
if sys.version_info[0] == 3:
from unittest.mock import MagicMock
elif sys.version_info[0] == 2:
from mock import Mock as MagicMock
else:
print("Please install or update python to at least version 2.x")
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numba', 'numba.jit', 'numba.vectorize', 'numba.guvectorize']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names | to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_dom | ain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TaxCalculatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TaxCalculator.tex', u'Tax Calculator Documentation',
u'Open Source Policy Center', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'taxcalculator', u' |
ikargis/horizon_fod | openstack_dashboard/dashboards/admin/hypervisors/tables.py | Python | apache-2.0 | 3,056 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from horizon.templatetags import sizeformat
def get_memory(hypervisor):
return sizeformat.mbformat(hypervisor.memory_mb)
def get_memory_used(hypervisor):
return sizeformat.mbformat(hypervisor.memory_mb_used)
def get_local(hypervisor):
return sizeformat.diskgbformat(hypervis | or.local_gb)
def get_local_used(hypervisor):
return sizeformat.diskgbformat(hypervisor.local_gb_used)
class AdminHypervisorsTable(tables.DataTable):
hostname = tables.Column("hypervisor_hostname",
link=("horizon:admin:hypervisors:detail"),
verbose_name=_("Hostname"))
hypervisor_type = tables.Column("hy | pervisor_type",
verbose_name=_("Type"))
vcpus = tables.Column("vcpus",
verbose_name=_("VCPUs (total)"))
vcpus_used = tables.Column("vcpus_used",
verbose_name=_("VCPUs (used)"))
memory = tables.Column(get_memory,
verbose_name=_("RAM (total)"),
attrs={'data-type': 'size'})
memory_used = tables.Column(get_memory_used,
verbose_name=_("RAM (used)"),
attrs={'data-type': 'size'})
local = tables.Column(get_local,
verbose_name=_("Storage (total)"),
attrs={'data-type': 'size'})
local_used = tables.Column(get_local_used,
verbose_name=_("Storage (used)"),
attrs={'data-type': 'size'})
running_vms = tables.Column("running_vms",
verbose_name=_("Instances"))
def get_object_id(self, hypervisor):
return hypervisor.hypervisor_hostname
class Meta:
name = "hypervisors"
verbose_name = _("Hypervisors")
class AdminHypervisorInstancesTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Instance Name"))
instance_id = tables.Column("uuid",
verbose_name=_("Instance ID"))
def get_object_id(self, server):
return server['uuid']
class Meta:
name = "hypervisor_instances"
verbose_name = _("Hypervisor Instances")
|
kreczko/rootpy | rootpy/stats/dataset.py | Python | gpl-3.0 | 1,394 | 0.005022 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import ROOT
from . import log; log = log[__name__]
from .. import QROOT, asrootpy
from ..base import NamedObject
from ..extern.six import string_types
__all__ = [
'DataSet',
]
class DataSet(NamedObject, QROOT.RooDataSet):
_ROOT = QROOT.RooDataSet
class Entry(object):
def __init__(self, idx, dataset):
self.idx_ = idx
self.dataset_ = dataset
@property
def fields(se | lf):
return asrootpy(self.dataset_.get(self.idx_))
@property
def weight(self):
self.dataset_.get(self.idx_) #set current ev | ent
return self.dataset_.weight()
def __len__(self):
return self.numEntries()
def __getitem__(self, idx):
return DataSet.Entry(idx, self)
def __iter__(self):
for idx in range(len(self)):
yield DataSet.Entry(idx, self)
def createHistogram(self, *args, **kwargs):
if args and isinstance(args[0], string_types):
return ROOT.RooAbsData.createHistogram(self, *args, **kwargs)
return super(DataSet, self).createHistogram(*args, **kwargs)
def reduce(self, *args, **kwargs):
return asrootpy(super(DataSet, self).reduce(*args, **kwargs))
|
RENCI/xDCIShare | hs_app_timeseries/migrations/0002_auto_20150813_1247.py | Python | bsd-3-clause | 302 | 0 | # -*- coding: utf-8 -*-
from _ | _future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hs_app_timeseries', '0001_initial'),
]
operations = [
migrations.DeleteModel('TimeSeriesResource' | ),
]
|
wathen/PhD | MHD/FEniCS/MyPackage/PackageName/PETScFunc/__init__.py | Python | mit | 26 | 0 | from PETScMatOps import | *
| |
mitodl/micromasters | financialaid/migrations/0006_update_tierprogram.py | Python | bsd-3-clause | 574 | 0.001742 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-28 18:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('financialaid', '0005_switch_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='tierprogram',
name= | 'tier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tier_programs', to='financia | laid.Tier'),
),
]
|
Lonkal/ProjectEuler | problem4.py | Python | mit | 255 | 0.058824 | #problem 4
import math
def isPalidrome(t | est):
return str(test) == str(test)[::-1] #op extended slicing feature
max = 0
for i in range(999,99,- | 1):
for j in range(999,99,-1):
prod = i*j
if isPalidrome(prod) and prod > max:
max = prod
print max |
softwaremechanic/Miscellaneous | Python/hw.py | Python | gpl-2.0 | 49 | 0 | def a(*a | rgs, **kwargs):
print("hello world") | |
zstackorg/zstack-utility | cephprimarystorage/cephprimarystorage/cdaemon.py | Python | apache-2.0 | 1,450 | 0.006897 | '''
@author: frank
'''
import sys, os, os.path
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
pidfile = '/var/run/zstack/ceph-primarysto | rage.pid'
log.configure_log('/var/log/zstack/ceph-primarystorage.log')
logger = log.get_logger(__name__)
import cephagent
def pr | epare_pid_dir(path):
pdir = os.path.dirname(path)
if not os.path.isdir(pdir):
os.makedirs(pdir)
def main():
usage = 'usage: python -c "from cephprimarystorage import cdaemon; cdaemon.main()" start|stop|restart'
if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
print usage
sys.exit(1)
global pidfile
prepare_pid_dir(pidfile)
try:
cmd = sys.argv[1]
py_process_name = 'from cephprimarystorage import cdaemon'
agentdaemon = cephagent.CephDaemon(pidfile, py_process_name)
if cmd == 'start':
logger.debug('zstack-ceph-primarystorage starts')
agentdaemon.start()
elif cmd == 'stop':
logger.debug('zstack-ceph-primarystorage stops')
agentdaemon.stop()
elif cmd == 'restart':
logger.debug('zstack-ceph-primarystorage restarts')
agentdaemon.restart()
sys.exit(0)
except Exception:
logger.warning(linux.get_exception_stacktrace())
sys.exit(1)
if __name__ == '__main__':
main()
|
typesupply/dialogKit | install.py | Python | mit | 852 | 0.014085 | """Install script for the dialogKit Package.
This script installs a _link_ to the current location
of dialogKit. It does not copy anything. It also means that
if you move your dialogKit folder, you'll have to run the
install script again.
"""
from distutils.sysconfig import get_python_lib
import os, sys
def install(srcDir, pathFileName):
sitePackDir = get_python_lib()
fileName = os.path.join(sitePackDir, pathFileName + ".pth")
print "Installing dialogKit: about to write a path to %r in %r..." % (srcDir, fileName)
f = open(fileNa | me, 'w')
f.write(srcDir)
f.close()
return fileName
dir = os.path.join(os.path.dirname(os.path.normpath(os.path.abspath(sys.argv[0]))), "Lib")
p = install(dir, "dialogKit")
print "dialogKit is now installed."
print "(Note that you have to run the install script | again if you move your dialogKit folder)"
|
jpoullet2000/cgs-benchmarks | hbase-benchmarks/hbase_import_process.py | Python | apache-2.0 | 21,976 | 0.011058 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import urllib
import zlib
import zipfile
import math
import sys
import json
import time
import bz2
import gzip
import binascii
import requests
import random
from subprocess import *
import subprocess
import threading
import MySQLdb # See http://stackoverflow.com/questions/372885/how-do-i-connect-to-a-mysql-database-in-python
import MySQLdb.cursors
from multiprocessing import Process, Manager
# This script will download the data from the highlander database, create a json from it, then upload it to
# the cgs system, which is, for now, constituted of a hbase database where it will save the data.
# Why do we download the data from highlander instead of using directly the parser from dbBuilder?
# Because the current dbBuilder.tojson does not give as much information as we would like for the benchmarks, that's all.
# Configuration for the user
highlander_host = "highlander.usr.hydra.vub.ac.be"
highlander_host = "172.31.244.166"
highlander_ | database = "Iridia"
highlander_user = "iridia"
highlander_password = "iri.2742"
local_host = "127.0.0.1"
local_database = "highlander_chromosomes"
local_user = "root"
local_password = "Olgfe65grgr"
current_server_url = 'http://62.210.254.52'
cluster_url = 'http://insilicodb.ulb.ac.be:8888'
querySession = requests.Session()
info = {'username': | 'gdegols','password':'z9FNeTrQJYaemAtyUVva'}
r = querySession.post(cluster_url+'/accounts/login/',data=info)
target_database = "hbase" # "hbase" or "impala_text"
global_upload_state = False # If False, we download the data. If True, we upload the data previously downloaded.
# This function returns the different samples already uploaded to hbase
def isSampleDone(sample_name, current_upload_state):
if not os.path.isfile('cluster_'+target_database+'_samples_done_'+str(current_upload_state)+'.txt'):
return False
samples = [line.strip() for line in open('cluster_'+target_database+'_samples_done_'+str(current_upload_state)+'.txt')]
found = False
sample_name = str(sample_name)
for sample in samples:
if sample and sample_name == sample:
found = True
break
return found
def addSampleDone(sample, current_upload_state):
with open('cluster_'+target_database+'_samples_done_'+str(current_upload_state)+'.txt', 'a') as file:
file.write(str(sample)+'\r\n')
def fieldsToCheck():
with open('api.json', 'rb') as f:
fields = f.read()
fields = json.loads(fields)
# We create a list to keep/recreate the order
ordered_fields = []
for i in xrange(0,len(fields)):
ordered_fields.append(fields['c'+str(i)])
# Thanks to this code, the mapping will be 40% faster
new_fields = {}
for key in fields:
field = fields[key]
new_fields[field['highlander']] = field['json']
return new_fields, ordered_fields
# This function returns the genotype (0/0, 1/1, 0/1, 1/0 only) from a "highlander variant"
def genotypeFromVariant(variant):
if variant['zygosity'] == 'Homozygous':
if random.random() < 0.5:
return '1|1'
else:
return '0|0'
else:
if random.random() < 0.5:
return '0|1'
else:
return '1|0'
# This function is in charge to create an adapted json for the benchmarks
def tojsonForBenchmarks(variants, patient):
fields, ordered_fields = fieldsToCheck()
data = {}
i=0
for variant in variants:
data[i] = {}
# We try to match any data from highlander to json
for highlander_field in variant:
if highlander_field in fields:
data[i][fields[highlander_field]] = str(variant[highlander_field]).replace(';',',')
# Some specific information
data[i]['readGroupSets.readGroups.sampleId'] = patient # variant['project_id']
data[i]['variants.fileformat'] = 'VCFv4.1'
if variant['allelic_depth_ref'] and variant['allelic_depth_alt']:
data[i]['variants.calls.info.confidence_by_depth'] = variant['allelic_depth_ref'] + "," + variant['allelic_depth_alt']
elif variant['allelic_depth_ref']:
data[i]['variants.calls.info.confidence_by_depth'] = variant['allelic_depth_ref']
elif variant['allelic_depth_alt']:
data[i]['variants.calls.info.confidence_by_depth'] = variant['allelic_depth_alt']
data[i]['variants.info.insert_date'] = int(time.time())
data[i]['variants.calls.genotype'] = genotypeFromVariant(variant)
i += 1
return data
# This function is in charge to create an adapted tsv for the benchmarks
def totsvForBenchmarks(variants, patient):
fields, ordered_fields = fieldsToCheck()
fields_map = {}
for field_id in xrange(0,len(ordered_fields)):
fields_map[ordered_fields[field_id]['highlander']] = field_id
"""
init_map = {}
for field_id in xrange(0,len(ordered_fields)):
init_map[ordered_fields[field_id]['highlander']] = ''
"""
tsv = []
dt = 0
for variant in variants:
# Some specific information
if variant['allelic_depth_ref'] and variant['allelic_depth_alt']:
variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_ref'] + "," + variant['allelic_depth_alt']
elif variant['allelic_depth_ref']:
variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_ref']
elif variant['allelic_depth_alt']:
variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_alt']
variant['insert_date'] = int(time.time())
variant['special_genotype'] = genotypeFromVariant(variant)
variant['special_fileformat'] = 'VCFv4.1'
# We create the row-key
rowkey = str(variant['project_id']) + '-' + str(variant['chr']) + '-' \
+ str(variant['pos']) + '-' + str(variant['reference']) + '-' \
+ str(variant['alternative'])
line = rowkey
# It took me some times to find the most efficient way to create the tsv line, but
# maybe there is another way to do that even faster... It takes 11.5s for the current loop
val = [''] * len(ordered_fields)
for field_name, field_place in fields_map.iteritems():
try:
if variant[field_name]:
if field_name != 'unisnp_ids' and field_name != 'dbsnp_id_141' and field_name != 'dbsnp_id_137':
val[field_place] = str(variant[field_name])
else:
val[field_place] = str(variant[field_name]).replace(';',',')
except:
pass
line += ';'.join(val)
""" 9s
j = 0
for field in ordered_fields:
try:
if variant[field['highlander']]:
j += 1
else:
j += 1
except:
j += 1
"""
""" 19s
for field in ordered_fields:
if field['highlander'] in variant and variant[field['highlander']]:
line += ';'+str(variant[field['highlander']]).replace(';',',')
else:
line += ';'
"""
""" 16s
current_map = init_map.copy()
for field, value in variant.iteritems():
if field != 'unisnp_ids':
current_map[field] = str(value)
else:
current_map[field] = str(value).replace(';',',') #.replace(';',',')
for field in ordered_fields:
line += ';'+current_map[field['highlander']]
"""
|
ininex/geofire-python | resource/lib/python2.7/site-packages/pyrebase/pyrebase.py | Python | mit | 21,697 | 0.001751 | import requests
from requests import Session
from requests.exceptions import HTTPError
try:
from urllib.parse import urlencode, quote
except:
from urllib import urlencode, quote
import json
import math
from random import uniform
import time
from collections import OrderedDict
from sseclient import SSEClient
import threading
import socket
from oauth2client.service_account import ServiceAccountCredentials
from gcloud import storage
from requests.packages.urllib3.contrib.appengine import is_appengine_sandbox
from requests_toolbelt.adapters import appengine
import python_jwt as jwt
from Crypto.PublicKey import RSA
import datetime
def initialize_app(config):
return Firebase(config)
class Firebase:
""" Firebase Interface """
def __init__(self, config):
self.api_key = config["apiKey"]
self.auth_domain = config["authDomain"]
self.database_url = config["databaseURL"]
self.storage_bucket = config["storageBucket"]
self.credentials = None
self.requests = requests.Session()
if config.get("serviceAccount"):
scopes = [
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email',
"https://www.googleapis.com/auth/cloud-platform"
]
service_account_type = type(config["serviceAccount"])
if service_account_type is str:
self.credentials = ServiceAccountCredentials.from_json_keyfile_name(config["serviceAccount"], scopes)
if service_account_type is dict:
self.credentials = ServiceAccountCredentials.from_json_keyfile_dict(config["serviceAccount"], scopes)
if is_appengine_sandbox():
# Fix error in standard GAE environment
# is releated to https://github.com/kennethreitz/requests/issues/3187
# ProtocolError('Connection aborted.', error(13, 'Permission denied'))
adapter = appengine.AppEngineAdapter(max_retries=3)
else:
adapter = requests.adapters.HTTPAdapter(max_retries=3)
for scheme in ('http://', 'https://'):
self.requests.mount(scheme, adapter)
def auth(self):
return Auth(self.api_key, self.requests, self.credentials)
def database(self):
return Database(self.credentials, self.api_key, self.database_url, self.requests)
def storage(self):
return Storage(self.credentials, self.storage_bucket, self.requests)
class Auth:
""" Authentication Service """
def __init__(self, api_key, requests, credentials):
self.api_key = api_key
self.current_user = None
self.requests = requests
self.credentials = credentials
def sign_in_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
self.current_user = request_object.json()
return request_object.json()
def create_custom_token(self, uid, additional_claims=None):
service_account_email = self.credentials.service_account_email
private_key = RSA.importKey(self.credentials._private_key_pkcs8_pem)
payload = {
"iss": service_account_email,
"sub": service_account_email,
"aud": "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit",
"uid": uid
}
if additional_claims:
payload["claims"] = additional_claims
exp = datetime.timedelta(minutes=60)
return jwt.generate_jwt(payload, private_key, "RS256", exp)
def sign_in_with_custom_token(self, token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"returnSecureToken": True, "token": token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def refresh(self, refresh_token):
request_ref = "https://securetoken.googleapis.com/v1/token?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"grantType": "refresh_token", "refreshToken": refresh_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
request_object_json = request_object.json()
# handle weirdly formatted response
user = {
"userId": request_object_json["user_id"],
"idToken": request_object_json["id_token"],
"refreshToken": request_object_json["re | fresh_token"]
}
return user
def get_account_info(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key | ={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_email_verification(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "VERIFY_EMAIL", "idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_password_reset_email(self, email):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "PASSWORD_RESET", "email": email})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def verify_password_reset_code(self, reset_code, new_password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"oobCode": reset_code, "newPassword": new_password})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def create_user_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8" }
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
class Database:
""" Database Service """
def __init__(self, credentials, api_key, database_url, requests):
if not database_url.endswith('/'):
url = ''.join([database_url, '/'])
else:
url = database_url
self.credentials = credentials
self.api_key = api_key
self.database_url = url
self.requests = requests
self.path = ""
self.build_query = {}
self.last_push_time = 0
self.last_rand_chars = []
|
wking/thumbor | thumbor/storages/redis_storage.py | Python | mit | 4,944 | 0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import logging
from json import loads, dumps
from datetime import datetime, timedelta
from redis import Redis, RedisError
from thumbor.storages import BaseStorage
from thumbor.utils import on_exception
from tornado.concurrent import return_future
logger = logging.getLogger('thumbor')
class Storage(BaseStorage):
    """Redis-backed thumbor storage for images, crypto keys and detector data."""
    # Class-level singleton connection, shared by instances created with
    # shared_client=True.
    storage = None
    def __init__(self, context, shared_client=True):
        '''Initialize the RedisStorage
        :param thumbor.context.Context context: Current context
        :param boolean shared_client: When set to True a singleton client will
        be used.
        '''
        BaseStorage.__init__(self, context)
        self.shared_client = shared_client
        self.storage = self.reconnect_redis()
    def get_storage(self):
        '''Get the storage instance.
        :return Redis: Redis instance
        '''
        # Lazily reconnect if the client was dropped by on_redis_error().
        if self.storage:
            return self.storage
        self.storage = self.reconnect_redis()
        return self.storage
    def reconnect_redis(self):
        # Reuse the process-wide client when sharing is enabled.
        if self.shared_client and Storage.storage:
            return Storage.storage
        storage = Redis(
            port=self.context.config.REDIS_STORAGE_SERVER_PORT,
            host=self.context.config.REDIS_STORAGE_SERVER_HOST,
            db=self.context.config.REDIS_STORAGE_SERVER_DB,
            password=self.context.config.REDIS_STORAGE_SERVER_PASSWORD
        )
        if self.shared_client:
            Storage.storage = storage
        return storage
    def on_redis_error(self, fname, exc_type, exc_value):
        '''Callback executed when there is a redis error.
        :param string fname: Function name that was being called.
        :param type exc_type: Exception type
        :param Exception exc_value: The current exception
        :returns: Default value or raise the current exception
        '''
        # Drop the (possibly broken) connection so the next call reconnects.
        if self.shared_client:
            Storage.storage = None
        else:
            self.storage = None
        if self.context.config.REDIS_STORAGE_IGNORE_ERRORS is True:
            logger.error("[REDIS_STORAGE] %s" % exc_value)
            # _exists must keep returning a boolean even when Redis fails.
            if fname == '_exists':
                return False
            return None
        else:
            raise exc_value
    # Private key builders (name-mangled to _Storage__key_for / ..._detector_key_for).
    def __key_for(self, url):
        return 'thumbor-crypto-%s' % url
    def __detector_key_for(self, url):
        return 'thumbor-detector-%s' % url
    # NOTE: the decorators below receive the plain function object
    # `on_redis_error` from the class namespace; on_exception calls it with
    # the instance, so error handling above still sees `self`.
    @on_exception(on_redis_error, RedisError)
    def put(self, path, bytes):
        # Store the image payload and schedule its expiration.
        # (parameter named `bytes` shadows the builtin — kept for API compat)
        storage = self.get_storage()
        storage.set(path, bytes)
        storage.expireat(
            path, datetime.now() + timedelta(
                seconds=self.context.config.STORAGE_EXPIRATION_SECONDS
            )
        )
    @on_exception(on_redis_error, RedisError)
    def put_crypto(self, path):
        # Persist the current security key alongside the image, if enabled.
        if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            return
        if not self.context.server.security_key:
            raise RuntimeError(
                "STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no "
                "SECURITY_KEY specified"
            )
        key = self.__key_for(path)
        self.get_storage().set(key, self.context.server.security_key)
    @on_exception(on_redis_error, RedisError)
    def put_detector_data(self, path, data):
        # Detector data is JSON-serialised before storage.
        key = self.__detector_key_for(path)
        self.get_storage().set(key, dumps(data))
    @return_future
    def get_crypto(self, path, callback):
        # Async facade over _get_crypto for tornado callers.
        callback(self._get_crypto(path))
    @on_exception(on_redis_error, RedisError)
    def _get_crypto(self, path):
        if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            return None
        crypto = self.get_storage().get(self.__key_for(path))
        if not crypto:
            return None
        return crypto
    @return_future
    def get_detector_data(self, path, callback):
        # Async facade over _get_detector_data for tornado callers.
        callback(self._get_detector_data(path))
    @on_exception(on_redis_error, RedisError)
    def _get_detector_data(self, path):
        data = self.get_storage().get(self.__detector_key_for(path))
        if not data:
            return None
        return loads(data)
    @return_future
    def exists(self, path, callback):
        callback(self._exists(path))
    @on_exception(on_redis_error, RedisError)
    def _exists(self, path):
        return self.get_storage().exists(path)
    @on_exception(on_redis_error, RedisError)
    def remove(self, path):
        self.get_storage().delete(path)
    @return_future
    def get(self, path, callback):
        # Bound-method form of the error handler is used here because the
        # decorator is applied at call time, not at class-body time.
        @on_exception(self.on_redis_error, RedisError)
        def wrap():
            return self.get_storage().get(path)
        callback(wrap())
|
nananan/Cinnamon | analyzePackage.py | Python | gpl-3.0 | 39,675 | 0.009679 | #!/usr/bin/python
import scapy
#import scapy_ex
import os,sys
import printerInfo
import enum
from enum import Enum
from scapy.all import *
import time, datetime
#from time import sleep
class Message(Enum):
    """Codes identifying captured 802.11 frame types / events.
    Values are the string codes used when reporting statistics.
    """
    AUTH = "0"          # authentication frame
    DEAUTH = "1"        # deauthentication frame
    PROBE_REQ = "2"     # probe request
    PROBE_RESP = "3"    # probe response
    HAND_SUCC = "4"     # EAP handshake succeeded
    HAND_FAIL = "5"     # EAP handshake failed
    CORR_PACK = "6"     # corrupted packet
    RTS = "7"           # request-to-send
    CTS = "8"           # clear-to-send
    ACK = "9"           # acknowledgement
    DATA = "10"         # data frame
    BEACON = "11"       # beacon frame
    ASSOC_REQ = "12"    # association request
    ASSOC_RESP = "13"   # association response
    DISASSOC = "14"     # disassociation frame
    NUM_PACK = "15"     # total packet count
    OTHER = "16"        # any other frame type
class AnalyzePackage:
BROADCAST_ADDR = "ff:ff:ff:ff:ff:ff"
EXTENSION_LOG = ".log"
FOLDER_LOG = "log/"
def __init__(self, printerInfo):
self.apPresent = []
self.essid = {}
self.channel = {}
self.power = {}
self.powerAP = {}
self.authentInfo = {}
self.authent = {}
self.associationRequestInfo = {}
self.associationRequest = {}
self.associationResponceInfo = {}
self.associationResponce = {}
self.disassociationInfo = {}
self.disassociation = {}
self.deauthentInfo = {}
self.deauthent = {}
self.probeRequestInfo = {}
self.probeRequest = {}
self.probeResponseInfo = {}
self.probeResponse = {}
self.eapHandshakeSuccessInfo = {}
self.eapHandshakeSuccess = {}
self.eapHandshakeFailedInfo = {}
self.eapHandshakeFailed = {}
self.corruptedPackInfo = {}
self.corruptedPack = {}
self.eapRequest = {}
self.rtsListInfo = {}
self.rtsList = {}
self.ctsListInfo = {}
self.ctsList = {}
self.dataListInfo = {}
self.dataList = {}
self.ackListInfo = {}
self.ackList = {}
self.beaconListInfo = {}
self.beacon = {}
self.numPackInfo = {}
self.numPack = {}
self.otherListInfo = {}
self.otherList = {}
self.cont = 0
self.printerInfo = printerInfo
self.info = {}
self.infoAP = {}
self.infoClient = {}
self.roamingClient = {}
self.contForAP = 0
now = datetime.datetime.now()
date = str(now.year)+str(now.month)+str(now.day)+"-"+str(now.hour)+"-"+str(now.minute)+"-"+str(now.second)
self.titleLog = AnalyzePackage.FOL | DER_LOG + date + AnalyzePackage.EXTENSION_LOG
#self.fileLog = open(self.titleLog, "w+")
f = open("DISASS.txt", "w+")
f.close()
def createArrayInfo(self,macAP, macClient):
if (macAP,macClient) not in self.deauthentInfo:
self.deauthentInfo[(macAP,macClient)] = 0
| if (macAP,macClient) not in self.authentInfo:
self.authentInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.associationRequestInfo:
self.associationRequestInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.associationResponceInfo:
self.associationResponceInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.disassociationInfo:
self.disassociationInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.power:
self.power[(macAP,macClient)] = "-"
if (macAP,macClient) not in self.eapHandshakeSuccessInfo:
self.eapHandshakeSuccessInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.eapHandshakeFailedInfo:
self.eapHandshakeFailedInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.corruptedPackInfo:
self.corruptedPackInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.rtsListInfo:
self.rtsListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.ctsListInfo:
self.ctsListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.dataListInfo:
self.dataListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.numPackInfo:
self.numPackInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.ackListInfo:
self.ackListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.beaconListInfo:
self.beaconListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.probeResponseInfo:
self.probeResponseInfo[(macAP,macClient)] = 0
if macClient not in self.ackListInfo:
self.ackListInfo[macClient] = 0
if (macAP,macClient) not in self.otherListInfo:
self.otherListInfo[(macAP,macClient)] = 0
def createArray(self, mac):
if mac not in self.beacon:
self.beacon[mac] = 0
if mac not in self.numPack:
self.numPack[mac] = 0
if mac not in self.authent:
self.authent[mac] = 0
if mac not in self.associationRequest:
self.associationRequest[mac] = 0
if mac not in self.associationResponce:
self.associationResponce[mac] = 0
if mac not in self.disassociation:
self.disassociation[mac] = 0
if mac not in self.deauthent:
self.deauthent[mac] = 0
if mac not in self.probeRequest:
self.probeRequest[mac] = 0
if mac not in self.probeResponse:
self.probeResponse[mac] = 0
if mac not in self.eapHandshakeSuccess:
self.eapHandshakeSuccess[mac] = 0
if mac not in self.eapHandshakeFailed:
self.eapHandshakeFailed[mac] = 0
if mac not in self.corruptedPack:
self.corruptedPack[mac] = 0
if mac not in self.rtsList:
self.rtsList[mac] = 0
if mac not in self.ctsList:
self.ctsList[mac] = 0
if mac not in self.dataList:
self.dataList[mac] = 0
if mac not in self.ackList:
self.ackList[mac] = 0
if mac not in self.otherList:
self.otherList[mac] = 0
if mac not in self.power:
self.power[mac] = "-"
if mac not in self.channel:
self.channel[mac] = "-"
def checkFrequence(self,macAP, macClient, power):
if power != 0 and power != None:
if macAP != AnalyzePackage.BROADCAST_ADDR:
self.power[(macAP,macClient)] = power
self.powerAP[macAP] = power
def checkChannel(self, mac, channel):
if channel != "0" and channel != None:
self.channel[mac] = channel
    def printInfo(self,essid,macAP,macClient):
        """Aggregate all counters for one (AP, client) pair into a single
        tuple and store it in self.info, keyed by (macAP, macClient).
        Only pairs that have at least one counted packet are recorded."""
        if macAP != None and macClient != None:
            # Probe requests are keyed by (essid, client), not by AP.
            if (essid,macClient) not in self.probeRequestInfo:
                self.probeRequestInfo[(essid,macClient)] = 0
            if self.numPackInfo[(macAP,macClient)] != 0:
                # Percentage of corrupted packets over all packets seen.
                percentCorr = int(float(self.corruptedPackInfo[(macAP,macClient)])/float(self.numPackInfo[(macAP,macClient)])*100)
                strPercentage = str(percentCorr)
                # 22-element summary: identity, per-type counters, power,
                # corruption stats and totals, in fixed positional order.
                i = tuple([essid, macAP, macClient, self.authentInfo[(macAP,macClient)], self.deauthentInfo[(macAP,macClient)], self.associationRequestInfo[(macAP,macClient)], self.associationResponceInfo[(macAP,macClient)], self.disassociationInfo[(macAP,macClient)], self.eapHandshakeSuccessInfo[(macAP,macClient)], self.eapHandshakeFailedInfo[(macAP,macClient)], self.power[(macAP,macClient)], self.corruptedPackInfo[(macAP,macClient)], strPercentage, self.dataListInfo[(macAP,macClient)], self.rtsListInfo[(macAP,macClient)], self.ctsListInfo[(macAP,macClient)], self.ackListInfo[(macAP, macClient)], self.beaconListInfo[(macAP,macClient)], self.probeRequestInfo[(essid,macClient)], self.probeResponseInfo[(macAP,macClient)], self.numPackInfo[(macAP,macClient)], self.otherListInfo[(macAP,macClient)]])
                # Index by (AP MAC, client MAC) — positions 1 and 2 of i.
                self.info[i[1],i[2]] = i
def printInfoAP(self, essid, macAP, macClient):
if macAP != None and macAP != AnalyzePackage.BROADCAST_ADDR and macClient ! |
googleapis/python-secret-manager | samples/generated_samples/secretmanager_v1_generated_secret_manager_service_set_iam_policy_sync.py | Python | apache-2.0 | 1,501 | 0.000666 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SetIamPolicy
# NOTE: This snippet has been auto | matically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-secretmanager
# [START secretmanager_v1_generated_SecretManagerService_SetIamPolicy_sync]
from google.cloud import secretmanager_v1
def sample_set_iam_policy():
    """Call SecretManagerService.SetIamPolicy and print the returned policy.

    Snippet-style sample: values such as "resource_value" are placeholders.
    """
    client = secretmanager_v1.SecretManagerServiceClient()
    request = secretmanager_v1.SetIamPolicyRequest(
        resource="resource_value",
    )
    response = client.set_iam_policy(request=request)
    print(response)
# [END secretmanager_v1_generated_SecretManagerService_SetIamPolicy_sync]
|
ezralalonde/cloaked-octo-sansa | 01/qu/02.py | Python | bsd-2-clause | 342 | 0.005848 | # Write Pyth | on code to print out how far light travels
# in centimeters in one nanosecond. Use the variables
# defined below.
speed_of_light = 299792458  # meters per second
meter = 100  # one meter is 100 centimeters
nanosecond = 1.0 / 1000000000  # one billionth of a second
# Parenthesized print runs under both Python 2 and Python 3; the original
# bare `print expr` statement is a SyntaxError on Python 3.
print(speed_of_light * meter * nanosecond)
|
techtonik/pip | tests/lib/options_helpers.py | Python | mit | 792 | 0 | """Provides helper classes for testing option handling in pip
"""
import os
from pip._inter | nal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.commands import commands_dict
class FakeCommand(Command):
    """Minimal pip command used to exercise option parsing in tests."""
    name = 'fake'
    summary = name

    def main(self, args):
        # Attach the standard index options so tests can parse those too.
        group = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.add_option_group(group)
        return self.parse_args(args)
class AddFakeCommandMixin(object):
    """Test mixin that registers FakeCommand for the duration of a test and
    restores both the command registry and the process environment after."""

    def setup(self):
        # Snapshot the environment so teardown can restore it.
        self.environ_before = os.environ.copy()
        commands_dict[FakeCommand.name] = FakeCommand

    def teardown(self):
        # Restore the environment *in place*: rebinding os.environ to a
        # plain dict (as the original code did) discards the special mapping
        # that keeps os.environ synchronized with putenv()/unsetenv().
        os.environ.clear()
        os.environ.update(self.environ_before)
        commands_dict.pop(FakeCommand.name)
|
elopezga/ErrorRate | ivi/lecroy/lecroyBaseScope.py | Python | mit | 71,778 | 0.00255 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from .. import ivi
from .. import scope
from .. import scpi
from .. import extra
# Translation tables between IVI API names and LeCroy SCPI keywords.

# Acquisition modes (IVI name -> instrument keyword).
AcquisitionTypeMapping = {
    'normal': 'norm',
    'peak_detect': 'peak',
    'high_resolution': 'hres',
    'average': 'aver',
}

# Valid vertical coupling selections.
VerticalCoupling = {'ac', 'dc', 'gnd'}

# Valid input impedance selections (ohms, or 'gnd').
InputImpedance = {1000000, 50, 'gnd'}

# Bandwidth Limits, OFF = none, ON = 20 MHz, 200MHZ = 200 MHz
BandwidthLimit = {'OFF', 'ON', '200MHZ'}

# Valid trigger sweep modes.
TriggerModes = {'auto', 'norm', 'single', 'stop'}

# Instrument trigger type keywords.
TriggerTypes = {
    'drop', 'edge', 'ev', 'glit', 'ht', 'hv', 'hv2', 'il', 'intv', 'is',
    'i2', 'off', 'pl', 'ps', 'p2', 'ql', 'sng', 'sq', 'sr', 'teq', 'ti',
    'tl',
}

# coupling name -> (coupling keyword, noise-reject flag, HF-reject flag)
TriggerCouplingMapping = {
    'ac': ('ac', 0, 0),
    'dc': ('dc', 0, 0),
    'hf_reject': ('dc', 0, 1),
    'lf_reject': ('lfr', 0, 0),
    'noise_reject': ('dc', 1, 0),
    'hf_reject_ac': ('ac', 0, 1),
    'noise_reject_ac': ('ac', 1, 0),
    'hf_noise_reject': ('dc', 1, 1),
    'hf_noise_reject_ac': ('ac', 1, 1),
    'lf_noise_reject': ('lfr', 1, 0),
}

TVTriggerEventMapping = {
    'field1': 'fie1',
    'field2': 'fie2',
    'any_field': 'afi',
    'any_line': 'alin',
    'line_number': 'lfi1',
    'vertical': 'vert',
    'line_field1': 'lfi1',
    'line_field2': 'lfi2',
    'line': 'line',
    'line_alternate': 'lalt',
    'lvertical': 'lver',
}

TVTriggerFormatMapping = {
    'generic': 'gen',
    'ntsc': 'ntsc',
    'pal': 'pal',
    'palm': 'palm',
    'secam': 'sec',
    'p480l60hz': 'p480',
    'p480': 'p480',
    'p720l60hz': 'p720',
    'p720': 'p720',
    'p1080l24hz': 'p1080',
    'p1080': 'p1080',
    'p1080l25hz': 'p1080l25hz',
    'p1080l50hz': 'p1080l50hz',
    'p1080l60hz': 'p1080l60hz',
    'i1080l50hz': 'i1080l50hz',
    'i1080': 'i1080l50hz',
    'i1080l60hz': 'i1080l60hz',
}

PolarityMapping = {
    'positive': 'pos',
    'negative': 'neg',
}

GlitchConditionMapping = {
    'less_than': 'less',
    'greater_than': 'gre',
}

WidthConditionMapping = {'within': 'rang'}

SampleModeMapping = {
    'real_time': 'rtim',
    'equivalent_time': 'etim',
}

SlopeMapping = {
    'positive': 'pos',
    'negative': 'neg',
    'either': 'eith',
    'alternating': 'alt',
}

# Analog-channel measurement functions (IVI name -> instrument keyword).
MeasurementFunctionMapping = {
    'rise_time': 'risetime',
    'fall_time': 'falltime',
    'frequency': 'frequency',
    'period': 'period',
    'voltage_rms': 'vrms display',
    'voltage_peak_to_peak': 'vpp',
    'voltage_max': 'vmax',
    'voltage_min': 'vmin',
    'voltage_high': 'vtop',
    'voltage_low': 'vbase',
    'voltage_average': 'vaverage display',
    'width_negative': 'nwidth',
    'width_positive': 'pwidth',
    'duty_cycle_positive': 'dutycycle',
    'amplitude': 'vamplitude',
    'voltage_cycle_rms': 'vrms cycle',
    'voltage_cycle_average': 'vaverage cycle',
    'overshoot': 'overshoot',
    'preshoot': 'preshoot',
    'ratio': 'vratio',
    'phase': 'phase',
    'delay': 'delay',
}

# Subset of measurements available on digital channels.
MeasurementFunctionMappingDigital = {
    'rise_time': 'risetime',
    'fall_time': 'falltime',
    'frequency': 'frequency',
    'period': 'period',
    'width_negative': 'nwidth',
    'width_positive': 'pwidth',
    'duty_cycle_positive': 'dutycycle',
}

ScreenshotImageFormatMapping = {
    'bmp': 'bmp',
    'bmp24': 'bmp',
    'bmp8': 'bmpcomp',
    'jpeg': 'jpeg',
    'png': 'png',
    'png24': 'png',
    'psd': 'psd',
    'tiff': 'tiff',
}

TimebaseModeMapping = {
    'main': 'main',
    'window': 'wind',
    'xy': 'xy',
    'roll': 'roll',
}

TimebaseReferenceMapping = {
    'left': 'left',
    'center': 'cent',
    'right': 'righ',
}
class lecroyBaseScope(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest, scpi.common.Memory,
scope.Base, scope.TVTrigger,
scope.GlitchTrigger, scope.WidthTrigger, scope.AcLineTrigger,
scope.WaveformMeasurement, scope.MinMaxWaveform,
scope.ContinuousAcquisition, scope.AverageAcquisition,
scope.SampleMode, scope.AutoSetup,
extra.common.SystemSetup, extra.common.Screenshot,
ivi.Driver):
"LeCroy generic IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._channel_label = list()
self._channel_label_position = list()
self._channel_noise_filter = list()
self._channel_interpolation = list()
self._channel_probe_skew = list()
self._channel_invert = list()
self._channel_probe_id = list()
self._channel_bw_limit = list()
super(lecroyBaseScope, self).__init__(*args, **kwargs)
self._memory_size = 5
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._horizontal_divisions = 10
self._vertical_divisions = 8
self._timebase_mode = 'main'
self._timebase_reference = 'center'
self._timebase_position = 0.0
self._timebase_range = 1e-3
self._timebase_scale = 100e-6
self._timebase_window_position = 0.0
self._timebase_window_range = 5e-6
self._timebase_window_scale = 500e-9
self._trigger_mode = 'auto'
self._trigger_type = 'edge'
self._display_vectors = True
self._display_labels = True
self._display_grid = "single"
self._identity_description = "LeCroy generic IVI oscilloscope driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "LeCroy"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 4
self._identity_specification_minor_version = 1
self._identity_supported_instrument_models = ['WR204MXI-A', 'WR204XI-A', 'WR104MXI-A', 'WR104XI-A', 'WR64MXI-A',
'WR64XI-A',
|
cliburn/flow | src/plugins/visual/TwoDFrame/colormap.py | Python | gpl-3.0 | 1,367 | 0.027067 | #!/usr/bin/env python
#
"""
These functions, when given a magnitude mag between cmin and cmax, return
a colour tuple (red, green, blue). Light blue is cold (low magnitude)
and yellow is hot (high magnitude).
"""
import math
def floatRgb(mag, cmin, cmax, alpha=1.0):
    """
    Return a tuple of floats (red, green, blue, alpha), each between 0
    and 1, for a magnitude `mag` within the range [cmin, cmax].
    Low magnitudes map to light blue, high magnitudes to yellow.
    """
    try:
        # normalize to [0,1]
        x = float(mag - cmin) / float(cmax - cmin)
    except ZeroDivisionError:
        # degenerate range (cmax == cmin): use the midpoint colour.
        # (the original bare `except:` also hid real errors such as
        # non-numeric arguments)
        x = 0.5
    blue = min(max(4 * (0.75 - x), 0.), 1.)
    red = min(max(4 * (x - 0.25), 0.), 1.)
    green = min(max(4 * math.fabs(x - 0.5) - 1., 0.), 1.)
    return (red, green, blue, alpha)
def strRgb(mag, cmin, cmax):
    """
    Return a Tk-style colour string "#rrggbb" for the given magnitude.
    """
    # floatRgb returns FOUR components (red, green, blue, alpha); the
    # original 3-name unpacking raised ValueError on every call. Tk colour
    # strings carry no alpha, so it is discarded here.
    red, green, blue, _alpha = floatRgb(mag, cmin, cmax)
    # %x formatting requires integers; the components are floats in [0, 1].
    return "#%02x%02x%02x" % (int(red * 255), int(green * 255), int(blue * 255))
def rgb(mag, cmin, cmax):
    """
    Return a tuple of integers (red, green, blue), each 0-255, to be used
    in AWT/Java plots.
    """
    # floatRgb returns FOUR components (red, green, blue, alpha); the
    # original 3-name unpacking raised ValueError on every call.
    red, green, blue, _alpha = floatRgb(mag, cmin, cmax)
    return (int(red * 255), int(green * 255), int(blue * 255))
def htmlRgb(mag, cmin, cmax):
    """
    Return an HTML colour string of the form "#rrggbb".
    """
    return "#%02x%02x%02x"%rgb(mag, cmin, cmax)
|
dmartinezgarcia/Python-Programming | Chapter 8 - Software Objects/exercise_3.py | Python | gpl-2.0 | 2,406 | 0.006234 | # Exercise 3
#
# | Create a "back door" in the Critter Caretaker program that shows the exact values of the object's attributes.
# Accomplish this by printing the object when a secret selection, not listed in the menu, is entered as the user's
# choice. (Hint: add the special method __str__() to the Critter class.)
#
|
class Critter(object):
    """A virtual pet"""

    def __init__(self, name, hunger=0, boredom=0):
        self.name = name
        self.hunger = hunger
        self.boredom = boredom

    def __str__(self):
        # Back door for the exercise: expose the exact attribute values.
        # The original version concatenated the int attributes directly
        # (TypeError) and never returned the string, so print(crit) crashed.
        string = "Name: " + self.name + "\n"
        string += "Hunger: " + str(self.hunger) + "\n"
        string += "Boredom: " + str(self.boredom) + "\n"
        return string

    def __pass_time(self):
        # Every interaction makes the critter a bit hungrier and more bored.
        self.hunger += 1
        self.boredom += 1

    @property
    def mood(self):
        """Current mood derived from total unhappiness (hunger + boredom)."""
        unhappiness = self.hunger + self.boredom
        if unhappiness < 5:
            m = "happy"
        elif 5 <= unhappiness <= 10:
            m = "okay"
        elif 11 <= unhappiness <= 15:
            m = "frustrated"
        else:
            m = "mad"
        return m

    def talk(self):
        print("I'm", self.name, "and I feel", self.mood, "now.\n")
        self.__pass_time()

    def eat(self, food=4):
        """Reduce hunger by *food* (never below 0); time still passes."""
        print("Brruppp. Thank you.")
        self.hunger -= food
        if self.hunger < 0:
            self.hunger = 0
        self.__pass_time()

    def play(self, fun=4):
        """Reduce boredom by *fun* (never below 0); time still passes."""
        print("Wheee!")
        self.boredom -= fun
        if self.boredom < 0:
            self.boredom = 0
        self.__pass_time()
def main():
    """Run the interactive critter-caretaker menu loop."""
    crit_name = input("What do you want to name your critter?: ")
    crit = Critter(crit_name)
    choice = None
    while choice != "0":
        print \
        ("""
        Critter Caretaker
        0 - Quit
        1 - Listen to your critter
        2 - Feed your critter
        3 - Play with your critter
        """)
        choice = input("Choice: ")
        print()
        # exit
        if choice == "0":
            print("Good-bye.")
        # listen to your critter
        elif choice == "1":
            crit.talk()
        # feed your critter
        elif choice == "2":
            crit.eat()
        # play with your critter
        elif choice == "3":
            crit.play()
        # secret back door (exercise requirement): deliberately NOT listed
        # in the menu; prints the exact attribute values via Critter.__str__
        elif choice == "!":
            print(crit)
        # some unknown choice
        else:
            print("\nSorry, but", choice, "isn't a valid choice.")
main()
# Keep the console window open until the user presses Enter; the original
# bare string expression was a no-op (the input() call was missing).
input("\n\nPress the enter key to exit.")
|
orbitfp7/nova | nova/compute/utils.py | Python | apache-2.0 | 19,092 | 0.000262 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import itertools
import string
import traceback
import netifaces
from oslo_config import cfg
from oslo_utils import encodeutils
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.i18n import _LW
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log
from nova import rpc
from nova import utils
from nova.virt import driver
# Global oslo.config handle; 'host' comes from nova.netconf and identifies
# this compute host in fault records.
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
# Module-level logger.
LOG = log.getLogger(__name__)
def exception_to_dict(fault):
    """Converts exceptions to a dict for use in notifications.

    :param fault: the exception to convert
    :returns: dict with keys 'exception' (the original object),
              'message' (unicode, at most 255 DB-safe bytes) and
              'code' (HTTP-style status, default 500)
    """
    # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
    code = 500
    if hasattr(fault, "kwargs"):
        code = fault.kwargs.get('code', 500)
    # get the message from the exception that was thrown
    # if that does not exist, use the name of the exception class itself
    try:
        message = fault.format_message()
    # These exception handlers are broad so we don't fail to log the fault
    # just because there is an unexpected error retrieving the message
    except Exception:
        try:
            message = unicode(fault)
        except Exception:
            message = None
    if not message:
        message = fault.__class__.__name__
    # NOTE(dripton) The message field in the database is limited to 255 chars.
    # MySQL silently truncates overly long messages, but PostgreSQL throws an
    # error if we don't truncate it.
    b_message = encodeutils.safe_encode(message)[:255]
    # NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. If
    # truncating a long byte string to 255, the last character may be
    # cut in the middle, so that UnicodeDecodeError will occur when
    # converting it back to unicode.
    decode_ok = False
    while not decode_ok:
        try:
            u_message = encodeutils.safe_decode(b_message)
            decode_ok = True
        except UnicodeDecodeError:
            # Drop one trailing byte at a time until the tail character
            # is whole again.
            b_message = b_message[:-1]
    fault_dict = dict(exception=fault)
    fault_dict["message"] = u_message
    fault_dict["code"] = code
    return fault_dict
def _get_fault_details(exc_info, error_code):
    """Return the formatted traceback for 500-class faults, else an empty
    unicode string. (`unicode` is the Python 2 builtin.)"""
    if not exc_info or error_code != 500:
        return unicode('')
    tb = exc_info[2]
    if not tb:
        return unicode('')
    return unicode(''.join(traceback.format_tb(tb)))
def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
    """Adds the specified fault to the database."""
    fault_obj = objects.InstanceFault(context=context)
    fault_obj.host = CONF.host
    fault_obj.instance_uuid = instance['uuid']
    # exception_to_dict supplies 'message' and 'code' fields.
    fault_obj.update(exception_to_dict(fault))
    # Traceback details are only recorded for 500-class faults.
    fault_obj.details = _get_fault_details(exc_info, fault_obj.code)
    fault_obj.create()
def get_device_name_for_instance(context, instance, bdms, device):
    """Validates (or generates) a device name for instance.

    This method is a wrapper for get_next_device_name that gets the list
    of used devices and the root device from a block device mapping.
    """
    mappings = block_device.instance_block_mapping(instance, bdms)
    used_names = mappings.values()
    root_name = mappings['root']
    return get_next_device_name(instance, used_names, root_name, device)
def default_device_names_for_instance(instance, root_device_name,
                                      *block_device_lists):
    """Generate missing device names for an instance."""
    used_names = [bdm.device_name
                  for bdm in itertools.chain(*block_device_lists)
                  if bdm.device_name]
    if root_device_name not in used_names:
        used_names.append(root_device_name)
    for bdm in itertools.chain(*block_device_lists):
        # Keep an existing name, otherwise pick the next free one; every
        # mapping is (re)saved either way, matching previous behaviour.
        name = bdm.device_name or get_next_device_name(instance, used_names,
                                                       root_device_name)
        bdm.device_name = name
        bdm.save()
        used_names.append(name)
def get_next_device_name(instance, device_name_list,
                         root_device_name=None, device=None):
    """Validates (or generates) a device name for instance.
    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the root_device_name (if provided) and
    the list of used devices to find valid device names. If the device
    name is valid but applicable to a different backend (for example
    /dev/vdc is specified but the backend uses /dev/xvdc), the device
    name will be converted to the appropriate format.

    :raises exception.InvalidDevicePath: if device or root_device_name
        cannot be parsed into a (prefix, letter) pair
    :raises exception.DevicePathInUse: if the requested letter is taken
    """
    is_xen = driver.compute_driver_matches('xenapi.XenAPIDriver')
    req_prefix = None
    req_letter = None
    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)
    if not root_device_name:
        root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
    # Derive the backend's device prefix (e.g. '/dev/vd') from the root.
    try:
        prefix = block_device.match_device(
            block_device.prepend_dev(root_device_name))[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=root_device_name)
    # NOTE(vish): remove this when xenapi is setting default_root_device
    if is_xen:
        prefix = '/dev/xvd'
    if req_prefix != prefix:
        LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
                  {'prefix': prefix, 'req_prefix': req_prefix})
    # Collect the drive letters already taken by existing mappings.
    used_letters = set()
    for device_path in device_name_list:
        letter = block_device.get_device_letter(device_path)
        used_letters.add(letter)
    # NOTE(vish): remove this when xenapi is properly setting
    # default_ephemeral_device and default_swap_device
    if is_xen:
        flavor = instance.get_flavor()
        if flavor.ephemeral_gb:
            used_letters.add('b')
        if flavor.swap:
            used_letters.add('c')
    if not req_letter:
        req_letter = _get_unused_letter(used_letters)
    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)
    return prefix + req_letter
def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
letters = list(all_letters - used_letters)
# NOTE(vish): prepend ` so all shorter sequences sort first
letters.sort(key=lambda x: x.rjust(2, '`'))
return letters[0]
def get_image_metadata(context, image_api, image_id_or_uri, instance):
image_system_meta = {}
# In case of boot from volume, image_id_or_uri may be None or ''
if image_id_or_uri is not None and image_id_or_uri != '':
# If the base image is still available, get its metadata
try:
image = image_api.get(context, image_id_or_uri)
except (exception.ImageNotAuthorized,
exception.ImageNotFound,
exception.Invalid) as e:
LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
{"image_id": image_id_or_uri, "error": e},
instance=instance)
else:
flavor = instance.get_flavor()
image_system_meta = utils.get_system_metadata_from_image(image,
|
singulared/aiohttp | tests/test_py35/test_cbv35.py | Python | apache-2.0 | 351 | 0 | from unittest import moc | k
from aiohttp import web
from aiohttp.web_urldispatcher import View
async def test_render_ok():
    """A class-based view's GET handler response is returned unchanged."""
    expected = web.Response(text='OK')

    class MyView(View):
        async def get(self):
            return expected

    fake_request = mock.Mock()
    fake_request._method = 'GET'
    actual = await MyView(fake_request)
    assert expected is actual
|
LunarLanding/agipibi | python/example_tektronix_2432.py | Python | gpl-3.0 | 6,565 | 0.003809 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012,2013 Thibault VINCENT <tibal@reloaded.fr>
#
# This file is part of Agipibi.
#
# Agipibi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Agipibi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Agipibi. If not, see <http://www.gnu.org/licenses/>.
#
'''
Example for the Tektronix 2432 oscilloscope and other models.
Instrument must be set to "T/L" (Talk/Listen) mode, "EOI" message termination,
and have the GPIB address bellow (not 0 which by default collides with the
Arduino controller).
This code shows function arguments having default values, and that should
not be specified most of the time. Please refer to the classes source.
'''
from agipibi import Agipibi, AgipibiError
# GPIB addresses: 0 for the Arduino controller-in-charge, 1 for the scope.
CIC_ADDRESS=0x00
SCOPE_ADDRESS=0x01
# Open serial port with the Arduino.
ctl = Agipibi(device='/dev/ttyUSB0', debug=False)
# Test communication and µC responsiveness.
if ctl.interface_ping():
    print("Arduino is alive :-)")
else:
    print("No reponse to ping, you should reset the board :-(")
# Initialize bus lines and become Controller-In-Charge.
# All lines will be put to HiZ except for NRFD asserted because we gave no
# argument to the function, so we pause the Talker while setting up.
# IFC is pulsed to gain CIC status.
ctl.gpib_init(address=0x00, controller=True)
# Activate 'remote' mode of instruments (not required with this scope).
# It asserts REN until disabled with False or gpib_init() is called again.
ctl.gpib_remote(True)
# Clear all instruments on the bus.
# Sends DCL when bus=True, reaching all devices. But it would use SDC
# if bus=True and Listeners are set.
ctl.gpib_clear(bus=True)
# Two functions to set direction of the communication.
def cic_to_scope():
    """Point the bus so the controller talks and the scope listens."""
    # Unaddress everyone first, then assign Talker/Listener roles.
    ctl.gpib_untalk()
    ctl.gpib_unlisten()
    ctl.gpib_talker(CIC_ADDRESS)
    ctl.gpib_listener(SCOPE_ADDRESS)
def scope_to_cic():
    """Point the bus so that the scope talks and we (CIC) listen."""
    # Unaddress everyone.
    ctl.gpib_untalk()
    ctl.gpib_unlisten()
    # Set scope as the Talker.
    ctl.gpib_talker(SCOPE_ADDRESS)
    # We'll listen for data.
    ctl.gpib_listener(CIC_ADDRESS)
#
# Now let's test some commands
#
# Query pattern: address ourselves as Talker, write the command, then
# reverse the direction and read the instrument's reply.
print('### get instrument identification')
cic_to_scope()
ctl.gpib_write('ID?')
scope_to_cic()
print(ctl.gpib_read())
# Output:
# ID TEK/2432,V81.1,"29-FEB-88 V1.52 /1.4",TVTRIG
print('### print a two lines message on the scope screen')
cic_to_scope()
ctl.gpib_write('MESS 1:"Hello world",2:"Arduino"')
# On screen:
# ARDUINO
# HELLO WORLD
print('### read channel 1 coupling and settings')
cic_to_scope()
ctl.gpib_write('CH1?')
scope_to_cic()
print(ctl.gpib_read())
# Output:
# CH1 VOLTS:1E-1,VARIABLE:0,POSITION:-1.00E-2,COUPLING:GND,FIFTY:OFF,INVERT:OFF
print('### set channel 1 to 0.5 V/div')
cic_to_scope()
ctl.gpib_write('CH1 VOLTS:0.5')
print('### turn CH1 off and CH2 on')
cic_to_scope()
ctl.gpib_write('VMO CH1:OFF,CH2:ON')
print('### acquire the ascii-encoded waveform of CH2 with preamble')
cic_to_scope()
# Select ASCII encoding and CH2 as data source, then request the waveform.
ctl.gpib_write('DAT ENC:ASC,SOU:CH2')
ctl.gpib_write('WAV?')
scope_to_cic()
print(ctl.gpib_read())
# Output:
# WFMPRE WFID:"CH2 DC 200mV 500us NORMAL",NR.PT:1024,PT.OFF:512,PT.FMT:Y,XUNIT:
# SEC,XINCR:1.000E-5,YMULT:8.000E-3,YOFF:-2.500E-1,YUNIT:V,BN.FMT:RI,ENCDG:ASCI
# I;CURVE 50,50, | 49,49,49,50,50,49,50,49,49,49,0,0,0,0,-1,-1,-2,0,0,0,0,0,0,-2,0
# ,0,0,-1,0,0,-1,-1,0,0,0,0,0,-1,0,-1,0,0,0,0,0,-1,0,0,0,1,-1,0,0,0,0,0,0,0,0,0
# ,0,-1,0,0,0,0,0,-3,-1,-1,0,0,0,-1,0,-1,0,0,-1,0,0,0,-1,0,0,-1,0,0,-1,0,-1,-1,
# 0,0,1,0,0,-1,-2,0,0,0,0,1,-1,0,0,0,-1,-1,48,50,49,50,49,49,50,49,50,50,49,49,
# 49,49,48,49,50,48,49,49,50,49,50,49,50,49,49,49,49,50,48,50,49,49,49,49,50,49
# ,49,49,50,50,48,49,49,50,48,50,49,49,49,49,49,50,49,49,50,49,49,50,50,49,50,4
# 8,49, | 49,49,50,50,49,49,49,49,50,49,50,50,49,49,49,49,50,50,49,49,50,49,49,50,
# 49,49,49,50,49,49,50,50,49,49,50,0,0,0,0,-1,0,-1,-2,0,0,-1,0,0,0,-2,0,0,0,-1,
# 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,0,-1,0,0,0,1,0,0,0,0,-1,0,-1,0,-1,0,0,
# -2,0,0,-1,0,0,0,0,-1,0,0,0,0,-1,0,-1,0,-1,0,0,-1,0,0,-2,0,0,0,0,0,0,0,0,0,-1,
# 0,-1,0,-1,0,0,0,0,-1,-1,0,49,49,50,50,50,51,49,49,49,50,50,49,49,50,50,50,49,
# 50,48,50,49,49,49,50,51,49,50,49,48,48,50,48,49,49,49,49,49,48,49,50,49,50,50
# ,49,50,50,49,50,49,49,50,49,50,50,49,51,49,49,50,49,49,49,49,49,48,48,49,49,4
# 7,49,49,50,49,49,49,49,49,50,49,49,49,49,50,49,49,50,48,50,49,49,49,50,49,50,
# 50,49,49,49,49,49,0,-2,0,0,0,0,-1,0,0,0,0,0,-1,0,-1,0,-1,0,-2,0,0,0,-1,0,0,-1
# ,-1,0,0,0,0,0,0,-1,-1,0,0,0,0,0,-1,0,0,0,0,0,1,-1,0,0,-1,0,0,0,0,0,0,0,0,-1,0
# ,0,0,0,0,0,-1,-1,1,0,0,0,-2,-1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,-1,0,
# 0,1,0,0,48,49,50,50,49,50,49,49,50,49,49,49,50,49,50,49,49,49,48,49,49,49,49,
# 50,50,49,49,50,49,49,50,49,49,49,49,49,49,49,50,49,49,49,49,49,50,50,50,49,49
# ,48,49,49,49,49,49,50,49,50,50,50,49,49,49,49,49,49,49,49,49,49,49,49,50,49,5
# 0,49,50,49,49,49,49,50,50,49,50,50,49,49,51,49,49,50,49,49,49,49,49,49,49,50,
# 0,-2,0,-1,0,0,0,0,-1,0,0,0,0,-2,-1,0,-1,0,0,0,0,0,0,0,0,-1,0,0,-1,-2,-1,0,1,0
# ,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,-1,0,0,0,-2,0,-1,0,0,-1,0,1,0,0,0,0,0,-1,1,0,0,
# 0,0,0,0,0,0,0,0,-1,0,0,0,0,0,-1,-1,0,0,0,-1,0,-1,0,0,0,0,0,0,0,0,-1,49,49,50,
# 50,49,49,49,50,50,49,50,50,49,49,49,49,49,49,50,49,50,50,49,49,49,48,50,49,50
# ,50,49,49,48,49,49,49,49,49,48,49,50,50,49,49,49,49,50,49,49,50,49,49,49,49,4
# 9,50,49,49,49,50,49,50,50,51,50,49,49,49,50,49,49,49,49,50,50,49,49,49,49,48,
# 50,50,50,49,48,49,50,49,51,49,49,48,50,49,49,49,49,49,49,49,0,0,-1,0,-2,-1,0,
# 0,-1,-1,-1,0,0,-1,-1,0,0,0,-1,0,0,0,0,0,-1,0,0,0,0,-1,-1,0,0,0,-1,0,-1,0,0,0,
# 0,0,0,0,0,0,-1,-1,-1,0,0,0,0,-1,0,0,0,0,0,0,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,
# 0,-1,-1,0,-1,-2,0,0,0,-1,0,0,0,-1,0,0,-1,-1,-1,-1,0,0,0,0,-1,49,50,50,49,49,5
# 0,49,51,49,49,49,50,50,50,48,49,49,50,49,50,49,50,49,49,49,49,49,50,49,48,48,
# 49,49,50,49,49,49,50,49,50,49,48,50,49,49,50,50,49,50,49,49,50,49,51,50,49,50
# ,49,50,49,49,50,50,49,49,49,49,49,49,49,50,50,48,50,50,50,49,49,49,49,48,49,4
# 9,50,49,49,50,49,49,49,48,49,49,50,49,50,49,49,50,49,0,-2,0,-1,-1,0,0,0,0,-2,
# 0,0
|
openai/cleverhans | scripts/make_confidence_report_bundle_examples.py | Python | mit | 4,354 | 0.008268 | #!/usr/bin/env python3
"""
make_confidence_report_bundle_examples.py
Usage:
make_confidence_report_bundle_examples.py model.joblib a.npy
make_confidence_report_bundle_examples.py model.joblib a.npy b.npy c.npy
where model.joblib is a file created by cleverhans.serial.save containing
a picklable cleverhans.model.Model instance and each examples_i.npy is
a saved numpy array containing adversarial examples for a whole dataset.
Usually example_i.npy is the output of make_confidence_report.py or
make_confidence_report_bundled.py.
This script uses max-confidence attack bundling
( https://openreview.net/forum?id=H1g0piA9tQ )
to combine adversarial example datasets that were created earlier.
It will save a ConfidenceReport to to model_bundled_examples_report.joblib.
The report can be later loaded by another
script using cleverhans.serial.load.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import warnings
import numpy as np
import tensorflow as tf
from cleverhans.utils_tf import silence
# We need to disable pylint's complaints about import order because `silence`
# works only if it is called before the other imports.
# pylint: disable=C0413
silence()
from cleverhans.attack_bundling import bundle_examples_with_goal, MaxConfidence
from cleverhans import serial
from cleverhans.compat import flags
from cleverhans.confidence_report import BATCH_SIZE
from cleverhans.confidence_report import TRAIN_START, TRAIN_END
from cleverhans.confidence_report im | port TEST_START, TEST_END
from cleverhans.confidence_report import WHICH_SET
FLAGS = flags.FLAGS
def main(argv=None):
  """
  Make a confidence report and save it to disk.
  """
  # NOTE(review): the default argv=None would crash the assert below;
  # in practice tf.app.run() passes the parsed argv — TODO confirm.
  # Expected usage: argv = [script, model.joblib, adv_x_1.npy, ...]
  assert len(argv) >= 3
  _name_of_script = argv[0]
  model_filepath = argv[1]
  adv_x_filepaths = argv[2:]
  sess = tf.Session()
  with sess.as_default():
    model = serial.load(model_filepath)
  # Rebuild the dataset the model was trained on, restricted to the
  # example ranges selected via command-line flags.
  factory = model.dataset_factory
  factory.kwargs['train_start'] = FLAGS.train_start
  factory.kwargs['train_end'] = FLAGS.train_end
  factory.kwargs['test_start'] = FLAGS.test_start
  factory.kwargs['test_end'] = FLAGS.test_end
  dataset = factory()
  adv_x_list = [np.load(filepath) for filepath in adv_x_filepaths]
  x, y = dataset.get_set(FLAGS.which_set)
  for adv_x in adv_x_list:
    assert adv_x.shape == x.shape, (adv_x.shape, x.shape)
    # Make sure these were made for the right dataset with right scaling
    # arguments, etc.
    assert adv_x.min() >= 0. - dataset.kwargs['center'] * dataset.max_val
    assert adv_x.max() <= dataset.max_val
    data_range = dataset.max_val * (1. + dataset.kwargs['center'])
    if adv_x.max() - adv_x.min() <= .8 * data_range:
      warnings.warn("Something is weird. Your adversarial examples use "
                    "less than 80% of the data range."
                    "This might mean you generated them for a model with "
                    "inputs in [0, 1] and are now using them for a model "
                    "with inputs in [0, 255] or something like that. "
                    "Or it could be OK if you're evaluating on a very small "
                    "batch.")
  # Default report path: "<model>_bundled_examples_report.joblib".
  report_path = FLAGS.report_path
  if report_path is None:
    suffix = "_bundled_examples_report.joblib"
    assert model_filepath.endswith('.joblib')
    report_path = model_filepath[:-len('.joblib')] + suffix
  # Bundle all attack datasets with the max-confidence goal and write
  # the resulting ConfidenceReport to report_path.
  goal = MaxConfidence()
  bundle_examples_with_goal(sess, model, adv_x_list, y, goal,
                            report_path, batch_size=FLAGS.batch_size)
if __name__ == '__main__':
  # Command-line flags; defaults come from cleverhans.confidence_report.
  flags.DEFINE_string('report_path', None, 'Report path')
  flags.DEFINE_integer('train_start', TRAIN_START, 'Starting point (inclusive)'
                       'of range of train examples to use')
  flags.DEFINE_integer('train_end', TRAIN_END, 'Ending point (non-inclusive) '
                       'of range of train examples to use')
  flags.DEFINE_integer('test_start', TEST_START, 'Starting point '
                       '(inclusive) of range of test examples to use')
  flags.DEFINE_integer('test_end', TEST_END, 'End point (non-inclusive) of '
                       'range of test examples to use')
  flags.DEFINE_string('which_set', WHICH_SET, '"train" or "test"')
  flags.DEFINE_integer('batch_size', BATCH_SIZE, 'batch size')
  # tf.app.run() parses flags and calls main(argv).
  tf.app.run()
|
linkcheck/linkchecker | tests/checker/telnetserver.py | Python | gpl-2.0 | 3,383 | 0.000887 | # Copyright (C) 2012 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Define http test support classes for LinkChecker tests.
"""
import time
import threading
import telnetlib
import miniboa
from . import LinkCheckTest
TIMEOUT = 5
class TelnetServerTest(LinkCheckTest):
    """Test case that runs a throwaway telnet server for the duration
    of each test."""

    def __init__(self, methodName="runTest"):
        """Initialize host/port defaults and the thread control state."""
        super().__init__(methodName=methodName)
        self.host = "localhost"
        self.port = None
        self.stop_event = threading.Event()
        self.server_thread = None

    def get_url(self, user=None, password=None):
        """Build a telnet:// URL for the test server, optionally with
        user or user:password credentials."""
        if user is None:
            netloc = self.host
        elif password is None:
            netloc = "%s@%s" % (user, self.host)
        else:
            netloc = "%s:%s@%s" % (user, password, self.host)
        return "telnet://%s:%d" % (netloc, self.port)

    def setUp(self):
        """Launch the telnet server thread and record its bound port."""
        self.port, self.server_thread = start_server(self.host, 0, self.stop_event)
        self.assertFalse(self.port is None)

    def tearDown(self):
        """Signal the server loop to stop and wait for the thread to exit."""
        self.stop_event.set()
        if self.server_thread is not None:
            self.server_thread.join(10)
            assert not self.server_thread.is_alive()
def start_server(host, port, stop_event):
    """Start a miniboa telnet server in a background thread.

    Returns ``(port, thread)`` where *port* is the actually bound port
    (useful when called with port 0) and *thread* runs serve_forever().
    """
    # Instantiate Telnet server class and listen to host:port
    clients = []
    def on_connect(client):
        # Track every connection so serve_forever() can poll its commands.
        clients.append(client)
        client.send("Telnet test server\nlogin: ")
    server = miniboa.TelnetServer(port=port, address=host, on_connect=on_connect)
    # Ask the socket which port it really got (relevant when port was 0).
    port = server.server_socket.getsockname()[1]
    t = threading.Thread(None, serve_forever, args=(server, clients, stop_event))
    t.start()
    # wait for server to start up
    # NOTE(review): the probe Telnet client is never close()d, and after
    # 5 failed tries the port is returned without signalling the failure.
    tries = 0
    while tries < 5:
        tries += 1
        try:
            client = telnetlib.Telnet(timeout=TIMEOUT)
            client.open(host, port)
            client.write(b"exit\n")
            break
        except Exception:
            time.sleep(0.5)
    return port, t
def serve_forever(serve | r, clients, stop_event):
"""Run poll loop for server."""
while True:
if stop_event.is_set():
return
server.poll()
for client in clients:
if client.active and client.cmd_ready:
handle_cmd(client)
def handle_cmd(client):
    """Consume one pending command from *client*: "exit" deactivates the
    connection, anything else gets a password prompt."""
    command = client.get_command().lower()
    if command != "exit":
        client.send("Password: ")
    else:
        client.active = False
|
benctamas/zerorpc-logging | logstream_test.py | Python | apache-2.0 | 3,399 | 0.006472 | import zerorpc
import gevent.queue
import logging
import sys
logging.basicConfig()
# root logger
logger = logging.getLogger()
# set the mimimum level for root logger so it will be possible for a client
# to subscribe and receive logs for any log level
logger.setLevel(0)
class QueueingLogHandler(logging.Handler):
    """Logging handler that forwards every formatted record to a queue.

    The queue only needs a ``put_nowait`` method; ``None`` is enqueued by
    close() as an end-of-stream marker for consumers.
    """
    def __init__(self, queue, level, formatter):
        super(QueueingLogHandler, self).__init__()
        self._queue = queue
        self.setLevel(level)
        self.setFormatter(formatter)

    def emit(self, record):
        # Format and hand off without blocking the emitting greenlet.
        self._queue.put_nowait(self.format(record))

    def close(self):
        super(QueueingLogHandler, self).close()
        # Sentinel telling consumers that no more messages will arrive.
        self._queue.put_nowait(None)

    @property
    def emitted(self):
        """The underlying queue of formatted messages."""
        return self._queue
class TestService(object):
    """zerorpc service that can emit test logs and stream its own logs
    back to subscribed clients. (Python 2 ``except X, e`` syntax.)"""
    # Handler class used to capture emitted records; override in tests.
    _HANDLER_CLASS = QueueingLogHandler
    _DEFAULT_FORMAT = '%(name)s - %(levelname)s - %(asctime)s - %(message)s'
    logger = logging.getLogger("service")
    def __init__(self):
        # Handlers created by log_stream(), kept so they can be closed.
        self._logging_handlers = set()
    def test(self, logger_name, logger_level, message):
        """Emit *message* on *logger_name* at *logger_level* (e.g. 'info')."""
        logger = logging.getLogger(logger_name)
        getattr(logger, logger_level.lower())(message)
    def available_loggers(self):
        """ List of initialized loggers """
        return logging.getLogger().manager.loggerDict.keys()
    def close_log_streams(self):
        """ Closes all log_stream streams. """
        while self._logging_handlers:
            self._logging_handlers.pop().close()
    @zerorpc.stream
    def log_stream(self, logger_name, level_name, format_str):
        """ Attaches a log handler to the specified logger and sends emitted logs
        back as stream.
        """
        if logger_name != "" and logger_name not in self.available_loggers():
            raise ValueError("logger {0} is not available".format(logger_name))
        level_name_upper = level_name.upper() if level_name else "NOTSET"
        try:
            level = getattr(logging, level_name_upper)
        except AttributeError, e:
            raise AttributeError("log level {0} is not available".format(level_name_upper))
        # Each subscriber gets its own queue fed by a dedicated handler.
        q = gevent.queue.Queue()
        fmt = format_str if format_str.strip() else self._DEFAULT_FORMAT
        logger = logging.getLogger(logger_name)
        formatter = logging.Formatter(fmt)
        handler = self._HANDLER_CLASS(q, level, formatter)
        logger.addHandler(handler)
        self._logging_handlers.add(handler)
        self.logger.debug("new subscriber for {0}/{1}".format(logger_name or "root", level_name_upper))
        try:
            # None is the end-of-stream sentinel pushed by handler.close().
            for msg in handler.emitted:
                if msg is None:
                    return
                yield msg
        finally:
            self._logging_handlers.discard(handler)
            handler.close()
            self.logger.debug("subscription finished for {0}/{1}".format(logger_name or "root", level_name_upper))
if __name__ == "__main__":
service = TestService()
server = zerorpc.Server(service)
server.bind(sys.argv[1])
logger.warning("starting service")
try:
server.run()
except BaseException, e:
logger.error(str(e))
finally:
logger.warning("shutting down")
|
deeponion/deeponion | contrib/seeds/makeseeds.py | Python | mit | 8,058 | 0.003723 | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
# Maximum number of seed entries to emit.
NSEEDS=512
# At most this many seeds may come from a single autonomous system.
MAX_SEEDS_PER_ASN=2
# Nodes must report at least this block height to qualify.
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
    SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
    r"^/Satoshi:("
    r"0.14.(0|1|2|3|99)|"
    r"0.15.(0|1|2|99)|"
    r"0.16.(0|1|2|3|99)|"
    r"0.17.(0|0.1|1|2|99)|"
    r"0.18.(0|1|99)|"
    r"0.19.(0|1|99)|"
    r"0.20.(0|1|99)|"
    r"0.21.99"
    r")")
def parseline(line):
    """Parse one seeder dump line into a stats dict, or return None when
    the line is malformed or describes an unusable host.

    Whitespace-separated fields used here (0-based): 0 address:port,
    1 "good" flag, 2 last-success timestamp, 7 30-day uptime percent
    (with trailing '%'), 8 block height, 9 service flags (hex),
    10 protocol version, 11 quoted user agent.
    """
    sline = line.split()
    # Field 11 (user agent) is read below, so 12 fields are required.
    # (The previous `< 11` check allowed an IndexError on 11-field lines.)
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']:  # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr  # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0, 4):
            if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
                return None
            ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip hosts the seeder marked as not "good". split() yields strings,
    # so compare numerically (the previous `sline[1] == 0` was always False).
    if int(sline[1]) == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (strip the surrounding quotes).
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def dedup(ips):
    """Drop duplicates, keeping one record per (address, port) pair.

    Later entries win over earlier ones with the same key.
    """
    unique = {}
    for entry in ips:
        unique[(entry['ip'], entry['port'])] = entry
    return list(unique.values())
def filtermultiport(ips):
    """Keep only hosts whose sortkey appears exactly once (i.e. drop hosts
    that run nodes on several ports)."""
    by_host = collections.defaultdict(list)
    for entry in ips:
        by_host[entry['sortkey']].append(entry)
    return [entries[0] for entries in by_host.values() if len(entries) == 1]
def lookup_asn(net, ip):
    '''
    Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
    if it could not be found.
    '''
    try:
        if net == 'ipv4':
            ipaddr = ip
            prefix = '.origin'
        else: # http://www.team-cymru.com/IP-ASN-mapping.html
            # IPv6 queries use nibble format: each of the first 4 groups
            # is zero-padded and split into dot-separated hex digits.
            res = str() # 2001:4860:b002:23::68
            for nb in ip.split(':')[:4]: # pick the first 4 nibbles
                for c in nb.zfill(4): # right padded with '0'
                    res += c + '.' # 2001 4860 b002 0023
            ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
            prefix = '.origin6'
        # DNS TXT answer looks like "13335 | 1.1.1.0/24 | ..."; take the
        # first token (the AS number) of the first answer.
        asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
            reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
            'TXT').response.answer][0].split('\"')[1].split(' ')[0])
        return asn
    except Exception:
        sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
        return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
    """Limit results to *max_per_asn* entries per autonomous system and
    *max_per_net* entries per network type. Onion hosts have no ASN and
    are only capped per network."""
    # Sift out ips by type
    ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv46 by ASN, and limit to max_per_net per network
    result = []
    net_count = collections.defaultdict(int)
    asn_count = collections.defaultdict(int)
    for ip in ips_ipv46:
        if net_count[ip['net']] == max_per_net:
            continue
        # One DNS lookup per candidate; skipped hosts still cost a query.
        asn = lookup_asn(ip['net'], ip['ip'])
        if asn is None or asn_count[asn] == max_per_asn:
            continue
        asn_count[asn] += 1
        net_count[ip['net']] += 1
        result.append(ip)
    # Add back Onions (up to max_per_net)
    result.extend(ips_onion[0:max_per_net])
    return result
def ip_stats(ips):
    """Render the ipv4/ipv6/onion entry counts as fixed-width columns."""
    counts = collections.Counter(
        entry['net'] for entry in ips if entry is not None
    )
    return '%6d %6d %6d' % (counts['ipv4'], counts['ipv6'], counts['onion'])
def main():
    """Read seeder dump lines from stdin, apply the filter pipeline, and
    print the selected seeds to stdout (progress stats go to stderr)."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    print('\x1b[7m  IPv4   IPv6  Onion Pass \x1b[0m', file=sys.stderr)
    print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
    # Skip entries with invalid address.
    ips = [ip for ip in ips if ip is not None]
    print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
    # Skip duplicates (in case multiple seeds files were concatenated)
    ips = dedup(ips)
    print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
    # Require at least 50% 30-day uptime for clearnet, 10% for onion.
    req_uptime = {
        'ipv4': 50,
        'ipv6': 50,
        'onion': 10,
    }
    ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
    print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
    main()
|
dontnod/weblate | weblate/fonts/admin.py | Python | gpl-3.0 | 1,510 | 0 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the | implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.contrib import admin
from weblate.fonts.models import FontOverride
from weblate.wladmin.models import WeblateModelAdmin
class FontAdmin(WeblateModelAdmin):
    """Admin for fonts: searchable by family/style, filterable by project."""
    list_display = ["family", "style", "project", "user"]
    search_fields = ["family", "style"]
    list_filter = [("project", admin.RelatedOnlyFieldListFilter)]
    ordering = ["family", "style"]
class InlineFontOverrideAdmin(admin.TabularInline):
    """Tabular inline so font overrides are edited on the group page."""
    model = FontOverride
    extra = 0
class FontGroupAdmin(WeblateModelAdmin):
    """Admin for font groups with their overrides edited inline."""
    list_display = ["name", "font", "project"]
    search_fields = ["name", "font__family"]
    list_filter = [("project", admin.RelatedOnlyFieldListFilter)]
    ordering = ["name"]
    inlines = [InlineFontOverrideAdmin]
|
yanndavin/judge_offline | setup.py | Python | bsd-2-clause | 532 | 0.00188 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Long description and license text come from the repository's own files.
with open('README.rst') as f:
    readme = f.read()

# Renamed from `license` to avoid shadowing the builtin of the same name.
with open('LICENSE') as f:
    license_text = f.read()

setup(
    name='judge_offline',
    version='0.1.0',
    description='Provides personal judge similar hackrank or judge online',
    long_description=readme,
    author='Yann Davin',
    author_email='yann.davin@gmail.com',
    url='https://github.com/yanndavin/judge_offline',
    license=license_text,
    packages=find_packages(exclude=('samples', 'docs'))
)
beregond/jsonmodels | tasks.py | Python | bsd-3-clause | 251 | 0 | """Tasks for invoke."""
from invoke import task, run
@task
def test():
    """Run the quick test suite."""
    run('./setup.py test --quick')
@task
def fulltest():
    """Run the complete test suite."""
    run('./setup.py test')
@task
def coverage():
    """Run the tests quietly, then build an HTML coverage report."""
    run('./setup.py test', hide='stdout')
    run('coverage html')
|
kelvinwong-ca/django-select-multiple-field | test_projects/django14/suthern/models.py | Python | bsd-3-clause | 1,575 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import (
force_text, python_2_unicode_compatible)
from django.utils.translation import ugettext_lazy as _
from select_multiple_field.models import SelectMultipleField
@python_2_unicode_compatible
class ChickenBalls(models.Model):
    """ChickenBalls is used for South migration testing"""
    # Single-character stored values for the flavour choices.
    SUICIDE = 's'
    HOT = 'h'
    HOME_STYLE = 'H'
    CAJUN = 'c'
    JERK = 'j'
    GATOR = 'g'
    # Grouped choices: each entry is (group label, ((value, label), ...)).
    FLAVOUR_CHOICES = (
        (_('Hot & Spicy'), (
            (SUICIDE, _('Suicide hot')),
            (HOT, _('Hot hot sauce')),
            (CAJUN, _('Cajun sauce')),
            (JERK, _('Jerk sauce')))),
        (_('Traditional'), (
            (HOME_STYLE, _('Homestyle')),
            (GATOR, _('Gator flavour')))),
    )
    # Up to 2 flavours; max_length=5 bounds the stored comma-joined string.
    flavour = SelectMultipleField(
        blank=True,
        include_blank=False,
        max_length=5,
        max_choices=2,
        choices=FLAVOUR_CHOICES
    )
    # Single-character stored values for the dip choices.
    RANCH = 'r'
    HONEY_MUSTARD = 'h'
    BBQ = 'b'
    DIP_CHOICES = (
        (RANCH, _('Ranch')),
        (HONEY_MUSTARD, _('Honey mustard')),
        (BBQ, _('BBQ')),
    )
    # Up to 3 dips; defaults to the empty selection.
    dips = SelectMultipleField(
        blank=True,
        default='',
        include_blank=False,
        max_length=6,
        max_choices=3,
        choices=DIP_CHOICES
    )
    def __str__(self):
        return "pk=%s" % force_text(self.pk)
    def get_absolute_url(self):
        return reverse('ftw:detail', args=[self.pk])
mirkobrombin/Bottles | src/views/bottle_preferences.py | Python | gpl-3.0 | 30,960 | 0.001227 | # bottle_preferences.py
#
# Copyright 2020 brombinmirko <send@mirko.pm>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from gettext import gettext as _
from gi.repository import Gtk
from bottles.utils import RunAsync # pyright: reportMissingImports=false
from bottles.backend.runner import Runner, gamemode_available, gamescope_available
from bottles.backend.managers.runtime import RuntimeManager
from bottles.backend.utils.manager import ManagerUtils
from bottles.dialogs.envvars import EnvVarsDialog
from bottles.dialogs.drives import DrivesDialog
from bottles.dialogs.dlloverrides import DLLOverridesDialog
from bottles.dialogs.gamescope import GamescopeDialog
from bottles.backend.wine.reg import Reg
from bottles.backend.wine.regkeys import RegKeys
@Gtk.Template(resource_path='/com/usebottles/bottles/details-preferences.ui')
class PreferencesView(Gtk.ScrolledWindow):
__gtype_name__ = 'DetailsPreferences'
# region Widgets
btn_manage_runners = Gtk.Template.Child()
btn_manage_dxvk = Gtk.Template.Child()
btn_manage_vkd3d = Gtk.Template.Child()
btn_manage_nvapi = Gtk.Template.Child()
btn_manage_gamescope = Gtk.Template.Child()
btn_cwd = Gtk.Template.Child()
btn_environment_variables = Gtk.Template.Child()
btn_drives = Gtk.Template.Child()
btn_overrides = Gtk.Template.Child()
switch_dxvk = Gtk.Template.Child()
switch_dxvk_hud = Gtk.Template.Child()
switch_vkd3d = Gtk.Template.Child()
switch_nvapi = Gtk.Template.Child()
switch_gamemode = Gtk.Template.Child()
switch_gamescope = Gtk.Template.Child()
switch_aco = Gtk.Template.Child()
switch_fsr = Gtk.Template.Child()
switch_discrete = Gtk.Template.Child()
switch_virt_desktop = Gtk.Template.Child()
switch_pulse_latency = Gtk.Template.Child()
switch_fixme = Gtk.Template.Child()
switch_runtime = Gtk.Template.Child()
switch_mouse_capture = Gtk.Template.Child()
switch_take_focus = Gtk.Template.Child()
toggle_sync = Gtk.Template.Child()
toggle_esync = Gtk.Template.Child()
toggle_fsync = Gtk.Template.Child()
toggle_futex2 = Gtk.Template.Child()
combo_fsr = Gtk.Template.Child()
combo_virt_res = Gtk.Template.Child()
combo_dpi = Gtk.Template.Child()
combo_runner = Gtk.Template.Child()
combo_dxvk = Gtk.Template.Child()
combo_vkd3d = Gtk.Template.Child()
combo_nvapi = Gtk.Template.Child()
combo_windows = Gtk.Template.Child()
row_cwd = Gtk.Template.Child()
action_runtime = Gtk.Template.Child()
spinner_dxvk = Gtk.Template.Child()
spinner_dxvkbool = Gtk.Template.Child()
spinner_vkd3d = Gtk.Template.C | hild()
spinner_vkd3dbool = Gtk.Template.Child()
spinner_nvapi = Gtk.Template.Child()
spinner_nvapibool = Gtk.Template.Child()
spinner_runner = Gtk.Template.Child()
spinner_win = Gtk.Template.Child()
# endregion
    def __init__(self, window, config, **kwargs):
        """Wire every widget of the bottle preferences page to its handler."""
        super().__init__(**kwargs)
        # common variables and references
        self.window = window
        self.manager = window.manager
        self.config = config
        # Buttons opening sub-dialogs or other views.
        self.btn_overrides.connect("clicked", self.__show_dll_overrides_view)
        self.btn_manage_runners.connect("clicked", self.window.show_prefs_view)
        self.btn_manage_dxvk.connect("clicked", self.window.show_prefs_view)
        self.btn_manage_vkd3d.connect("clicked", self.window.show_prefs_view)
        self.btn_manage_nvapi.connect("clicked", self.window.show_prefs_view)
        self.btn_manage_gamescope.connect("clicked", self.__show_gamescope_settings)
        self.btn_cwd.connect("clicked", self.choose_cwd)
        self.btn_drives.connect("clicked", self.__show_drives)
        self.btn_environment_variables.connect("clicked", self.__show_environment_variables)
        # Wine synchronization method toggles (mutually related).
        self.toggle_sync.connect('toggled', self.__set_wine_sync)
        self.toggle_esync.connect('toggled', self.__set_esync)
        self.toggle_fsync.connect('toggled', self.__set_fsync)
        self.toggle_futex2.connect('toggled', self.__set_futex2)
        # Feature on/off switches.
        self.switch_dxvk.connect('state-set', self.__toggle_dxvk)
        self.switch_dxvk_hud.connect('state-set', self.__toggle_dxvk_hud)
        self.switch_vkd3d.connect('state-set', self.__toggle_vkd3d)
        self.switch_nvapi.connect('state-set', self.__toggle_nvapi)
        self.switch_gamemode.connect('state-set', self.__toggle_gamemode)
        self.switch_gamescope.connect('state-set', self.__toggle_gamescope)
        self.switch_aco.connect('state-set', self.__toggle_aco)
        self.switch_fsr.connect('state-set', self.__toggle_fsr)
        self.switch_discrete.connect('state-set', self.__toggle_discrete_gpu)
        self.switch_virt_desktop.connect('state-set', self.__toggle_virt_desktop)
        self.switch_pulse_latency.connect('state-set', self.__toggle_pulse_latency)
        self.switch_fixme.connect('state-set', self.__toggle_fixme)
        # X11 registry-backed switches pass the key name and config field.
        self.switch_mouse_capture.connect('state-set', self.__toggle_x11_reg_key, "GrabFullscreen", "fullscreen_capture")
        self.switch_take_focus.connect('state-set', self.__toggle_x11_reg_key, "UseTakeFocus", "take_focus")
        # Combo boxes selecting component versions and values.
        self.combo_fsr.connect('changed', self.__set_fsr_level)
        self.combo_virt_res.connect('changed', self.__set_virtual_desktop_res)
        self.combo_dpi.connect('changed', self.__set_custom_dpi)
        self.combo_runner.connect('changed', self.__set_runner)
        self.combo_dxvk.connect('changed', self.__set_dxvk)
        self.combo_vkd3d.connect('changed', self.__set_vkd3d)
        self.combo_nvapi.connect('changed', self.__set_nvapi)
        self.combo_windows.connect('changed', self.__set_windows)
        self.__prevent_scroll()
        # NOTE(review): `get_runtimes` is referenced without parentheses, so
        # this condition is always truthy (it tests the function object, not
        # its result). Probably meant RuntimeManager.get_runtimes() — confirm.
        if RuntimeManager.get_runtimes:
            self.action_runtime.set_visible(True)
            self.switch_runtime.connect('state-set', self.__toggle_runtime)
        '''
        Toggle the gamemode sensitivity based on gamemode_available
        also update the tooltip text with an helpfull message if it
        is not available.
        '''
        self.switch_gamemode.set_sensitive(gamemode_available)
        self.switch_gamescope.set_sensitive(gamescope_available)
        _not_available = _("This feature is not available on your system.")
        if not gamemode_available:
            self.switch_gamemode.set_tooltip_text(_not_available)
        if not gamescope_available:
            self.switch_gamescope.set_tooltip_text(_not_available)
    def choose_cwd(self, widget):
        '''
        Pop up a folder chooser to select the working directory (cwd)
        used when launching the bottle's executables, and store the
        choice in the bottle configuration. The chooser starts at the
        bottle path.
        '''
        file_dialog = Gtk.FileChooserNative.new(
            _("Choose working directory for executables"),
            self.window,
            Gtk.FileChooserAction.SELECT_FOLDER,
            _("Done"),
            _("Cancel")
        )
        file_dialog.set_current_folder(
            ManagerUtils.get_bottle_path(self.config)
        )
        response = file_dialog.run()
        # -3 is Gtk.ResponseType.ACCEPT (user confirmed the selection).
        if response == -3:
            self.manager.update_config(
                config=self.config,
                key="WorkingDir",
                value=file_dialog.get_filename()
            )
        file_dialog.destroy()
def update_combo_components(self):
'''
This function update the components combo boxes with the
items in the manager catalogs. It also temporarily disable
|
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_cond_format01.py | Python | bsd-2-clause | 1,643 | 0.000609 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        # Full diffs make comparison failures readable.
        self.maxDiff = None

        filename = 'cond_format01.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with conditional formatting."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        cell_format = workbook.add_format({
            'color': '#9C0006',
            'bg_color': '#FFC7CE',
            'font_condense': 1,
            'font_extend': 1
        })

        worksheet.write('A1', 10)
        worksheet.write('A2', 20)
        worksheet.write('A3', 30)
        worksheet.write('A4', 40)

        # Highlight A1 with cell_format when its value exceeds 5.
        worksheet.conditional_format('A1:A1',
                                     {'type': 'cell',
                                      'format': cell_format,
                                      'criteria': 'greater than',
                                      'value': 5
                                      })

        workbook.close()

        self.assertExcelEqual()
|
antononcube/ConversationalAgents | Packages/Python/ExternalParsersHookUpApp/examples.py | Python | gpl-3.0 | 1,315 | 0.004563 | import pandas
from ExternalParsersHookUp import RakuCommandFunctions
from ExternalParsersHookUp import ParseWorkflowSpecifications

# Star Wars sample data sets used by the data-query workflow examples.
dfStarwars = pandas.read_csv("https://raw.githubusercontent.com/antononcube/R-packages/master/DataQueryWorkflowsTests/inst/extdata/dfStarwars.csv")
dfStarwarsFilms = pandas.read_csv("https://raw.githubusercontent.com/antononcube/R-packages/master/DataQueryWorkflowsTests/inst/extdata/dfStarwarsFilms.csv")
dfStarwarsStarships = pandas.read_csv("https://raw.githubusercontent.com/antononcube/R-packages/master/DataQueryWorkflowsTests/inst/extdata/dfStarwarsStarships.csv")

# Alternative: generate the workflow code via a direct Raku call.
# res = RakuCommandFunctions.RakuCommand( 'say ToDataQueryWorkflowCode("use dfStarwars; select mass and height; cross tabulate mass and height", "Python-pandas")', 'DSL::English::DataQueryWorkflows')
# print(res.stdout)
# exec(res.stdout)
# print(obj)
#
command1 = "use dfStarwars; filter 'species' is 'Human' or 'mass' is greater than 120; select homeworld and species; cross tabulate homeworld and species"
command2 = 'use dfStarwars; filter "species" is "Human" or "mass" is greater than 120; select homeworld and species; cross tabulate homeworld and species'

res = ParseWorkflowSpecifications.ToDataQueryWorkflowCode(
    command=command2,
    execute=True,
    globals=globals())
print(res)
# NOTE(review): 'obj' is presumably assigned by the generated code executed
# through globals() above — confirm against ToDataQueryWorkflowCode output.
print(obj)
opalmer/aws | awsutil/dns.py | Python | mit | 1,168 | 0.000856 | import boto
import argparse
try:
import urllib2
except ImportError: # Python 3
import urllib.request as urllib2
from awsutil.logger import logger
def set_public_record():
    """Create or update a Route53 A record pointing at this instance.

    The address defaults to the public IPv4 reported by the EC2 instance
    metadata service; the positional hostname must be dot-terminated.
    """
    parser = argparse.ArgumentParser(description="Updates DNS records")
    parser.add_argument(
        "--address",
        default=urllib2.urlopen(
            "http://169.254.169.254/latest/meta-data/public-ipv4",
            timeout=120
        ).read()
    )
    parser.add_argument(
        "hostname",
        help="The hostname to establish the DNS record for"
    )
    args = parser.parse_args()

    if not args.hostname.endswith("."):
        parser.error("Expected record to end with '.'")

    # The hosted zone name is the last two labels of the hostname.
    zone_name = ".".join(list(filter(bool, args.hostname.split(".")))[-2:])

    route53 = boto.connect_route53()
    zone = route53.get_zone(zone_name)
    record = zone.get_a(args.hostname)

    if record is None:
        logger.info("Creating A %s %s", args.hostname, args.address)
        zone.add_a(args.hostname, args.address, ttl=60)
    else:
        logger.info("Updating A %s %s", args.hostname, args.address)
        zone.update_record(record, args.address, new_ttl=60)
|
catapult-project/catapult | telemetry/telemetry/internal/util/binary_manager.py | Python | bsd-3-clause | 9,298 | 0.008496 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import absolute_import
import contextlib
import logging
import os
import py_utils
from py_utils import binary_manager
from py_utils import cloud_storage
from py_utils import dependency_util
import dependency_manager
from dependency_manager import base_config
from devil import devil_env
from telemetry.core import exceptions
from telemetry.core import util
TELEMETRY_PROJECT_CONFIG = os.path.join(
util.GetTelemetryDir(), 'telemetry', 'binary_dependencies.json')
CHROME_BINARY_CONFIG = os.path.join(util.GetCatapultDir(), 'common', 'py_utils',
'py_utils', 'chrome_binaries.json')
SUPPORTED_DEP_PLATFORMS = (
'linux_aarch64', 'linux_x86_64', 'linux_armv7l', 'linux_mips',
'mac_x86_64', 'mac_arm64',
'win_x86', 'win_AMD64',
'android_arm64-v8a', 'android_armeabi-v7a', 'android_arm', 'android_x64',
'android_x86'
)
PLATFORMS_TO_DOWNLOAD_FOLDER_MAP = {
'linux_aarch64': 'bin/linux/aarch64',
'linux_x86_64': 'bin/linux/x86_64',
'linux_armv7l': 'bin/linux/armv7l',
'linux_mips': 'bin/linux/mips',
'mac_x86_64': 'bin/mac/x86_64',
'mac_arm64': 'bin/mac/arm64',
'win_x86': 'bin/win/x86',
'win_AMD64': 'bin/win/AMD64',
'android_arm64-v8a': 'bin/android/arm64-v8a',
'android_armeabi-v7a': 'bin/android/armeabi-v7a',
'android_arm': 'bin/android/arm',
'android_x64': 'bin/android/x64',
'android_x86': 'bin/android/x86',
}
NoPathFoundError = dependency_manager.NoPathFoundError
CloudStorageError = dependency_manager.CloudStorageError
_binary_manager = None
_installed_helpers = set()
TELEMETRY_BINARY_BASE_CS_FOLDER = 'binary_dependencies'
TELEMETRY_BINARY_CS_BUCKET = cloud_storage.PUBLIC_BUCKET
def NeedsInit():
  """Return True when the module-level binary manager is not yet set."""
  return not GetBinaryManager()
def InitDependencyManager(client_configs):
  """Create the global binary manager from client + default configs.

  Raises InitializationError when a manager is already installed.
  """
  if GetBinaryManager():
    raise exceptions.InitializationError(
        'Trying to re-initialize the binary manager with config %s'
        % client_configs)

  configs = list(client_configs) if client_configs else []
  configs.extend([TELEMETRY_PROJECT_CONFIG, CHROME_BINARY_CONFIG])
  SetBinaryManager(binary_manager.BinaryManager(configs))

  devil_env.config.Initialize()
@contextlib.contextmanager
def TemporarilyReplaceBinaryManager(manager):
  """Context manager that swaps in *manager* and restores the previous one."""
  saved = GetBinaryManager()
  try:
    SetBinaryManager(manager)
    yield
  finally:
    SetBinaryManager(saved)
def GetBinaryManager():
  """Return the module-level BinaryManager (None until initialized)."""
  return _binary_manager
def SetBinaryManager(manager):
  """Install *manager* as the module-level binary manager (may be None)."""
  global _binary_manager # pylint: disable=global-statement
  _binary_manager = manager
def _IsChromeOSLocalMode(os_name):
  """Determines if we're running telemetry on a Chrome OS device.

  Used to differentiate local mode (telemetry running on the CrOS DUT) from
  remote mode (running telemetry on another platform that communicates with
  the CrOS DUT over SSH).
  """
  if os_name != 'chromeos':
    return False
  return py_utils.GetHostOsName() == 'chromeos'
def FetchPath(binary_name, os_name, arch, os_version=None):
  """ Return a path to the appropriate executable for <binary_name>, downloading
  from cloud storage if needed, or None if it cannot be found.
  """
  manager = GetBinaryManager()
  if manager is None:
    raise exceptions.InitializationError(
        'Called FetchPath with uninitialized binary manager.')
  # On a CrOS DUT the linux binaries are used.
  effective_os = 'linux' if _IsChromeOSLocalMode(os_name) else os_name
  return manager.FetchPath(binary_name, effective_os, arch, os_version)
def LocalPath(binary_name, os_name, arch, os_version=None):
  """ Return a local path to the given binary name, or None if an executable
  cannot be found. Will not download the executable.
  """
  manager = GetBinaryManager()
  if manager is None:
    raise exceptions.InitializationError(
        'Called LocalPath with uninitialized binary manager.')
  return manager.LocalPath(binary_name, os_name, arch, os_version)
def FetchBinaryDependencies(
    platform, client_configs, fetch_reference_chrome_binary):
  """ Fetch all binary dependencies for the given |platform|.

  Note: we don't fetch browser binaries by default because the size of the
  binary is about 2Gb, and it requires cloud storage permission to
  chrome-telemetry bucket.

  Args:
    platform: an instance of telemetry.core.platform
    client_configs: A list of paths (string) to dependencies json files.
    fetch_reference_chrome_binary: whether to fetch reference chrome binary for
        the given platform.
  """
  configs = [
      dependency_manager.BaseConfig(TELEMETRY_PROJECT_CONFIG),
  ]
  dep_manager = dependency_manager.DependencyManager(configs)
  os_name = platform.GetOSName()
  # If we're running directly on a Chrome OS device, fetch the binaries for
  # linux instead, which should be compatible with CrOS. Otherwise, if we're
  # running remotely on CrOS, fetch the binaries for the host platform like
  # we do with android below.
  if _IsChromeOSLocalMode(os_name):
    os_name = 'linux'
  target_platform = '%s_%s' % (os_name, platform.GetArchName())
  dep_manager.PrefetchPaths(target_platform)

  host_platform = None
  fetch_devil_deps = False
  if os_name in ('android', 'chromeos'):
    host_platform = '%s_%s' % (
        py_utils.GetHostOsName(), py_utils.GetHostArchName())
    dep_manager.PrefetchPaths(host_platform)
    if os_name == 'android':
      if host_platform == 'linux_x86_64':
        fetch_devil_deps = True
      else:
        logging.error('Devil only supports 64 bit linux as a host platform. '
                      'Android tests may fail.')

  if fetch_reference_chrome_binary:
    _FetchReferenceBrowserBinary(platform)

  # For now, handle client config separately because the BUILD.gn & .isolate of
  # telemetry tests in chromium src failed to include the files specified in its
  # client config.
  # (https://github.com/catapult-project/catapult/issues/2192)
  # For now this is ok because the client configs usually don't include cloud
  # storage infos.
  # TODO(crbug.com/1111556): remove the logic of swallowing exception once the
  # issue is fixed on Chromium side.
  if client_configs:
    manager = dependency_manager.DependencyManager(
        list(dependency_manager.BaseConfig(c) for c in client_configs))
    try:
      manager.PrefetchPaths(target_platform)
      if host_platform is not None:
        manager.PrefetchPaths(host_platform)
    except dependency_manager.NoPathFoundError as e:
      logging.error('Error when trying to prefetch paths for %s: %s',
                    target_platform, e)

  if fetch_devil_deps:
    devil_env.config.Initialize()
    devil_env.config.PrefetchPaths(arch=platform.GetArchName())
    devil_env.config.PrefetchPaths()
def ReinstallAndroidHelperIfNeeded(binary_name, install_path, device):
  """ Install a binary helper to a specific location.

  Args:
    binary_name: (str) The name of the binary from binary_dependencies.json
    install_path: (str) The path to install the binary at
    device: (device_utils.DeviceUtils) a device to install the helper to

  Raises:
    Exception: When the binary could not be fetched or could not be pushed to
        the device.
  """
  if (device.serial, install_path) in _installed_helpers:
    return
  host_path = FetchPath(binary_name, 'android', device.GetABI())
  if not host_path:
    # Interpolate eagerly: Exception does not apply %-style arguments, so
    # the original 2-arg form produced an unformatted message tuple.
    raise Exception(
        '%s binary could not be fetched as %s' % (binary_name, host_path))
  device.PushChangedFiles([(host_path, install_path)])
  device.RunShellCommand(['chmod', '777', install_path], check_return=True)
  _installed_helpers.add((device.serial, install_path))
def _FetchReferenceBrowserBinary(platform):
os_name = platform.GetOSName()
if _IsChromeOSLocalMode(os_name):
os_name = 'linux'
arch_name = platform.GetArchName()
manager = binary_manager.BinaryManager(
[CHROME_BINARY_CONFIG])
if os_name == 'android':
os_version = dependency_util.GetChromeApkOsVersion(
platform.GetOSVersionName())
manager.FetchPath(
'chrome_stable', os_name, arch_name, os_version)
el |
darthbhyrava/pywikibot-local | pywikibot/version.py | Python | mit | 18,495 | 0.000162 | # -*- coding: utf-8 -*-
"""Module to determine the pywikibot version (tag, revision and date)."""
#
# (C) Merlijn 'valhallasw' van Deen, 2007-2014
# (C) xqt, 2010-2015
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import datetime
import os
import subprocess
import sys
import time
import xml.dom.minidom
from distutils.sysconfig import get_python_lib
from io import BytesIO
from warnings import warn
try:
from setuptools import svn_utils
except ImportError:
try:
from setuptools_svn import svn_utils
except ImportError as e:
svn_utils = e
import pywikibot
from pywikibot import config2 as config
from pywikibot.tools import deprecated, PY2
if not PY2:
basestring = (str, )
cache = None
_logger = 'version'
class ParseError(Exception):
    """Raised when version information cannot be parsed."""
def _get_program_dir():
_program_dir = os.path.normpath(os.path.split(os.path.dirname(__file__))[0])
return _program_dir
def getversion(online=True):
    """Return a pywikibot version string.

    @param online: (optional) Include information obtained online
    """
    data = dict(getversiondict()) # copy dict to prevent changes in 'cache'
    data['cmp_ver'] = 'n/a'
    if online:
        try:
            # Compare the local hash against the latest hash in the online
            # repository; any failure (e.g. no network) leaves 'n/a'.
            hsh2 = getversion_onlinerepo()
            hsh1 = data['hsh']
            data['cmp_ver'] = 'OUTDATED' if hsh1 != hsh2 else 'ok'
        except Exception:
            pass
    data['hsh'] = data['hsh'][:7] # make short hash from full hash
    return '%(tag)s (%(hsh)s, %(rev)s, %(date)s, %(cmp_ver)s)' % data
def getversiondict():
    """Get version info for the package.

    @return:
        - tag (name for the repository),
        - rev (current revision identifier),
        - date (date of current revision),
        - hash (git hash for the current revision)
    @rtype: C{dict} of four C{str}
    """
    global cache
    if cache:
        return cache
    _program_dir = _get_program_dir()
    exceptions = {}
    # Try each version-control backend in order of preference; the first
    # one that succeeds wins and the loop breaks.
    for vcs_func in (getversion_git,
                     getversion_svn_setuptools,
                     getversion_nightly,
                     getversion_svn,
                     getversion_package):
        try:
            (tag, rev, date, hsh) = vcs_func(_program_dir)
        except Exception as e:
            exceptions[vcs_func] = e
        else:
            break
    else:
        # nothing worked; version unknown (but suppress exceptions)
        # the value is most likely '$Id' + '$', it means that
        # pywikibot was imported without using version control at all.
        tag, rev, date, hsh = (
            '', '-1 (unknown)', '0 (unknown)', '(unknown)')
    # git and svn can silently fail, as it may be a nightly.
    if getversion_package in exceptions:
        warn('Unable to detect version; exceptions raised:\n%r'
             % exceptions, UserWarning)
    elif exceptions:
        pywikibot.debug('version algorithm exceptions:\n%r'
                        % exceptions, _logger)
    # Normalise the date to a string: VCS backends return either a
    # preformatted string or a time.struct_time.
    if isinstance(date, basestring):
        datestring = date
    elif isinstance(date, time.struct_time):
        datestring = time.strftime('%Y/%m/%d, %H:%M:%S', date)
    else:
        warn('Unable to detect package date', UserWarning)
        datestring = '-2 (unknown)'
    cache = dict(tag=tag, rev=rev, date=datestring, hsh=hsh)
    return cache
@deprecated('getversion_svn_setuptools')
def svn_rev_info(path):
    """Fetch information about the current revision of a Subversion checkout.

    @param path: directory of the Subversion checkout
    @return:
        - tag (name for the repository),
        - rev (current Subversion revision identifier),
        - date (date of current revision),
    @rtype: C{tuple} of two C{str} and a C{time.struct_time}
    """
    if not os.path.isdir(os.path.join(path, '.svn')):
        path = os.path.join(path, '..')
    _program_dir = path
    filename = os.path.join(_program_dir, '.svn/entries')
    if os.path.isfile(filename):
        with open(filename) as entries:
            version = entries.readline().strip()
            # Pre-1.7 working copies store the data as plain text lines.
            if version != '12':
                for i in range(3):
                    entries.readline()
                tag = entries.readline().strip()
                t = tag.split('://')
                t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',
                                    '')
                tag = '[%s] %s' % (t[0], t[1])
                for i in range(4):
                    entries.readline()
                date = time.strptime(entries.readline()[:19],
                                     '%Y-%m-%dT%H:%M:%S')
                rev = entries.readline()[:-1]
                return tag, rev, date
    # We haven't found the information in entries file.
    # Use sqlite table for new entries format
    from sqlite3 import dbapi2 as sqlite
    con = sqlite.connect(os.path.join(_program_dir, ".svn/wc.db"))
    cur = con.cursor()
    cur.execute("""select
local_relpath, repos_path, revision, changed_date, checksum from nodes
order by revision desc, changed_date desc""")
    name, tag, rev, date, checksum = cur.fetchone()
    cur.execute("select root from repository")
    tag, = cur.fetchone()
    con.close()
    tag = os.path.split(tag)[1]
    # changed_date is stored in microseconds since the epoch.
    date = time.gmtime(date / 1000000)
    return tag, rev, date
def github_svn_rev2hash(tag, rev):
    """Convert a Subversion revision to a Git hash using Github.

    @param tag: name of the Subversion repo on Github
    @param rev: Subversion revision identifier
    @return: the git hash and the commit date
    @rtype: C{tuple} of C{str} and C{time.struct_time}
    """
    from pywikibot.comms import http
    # A WebDAV PROPFIND on the git-svn bridge resource with a 'label'
    # header selecting the svn revision returns the matching git commit.
    uri = 'https://github.com/wikimedia/%s/!svn/vcc/default' % tag
    request = http.fetch(uri=uri, method='PROPFIND',
                         body="<?xml version='1.0' encoding='utf-8'?>"
                              "<propfind xmlns=\"DAV:\"><allprop/></propfind>",
                         headers={'label': str(rev),
                                  'user-agent': 'SVN/1.7.5 {pwb}'})
    dom = xml.dom.minidom.parse(BytesIO(request.raw))
    hsh = dom.getElementsByTagName("C:git-commit")[0].firstChild.nodeValue
    date = dom.getElementsByTagName("S:date")[0].firstChild.nodeValue
    date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
    return hsh, date
def getversion_svn_setuptools(path=None):
    """Get version info for a Subversion checkout using setuptools.

    @param path: directory of the Subversion checkout
    @return:
        - tag (name for the repository),
        - rev (current Subversion revision identifier),
        - date (date of current revision),
        - hash (git hash for the Subversion revision)
    @rtype: C{tuple} of three C{str} and a C{time.struct_time}
    """
    # svn_utils holds the ImportError when neither setuptools variant
    # provided it at module load time; re-raise it here.
    if isinstance(svn_utils, Exception):
        raise svn_utils
    tag = 'pywikibot-core'
    _program_dir = path or _get_program_dir()
    svninfo = svn_utils.SvnInfo(_program_dir)
    # suppress warning
    rev = svninfo.get_revision()
    # Sanity-check the revision before asking Github to translate it.
    if not isinstance(rev, int):
        raise TypeError('SvnInfo.get_revision() returned type %s' % type(rev))
    if rev < 0:
        raise ValueError('SvnInfo.get_revision() returned %d' % rev)
    if rev == 0:
        raise ParseError('SvnInfo: invalid workarea')
    hsh, date = github_svn_rev2hash(tag, rev)
    rev = 's%s' % rev
    return (tag, rev, date, hsh)
@deprecated('getversion_svn_setuptools')
def getversion_svn(path=None):
"""Get version info for a Subversion checkout.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
- hash (git hash for the Subversion revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
_program_dir = path or _get_program_dir()
tag, rev, date = svn_rev_info(_program_dir)
hsh, date2 = github_svn_rev2hash(tag, rev)
if date.tm_isdst >= 0 and date2.tm_isdst >= 0:
assert date == date2, 'Date of version is not consistent'
# date.tm_isdst is -1 means unk |
magayorker/magatip | scripts/add_gold.py | Python | mit | 735 | 0 | import argparse
# Parse the refill arguments and record the purchase in the gold database.
import datetime

from tinydb import TinyDB

import config

parser = argparse.ArgumentParser(description='Refill Gold To Bot')
parser.add_argument('-n', help='Number of credits', required=True)
parser.add_argument('-c', help='Currency', required=True)
parser.add_argument('-p', help='Price of credits (total)', required=True)
args = parser.parse_args()

db = TinyDB(config.DATA_PATH + 'reddit_gold.json')
db.insert({
    "user_buyer": "",
    "quantity": args.n,
    # NOTE(review): this stores credits-per-unit-of-price (n / p); confirm
    # 'price' is not meant to be the price per credit (p / n).
    "price": (float(args.n) / float(args.p)),
    "currency": args.c,
    "amount": "",
    "total_price": args.p,
    "usd_price": "",
    'tx_id': "",
    'status': "refill",
    'time': datetime.datetime.now().isoformat(),
})
db.close()
|
ekristen/mythboxee | xml/__init__.py | Python | mit | 1,360 | 0.000735 | """Core XML support for Python.
This package contains four sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
| Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
etree -- The ElementTree XML library. This is a subset of the full
ElementTree XML release.
"""
__all__ = ["dom", "parsers", "sax", "etree"]

# When being checked-out without options, this has the form
# "<dollar>Revision: x.y </dollar>"
# When exported using -kv, it is "x.y".
__version__ = "$Revision: 41660 $".split()[-2:][0]


_MINIMUM_XMLPLUS_VERSION = (0, 8, 4)


import os

# only prefer _xmlplus if the environment variable PY_USE_XMLPLUS is defined
if 'PY_USE_XMLPLUS' in os.environ:
    try:
        import _xmlplus
    except ImportError:
        pass
    else:
        try:
            v = _xmlplus.version_info
        except AttributeError:
            # _xmlplus is too old; ignore it
            pass
        else:
            if v >= _MINIMUM_XMLPLUS_VERSION:
                # Replace this package with _xmlplus transparently.
                import sys
                _xmlplus.__path__.extend(__path__)
                sys.modules[__name__] = _xmlplus
            else:
                del v
|
ysarbaev/contrib-python-qubell-client | qubell/api/public/application.py | Python | apache-2.0 | 5,682 | 0.003872 | # Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Vasyl Khomenko"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__email__ = "vkhomenko@qubell.com"
import logging as log
import requests
import simplejson as json
from qubell.api.public.organization import Organization
from qubell.api.private import exceptions
class Application(Organization):
    """
    Base class for applications. It creates the application and the
    services + environment requested through the public Qubell API.
    """
    rawResponse = None

    def __parse(self, values):
        """Flatten a list of {'id', 'value'} dicts into an id->value dict."""
        ret = {}
        for val in values:
            ret[val['id']] = val['value']
        return ret

    def __init__(self, auth, id):
        self.auth = auth
        self.applicationId = id
        self.auth.applicationId = id
        my = self.json()
        self.name = my['name']
        #self.manifest = my['manifest']

    def delete(self):
        raise NotImplementedError

    def clean(self):
        """Destroy all live instances and delete all revisions."""
        from qubell.api.public import instance, revision
        instances = self.instances
        if instances:
            for ins in instances:
                obj = instance.Instance(auth=self.auth, id=ins['id'])
                st = obj.status
                if st not in ['Destroyed', 'Destroying', 'Launching', 'Executing']: # Tests could fail and we can get any state here
                    log.info("Destroying instance %s" % obj.name)
                    obj.delete()
                    assert obj.destroyed(timeout=10)
        revisions = self.revisions
        if revisions:
            for rev in revisions:
                obj = revision.Revision(auth=self.auth, id=rev['id'])
                obj.delete()
        return True

    def json(self, key=None):
        """Return this application's JSON description from the API."""
        url = self.auth.tenant+'/api/1/organizations/'+self.auth.organizationId+'/applications'
        resp = requests.get(url, auth=(self.auth.user, self.auth.password), verify=False)
        log.debug(resp.text)
        if resp.status_code == 200:
            org = [x for x in resp.json() if x['id'] == self.applicationId]
            if len(org)>0:
                return org[0]
            # Report the application id (the original reported the wrong
            # attribute, self.organizationId, which does not exist here).
            raise exceptions.NotFoundError('Unable to find application by id: %s' % self.applicationId)
        raise exceptions.ApiError('Unable to get application by id: %s, got error: %s' % (self.applicationId, resp.text))

    def __getattr__(self, key):
        resp = self.json()
        if key not in resp:
            raise exceptions.NotFoundError('Cannot get property %s' % key)
        return resp[key] or False

    def upload(self, manifest):
        """Upload a new manifest for this application."""
        log.info("Uploading manifest")
        url = self.auth.tenant+'/api/1/applications/'+self.applicationId+'/manifest'
        headers = {'Content-Type': 'application/x-yaml'}
        resp = requests.put(url, auth=(self.auth.user, self.auth.password), data=manifest.content, verify=False, headers=headers)
        log.debug(resp.text)
        if resp.status_code == 200:
            self.manifest = manifest
            return resp.json()
        raise exceptions.ApiError('Unable to upload manifest to application id: %s, got error: %s' % (self.applicationId, resp.text))

    def launch(self, **argv):
        """Launch the application; returns the new Instance."""
        url = self.auth.tenant+'/api/1/applications/'+self.applicationId+'/launch'
        headers = {'Content-Type': 'application/json'}
        #if not 'environmentId' in argv.keys():
        #    argv['environmentId'] = self.context.environmentId
        data = json.dumps(argv)
        resp = requests.post(url, auth=(self.auth.user, self.auth.password), data=data, verify=False, headers=headers)
        log.debug('--- APPLICATION LAUNCH REQUEST ---')
        log.debug('REQUEST HEADERS: %s' % resp.request.headers)
        log.debug('REQUEST: %s' % resp.request.body)
        log.debug('RESPONSE: %s' % resp.text)
        if resp.status_code == 200:
            instance_id = resp.json()['id']
            return self.get_instance(id=instance_id)
        raise exceptions.ApiError('Unable to launch application id: %s, got error: %s' % (self.applicationId, resp.text))

    def get_instance(self, id):
        from qubell.api.public.instance import Instance
        return Instance(auth=self.auth, id=id)

    def delete_instance(self, id):
        ins = self.get_instance(id)
        return ins.delete()

    def get_revision(self, id):
        from qubell.api.public.revision import Revision
        self.auth.applicationId = self.applicationId
        return Revision(auth=self.auth, id=id)

    def list_revisions(self):
        url = self.auth.tenant+'/api/1/applications/'+self.applicationId+'/revisions'
        resp = requests.get(url, auth=(self.auth.user, self.auth.password), verify=False)
        log.debug(resp.text)
        if resp.status_code == 200:
            return resp.json()
        raise exceptions.ApiError('Unable to get revisions list, got error: %s' % resp.text)

    def create_revision(self, name, instance, parameters=[], version=None):
        raise NotImplementedError

    def delete_revision(self, id):
        raise NotImplementedError

    def get_manifest(self):
        raise NotImplementedError
|
diplomacy/research | diplomacy_research/models/draw/tests/draw_model_test_setup.py | Python | mit | 7,985 | 0.004634 | # ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Generic class to tests for draw model correctness """
from tornado import gen
from tornado.ioloop import IOLoop
from diplomacy import Game
from diplomacy_research.models.datasets.queue_dataset import QueueDataset
from diplomacy_research.models.state_space import extract_state_proto, extract_phase_history_proto, \
extract_possible_orders_proto
from diplomacy_research.utils.cluster import process_fetches_dict
class DrawModelTestSetup():
    """ Creates a testable setup to test a model and a constructor """

    def __init__(self, policy_model_ctor, value_model_ctor, draw_model_ctor, dataset_builder, adapter_ctor,
                 load_policy_args, load_value_args, load_draw_args):
        """ Constructor
            :param policy_model_ctor: The policy model constructor to create the policy.
            :param value_model_ctor: The value model constructor to create the value model.
            :param draw_model_ctor: The draw model constructor to create the draw model.
            :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods
            :param adaptor_ctor: The policy adapter constructor to create the policy adapter
            :param load_policy_args: Reference to the callable function required to load policy args
            :param load_value_args: Reference to the callable function required to load value args
            :param load_draw_args: Reference to the callable function required to load draw args
            :type policy_model_ctor: diplomacy_research.models.policy.base_policy_model.BasePolicyModel.__class__
            :type value_model_ctor: diplomacy_research.models.value.base_value_model.BaseValueModel.__class__
            :type draw_model_ctor: diplomacy_research.models.draw.base_draw_model.BaseDrawModel.__class__
            :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder
            :type adapter_ctor: diplomacy_research.models.policy.base_policy_adapter.BasePolicyAdapter.__class__
        """
        # pylint: disable=too-many-arguments
        # Parsing new flags
        args = load_policy_args()
        if load_value_args is not None:
            args += load_value_args()
        args += load_draw_args()
        self.hparams = self.parse_flags(args)

        # Other attributes
        self.graph = None
        self.sess = None
        self.adapter = None
        self.queue_dataset = None
        self.policy_model_ctor = policy_model_ctor
        self.value_model_ctor = value_model_ctor
        self.draw_model_ctor = draw_model_ctor
        self.dataset_builder = dataset_builder
        self.adapter_ctor = adapter_ctor

    def build_model(self):
        """ Builds the model """
        from diplomacy_research.utils.tensorflow import tf
        graph = tf.Graph()
        with graph.as_default():

            # Creating dataset
            self.queue_dataset = QueueDataset(batch_size=self.hparams['batch_size'],
                                              dataset_builder=self.dataset_builder)

            # Creating model and validating
            model = self.policy_model_ctor(self.queue_dataset, self.hparams)
            if self.value_model_ctor is not None:
                model = self.value_model_ctor(model, self.queue_dataset, self.hparams)
            model = self.draw_model_ctor(model, self.queue_dataset, self.hparams)
            model.finalize_build()
            model.validate()

        self.graph = graph
        self.sess = tf.Session(graph=graph)

    @staticmethod
    def parse_flags(args):
        """ Parse flags without calling tf.app.run() """
        define = {'bool': lambda x: bool(x),    # pylint: disable=unnecessary-lambda
                  'int': lambda x: int(x),      # pylint: disable=unnecessary-lambda
                  'str': lambda x: str(x),      # pylint: disable=unnecessary-lambda
                  'float': lambda x: float(x),  # pylint: disable=unnecessary-lambda
                  '---': lambda x: x}           # pylint: disable=unnecessary-lambda

        # Keeping a dictionary of parse args to overwrite if provided multiple times
        flags = {}
        for arg in args:
            arg_type, arg_name, arg_value, _ = arg
            flags[arg_name] = define[arg_type](arg_value)
            if arg_type == '---' and arg_name in flags:
                del flags[arg_name]
        return flags

    def run_tests(self):
        """ Run all tests """
        IOLoop.current().run_sync(self.run_tests_async)

    @gen.coroutine
    def run_tests_async(self):
        """ Run tests in an asynchronous IO Loop """
        self.build_model()
        self.adapter = self.adapter_ctor(self.queue_dataset, self.graph, session=self.sess)
        yield self.test_get_draw_prob()

    @gen.coroutine
    def test_get_draw_prob(self):
        """ Checks if the .get_draw_prob method works """
        game = Game()
        state_proto = extract_state_proto(game)
        phase_history_proto = extract_phase_history_proto(game)
        possible_orders_proto = extract_possible_orders_proto(game)
        locs = ['PAR', 'MAR', 'BUR']
        kwargs = {'player_seed': 0, 'noise': 0., 'temperature': 1., 'dropout_rate': 0.}

        # Temperature == 1.
        # With and without prefetching
        for use_prefetching in (False, True):
            if not use_prefetching:
                _, policy_details = yield self.adapter.get_orders(locs,
                                                                  state_proto,
                                                                  'FRANCE',
                                                                  phase_history_proto,
                                                                  possible_orders_proto,
                                                                  **kwargs)
            else:
                fetches = yield self.adapter.get_orders(locs,
                                                        state_proto,
                                                        'FRANCE',
                                                        phase_history_proto,
                                                        possible_orders_proto,
                                                        prefetch=True,
                                                        **kwargs)
                fetches = yield process_fetches_dict(self.queue_dataset, fetches)
                _, policy_details = yield self.adapter.get_orders(locs,
                                                                  state_proto,
                                                                  'FRANCE',
                                                                  phase_history_proto,
                                                                  possible_orders_proto,
                                                                  fetches=fetches,
                                                                  **kwargs)

            assert policy_details['draw_action'] in (True, False)
            assert 0. < policy_details['draw_prob'] < 1.
|
SJIT-Hackerspace/SJIT-CodingPortal | QuestionDumps/migrations/0001_initial.py | Python | apache-2.0 | 823 | 0.003645 | # -*- coding | : utf-8 -*-
# Generated by Django 1.9.1 on 2017-01-10 08:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Document model.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('docfile', models.FileField(upload_to='documents/%Y/%m/%d')),
                ('subCategory', models.IntegerField(choices=[(4, 'Syllogism'), (5, 'Arithmetic Reasoning'), (6, 'Series Completion')])),
                ('TypeofQuestion', models.IntegerField(choices=[(1, 'Programming'), (2, 'Verbal'), (3, 'Quiz')])),
            ],
        ),
    ]
|
audy/domain-name-generator | tld.py | Python | mit | 2,280 | 0.001754 | #!/usr/bin/env python3
import sys
import argparse
def parse_arguments():
    """Parse the command-line arguments for the domain-name generator.

    Returns:
        argparse.Namespace with ``words_file``, ``tlds_file``, ``tlds``,
        ``leet``, ``min_size`` and ``max_size`` attributes.
    """
    p = argparse.ArgumentParser()
    p.add_argument(
        "--words-file",
        help="file with list of words [/usr/share/dict/words]",
        default="/usr/share/dict/words",
    )
    p.add_argument(
        "--tlds-file", help="file with list of tlds [tlds.txt]", default="tlds.txt"
    )
    # Defaults to False (not a string) so the caller can test `if not args.tlds`.
    p.add_argument(
        "--tlds", help="manually specify tlds as comma-separated list", default=False
    )
    p.add_argument(
        "--leet",
        help="generate domains that replace letters with numbers",
        action="store_true",
    )
    p.add_argument("--min-size", default=0, type=int, help="minimum word length")
    p.add_argument("--max-size", default=100000, type=int, help="maximum word length")
    return p.parse_args()
def iter_words(handle):
    """Lazily yield every line of *handle*, whitespace-stripped and lower-cased."""
    stripped = (line.strip() for line in handle)
    return (word.lower() for word in stripped)
def get_tlds(tlds_file):
    """Return the list of TLDs read from a text file.

    The first whitespace-separated token of each line is taken as the TLD
    and lower-cased.  Blank lines are skipped — the original version raised
    IndexError on them (``line.split()[0]`` on an empty line).
    """
    with open(tlds_file) as handle:
        return [line.split()[0].strip().lower() for line in handle if line.strip()]
def iter_domains(words, tlds):
    """Yield domain names formed by splitting a word before a TLD suffix.

    For every word that ends with one of *tlds*, yields
    ``"<word-without-suffix>.<tld>"``.  The suffix is removed by slicing:
    the original used ``word.rstrip(tld)``, but ``rstrip`` removes *any*
    trailing characters from the TLD's character set, mangling e.g.
    "cool"/"ol" into "c.ol" instead of "co.ol".
    """
    return (
        "{}.{}".format(word[:-len(tld)], tld)
        for word in words
        for tld in tlds
        if word.endswith(tld)
    )
def l33tify(domain):
""" Produce 1337 versions of words """
replacements = {
"a": "4",
"b": "8",
"e": "3",
"g": "6",
"i": "1",
"o": "0",
"s": "5",
"t": "7",
"z": "2",
}
word, tld = domain.split(".")
return "".join([replacements.get(char, char) for char in word]) + "." + tld
if __name__ == "__main__":
    args = parse_arguments()
    # TLDs come either from the --tlds override or from the tlds file.
    if not args.tlds:
        tlds = get_tlds(args.tlds_file)
    else:
        tlds = args.tlds.split(",")
    with open(args.words_file) as handle:
        # Lazily build matching domains, optionally l33t-ified.
        # NOTE(review): `range(min, max)` makes --max-size exclusive — confirm
        # that is the intended boundary behaviour.
        processed_domains = (
            l33tify(domain) if args.leet else domain
            for domain in iter_domains(iter_words(handle), tlds)
            if len(domain) in range(args.min_size, args.max_size)
        )
        for domain in processed_domains:
            print(domain)
|
AustereCuriosity/astropy | astropy/units/tests/test_quantity_ufuncs.py | Python | bsd-3-clause | 38,162 | 0.000079 | # The purpose of these tests are to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import warnings
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ... import units as u
from ...tests.helper import raises
from ...extern.six.moves import zip
from ...utils.compat import NUMPY_LT_1_13
class TestUfuncCoverage(object):
    """Test that we cover all ufunc's"""
    def test_coverage(self):
        # Collect every ufunc numpy exposes in its C math module.
        all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
                             if type(ufunc) == np.ufunc])
        from .. import quantity_helper as qh
        # Every numpy ufunc must be classified by quantity_helper: either
        # explicitly unsupported or given a conversion helper...
        all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
                        set(qh.UFUNC_HELPERS.keys()))
        # ...and quantity_helper must not list anything numpy doesn't have.
        assert all_np_ufuncs - all_q_ufuncs == set([])
        assert all_q_ufuncs - all_np_ufuncs == set([])
class TestQuantityTrigonometricFuncs(object):
    """
    Test trigonometric functions on Quantities: forward functions must
    return dimensionless results, inverse functions must round-trip, and
    calls with non-angle / non-dimensionless units must raise TypeError.
    """
    def test_sin_scalar(self):
        q = np.sin(30. * u.degree)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value, 0.5)
    def test_sin_array(self):
        q = np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        np.array([0., 1. / np.sqrt(2.), 1.]), atol=1.e-15)
    def test_arcsin_scalar(self):
        # Round-trip: arcsin(sin(q)) converted back to q's unit equals q.
        q1 = 30. * u.degree
        q2 = np.arcsin(np.sin(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)
    def test_arcsin_array(self):
        q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
        q2 = np.arcsin(np.sin(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)
    def test_sin_invalid_units(self):
        # Forward trig requires angle units; metres must be rejected.
        with pytest.raises(TypeError) as exc:
            np.sin(3. * u.m)
        assert exc.value.args[0] == ("Can only apply 'sin' function "
                                     "to quantities with angle units")
    def test_arcsin_invalid_units(self):
        # Inverse trig requires a dimensionless argument.
        with pytest.raises(TypeError) as exc:
            np.arcsin(3. * u.m)
        assert exc.value.args[0] == ("Can only apply 'arcsin' function to "
                                     "dimensionless quantities")
    def test_arcsin_no_warning_on_unscaled_quantity(self):
        # kpc/pc divides to a scaled-dimensionless ratio; arcsin on it must
        # not emit any warning (warnings are escalated to errors here).
        a = 15 * u.kpc
        b = 27 * u.pc
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            np.arcsin(b/a)
    def test_cos_scalar(self):
        q = np.cos(np.pi / 3. * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value, 0.5)
    def test_cos_array(self):
        q = np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        np.array([1., 1. / np.sqrt(2.), 0.]), atol=1.e-15)
    def test_arccos_scalar(self):
        q1 = np.pi / 3. * u.radian
        q2 = np.arccos(np.cos(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)
    def test_arccos_array(self):
        q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
        q2 = np.arccos(np.cos(q1)).to(q1.unit)
        assert_allclose(q1.value, q2.value)
    def test_cos_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.cos(3. * u.s)
        assert exc.value.args[0] == ("Can only apply 'cos' function "
                                     "to quantities with angle units")
    def test_arccos_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.arccos(3. * u.s)
        assert exc.value.args[0] == ("Can only apply 'arccos' function to "
                                     "dimensionless quantities")
    def test_tan_scalar(self):
        q = np.tan(np.pi / 3. * u.radian)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value, np.sqrt(3.))
    def test_tan_array(self):
        q = np.tan(np.array([0., 45., 135., 180.]) * u.degree)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(q.value,
                        np.array([0., 1., -1., 0.]), atol=1.e-15)
    def test_arctan_scalar(self):
        q = np.pi / 3. * u.radian
        assert np.arctan(np.tan(q))
    def test_arctan_array(self):
        q = np.array([10., 30., 70., 80.]) * u.degree
        assert_allclose(np.arctan(np.tan(q)).to_value(q.unit), q.value)
    def test_tan_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.tan(np.array([1, 2, 3]) * u.N)
        assert exc.value.args[0] == ("Can only apply 'tan' function "
                                     "to quantities with angle units")
    def test_arctan_invalid_units(self):
        with pytest.raises(TypeError) as exc:
            np.arctan(np.array([1, 2, 3]) * u.N)
        assert exc.value.args[0] == ("Can only apply 'arctan' function to "
                                     "dimensionless quantities")
    def test_arctan2_valid(self):
        # Mixed but compatible units: result is radians, values computed
        # after converting the second argument into the first one's unit.
        q1 = np.array([10., 30., 70., 80.]) * u.m
        q2 = 2.0 * u.km
        assert np.arctan2(q1, q2).unit == u.radian
        assert_allclose(np.arctan2(q1, q2).value,
                        np.arctan2(q1.value, q2.to_value(q1.unit)))
        q3 = q1 / q2
        q4 = 1.
        at2 = np.arctan2(q3, q4)
        assert_allclose(at2.value, np.arctan2(q3.to_value(1), q4))
    def test_arctan2_invalid(self):
        with pytest.raises(u.UnitsError) as exc:
            np.arctan2(np.array([1, 2, 3]) * u.N, 1. * u.s)
        assert "compatible dimensions" in exc.value.args[0]
        with pytest.raises(u.UnitsError) as exc:
            np.arctan2(np.array([1, 2, 3]) * u.N, 1.)
        assert "dimensionless quantities when other arg" in exc.value.args[0]
    def test_radians(self):
        q1 = np.deg2rad(180. * u.degree)
        assert_allclose(q1.value, np.pi)
        assert q1.unit == u.radian
        q2 = np.radians(180. * u.degree)
        assert_allclose(q2.value, np.pi)
        assert q2.unit == u.radian
        # the following doesn't make much sense in terms of the name of the
        # routine, but we check it gives the correct result.
        q3 = np.deg2rad(3. * u.radian)
        assert_allclose(q3.value, 3.)
        assert q3.unit == u.radian
        q4 = np.radians(3. * u.radian)
        assert_allclose(q4.value, 3.)
        assert q4.unit == u.radian
        with pytest.raises(TypeError):
            np.deg2rad(3. * u.m)
        with pytest.raises(TypeError):
            np.radians(3. * u.m)
    def test_degrees(self):
        # the following doesn't make much sense in terms of the name of the
        # routine, but we check it gives the correct result.
        q1 = np.rad2deg(60. * u.degree)
        assert_allclose(q1.value, 60.)
        assert q1.unit == u.degree
        q2 = np.degrees(60. * u.degree)
        assert_allclose(q2.value, 60.)
        assert q2.unit == u.degree
        q3 = np.rad2deg(np.pi * u.radian)
        assert_allclose(q3.value, 180.)
        assert q3.unit == u.degree
        q4 = np.degrees(np.pi * u.radian)
        assert_allclose(q4.value, 180.)
        assert q4.unit == u.degree
        with pytest.raises(TypeError):
            np.rad2deg(3. * u.m)
        with pytest.raises(TypeError):
            np.degrees(3. * u.m)
class TestQuantityMathFuncs(object):
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
| assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4 | ., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np. |
dpdani/tBB | tBB/settings.py | Python | gpl-3.0 | 7,813 | 0.002048 | #!/usr/bin/python3
#
# tBB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tBB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module takes care of representing and handling settings throughout tBB.
"""
import enum
import re
import datetime
valid_item_name = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
timedelta_parse_string = '%M:%S'
class UndefinedValueException(Exception):
    """Raised when ``SettingsItem.convert`` runs before a value was assigned."""

    _MESSAGE = 'self.value needs to be defined before converting it.'

    def __init__(self):
        super().__init__(self._MESSAGE)
class ConversionException(Exception):
    """Raised when a settings value cannot be coerced to its declared type."""

    def __init__(self, value, value_type):
        message = "couldn't convert '{}' to {}.".format(value, value_type)
        super().__init__(message)
class UnknownSettingException(Exception):
    """Raised when an update names a setting path that does not exist."""

    def __init__(self, setting_path):
        # Keep the offending path around so callers can report it precisely.
        self.setting_path = setting_path
        message = "defined setting '{}' is unknown.".format(setting_path)
        super().__init__(message)
class InconsistentSettingTypeException(Exception):
    """Raised when an update would change the declared type of a setting."""

    def __init__(self, setting_path, should_be, got):
        # Expose the mismatch details for programmatic inspection.
        self.setting_path = setting_path
        self.should_be = should_be
        self.got = got
        message = "setting type for '{}' should be {}. Got: {}.".format(
            setting_path, should_be, got)
        super().__init__(message)
class SettingsTypes(enum.Enum):
    """Enumerates the kinds of values a ``SettingsItem`` may hold."""
    unknown = -1  # type not yet determined; SettingsItem.convert() will guess
    string = 0  # need no conversion
    integer = 1  # need no conversion
    boolean = 2  # need no conversion
    timedelta = 3  # parsed from 'MM:SS' strings (see timedelta_parse_string)
    settings_item = 4  # nested mapping of name -> SettingsItem
    list = 5  # need no conversion
class SettingsItem:
    """A single named setting with a declared type and a (possibly nested) value.

    ``value`` starts as ``None``; ``convert`` validates/coerces it according
    to ``value_type`` (guessing the type when it is ``SettingsTypes.unknown``).
    """
    def __init__(self, name, value_type):
        """Validate *name* and *value_type*; the value itself is set later.

        Raises TypeError for wrong argument types and ValueError when the
        name is not an identifier or shadows an existing attribute.
        """
        if type(name) != str:
            raise TypeError('argument name must be a string.')
        if not isinstance(value_type, SettingsTypes):
            raise TypeError('argument value_types must be a SettingsTypes instance.')
        if re.match(valid_item_name, name) is None:
            raise ValueError('settings item name is not acceptable. See tBB.settings.valid_item_name.')
        else:
            # Reject names that collide with real attributes/methods of this
            # object, since __getattr__ exposes child items by name.
            try:
                self.__getattribute__(name)
            except AttributeError:
                pass
            else:
                raise ValueError('settings item name is not acceptable. Name reserved.')
        self.name = name
        self.value = None
        self.value_type = value_type
    def convert(self):
        """Validate/coerce ``self.value`` in place according to ``value_type``.

        Raises UndefinedValueException when no value was assigned and
        ConversionException when the value does not fit the declared type.
        """
        if self.value is None:
            raise UndefinedValueException()
        # static conversions: basically, only do type checking
        if self.value_type == SettingsTypes.string:
            if type(self.value) != str:
                raise ConversionException(self.value, self.value_type)
        elif self.value_type == SettingsTypes.integer:
            if type(self.value) != int:
                raise ConversionException(self.value, self.value_type)
        elif self.value_type == SettingsTypes.boolean:
            if type(self.value) != bool:
                raise ConversionException(self.value, self.value_type)
        # complex conversions
        elif self.value_type == SettingsTypes.timedelta:
            self.value = self.convert_to_timedelta(self.value)
        elif self.value_type == SettingsTypes.settings_item:
            self.value = self.convert_to_settings_item(self.value)
        elif self.value_type == SettingsTypes.unknown: # make a guess on what it could be
            # NOTE: int check must come before bool is irrelevant here, but a
            # Python bool IS an int subclass — type() comparison keeps them apart.
            if type(self.value) == int:
                self.value_type = SettingsTypes.integer
            elif type(self.value) == bool:
                self.value_type = SettingsTypes.boolean
            elif type(self.value) == list:
                self.value_type = SettingsTypes.list
            elif type(self.value) == dict:
                try:
                    self.value = self.convert_to_settings_item(self.value)
                except ConversionException as exc:
                    raise exc
                else:
                    self.value_type = SettingsTypes.settings_item
            elif type(self.value) == str:
                # Strings that parse as 'MM:SS' become timedeltas; the rest
                # stay plain strings.
                try:
                    self.value = self.convert_to_timedelta(self.value)
                except ConversionException:
                    self.value_type = SettingsTypes.string
                else:
                    self.value_type = SettingsTypes.timedelta
    @staticmethod
    def convert_to_timedelta(value):
        """Parse a 'MM:SS' string into a datetime.timedelta."""
        try:
            tmp = datetime.datetime.strptime(value, timedelta_parse_string)
            return datetime.timedelta(minutes=tmp.minute, seconds=tmp.second)
        except (ValueError, TypeError) as exc:
            raise ConversionException(value, SettingsTypes.timedelta) from exc
    @staticmethod
    def convert_to_settings_item(value):
        """Convert a plain dict into a dict of name -> converted SettingsItem."""
        if type(value) != dict:
            raise ConversionException(value, SettingsTypes.settings_item)
        children = {}
        for name, elem in value.items():
            new_item = SettingsItem(name=name, value_type=SettingsTypes.unknown)
            new_item.value = elem
            children[name] = new_item
        # Convert after all children exist so a failure surfaces per-child.
        for child in children.values():
            child.convert()
        return children
    def __getattr__(self, item):
        # Expose nested children as attributes: item.sub_item.
        # NOTE(review): when value_type is not settings_item this silently
        # returns None instead of re-raising — confirm that is intended.
        try:
            return self.__getattribute__(item)
        except AttributeError as exc:
            if self.value_type == SettingsTypes.settings_item and self.value is not None:
                if item in self.value.keys():
                    return self.value[item]
                else:
                    raise exc
    def __repr__(self):
        return "<{} '{}' ({})>".format(self.__class__.__name__, self.name, self.value_type)
class Settings:
    """Wraps a ``SettingsItem`` tree and supports recursive updates on it."""
    def __init__(self, tree):
        """Store the root SettingsItem; reject anything else."""
        if not isinstance(tree, SettingsItem):
            raise TypeError("expected SettingsItem instance for argument tree. Got: '{}'.".format(tree))
        self.tree = tree
    def update(self, new_tree, scope=''):
        """Merge *new_tree* into this settings tree.

        Leaf items overwrite the value at the matching path (type must match);
        nested items recurse child by child.  Raises UnknownSettingException
        for paths that do not exist and InconsistentSettingTypeException for
        type mismatches.
        """
        if not isinstance(new_tree, SettingsItem):
            raise TypeError("expected SettingsItem instance for argument tree. "
                            "Got: '{}'.".format(new_tree))
        if type(new_tree.value) != dict:
            # Leaf: walk the dotted scope (skipping the root segment) down to
            # the target item, tracking the path for error reporting.
            walked_path = 'self.tree'
            try:
                setting = self.tree
                for selector in scope.split('.')[1:]:
                    walked_path += '.' + selector
                    setting = getattr(setting, selector)
            except AttributeError:
                raise UnknownSettingException(walked_path)
            else:
                if setting.value_type != new_tree.value_type:
                    raise InconsistentSettingTypeException(scope, setting.value_type,
                                                           new_tree.value_type)
                setting.value = new_tree.value
        else:
            # Branch: recurse into each child, extending the dotted scope.
            # NOTE(review): the root name is prepended on every recursion
            # level (self.tree.name+scope+...); verify deep (3+ level) paths
            # resolve as intended.
            for name in new_tree.value:
                if new_tree.value_type == SettingsTypes.settings_item:
                    self.update(new_tree.value[name], scope=self.tree.name+scope+'.'+name)
                else:
                    raise TypeError("expected iterators inside new_tree to be SettingsTypes.settings_item. "
                                    "Got: {}".format(new_tree.value_type))
    @staticmethod
    def parse(json_data, name='toplevel'):
        """Build and convert a SettingsItem tree from decoded JSON data."""
        tree = SettingsItem(name=name, value_type=SettingsTypes.settings_item)
        tree.value = json_data
        tree.convert()
        return tree
    def __getattr__(self, item):
        # Delegate unknown attributes to the root item (and thus its children).
        return self.tree.__getattr__(item)
|
bollu/sagenb | sagenb/notebook/cell.py | Python | gpl-3.0 | 82,188 | 0.002569 | # -*- coding: utf-8 -*-
"""
A Cell
A cell is a single input/output block. Worksheets are built out of
a list of cells.
"""
###########################################################################
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
import os
import re
import shutil
import textwrap
import time
from cgi import escape
from sagenb.misc.misc import (word_wrap, strip_string_literals,
set_restrictive_permissions, unicode_str,
encoded_str)
from interact import (INTERACT_RESTART, INTERACT_UPDATE_PREFIX,
INTERACT_TEXT, INTERACT_HTML)
# Maximum number of characters allowed in output. This is needed
# avoid overloading web browser. For example, it should be possible
# to gracefully survive:
# while True:
# print "hello world"
# On the other hand, we don't want to lose the output of big matrices
# and numbers, so don't make this too small.
MAX_OUTPUT = 32000
MAX_OUTPUT_LINES = 120
# Used to detect and format tracebacks. See :func:`format_exception`.
TRACEBACK = 'Traceback (most recent call last):'
# This regexp matches "cell://blah..." in a non-greedy way (the ?), so
# we don't get several of these combined in one.
re_cell = re.compile('"cell://.*?"')
re_cell_2 = re.compile("'cell://.*?'") # same, but with single quotes
# Matches script blocks.
re_script = re.compile(r'<script[^>]*?>.*?</script>', re.DOTALL | re.I)
# Whether to enable editing of :class:`TextCell`s with TinyMCE.
JEDITABLE_TINYMCE = True
###########################
# Generic (abstract) cell #
###########################
class Cell_generic(object):
    def __init__(self, id, worksheet):
        """
        Creates a new generic cell.
        INPUT:
        - ``id`` - an integer or string; this cell's ID
        - ``worksheet`` - a
            :class:`sagenb.notebook.worksheet.Worksheet` instance; this
            cell's parent worksheet
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic(0, None)
            sage: isinstance(C, sagenb.notebook.cell.Cell_generic)
            True
            sage: isinstance(C, sagenb.notebook.cell.TextCell)
            False
            sage: isinstance(C, sagenb.notebook.cell.Cell)
            False
        """
        # Prefer integer IDs; non-numeric strings are stored unchanged.
        try:
            self._id = int(id)
        except ValueError:
            self._id = id
        self._worksheet = worksheet
    def __repr__(self):
        """
        Returns a string representation of this generic cell.
        OUTPUT:
        - a string of the form ``"Cell_generic <id>"``
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic(0, None)
            sage: C.__repr__()
            'Cell_generic 0'
        """
        return "Cell_generic %s" % self._id
    def __cmp__(self, right):
        """
        Compares generic cells by ID.
        INPUT:
        - ``right`` - a :class:`Cell_generic` instance; the cell to
            compare to this cell
        OUTPUT:
        - a boolean
        EXAMPLES::
            sage: C1 = sagenb.notebook.cell.Cell_generic(0, None)
            sage: C2 = sagenb.notebook.cell.Cell_generic(1, None)
            sage: C3 = sagenb.notebook.cell.Cell_generic(0, None)
            sage: [C1 == C2, C1 == C3, C2 == C3]
            [False, True, False]
            sage: C1 = sagenb.notebook.cell.TextCell('bagel', 'abc', None)
            sage: C2 = sagenb.notebook.cell.TextCell('lox', 'abc', None)
            sage: C3 = sagenb.notebook.cell.TextCell('lox', 'xyz', None)
            sage: [C1 == C2, C1 == C3, C2 == C3]
            [False, False, True]
            sage: C1 = sagenb.notebook.cell.Cell(7, '3+2', '5', None)
            sage: C2 = sagenb.notebook.cell.Cell(7, '3+2', 'five', None)
            sage: C3 = sagenb.notebook.cell.Cell('7', '2+3', '5', None)
            sage: [C1 == C2, C1 == C3, C2 == C3]
            [True, True, True]
        """
        # Python 2 three-way compare: cells are ordered solely by their ID,
        # never by input/output contents (see the doctests above).
        return cmp(self.id(), right.id())
    def id(self):
        """
        Returns this generic cell's ID.
        OUTPUT:
        - an integer or string
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic(0, None)
            sage: C.id()
            0
            sage: C = sagenb.notebook.cell.Cell('blue', '2+3', '5', None)
            sage: C.id()
            'blue'
            sage: C = sagenb.notebook.cell.TextCell('yellow', '2+3', None)
            sage: C.id()
            'yellow'
        """
        # Plain accessor for the normalized ID set in __init__/set_id.
        return self._id
    def set_id(self, id):
        """
        Sets this generic cell's ID.
        INPUT:
        - ``id`` - an integer or string; the new ID
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic(0, None)
            sage: C.id()
            0
            sage: C.set_id('phone')
            sage: C.id()
            'phone'
        """
        # Same normalization as __init__: prefer int, keep strings as-is.
        try:
            self._id = int(id)
        except ValueError:
            self._id = id
    def proxied_id(self):
        """
        Returns the ID of the cell for which this generic cell is a
        proxy. If this cell does not have such an ID, it returns the
        cell's own ID.
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic('self_stand_in', None)
            sage: [C.id(), C.proxied_id()]
            ['self_stand_in', 'self_stand_in']
        """
        # EAFP: _proxied_id only exists after set_proxied_id() was called.
        try:
            return self._proxied_id
        except AttributeError:
            return self._id
    def set_proxied_id(self, proxied_id):
        """
        Sets, for this generic cell, the ID of the cell that it
        proxies.
        INPUT:
        - ``proxied_id`` - an integer or string; the proxied cell's ID
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic('understudy', None)
            sage: [C.id(), C.proxied_id()]
            ['understudy', 'understudy']
            sage: C.set_proxied_id('principal')
            sage: [C.id(), C.proxied_id()]
            ['understudy', 'principal']
        """
        # Unlike set_id, the proxied ID is stored verbatim (no int coercion).
        self._proxied_id = proxied_id
    def worksheet(self):
        """
        Returns this generic cell's worksheet object.
        OUTPUT:
        - a :class:`sagenb.notebook.worksheet.Worksheet` instance
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic(0, 'worksheet object')
            sage: C.worksheet()
            'worksheet object'
            sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
            sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C.worksheet() is W
            True
            sage: nb.delete()
        """
        # Plain accessor; the worksheet is whatever was handed to __init__
        # or set_worksheet (no type checking is performed).
        return self._worksheet
    def set_worksheet(self, worksheet, id=None):
        """
        Sets this generic cell's worksheet object and, optionally, its
        ID.
        INPUT:
        - ``worksheet`` - a
            :class:`sagenb.notebook.worksheet.Worksheet` instance; the
            cell's new worksheet object
        - ``id`` - an integer or string (default: None); the cell's
            new ID
        EXAMPLES::
            sage: C = sagenb.notebook.cell.Cell_generic(0, None)
            sage: W = "worksheet object"
            sage: C.set_worksheet(W)
            sage: C.worksheet()
            'worksheet object'
        """
        self._worksheet = worksheet
        # Reuse set_id so the optional new ID gets the same normalization.
        if id is not None:
            self.set_id(id)
def worksheet_filename(self):
"""
Returns the filename of this generic cell's worksheet object.
- ``publish`` - a boolean (default: False); whether to render
a published cell
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = |
bollwyvl/nosebook | setup.py | Python | bsd-3-clause | 1,949 | 0 | import os
from setuptools import setup
# you'd add this, too, for `python setup.py test` integration
from setuptools.command.test import test as TestCommand
class NosebookTestCommand(TestCommand):
    """``python setup.py test`` integration: delegate to nose using the
    project's ``.noserc`` configuration file."""

    def run_tests(self):
        # Run nose ensuring that argv simulates running nosetests directly.
        # Import lazily so setup.py can be parsed without nose installed.
        import nose
        nose.run_exit(argv=['nosetests', '-c', './.noserc'])
def read(fname):
    """
    Utility function to read the README file.

    Used for the long_description. It's nice, because now 1) we have a top
    level README file and 2) it's easier to type in the README file than to put
    a raw string in below ...
    """
    # Resolve relative to this setup.py and close the handle deterministically
    # (the original leaked an open file object).
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
setup(
    name="nosebook",
    version="0.4.0",
    author="Nicholas Bollweg",
    author_email="nick.bollweg@gmail.com",
    description="a nose plugin for IPython notebooks",
    license="BSD",
    keywords="IPython nose",
    url="http://github.com/bollwyvl/nosebook",
    py_modules=["nosebook"],
    # Long description comes from the top-level README (see read() above).
    long_description=read("README.rst"),
    test_suite="nose.collector",
    classifiers=[
        "Topic :: Utilities",
        "Framework :: IPython",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Intended Audience :: Developers",
        "Development Status :: 3 - Alpha",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 2",
        "License :: OSI Approved :: BSD License",
        "Topic :: Software Development :: Testing",
    ],
    # Needed at setup time because the custom test command imports nose.
    setup_requires=[
        "nose",
        "IPython",
        "jsonschema",
        "pyzmq"
    ],
    # Register the plugin via nose's 0.10+ entry-point protocol.
    entry_points={
        "nose.plugins.0.10": [
            "nosebook = nosebook:Nosebook",
            "subprocstreams = "
            "IPython.testing.iptest:SubprocessStreamCapturePlugin"
        ]
    },
    # Route `python setup.py test` through nose (see NosebookTestCommand).
    cmdclass={'test': NosebookTestCommand}
)
|
mozilla/firefox-flicks | vendor-local/lib/python/celery/canvas.py | Python | bsd-3-clause | 14,524 | 0.000413 | # -*- coding: utf-8 -*-
"""
celery.canvas
~~~~~~~~~~~~~
Composing task workflows.
Documentation for these functions are in :mod:`celery`.
You should not import from this module directly.
"""
from __future__ import absolute_import
from copy import deepcopy
from functools import partial as _partial
from operator import itemgetter
from itertools import chain as _chain
from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
from celery import current_app
from celery.local import Proxy
from celery.utils.compat import chain_from_iterable
from celery.result import AsyncResult, GroupResult
from celery.utils.functional import (
maybe_list, is_list, regen,
chunks as _chunks,
)
from celery.utils.text import truncate
Chord = Proxy(lambda: current_app.tasks['celery.chord'])
class _getitem_property(object):
def __init__(self, key):
self.key = key
def __get__(self, obj, type=None):
if obj is None:
return type
return obj.get(self.key)
def __set__(self, obj, value):
obj[self.key] = value
class Signature(dict):
    """Class that wraps the arguments and execution options
    for a single task invocation.

    Used as the parts in a :class:`group` or to safely
    pass tasks around as callbacks.

    :param task: Either a task class/instance, or the name of a task.
    :keyword args: Positional arguments to apply.
    :keyword kwargs: Keyword arguments to apply.
    :keyword options: Additional options to :meth:`Task.apply_async`.

    Note that if the first argument is a :class:`dict`, the other
    arguments will be ignored and the values in the dict will be used
    instead.

        >>> s = subtask('tasks.add', args=(2, 2))
        >>> subtask(s)
        {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}}

    """
    # Registry used by from_dict to revive subclasses (chain, group, chord...).
    TYPES = {}
    _type = None

    @classmethod
    def register_type(cls, subclass, name=None):
        cls.TYPES[name or subclass.__name__] = subclass
        return subclass

    @classmethod
    def from_dict(self, d):
        # Dispatch to the registered subclass when the dict was produced by one.
        typ = d.get('subtask_type')
        if typ:
            return self.TYPES[typ].from_dict(kwdict(d))
        return Signature(d)

    def __init__(self, task=None, args=None, kwargs=None, options=None,
                 type=None, subtask_type=None, immutable=False, **ex):
        init = dict.__init__

        if isinstance(task, dict):
            return init(self, task)  # works like dict(d)

        # Also supports using task class/instance instead of string name.
        try:
            task_name = task.name
        except AttributeError:
            task_name = task
        else:
            self._type = task

        init(self,
             task=task_name, args=tuple(args or ()),
             kwargs=kwargs or {},
             options=dict(options or {}, **ex),
             subtask_type=subtask_type,
             immutable=immutable)

    def __call__(self, *partial_args, **partial_kwargs):
        return self.apply_async(partial_args, partial_kwargs)
    delay = __call__

    def apply(self, args=(), kwargs={}, **options):
        """Apply this task locally."""
        # For callbacks: extra args are prepended to the stored args.
        args, kwargs, options = self._merge(args, kwargs, options)
        return self.type.apply(args, kwargs, **options)

    def _merge(self, args=(), kwargs={}, options={}):
        # Immutable signatures keep their stored args/kwargs; only options merge.
        if self.immutable:
            return self.args, self.kwargs, dict(self.options, **options)
        return (tuple(args) + tuple(self.args) if args else self.args,
                dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
                dict(self.options, **options) if options else self.options)

    def clone(self, args=(), kwargs={}, **opts):
        # need to deepcopy options so origins links etc. is not modified.
        args, kwargs, opts = self._merge(args, kwargs, opts)
        s = Signature.from_dict({'task': self.task, 'args': tuple(args),
                                 'kwargs': kwargs, 'options': deepcopy(opts),
                                 'subtask_type': self.subtask_type,
                                 'immutable': self.immutable})
        s._type = self._type
        return s
    partial = clone

    def _freeze(self, _id=None):
        # Pin (or generate) the task id ahead of sending, returning the result.
        opts = self.options
        try:
            tid = opts['task_id']
        except KeyError:
            tid = opts['task_id'] = _id or uuid()
        return self.AsyncResult(tid)

    def replace(self, args=None, kwargs=None, options=None):
        # Clone, selectively overriding args/kwargs/options.
        s = self.clone()
        if args is not None:
            s.args = args
        if kwargs is not None:
            s.kwargs = kwargs
        if options is not None:
            s.options = options
        return s

    def set(self, immutable=None, **options):
        if immutable is not None:
            self.immutable = immutable
        self.options.update(options)
        return self

    def apply_async(self, args=(), kwargs={}, **options):
        # For callbacks: extra args are prepended to the stored args.
        args, kwargs, options = self._merge(args, kwargs, options)
        return self._apply_async(args, kwargs, **options)

    def append_to_list_option(self, key, value):
        # De-duplicating append into a list-valued option (e.g. callbacks).
        items = self.options.setdefault(key, [])
        if value not in items:
            items.append(value)
        return value

    def link(self, callback):
        return self.append_to_list_option('link', callback)

    def link_error(self, errback):
        return self.append_to_list_option('link_error', errback)

    def flatten_links(self):
        # Depth-first flattening of this signature and all linked callbacks.
        return list(chain_from_iterable(_chain(
            [[self]],
            (link.flatten_links()
             for link in maybe_list(self.options.get('link')) or [])
        )))

    def __or__(self, other):
        # `sig | sig` composes chains (see the chain class below).
        if not isinstance(self, chain) and isinstance(other, chain):
            return chain((self,) + other.tasks)
        elif isinstance(other, chain):
            return chain(*self.tasks + other.tasks)
        elif isinstance(other, Signature):
            if isinstance(self, chain):
                return chain(*self.tasks + (other, ))
            return chain(self, other)
        return NotImplemented

    def __invert__(self):
        return self.apply_async().get()

    def __reduce__(self):
        # for serialization, the task type is lazily loaded,
        # and not stored in the dict itself.
        return subtask, (dict(self), )

    def reprcall(self, *args, **kwargs):
        args, kwargs, _ = self._merge(args, kwargs, {})
        return reprcall(self['task'], args, kwargs)

    def __repr__(self):
        return self.reprcall()

    @cached_property
    def type(self):
        return self._type or current_app.tasks[self['task']]

    @cached_property
    def AsyncResult(self):
        try:
            return self.type.AsyncResult
        except KeyError:  # task not registered
            return AsyncResult

    @cached_property
    def _apply_async(self):
        try:
            return self.type.apply_async
        except KeyError:
            return _partial(current_app.send_task, self['task'])

    # Expose the underlying dict entries as attributes.
    task = _getitem_property('task')
    args = _getitem_property('args')
    kwargs = _getitem_property('kwargs')
    options = _getitem_property('options')
    subtask_type = _getitem_property('subtask_type')
    immutable = _getitem_property('immutable')
class chain(Signature):
def __init__(self, *tasks, **options):
tasks = tasks[0] if len(tasks) == 1 and is_list(tasks[0]) else tasks
Signature.__init__(
self, 'celery.chain', (), {'tasks': tasks}, **options
)
self.tasks = tasks
self.subtask_type = 'chain'
def __call__(self, *args, **kwargs):
return self.apply_async(args, kwargs)
@classmethod
def from_dict(self, d):
tasks = d['kwargs']['tasks']
if d['args'] and tasks:
# partial args passed on to first task in chain (Issue #1057).
tasks[0]['args'] = d['args'] + tasks[0]['args']
return chain(*d['kwargs']['tasks'], **kwdict(d['options']))
def __repr__(self):
return ' |
trudikampfschaf/flask-microblog | mail.py | Python | bsd-3-clause | 273 | 0.014652 | #!flask/bin/python
from flask.ext.mail import Message

from app import app, mail
from config import ADMINS

# Compose a throwaway message exercising both the plain-text and HTML bodies,
# sent from the first configured admin to all admins.
msg = Message('test subject', sender = ADMINS[0], recipients = ADMINS)
msg.body = 'text body'
msg.html = '<b>HTML</b> body'

# Flask-Mail reads its configuration from the active application context.
with app.app_context():
    mail.send(msg)
tomoyuki-nakabayashi/ICS-IoT-hackathon | sample/python-camera/python-camera.py | Python | mit | 475 | 0.035789 | # -*- | coding: utf-8 -*-
import cv2

# Open the default camera (device number "0").
cap = cv2.VideoCapture(0)

while(True):
    # Capture a frame
    ret, frame = cap.read()
    # NOTE(review): `ret` is never checked; if the grab fails, imshow below
    # will raise — confirm whether a guard (e.g. `if not ret: break`) is wanted.

    # show on display
    cv2.imshow('frame', frame)

    # waiting for keyboard input
    key = cv2.waitKey(1) & 0xFF

    # Exit if "q" pressed
    if key == ord('q'):
        break
    # Save the current frame if "s" pressed
    if key == ord('s'):
        path = "photo.jpg"
        cv2.imwrite(path, frame)

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
ox-it/talks.ox | talks/audit_trail/forms.py | Python | apache-2.0 | 812 | 0.002463 | from __future__ import absolute_import
import urllib
from django import forms
DEFAULT_DATE_FORMATS = ["%d/%m/%Y"]
DEFAULT_TIME_FORMATS = ["%H:%M"]
class RevisionsFilteringForm(forms.Form):
    """Optional From/To date-time range used to filter audit-trail revisions."""

    from_date = forms.SplitDateTimeField(label="From",
                                         required=False,
                                         input_date_formats=DEFAULT_DATE_FORMATS,
                                         input_time_formats=DEFAULT_TIME_FORMATS)
    to_date = forms.SplitDateTimeField(label="To",
                                       required=False,
                                       input_date_formats=DEFAULT_DATE_FORMATS,
                                       input_time_formats=DEFAULT_TIME_FORMATS)

    def as_url_args(self):
        """Serialize the raw (bound) form data as a URL query string."""
        # The module only does `import urllib`, which does not guarantee the
        # `urllib.parse` submodule is bound on Python 3; import it explicitly.
        from urllib.parse import urlencode
        return urlencode(self.data)
|
manuelep/openshift_v3_test | wsgi/web2py/gluon/contrib/plural_rules/af.py | Python | mit | 598 | 0.006689 | #!/usr/bin/e | nv python
# -*- coding: utf8 -*-
# Plural-Forms for af (Afrikaans (South Africa))
# Afrikaans distinguishes two grammatical numbers, so there are two
# plural forms: index 0 (singular) and index 1 (plural).
nplurals = 2

# Map a cardinal number *n* onto a plural-form index (0, 1, ...).
# NOTE! The singular form must ALWAYS map to plural_id = 0.
def get_plural_id(n):
    return int(n != 1)

# Construct and return the plural form of *word* for the given *plural_id*
# (which is ALWAYS > 0).  Executed only for words (or phrases) that are
# missing from the plural_dict dictionary.
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
|
vincentrose88/civAdder | civ_battleroyal_leader_civ_adder.py | Python | mit | 3,793 | 0.006327 | import sys
def get_civ_leader(civ_leader_file):
"""Reads in a file with civs mapped to leaders and add it to a dict.
"""
return {leader.strip('\n'): country
for line in civ_leader_file
for (country, leader) in [line.split('\t')]}
def get_all_names(civ_leader):
"""Reads in all leader names in the leader_civ dict to filter narrator text.
"""
return[k for keys in civ_leader for k in keys.split()]
def find_best_leader_match(input_lines):
"""Return best leader according to input.
Finds the best matched leader name for inputted list of words (containing
at least one leadername) (useful when narrator use shortned leader names
and more leaders share some of their name (Khan as an example)."""
best_match = 0
matched_key = None
for leader in civ_leader.keys():
matches = 0
for split_name in leader.split():
for split_input in input_lines:
if(split_input == split_name):
matches+=1
if(matches>best_match):
matched_key = leader
best_match = matches
elif(matches==best_match and matches!=0):
matched_key = None
if(matched_key is not None):
return civ_leader[matched_key]
def insert_civ_names(input_lines, all_names):
"""Inserts civ names in parenthesis.
Reads in a text file from narrators and searches for leader names and adds
civ in brackets"""
out = []
for line in input_lines:
new_line = []
split_line = line.split(' ')
start_word_num = 0
word_num = 0
while word_num < len(split_line):
word=split_line[word_num]
if(word[-1] in '.,:;?!+-='):
punct = word[-1]
word = word[:-1]
else:
punct = ''
w = 0
leader = []
if(word in all_names and word != 'I'):
while(word in all_names):
leader.append(word)
w += 1
word = split_line[word_num + w]
civ = find_best_leader_match(leader)
if civ is not Fa | lse:
new_line.extend(
(' '.join(split_line[start_word_num:word_num]),
' {} ({}){} '.format(' '.join(leader), civ, | punct)))
start_word_num = word_num + len(leader)
word_num = word_num + len(leader)
else:
word_num += 1
else:
word_num += 1
new_line.append(' '.join(split_line[start_word_num:]))
out.append(''.join(new_line))
return(''.join(out))
def print_help():
print (
'For Civilization Battle Royal Mk.II community at '
'reddit/r/civbattleroyale - Flair up!\n'
'This python script takes in a plain text file as the only argument.\n'
'It adds civilization names in brackets to leader names (from the '
'civBR_civ_leader.tsv).\n'
'Outputs a new text-file with a suffix: "_with_civs".\n'
'Made by vincentrose88')
if __name__ == '__main__':
if len(sys.argv) == 1:
print_help()
exit(0)
elif(sys.argv[1] in ('-h', '--help', None)):
print_help()
exit(0)
elif(sys.argv[1]=='-t' or sys.argv[1]=='--test'):
input_file = 'data/test_data.txt'
else:
input_file = str(sys.argv[1])
input_lines = open(input_file,'r').readlines()
civ_leader_file = open('civBR_civ_leader.tsv','r')
civ_leader = get_civ_leader(civ_leader_file)
all_names = get_all_names(civ_leader)
output = insert_civ_names(input_lines, all_names)
updated_file = open(input_file + '_with_civs', 'w')
updated_file.write(output)
|
dandygithub/kodi | addons/DEPRECATED/plugin.video.unified.search/resources/lib/search_db.py | Python | gpl-3.0 | 3,245 | 0.005855 | #!/usr/bin/python
# Writer (c) 2012, MrStealth
# Rev. 1.1.1
# License: Attribution-NonCommercial-ShareAlike 3.0 Unported (CC BY-NC-SA 3.0)
# -*- coding: utf-8 -*-
import os
import sqlite3 as sqlite
import xbmcaddon
import xbmc
__addon__ = xbmcaddon.Addon(id='plugin.video.unified.search')
#addon_path = __addon__.getAddonInfo('path')
profile_nc = xbmc.translatePath(__addon__.getAddonInfo('profile'))
try : profile = os.path.normpath(profile_nc.decode('utf-8'))
except : profile = os.path.normpath(profile_nc)
class SearchDB:
def __init__(self):
#self.filename = os.path.join(addon_path, 'resources/databases', 'searches.db')
self.filename = os.path.join(profile, 'databases', 'searches.db')
self.connect()
def connect(self):
# Create directory if not exist
basedir = os.path.dirname(self.filename)
if not os.path.exists(basedir):
os.makedirs(basedir)
# Create DB file if not exist
if not os.path.isfile(self.filename):
print "Create new sqlite file %s" % self.filename
open(self.filename, 'w').close()
# Try to avoid OperationalError: database is locked
self.db = sqlite.connect(self.filename, timeout=1000, check_same_thread = False)
self.db.text_factory = str
self.cursor = self.db.cursor()
self.execute = self.cursor.execute
self.commit = self.db.commit()
self.create_if_not_exists()
def create_if_not_exists(self):
try:
self.execute("CREATE TABLE IF NOT EXISTS searches (id INT, keyword TEXT, counter INT default 0)")
self.db.commit()
except sqlite.OperationalError:
print "Database '%s' is locked" % self.filename
pass
def new(self, keyword):
search_id = self.search_id()
self.execute('INSERT INTO searches(id, keyword) VALUES(?,?)', (search_id, keyword))
self.db.commit()
return search_id
def search_id(self):
self.execute("SELECT MAX(id) FROM searches")
return self.increase_counter(self.cursor.fetchone()[0])
def increase_counter(self, counter):
counter = counter + 1 if counter or counter == 0 else 1
return counter
def get_latest_search_id(self):
self.execute("SELECT MAX(id) FROM searches")
return self.cursor.fetchone()[0]
def update_counter(self, search_id):
self.execute("UPDATE searches SET counter=counter+1 WHERE id=%d" % (search_id))
self.execute("SELECT MAX(counter) FROM searches WHERE id=%d" % search_id)
self.db.commit()
return self.cursor.fetchone()[0]
def get_counte | r(self, search_id):
self.execute("SELECT MAX(counter) FROM searches WHERE id=%d" % search_id)
return self.cursor.fetchone()[0]
def all(self):
self.execute("SELECT * FROM searches ORDER BY id DESC")
return [{'id': x[0], 'keyword': x[1], 'counter': x[2]} for x in self.cursor.fetchall()]
def drop(self):
| if os.path.isfile(self.filename):
self.connect()
self.execute('DELETE FROM searches')
self.db.commit()
def close(self):
self.cursor.close()
self.db.close()
|
reiths/ros_spinnaker_interface | examples/example_ros_spinnaker_interface.py | Python | mit | 2,533 | 0.006317 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Stephan Reith
@date 31.08.2016
This is a simple example to demonstrate how the ROS Spinnaker Interface can be used.
You will also need a ROS Listener and a ROS Talker to send and receive data.
Make sure they communicate over the same ROS topics and std_msgs.Int64 ROS Messages used in here.
"""
import spynnaker.pyNN as pynn
from ros_spinnaker_interface import ROS_Spinnaker_Interface
# import transfer_functions as tf
from ros_spinnaker_interface import SpikeSourcePoisson
from ros_spinnaker_interface import SpikeSinkSmoothing
ts = 0.1
n_neurons = 1
simulation_time = 10000 # ms
pynn.setup(timestep=ts, min_delay=ts, max_delay=2.0*ts)
pop = pynn.Population(size=n_neurons, cellclass=pynn.IF_curr_exp, cellparams={}, label='pop')
# The ROS_Spinnaker_Interface just needs to be initialised. The following parameters are possible:
ros_interface = ROS_Spinnaker_Interface(
n_neurons_source=n_neurons, # number of neurons of the injector population
Spike_Source_Class=SpikeSourcePoisson, # the transfer function ROS Input -> Spikes you want to use.
Spike_Sink_Class=SpikeSinkSmoothing, # the transfer function Spikes -> ROS Output you want to use.
# You can choose from the transfer_functions module
# or write one yourself.
output_population=pop, # the pynn population you wish to receive the
# live spikes from.
ros_topic_send='to_spinnaker', # the ROS topic used for the | incoming ROS values.
ros_topic_recv='from_spinnaker', # the ROS topic used for the outgoing ROS values.
clk_rate=1000, # mainloop clock (update) rate in Hz.
ros_output_rate=10) # number of ROS messages send out per second.
# Build your network, run the simul | ation and optionally record the spikes and voltages.
pynn.Projection(ros_interface, pop, pynn.OneToOneConnector(weights=5, delays=1))
pop.record()
pop.record_v()
pynn.run(simulation_time)
spikes = pop.getSpikes()
pynn.end()
# Plot
import pylab
spike_times = [spike[1] for spike in spikes]
spike_ids = [spike[0] for spike in spikes]
pylab.plot(spike_times, spike_ids, ".")
pylab.xlabel('Time (ms)')
pylab.ylabel('Neuron ID')
pylab.title('Spike Plot')
pylab.xlim(xmin=0)
pylab.show()
|
praekelt/mc2 | mc2/controllers/base/admin.py | Python | bsd-2-clause | 378 | 0 | fro | m django.contrib import admin
from mc2.controllers.base.models import Controller
class ControllerAdmin(admin.ModelAdmin):
search_fields = ('state', 'name')
list_filter = ('state',)
list_display = ('name', 'state', 'organization')
list_editable = ('organization',)
readonly_fields = ('state', 'owner')
admin.site.register(C | ontroller, ControllerAdmin)
|
amolenaar/gaphor | gaphor/diagram/diagramtools/tests/conftest.py | Python | lgpl-2.1 | 570 | 0.001754 | import pytest
from gaphas.painter import BoundingBoxPainter
from gaphas.view import GtkView
from gaphor.diagram.painter import It | emPainter
from gaphor.diagram.selection import Selection
from gaphor.diagram.tests.fixtures import diagram, element_factory, event_manager
@ | pytest.fixture
def view(diagram):
view = GtkView(model=diagram, selection=Selection())
view._qtree.resize((-100, -100, 400, 400))
item_painter = ItemPainter(view.selection)
view.painter = item_painter
view.bounding_box_painter = BoundingBoxPainter(item_painter)
return view
|
djangraw/PsychoPyParadigms | BasicExperiments/FourLetterTask.py | Python | mit | 9,655 | 0.020818 | #!/usr/bin/env python2
"""Implement a visuospatial working memory task
described in Mason et al., Science 2007 (doi: 10.1126/science.1131295)"""
# FourLetterTask.py
# Created 12/17/14 by DJ based on SequenceLearningTask.py
# Updated 11/9/15 by DJ - cleanup, instructions
from psychopy import core, visual, gui, data, event, sound
from psychopy.tools.filetools import fromFile, toFile
import time, numpy as np
from psychopy import logging
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Declare primary task parameters
nBlocks = 4
nTrialsPerBlock = 1
fixDur = 0.2 # before sequence
sequenceDur = 1 # when sequence is displayed
gapDur = 0.5
arrowDur = 0.5 # time arrow is on screen
respDur = 8 # max time to respond
ITI = 0.5 # time between end of one trial and beginning of next trial
IBI = 1 # time between end of block/probe and beginning of next block
randomize_seq = False;
sequences = (['R','X','V','F'],['W','V','Y','Q'])
if randomize_seq:
np.random.shuffle(sequences[0])
for i in range(1,len(sequences)):
np.random.shuffle(sequences[i])
while sequences[i] in sequences[0:i-1]:
np.random.shuffle(sequences[i])
# declare probe parameters
probe_prob = 0 # probablilty that a given trial will be preceded by a probe
probe1_string = 'Where was your attention focused just before this?'
probe1_options = ('Completely on the task','Mostly on th | e task','Not sure','Mostly on inward thoughts','Completely on inward thoughts')
probe2_string = 'How aware were you of where your attention was?'
probe2_options = ('Very aware','Somewhat aware','Neutral','Somewhat unaware','Very unaware')
# ========================== #
# ===== SET UP STIMULI ===== #
# ========================== #
try:#tr | y to get a previous parameters file
expInfo = fromFile('lastFourLetterParams.pickle')
except:#if not there then use a default set
expInfo = {'subject':'abc', 'session':'1'}
dateStr = time.strftime("%b_%d_%H%M", time.localtime())#add the current time
#present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title='Four Letter Task', fixed=['date'])
if dlg.OK:
toFile('lastFourLetterParams.pickle', expInfo)#save params to file for next time
else:
core.quit()#the user hit cancel so exit
#make a text file to save data
fileName = 'FourLetter-' + expInfo['subject'] + '-' + expInfo['session'] + '-' + dateStr
dataFile = open(fileName+'.txt', 'w')
dataFile.write('key RT AbsTime\n')
#create window and stimuli
globalClock = core.Clock()#to keep track of time
trialClock = core.Clock()#to keep track of time
win = visual.Window([800,600],allowGUI=False, monitor='testMonitor', units='deg')
fixation = visual.GratingStim(win, color='black', tex=None, mask='circle',size=0.2)
message1 = visual.TextStim(win, pos=[0,+3],text="Memorize each sequence of 4 letters. Then an arrow will indicate whether the ordering is right-to-left or left-to-right. Next you'll see a letter - indicate its number in the given sequence and order.")
message2 = visual.TextStim(win, pos=[0,-3], text="When you're ready to begin, press any key.")
# make sequence text
centerText = visual.TextStim(win,pos=[0,0],text = sequences[0])
# make arrows
line = visual.Line(win,start=(-2,0),end=(2,0),lineColor='black')
leftArrow = visual.Polygon(win,edges=3,radius=0.5,pos=(-2,0),ori=30,lineColor='black',fillColor='black')
rightArrow = visual.Polygon(win,edges=3,radius=0.5,pos=(2,0),ori=-30,lineColor='black',fillColor='black')
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
logging.LogFile((fileName+'.log'), level=logging.INFO)#, mode='w') # w=overwrite
logging.log(level=logging.INFO, msg='Subject %s, Session %s'%(expInfo['subject'],expInfo['session']))
for i in range(0,len(sequences)):
logging.log(level=logging.INFO, msg='sequence %d: %s'%(i,sequences[i]))
# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #
def PlaySequence(sequence):
# get block start time
tBlock = globalClock.getTime()*1000
#draw fixation dot
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Fixation')
win.flip()
core.wait(fixDur)#wait for specified ms (use a loop of x frames for more accurate timing)
# display sequence
centerText.setText(''.join(sequence))
centerText.draw()
win.logOnFlip(level=logging.EXP, msg='Square %d/%d'%(i+1,len(sequence)))
win.flip()
core.wait(sequenceDur)
return tBlock
def RunTrial(goForward,replay):
# get trial start time
tTrial = globalClock.getTime()*1000
# reset trial clock
trialClock.reset()
# clear event buffer
event.clearEvents()
# display response direction
line.draw()
if goForward:
rightArrow.draw()
win.logOnFlip(level=logging.EXP, msg='Right Arrow')
else:
leftArrow.draw()
win.logOnFlip(level=logging.EXP, msg='Left Arrow')
win.flip()
# pause brielfy
core.wait(arrowDur)
#get responses
allKeys = []
print(replay)
for i in range(0,len(replay)):
centerText.setText(replay[i])
centerText.draw()
win.logOnFlip(level=logging.EXP, msg='letter %d: %s'%(i+1,replay[i]))
win.flip()
while trialClock.getTime()<respDur:
newKeys = event.getKeys(timeStamped=trialClock)
for thisKey in newKeys:
allKeys.append(thisKey) #,keyList=['1','2','3','4','q','Escape']
if len(newKeys)>0:
break # exit while loop and go on to next
# allKeys = event.getKeys(timeStamped=trialClock)
return (tTrial,allKeys)
def RunProbes():
# reset clock
trialClock.reset()
# set up stimuli
message1.setText(probe1_string)
message2.setText("1) %s\n2) %s\n3) %s\n4) %s\n5) %s" % probe1_options)
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Probe 1')
win.flip()
# get response
key1 = event.waitKeys(keyList=['1','2','3','4','5','q','Escape'],timeStamped=trialClock)
# reset clock
trialClock.reset()
# set up stimuli
message1.setText(probe2_string)
message2.setText("1) %s\n2) %s\n3) %s\n4) %s\n5) %s" % probe2_options)
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Probe 2')
win.flip()
# get response
key2 = event.waitKeys(keyList=['1','2','3','4','5','q','Escape'],timeStamped=trialClock)
# return results
return (key1[0],key2[0])
# =========================== #
# ===== MAIN EXPERIMENT ===== #
# =========================== #
#display instructions and wait
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Instructions')
win.flip()
#check for a keypress
event.waitKeys()
# do brief wait before first stimulus
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Fixation')
win.flip()
core.wait(ITI)
for iBlock in range(0,nBlocks): #will step through the blocks
# determine sequence
iSeq = np.random.choice(range(0,len(sequences)))
sequence = sequences[iSeq]
logging.log(level=logging.EXP, msg='Block %d, Sequence %d: %s'%(iBlock,iSeq,sequence))
# play sequence
tBlock = PlaySequence(sequence)
core.wait(gapDur)#wait for specified ms (use a loop of x frames for more accurate timing)
for iTrial in range(0,nTrialsPerBlock):
# determine direction
goForward = np.random.random()>0.5
np.random.shuffle(sequence)
logging.log(level=logging.EXP, msg='Trial %d, goForward %r, order %s'%(iTrial,goForward,sequence))
# Run Trial
(tTrial,allKeys) = RunTrial(goForward,sequence)
# Evaluate results
for thisKey in allKeys:
keyChar = thisKey[0]
RT = thisKey[1]*1000 # in ms
#log the data
dataFile.write('%s %.1f %.1f\n' %(keyChar, RT, RT+tTrial))
print("key=%s, RT=%.1f"%(keyChar,RT))
# look for escape character
if thisKey[0] in ['q', 'escape']:
core.quit()#abort experiment
event.clearEvents('mous |
Wesalius/EloBot | pywikibot/families/meta_family.py | Python | gpl-3.0 | 641 | 0 | # -*- coding: utf-8 -*-
"""Family module for Meta Wiki."""
#
# (C) Pywikibot team, 2005-2018
#
# Dis | tributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import family
# The meta wikimedia family
class Family(family.WikimediaOrgFamily):
"""Family class for Meta Wiki."""
name = 'meta'
interwiki_forward = 'wikipedia'
cross_allowed = ['meta', ]
category_redirect_templates = {
'meta': (
'Category redirect',
),
}
# Subpages for documentation.
doc_subpages = {
| '_default': (('/doc',), ['meta']),
}
|
VirusTotal/content | Packs/McAfee_DXL/Integrations/McAfee_DXL/McAfee_DXL.py | Python | mit | 7,105 | 0.002252 | from typing import Dict
import tempfile
from dxlclient.client_config import DxlClientConfig
from dxlclient.client import DxlClient
from dxlclient.broker import Broker
from dxlclient.message import Event
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
INTEGRATION_NAME = "McAfee DXL"
CONNECT_RETRIES = 1
RECONNECT_DELAY = 1
RECONNECT_DELAY_MAX = 10
class EventSender:
TRUST_LEVEL = {
'NOT_SET': '0',
'KNOWN_MALICIOUS': '1',
'MOST_LIKELY_MALICIOUS': '15',
'MIGHT_BE_MALICIOUS': '30',
'UNKNOWN': '50',
'MIGHT_BE_TRUSTED': '70',
'MOST_LIKELY_TRUSTED': '85',
'KNOWN_TRUSTED': '99',
'KNOWN_TRUSTED_INSTALLER': '100'
}
broker_ca_bundle = tempfile.NamedTemporaryFile().name
cert_file = tempfile.NamedTemporaryFile().name
private_key = tempfile.NamedTemporaryFile().name
def __init__(self, params: Dict):
with open(self.broker_ca_bundle, "w") as text_file:
text_file.write(params['broker_ca_bundle'])
with open(self.cert_file, "w") as text_file:
text_file.write(params['cert_file'])
with open(self.private_key, "w") as text_file:
text_file.write(params['private_key'])
if 'broker_urls' in params:
self.broker_urls = params['broker_urls'].split(',')
self.push_ip_topic = params.get('push_ip_topic')
self.push_url_topic = params.get('push_url_topic')
self.push_domain_topic = params.get('push_domain_topic')
self.push_hash_topic = params.get('push_hash_topic')
self.client = DxlClient(self.get_client_config())
self.client.connect()
def __del__(self):
self.client.disconnect()
def push_ip(self, ip, trust_level, topic):
if not is_ip_valid(ip):
raise ValueError(f'argument ip {ip} is not a valid IP')
trust_level_key = self.TRUST_LEVEL[trust_level]
if topic:
self.push_ip_topic = topic
self.send_event(self.push_ip_topic, f'ip:{ip};trust_level:{trust_level_key}')
return f'Successfully pushed ip {ip} with trust level {trust_level}'
def push_url(self, url, trust_level, topic):
trust_level_key = self.TRUST_LEVEL[trust_level]
if topic:
self.push_url_topic = topic
self.send_event(self.push_url_topic, f'url:{url};trust_level:{trust_level_key}')
return f'Successfully pushed url {url} with trust level {trust_level}'
def push_domain(self, domain, trust_level, topic):
trust_level_key = self.TRUST_LEVEL[trust_level]
if topic:
self.push_domain_topic = topic
self.send_event(self.push_domain_topic, f'domain:{domain};trust_level:{trust_level_key}')
return f'Successfully pushed domain {domain} with trust level {trust_l | evel}'
def push_hash(self, hash_obj, trust_level, topic):
trust_level_key = self.TRUST_LEVEL[trust_level]
if topic:
self.push_ip_topic = topic
self.send_event(self.pu | sh_hash_topic, f'hash:{hash_obj};trust_level:{trust_level_key}')
return f'Successfully pushed hash {hash_obj} with trust level {trust_level}'
def get_client_config(self):
config = DxlClientConfig(
broker_ca_bundle=self.broker_ca_bundle,
cert_file=self.cert_file,
private_key=self.private_key,
brokers=[Broker.parse(url) for url in self.broker_urls]
)
config.connect_retries = CONNECT_RETRIES
config.reconnect_delay = RECONNECT_DELAY
config.reconnect_delay_max = RECONNECT_DELAY_MAX
return config
def send_event(self, topic, payload):
if not topic:
raise Exception(f'Error in {demisto.command()} topic field is required')
event = Event(topic)
event.payload = str(payload).encode()
self.client.send_event(event)
def send_event_wrapper(self, topic, payload):
self.send_event(topic, payload)
return 'Successfully sent event'
def validate_certificates_format():
if '-----BEGIN PRIVATE KEY-----' not in demisto.params()['private_key']:
return_error(
"The private key content seems to be incorrect as it doesn't start with -----BEGIN PRIVATE KEY-----")
if '-----END PRIVATE KEY-----' not in demisto.params()['private_key']:
return_error(
"The private key content seems to be incorrect as it doesn't end with -----END PRIVATE KEY-----")
if '-----BEGIN CERTIFICATE-----' not in demisto.params()['cert_file']:
return_error("The client certificates content seem to be "
"incorrect as they don't start with '-----BEGIN CERTIFICATE-----'")
if '-----END CERTIFICATE-----' not in demisto.params()['cert_file']:
return_error(
"The client certificates content seem to be incorrect as it doesn't end with -----END CERTIFICATE-----")
if not demisto.params()['broker_ca_bundle'].lstrip(" ").startswith('-----BEGIN CERTIFICATE-----'):
return_error(
"The broker certificate seem to be incorrect as they don't start with '-----BEGIN CERTIFICATE-----'")
if not demisto.params()['broker_ca_bundle'].rstrip(" ").endswith('-----END CERTIFICATE-----'):
return_error(
"The broker certificate seem to be incorrect as they don't end with '-----END CERTIFICATE-----'")
def main():
args = demisto.args()
command = demisto.command()
try:
event_sender = EventSender(demisto.params())
result = ''
if command == 'test-module':
event_sender.send_event('TEST', 'test')
result = 'ok'
elif command == 'dxl-send-event':
result = event_sender.send_event_wrapper(args.get('topic'), args.get('payload'))
elif command == 'dxl-push-ip':
result = event_sender.push_ip(args.get('ip'),
args.get('trust_level'),
args.get('topic'))
elif command == 'dxl-push-url':
result = event_sender.push_url(args.get('url'),
args.get('trust_level'),
args.get('topic'))
elif command == 'dxl-push-domain':
result = event_sender.push_domain(args.get('domain'),
args.get('trust_level'),
args.get('topic'))
elif command == 'dxl-push-hash':
result = event_sender.push_hash(args.get('hash'),
args.get('trust_level'),
args.get('topic'))
else:
raise Exception(f'{demisto.command()} is not a command')
return_outputs(result)
except Exception as error:
validate_certificates_format()
return_error(f'error in {INTEGRATION_NAME} {str(error)}.', error)
if __name__ in ('__builtin__', 'builtins'):
main()
|
ds-hwang/chromium-crosswalk | tools/perf/page_sets/memory_health_story.py | Python | bsd-3-clause | 3,234 | 0.007112 | # Copyrigh | t 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
from devil.android.sdk import intent # pyl | int: disable=import-error
from devil.android.sdk import keyevent # pylint: disable=import-error
DUMP_WAIT_TIME = 3
URL_LIST = [
'http://google.com',
'http://vimeo.com',
'http://yahoo.com',
'http://baidu.com',
'http://cnn.com',
'http://yandex.ru',
'http://yahoo.co.jp',
'http://amazon.com',
'http://ebay.com',
'http://bing.com',
]
class ForegroundPage(page_module.Page):
"""Take a measurement after loading a regular webpage."""
def __init__(self, story_set, name, url):
super(ForegroundPage, self).__init__(
url=url, page_set=story_set, name=name,
shared_page_state_class=shared_page_state.SharedMobilePageState)
self.archive_data_file = story_set.archive_data_file
def _TakeMemoryMeasurement(self, action_runner, phase):
action_runner.Wait(1) # See crbug.com/540022#c17.
with action_runner.CreateInteraction(phase):
action_runner.Wait(DUMP_WAIT_TIME)
action_runner.ForceGarbageCollection()
action_runner.tab.browser.platform.FlushEntireSystemCache()
action_runner.Wait(DUMP_WAIT_TIME)
if not action_runner.tab.browser.DumpMemory():
logging.error('Unable to get a memory dump for %s.', self.name)
def RunPageInteractions(self, action_runner):
action_runner.tab.WaitForDocumentReadyStateToBeComplete()
self._TakeMemoryMeasurement(action_runner, 'foreground')
class BackgroundPage(ForegroundPage):
"""Take a measurement while Chrome is in the background."""
def __init__(self, story_set, name):
super(BackgroundPage, self).__init__(story_set, name, 'about:blank')
def RunPageInteractions(self, action_runner):
action_runner.tab.WaitForDocumentReadyStateToBeComplete()
# Launch clock app, pushing Chrome to the background.
android_platform = action_runner.tab.browser.platform
android_platform.LaunchAndroidApplication(
intent.Intent(package='com.google.android.deskclock',
activity='com.android.deskclock.DeskClock'),
app_has_webviews=False)
# Take measurement.
self._TakeMemoryMeasurement(action_runner, 'background')
# Go back to Chrome.
android_platform.android_action_runner.InputKeyEvent(keyevent.KEYCODE_BACK)
class MemoryHealthStory(story.StorySet):
"""User story for the Memory Health Plan."""
def __init__(self):
super(MemoryHealthStory, self).__init__(
archive_data_file='data/memory_health_plan.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
for url in URL_LIST:
# We name pages so their foreground/background counterparts are easy
# to identify. For example 'http://google.com' becomes
# 'http_google_com' and 'after_http_google_com' respectively.
name = re.sub('\W+', '_', url)
self.AddStory(ForegroundPage(self, name, url))
self.AddStory(BackgroundPage(self, 'after_' + name))
|
MischaLundberg/bamsurgeon | scripts/makevcf.py | Python | mit | 2,120 | 0.010377 | #!/usr/bin/env python
import sys,os
import textwrap
def print_header():
print textwrap.dedent("""\
##fileformat=VCFv4.1
##phasing=none
##INDIVIDUAL=TRUTH
##SAMPLE=<ID=TRUTH,Individual="TRUTH",Description="bamsurg | eon spike-in">
##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">
##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">
##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">
##INFO=<ID=SVLEN,Number=.,Type=Integer,Des | cription="Difference in length between REF and ALT alleles">
##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic mutation in primary">
##INFO=<ID=VAF,Number=1,Type=Float,Description="Variant Allele Frequency">
##INFO=<ID=DPR,Number=1,Type=Float,Description="Avg Depth in Region (+/- 1bp)">
##INFO=<ID=MATEID,Number=1,Type=String,Description="Breakend mate">
##ALT=<ID=INV,Description="Inversion">
##ALT=<ID=DUP,Description="Duplication">
##ALT=<ID=DEL,Description="Deletion">
##ALT=<ID=INS,Description="Insertion">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSPIKEIN""")
if len(sys.argv) == 2:
print_header()
logdir_files = os.listdir(sys.argv[1])
for filename in logdir_files:
if filename.endswith('.log'):
with open(sys.argv[1] + '/' + filename, 'r') as infile:
for line in infile:
if line.startswith('snv'):
#chrom, pos, mut = line.strip().split()
c = line.strip().split()
chrom = c[1].split(':')[0]
pos = c[3]
mut = c[4]
dpr = c[6]
vaf = c[7]
ref,alt = mut.split('-->')
print "\t".join((chrom,pos,'.',ref,alt,'100','PASS','SOMATIC;VAF=' + vaf + ';DPR=' + dpr,'GT','0/1'))
else:
print "usage:", sys.argv[0], "<log directory>"
|
magacoin/magacoin | contrib/zmq/zmq_sub.py | Python | mit | 1,425 | 0.002105 | #!/usr/bin/env python2
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import array
import binascii
import zmq
import struct
port = 25332
zmqContext = zmq.Context()
zmqSubSocket = zmqContext.socket(zmq.SUB)
zmqSubSocke | t.setsockopt(zmq.SUBSCRIBE, "hashbrick")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashtx")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawbrick")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawtx")
zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
try:
while True:
msg = zmqSubSocket.recv_multipart()
topic = str(msg[0])
body = msg[1]
sequence = "Unknown";
| if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == "hashbrick":
print '- HASH BRICK ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "hashtx":
print '- HASH TX ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "rawbrick":
print '- RAW BRICK HEADER ('+sequence+') -'
print binascii.hexlify(body[:80])
elif topic == "rawtx":
print '- RAW TX ('+sequence+') -'
print binascii.hexlify(body)
except KeyboardInterrupt:
zmqContext.destroy()
|
alaasalman/taskit | TIAboutDialog.py | Python | gpl-3.0 | 2,548 | 0.008634 | """
Copyright 2007 Alaa Salman <alaa@codedemigod.com>
This file is part of TaskIt.
TaskIt is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
TaskIt is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You | should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
from | ui import AboutDialog
class TIAboutDialog(QtGui.QDialog):
def __init__(self, p_Parent = None):
QtGui.QDialog.__init__(self, p_Parent)
self.ui = AboutDialog.Ui_Dialog()
self.ui.setupUi(self)
htmlAboutAuthor = """<html>
<body>
TaskIt GTD Application.
<p>Author: Alaa Salman</p>
<p>Uses the awesome famfamfam icons.</p>
</body>
</html>
"""
htmlAboutLicense = """<html><body>TaskIt is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
<p>TaskIt is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.</p>
<p>You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses</p>
<p></p></body></html>"""
self.ui.tabWidget.widget(0).findChild(QtGui.QTextBrowser, "authorTextBrowser").setHtml(htmlAboutAuthor)
self.ui.tabWidget.widget(1).findChild(QtGui.QTextBrowser, "licenseTextBrowser").setHtml(htmlAboutLicense)
|
xiangcai/todother | controller/search.py | Python | lgpl-3.0 | 3,591 | 0.011139 | import os
import sys
import logging
import uuid
import re
import json
import string
import tornado.web
import tornado.escape
import urlparse
import urllib
import Levenshtein
from controller.base import *
from module.todo_entity import TodoMatchEntity
_todo_prefix = "t:"
_person_prefix = "p:"
_friend_prefix = "f:"
_pagesize = 30
_page = 0
class SearchHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
global _person_prefix
global _friend_prefix
global _todo_prefix
global _page
global _pagesize
keyword = self.get_argument("keyword", None)
_page = int(self.ge | t_argument("page",0))
_pagesize=30
print keyword
if not keyword:
return
if keyword.startswith(_person_prefix):
return self.person_search(keyword)
elif keyword.startswith(_friend_prefix):
ret | urn self.friend_search(keyword)
else:
return self.todo_search(keyword)
def person_search(self,orakeyword):
result = []
keyword = orakeyword[len(_person_prefix):len(orakeyword)]
entries = self.db.query("SELECT * FROM auth_user WHERE nickname like %s","%%"+keyword+"%%")
total = len(entries)
if len(entries)==1:
user = UserEntity(entries[0].user_id)
user.load(entries[0])
result.append(user)
self.render("search.html", entries=result,page=_page,totalnum=total,pagesize=_pagesize,keyword=orakeyword,title="Search")
else:
self.render("search.html", entries=result,page=_page,totalnum=total,pagesize=_pagesize,keyword=orakeyword,title="Search")
def friend_search(self,orakeyword):
keyword = orakeyword[len(_friend_prefix):len(orakeyword)]
self.render("search.html", result=keyword,title="Search Result")
def todo_search(self,orakeyword):
if orakeyword.startswith(_todo_prefix):
keyword = orakeyword[len(_todo_prefix):len(orakeyword)]
else:
keyword = orakeyword
entries = self.db.query("SELECT t.*, u.nickname,u.language,u.gender FROM todo t left join auth_user u on todo_user_id = user_id "
"WHERE todo_what like %s","%%"+keyword+"%%")
if len(entries)==1:
count = self.db.get('select count(*) from todo_update where todo_id=%s' % entries[0].todo_id)
if count == 0:
updates = None
elif count > 5:
updates = self.db.query('select * from todo_update where todo_id=%s order by update_time desc limit 5' % entry.todo_id)
else:
updates = self.db.query('select * from todo_update where todo_id=%s order by update_time desc' % entry.todo_id)
times = []
if updates:
for update in updates:
times.append(update.update_time)
self.render("todo.html", entry=entries[0], updates=updates, times=times, with_update=True)
else:
result = []
for entry in entries:
todo_match = TodoMatchEntity(entry.todo_id)
todo_match.load(entry)
result.append(todo_match)
start = _page * _pagesize
end = (_page+1) * _pagesize-1
page_result=result[start:end]
total = len(result)
print type(result[0])
self.render("search.html", entries=page_result,page=_page,totalnum=total,pagesize=_pagesize,keyword=orakeyword,title="Search")
|
jruizgit/rules | setup.py | Python | mit | 2,665 | 0.011632 | try:
from setuptools import setup, Extension
from setuptools.command import install_lib as _install_lib
except ImportError:
from distutils.core import setup, Extension
from distutils.command import install_lib as _install_lib
from codecs import open
from os import path
from os import environ
from sys import platform
# Compiler flags for building the C extension; sys.platform is 'win32' on
# both 32- and 64-bit Windows.
if platform == 'win32':
    # bugfix: '-_WIN32' is not a valid compiler option; the intent is to
    # define the _WIN32 preprocessor macro, which requires -D.
    environ['CFLAGS'] = '-std=c99 -D_GNU_SOURCE -D_WIN32'
elif platform == 'darwin':
    # -fcommon restores pre-GCC10 tentative-definition behaviour.
    environ['CFLAGS'] = '-std=c99 -D_GNU_SOURCE -fcommon'
else:
    environ['CFLAGS'] = '-std=c99 -D_GNU_SOURCE'
# Patch "install_lib" command to run build_clib before build_ext
# to properly work with easy_install.
# See: http://bugs.python.org/issue5243
class install_lib(_install_lib.install_lib):
    """install_lib that also triggers build_clib/build_ext when needed."""
    def build(self):
        # Nothing to do when the caller asked to skip building entirely.
        if self.skip_build:
            return
        dist = self.distribution
        if dist.has_pure_modules():
            self.run_command('build_py')
        if dist.has_c_libraries():
            self.run_command('build_clib')
        if dist.has_ext_modules():
            self.run_command('build_ext')
# Static C library holding the core rules engine; built by build_clib.
rules_lib = ('durable_rules_engine_py', {'sources': ['src/rules/%s.c' % src for src in ('json', 'rete', 'state', 'events', 'regex')]})
# Python extension module that wraps the engine.
rules = Extension('durable_rules_engine',
                  sources = ['src/rulespy/rules.c'],
                  include_dirs=['src/rules'])
# Long description is read from the Python-specific docs folder.
here = path.abspath(path.dirname(__file__)) + '/docs/py'
with open(path.join(here, 'README.txt'), encoding='utf-8') as f:
    long_description = f.read()
setup (
    name = 'durable_rules',
    version = '2.0.28',
    description = 'for real time analytics (a Python Rules Engine)',
    long_description=long_description,
    url='https://github.com/jruizgit/rules',
    author='Jesus Ruiz',
    author_email='jr3791@live.com',
    license='MIT',
    classifiers=[
        'Operating System :: OS Independent',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: C',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='rules engine rete forward chaining event stream state machine workflow streaming analytics',
    packages = ['durable'],
    package_dir = {'': 'libpy'},
    libraries = [rules_lib],
    ext_modules = [rules],
    # Override 'install_lib' command
    cmdclass={'install_lib': install_lib},
)
|
luboslenco/cyclesgame | blender/arm/logicnode/sound_play_sound.py | Python | lgpl-3.0 | 652 | 0.004601 | import bpy
fr | om bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class PlaySoundNode(Node, ArmLogicTreeNode):
    '''Logic node that plays a sound datablock when its In action fires.'''
    bl_idname = 'LNPlaySoundRawNode'  # id the logic-tree exporter matches on
    bl_label = 'Play Sound'
    bl_icon = 'QUESTION'
    # Reference to a bpy Sound datablock, chosen in draw_buttons below.
    property0: PointerProperty(name='', type=bpy.types.Sound)
    def init(self, context):
        # One action input, one action output (pass-through once triggered).
        self.inputs.new('ArmNodeSocketAction', 'In')
        self.outputs.new('ArmNodeSocketAction', 'Out')
    def draw_buttons(self, context, layout):
        # Sound picker searching bpy.data.sounds.
        layout.prop_search(self, 'property0', bpy.data, 'sounds', icon='NONE', text='')
# Register the node under the 'Sound' category in the add-node menu.
add_node(PlaySoundNode, category='Sound')
|
dmpetrov/dataversioncontrol | dvc/env.py | Python | apache-2.0 | 396 | 0 | DV | C_CHECKPOINT = "DVC_CHECKPOINT"
DVC_DAEMON = "DVC_DAEMON"
DVC_PAGER = "DVC_PAGER"
DVC_ROOT = "DVC_ROOT"
DVCLIVE_PATH = "DVCLIVE_PATH"
DVCLIVE_SUMMARY = "DVCLIVE_SUMMARY"
DVCLIVE_HTML = "DVCLIVE_HTML"
DVCLIVE_RESUME = "DVCLIVE_RESUME"
DVC_IGNORE_ISATTY = "DVC_IGNORE_ISATTY"
DVC_EXP_GIT_REMOTE = "DVC_EXP_GIT_REMOTE"
DVC_EXP_AUTO_PUSH = "DVC_EXP_AUTO_PUSH"
DVC_NO_ANALYTICS = "DVC_NO_ANALYTICS" | |
google-research/language | language/conpono/evals/run_finetune_coherence.py | Python | apache-2.0 | 18,761 | 0.005863 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BERT next sentence prediction / binary coherence finetuning runner."""
import collections
import os
import random
from absl import app
from absl import flags
from bert import modeling
from bert import optimization
from bert import tokenization
from language.conpono.cpc import model_builder
from language.conpono.evals.coherence_eval import create_cpc_input_from_text
from language.conpono.evals.coherence_eval import read_data
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "eval_file", None,
    "The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
    "train_file", None,
    "The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string("train_raw_data", None, "The raw input data for training.")
flags.DEFINE_string("eval_raw_data", None, "The raw input data for eval.")
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "finetune_output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
# num_choices controls how many candidate continuations each example carries
# (1 positive + num_choices-1 negatives); it sizes the per-choice features in
# file_based_input_fn_builder below.
flags.DEFINE_integer("num_choices", 16, "Number of negative samples + 1")
flags.DEFINE_bool("add_lv2loss", False, "Whether to use the level 2 loss.")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 512,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
# NOTE(review): the two help strings below concatenate without a space
# ("the""training data"); harmless, but reads oddly in --help output.
flags.DEFINE_integer("train_data_size", 10000, "The number of examples in the"
                     "training data")
flags.DEFINE_integer("eval_data_size", -1, "The number of examples in the"
                     "validation data")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 10000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
# Separator token used when assembling BERT input sequences.
_SEP_TOKEN = "[SEP]"
class InputFeatures:
  """Plain record holding one tokenized example's BERT input tensors."""

  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    # Store every field verbatim; this class carries no behaviour.
    fields = (("input_ids", input_ids),
              ("input_mask", input_mask),
              ("segment_ids", segment_ids),
              ("label_id", label_id),
              ("is_real_example", is_real_example))
    for attr_name, attr_value in fields:
      setattr(self, attr_name, attr_value)
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder, num_choices):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_file: comma-separated list of tfrecord paths or glob patterns.
    seq_length: padded token length of every per-choice input tensor.
    is_training: if True, datasets repeat and shuffle.
    drop_remainder: drop the final partial batch (required on TPU).
    num_choices: number of candidate sequences stored per example.

  Returns:
    A callable `input_fn(params)` yielding batched tf.data examples.
  """
  input_file = input_file.split(",")
  expanded_files = []
  for infile in input_file:
    try:
      sharded_files = tf.io.gfile.glob(infile)
      expanded_files.append(sharded_files)
    except tf.errors.OpError:
      # Not a glob pattern (or glob failed): fall back to the literal path.
      expanded_files.append(infile)
  # One (input_ids, input_mask, segment_ids) triple per candidate choice,
  # plus fixed-width label vectors of length 8.
  name_to_features = {}
  for i in range(num_choices):
    name_to_features["input_ids" + str(i)] = tf.FixedLenFeature([seq_length],
                                                                tf.int64)
    name_to_features["input_mask" + str(i)] = tf.FixedLenFeature([seq_length],
                                                                 tf.int64)
    name_to_features["segment_ids" + str(i)] = tf.FixedLenFeature([seq_length],
                                                                  tf.int64)
  name_to_features["label_types"] = tf.FixedLenFeature([8], tf.int64)
  name_to_features["labels"] = tf.FixedLenFeature([8], tf.int64)
  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t
    return example
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    if len(expanded_files) == 1:
      # Single input: plain TFRecordDataset over its shards.
      d = tf.data.TFRecordDataset(expanded_files[0])
      if is_training:
        d = d.repeat()
        d = d.shuffle(buffer_size=256)
    else:
      # Multiple inputs: round-robin between the per-file datasets so each
      # batch mixes examples from every source.
      dataset_list = [
          tf.data.TFRecordDataset(expanded_files[i])
          for i in range(len(expanded_files))
      ]
      if is_training:
        dataset_list = [
            d.repeat() for d in dataset_list
        ]
      # Note that an alternative is to load from the list directly without
      # specifying a range. Sample below:
      # d = tf.data.experimental.sample_from_datasets(dataset_list)
      choice_dataset = tf.data.Dataset.range(len(dataset_list)).repeat()
      d = tf.data.experimental.choose_from_datasets(dataset_list,
                                                    choice_dataset)
      if is_training:
        d = d.shuffle(buffer_size=256)
    d = d.apply(
        tf.data.experimental.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return d
  return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_emb |
kudlav/dnf | dnf/cli/completion_helper.py | Python | gpl-2.0 | 6,785 | 0.002211 | #!/usr/bin/env python
#
# This file is part of dnf.
#
# Copyright 2015 (C) Igor Gnatenko <i.gnatenko.brain@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import dnf.cli
import re
import sys
def filter_list_by_kw(kw, lst):
    """Lazily yield items of *lst* whose string form starts with *kw*."""
    matches_prefix = lambda item: str(item).startswith(kw)
    return filter(matches_prefix, lst)
def listpkg_to_setstr(pkgs):
    """Return the set of string representations of the given packages."""
    return {str(pkg) for pkg in pkgs}
class RemoveCompletionCommand(dnf.cli.commands.remove.RemoveCommand):
    """Completion for `dnf remove`: print installed packages matching a prefix."""
    def __init__(self, args):
        super(RemoveCompletionCommand, self).__init__(args)
    def configure(self, args):
        # Completion only reads the sack; no root privileges required.
        self.cli.demands.root_user = False
        self.cli.demands.sack_activation = True
    def run(self, args):
        for pkg in ListCompletionCommand.installed(self.base, args[0]):
            print(str(pkg))
class InstallCompletionCommand(dnf.cli.commands.install.InstallCommand):
    """Completion for `dnf install`: available-but-not-installed packages."""
    def __init__(self, args):
        super(InstallCompletionCommand, self).__init__(args)
    def configure(self, args):
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self, args):
        # Set difference on the string forms, not the package objects.
        installed = listpkg_to_setstr(ListCompletionCommand.installed(self.base, args[0]))
        available = listpkg_to_setstr(ListCompletionCommand.available(self.base, args[0]))
        for pkg in (available - installed):
            print(str(pkg))
class ReinstallCompletionCommand(dnf.cli.commands.reinstall.ReinstallCommand):
    """Completion for `dnf reinstall`: packages both installed and available."""
    def __init__(self, args):
        super(ReinstallCompletionCommand, self).__init__(args)
    def configure(self, args):
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self, args):
        installed = listpkg_to_setstr(ListCompletionCommand.installed(self.base, args[0]))
        available = listpkg_to_setstr(ListCompletionCommand.available(self.base, args[0]))
        # Reinstall only makes sense for packages present in both sets.
        for pkg in (installed & available):
            print(str(pkg))
class ListCompletionCommand(dnf.cli.commands.ListCommand):
    """Completion for `dnf list`: complete subcommands, then package names.

    Also hosts the installed/available/updates query helpers the other
    completion commands reuse.
    """
    def __init__(self, args):
        super(ListCompletionCommand, self).__init__(args)
    def run(self, args):
        # Subcommand names are parsed out of the base command's usage string.
        subcmds = self.__class__.__base__.usage[1:-1].split("|")[1:]
        if args[0] not in subcmds:
            print("\n".join(filter_list_by_kw(args[1], subcmds)))
            return
        # bugfix: subcommands other than the three handled here (e.g.
        # "obsoletes") previously fell through to `for pkg in pkgs` with
        # `pkgs` unbound, raising NameError.
        handlers = {
            "installed": self.installed,
            "available": self.available,
            "updates": self.updates,
        }
        handler = handlers.get(args[0])
        if handler is None:
            return
        for pkg in handler(self.base, args[1]):
            print(str(pkg))
    @staticmethod
    def installed(base, arg):
        return base.sack.query().installed().filter(name__glob="{}*".format(arg))
    @staticmethod
    def available(base, arg):
        return base.sack.query().available().filter(name__glob="{}*".format(arg))
    @staticmethod
    def updates(base, arg):
        return base.check_updates(["{}*".format(arg)], print_=False)
class RepoListCompletionCommand(dnf.cli.commands.repolist.RepoListCommand):
    """Completion for `dnf repolist`: repo ids filtered by enabled state."""
    def __init__(self, args):
        super(RepoListCompletionCommand, self).__init__(args)
    def run(self, args):
        # args[0] selects the repo state, args[1] is the prefix typed so far.
        if args[0] == "enabled":
            print("\n".join(filter_list_by_kw(args[1], [r.id for r in self.base.repos.iter_enabled()])))
        elif args[0] == "disabled":
            print("\n".join(filter_list_by_kw(args[1], [r.id for r in self.base.repos.all() if not r.enabled])))
class UpgradeCompletionCommand(dnf.cli.commands.upgrade.UpgradeCommand):
    """Completion for `dnf upgrade`: packages with pending updates."""
    def __init__(self, args):
        super(UpgradeCompletionCommand, self).__init__(args)
    def configure(self, args):
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self, args):
        for pkg in ListCompletionCommand.updates(self.base, args[0]):
            print(str(pkg))
class DowngradeCompletionCommand(dnf.cli.commands.downgrade.DowngradeCommand):
    """Completion for `dnf downgrade`: available packages with older versions."""
    def __init__(self, args):
        super(DowngradeCompletionCommand, self).__init__(args)
    def configure(self, args):
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self, args):
        for pkg in ListCompletionCommand.available(self.base, args[0]).downgrades():
            print(str(pkg))
class CleanCompletionCommand(dnf.cli.commands.clean.CleanCommand):
    """Completion for `dnf clean`: offer the clean subcommand names."""
    def __init__(self, args):
        super(CleanCompletionCommand, self).__init__(args)
    def run(self, args):
        # Subcommands are extracted from the base command's usage string.
        subcmds = self.__class__.__base__.usage[1:-1].split("|")[1:]
        print("\n".join(filter_list_by_kw(args[1], subcmds)))
class HistoryCompletionCommand(dnf.cli.commands.HistoryCommand):
    """Completion for `dnf history`: offer subcommand names only."""
    def __init__(self, args):
        super(HistoryCompletionCommand, self).__init__(args)
    def run(self, args):
        subcmds = self.__class__.__base__.usage[1:-1].split("|")[1:]
        # Once a valid subcommand is typed there is nothing further to offer.
        if args[0] not in subcmds:
            print("\n".join(filter_list_by_kw(args[1], subcmds)))
def main(args):
    """Entry point for shell completion: args = [word-to-complete context...].

    The special first argument "_cmds" lists top-level dnf commands
    (including plugin-provided ones); otherwise the matching completion
    command above is dispatched through a stripped-down CLI.
    """
    base = dnf.cli.cli.BaseCli()
    cli = dnf.cli.Cli(base)
    if args[0] == "_cmds":
        # Load plugins so their commands appear in the completion list too.
        base.plugins.load(base.conf.pluginpath, [])
        base.plugins.run_init(base, cli)
        print("\n".join(filter_list_by_kw(args[1], cli.cli_commands)))
        return
    # Replace the real command table with the completion variants only.
    cli.cli_commands.clear()
    cli.register_command(RemoveCompletionCommand)
    cli.register_command(InstallCompletionCommand)
    cli.register_command(ReinstallCompletionCommand)
    cli.register_command(ListCompletionCommand)
    cli.register_command(RepoListCompletionCommand)
    cli.register_command(UpgradeCompletionCommand)
    cli.register_command(DowngradeCompletionCommand)
    cli.register_command(CleanCompletionCommand)
    cli.register_command(HistoryCompletionCommand)
    cli.configure(args)
    cli.run()
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        # Exit non-zero on Ctrl-C so the shell completion machinery notices.
        sys.exit(1)
|
opencobra/cobrapy | src/cobra/util/solver.py | Python | gpl-2.0 | 21,838 | 0.000687 | """Additional helper functions for the optlang solvers.
All functions integrate well with the context manager, meaning that
all operations defined here are automatically reverted when used in a
`with model:` block.
The functions defined here together with the existing model functions
should allow you to implement custom flux analysis methods with ease.
"""
import logging
import re
from functools import partial
from types import ModuleType
from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Tuple, Union
from warnings import warn
import optlang
import pandas as pd
from optlang.interface import (
FEASIBLE,
INFEASIBLE,
ITERATION_LIMIT,
NUMERIC,
OPTIMAL,
SUBOPTIMAL,
TIME_LIMIT,
)
from optlang.symbolics import Basic, Zero
from cobra.exceptions import (
OPTLANG_TO_EXCEPTIONS_DICT,
OptimizationError,
SolverNotFound,
)
from cobra.util.context import get_context
# Used to avoid cyclic reference and enable third-party static type checkers to work
if TYPE_CHECKING:
from cobra import Model, Reaction
# Type alias: an optlang constraint or variable.
CONS_VARS = Union[optlang.interface.Constraint, optlang.interface.Variable]
logger = logging.getLogger(__name__)
# Define all the solvers that are found in optlang.
# Maps short name (e.g. "glpk") to the optlang interface module.
solvers = {
    match.split("_interface")[0]: getattr(optlang, match)
    for match in dir(optlang)
    if "_interface" in match
}
# Defines all the QP solvers implemented in optlang.
qp_solvers = ["cplex", "gurobi", "osqp"]
# optlang solution statuses which still allow retrieving primal values
has_primals = [NUMERIC, FEASIBLE, INFEASIBLE, SUBOPTIMAL, ITERATION_LIMIT, TIME_LIMIT]
class Components(NamedTuple):
    """Define an object for adding absolute expressions."""
    # Auxiliary variable representing the absolute value.
    variable: optlang.interface.Variable
    # Constraint bounding the expression from above by `variable`.
    upper_constraint: optlang.interface.Constraint
    # Constraint bounding the expression from below by `-variable`.
    lower_constraint: optlang.interface.Constraint
def linear_reaction_coefficients(
    model: "Model", reactions: Optional[List["Reaction"]] = None
) -> Dict["Reaction", float]:
    """Retrieve the coefficient of each reaction in a linear objective.

    Parameters
    ----------
    model : cobra.Model
        The cobra model whose objective is inspected.
    reactions : list of cobra.Reaction, optional
        Restrict the lookup to these reactions; all model reactions are
        considered by default (default None).

    Returns
    -------
    dict
        Mapping of reaction object to its objective coefficient.  Empty if
        the objective has no linear terms.
    """
    reactions = model.reactions if not reactions else reactions
    try:
        coefficients = model.solver.objective.expression.as_coefficients_dict()
    except AttributeError:
        # No usable objective expression -> no linear coefficients.
        return {}
    linear_coefficients = {}
    for rxn in reactions:
        fwd = coefficients.get(rxn.forward_variable, 0)
        rev = coefficients.get(rxn.reverse_variable, 0)
        # A genuinely linear term has opposite coefficients on the forward
        # and reverse variables.
        if fwd != 0 and fwd == -rev:
            linear_coefficients[rxn] = float(fwd)
    return linear_coefficients
def _valid_atoms(model: "Model", expression: optlang.symbolics.Basic) -> bool:
    """Return True iff every optlang variable referenced by *expression*
    belongs to *model*'s solver instance.

    Parameters
    ----------
    model : cobra.Model
        The model whose solver owns the variables.
    expression : sympy.Basic
        A sympy/optlang symbolic expression.
    """
    for atom in expression.atoms(optlang.interface.Variable):
        if atom.problem is not model.solver:
            return False
    return True
def set_objective(
    model: "Model",
    value: Union[
        optlang.interface.Objective,
        optlang.symbolics.Basic,
        Dict["Reaction", float],
    ],
    additive: bool = False,
) -> None:
    """Set the model objective.

    Parameters
    ----------
    model : cobra.Model
        The model to set the objective for.
    value : optlang.interface.Objective, optlang.symbolics.Basic, dict
        If the model objective is linear, then the value can be a new
        optlang.interface.Objective or a dictionary with linear
        coefficients where each key is a reaction and the corresponding
        value is the new coefficient (float).
        If the objective is non-linear and `additive` is True, then only
        values of class optlang.interface.Objective, are accepted.
    additive : bool
        If True, add the terms to the current objective, otherwise start with
        an empty objective.

    Raises
    ------
    ValueError
        If model objective is non-linear and the `value` is a dict.
    TypeError
        If the type of `value` is not one of the accepted ones.
    """
    interface = model.problem
    # Snapshot the current objective so it can be restored when leaving a
    # `with model:` context block.
    reverse_value = model.solver.objective.expression
    reverse_value = interface.Objective(
        reverse_value, direction=model.solver.objective.direction, sloppy=True
    )
    if isinstance(value, dict):
        if not model.objective.is_Linear:
            raise ValueError(
                "You can only update non-linear objectives additively using object of "
                f"class optlang.interface.Objective, not of {type(value)}"
            )
        if not additive:
            # Replace with a fresh empty objective, keeping the direction.
            model.solver.objective = interface.Objective(
                Zero, direction=model.solver.objective.direction
            )
        for reaction, coef in value.items():
            # Forward/reverse variables carry opposite-signed coefficients.
            model.solver.objective.set_linear_coefficients(
                {reaction.forward_variable: coef, reaction.reverse_variable: -coef}
            )
    elif isinstance(value, (Basic, optlang.interface.Objective)):
        if isinstance(value, Basic):
            value = interface.Objective(
                value, direction=model.solver.objective.direction, sloppy=False
            )
        # Check whether expression only uses variables from current model;
        # clone the objective if not, faster than cloning without checking
        if not _valid_atoms(model, value.expression):
            value = interface.Objective.clone(value, model=model.solver)
        if not additive:
            model.solver.objective = value
        else:
            model.solver.objective += value.expression
    else:
        raise TypeError(f"{value} is not a valid objective for {model.solver}.")
    context = get_context(model)
    if context:
        def reset():
            # Restore the snapshot (expression and direction) on context exit.
            model.solver.objective = reverse_value
            model.solver.objective.direction = reverse_value.direction
        context(reset)
def interface_to_str(interface: Union[str, ModuleType]) -> str:
    """Give a string representation for an optlang interface.

    Parameters
    ----------
    interface : str, ModuleType
        Full name of the interface in optlang or cobra representation.
        For instance, 'optlang.glpk_interface' or 'optlang-glpk'.

    Returns
    -------
    str
        The name of the interface as a string.
    """
    name = interface.__name__ if isinstance(interface, ModuleType) else interface
    # The unescaped dots deliberately match any separator character, so both
    # 'optlang.glpk_interface' and 'optlang-glpk' reduce to 'glpk'.
    return re.sub(r"optlang.|.interface", "", name)
def get_solver_name(mip: bool = False, qp: bool = False) -> str:
"""Select a solver for a given optimization problem.
Parameters
----------
mip : bool
True if the solver requires mixed integer linear programming capabilities.
qp : bool
True if the solver requires quadratic programming capabilities.
Returns
-------
str
The name of the feasible solver.
Raises
------
SolverNotFound
If no suitable solver could be found.
"""
if len(solvers) == 0:
raise SolverNotFound("No solvers found.")
# Those lists need to be updated as optlang implements more solvers
mip_order = ["gurobi", "cplex", "glpk"]
lp_order = ["glpk", "cplex", "gurobi"]
qp_order = ["gurobi", "cplex", "osqp"]
if mip is False and qp is False:
for solver_name in lp_order:
|
castlecms/castle.cms | castle/cms/vocabularies.py | Python | gpl-2.0 | 11,750 | 0.001106 | import pycountry
from Acquisition import aq_parent
from castle.cms.fragments.interfaces import IFragmentsDirectory
from castle.cms.browser.survey import ICastleSurvey
from plone import api
from plone.registry.interfaces import IRegistry
from Products.CMFCore.utils import getToolByName
from zope.component import getAllUtilitiesRegisteredFor
from zope.component import getUtility
from zope.component.hooks import getSite
from zope.globalrequest import getRequest
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.interface import implements
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
from plone.app.tiles.browser.edit import AcquirableDictionary
from plone.app.content.browser import vocabulary
import requests
import json
# XXX needs updating in 5.1
# Loosen/define the permissions plone.app.content's vocabulary view enforces
# so these vocabularies are usable from tiles and widgets.  The attribute
# name changed across Plone releases, hence the two branches.
# NOTE(review): a missing `PERMISSIONS` attribute would raise AttributeError,
# which this `except KeyError` does not catch -- confirm the intended trigger.
try:
    vocabulary.PERMISSIONS['plone.app.vocabularies.Groups'] = 'Modify portal content'
    vocabulary.PERMISSIONS['castle.cms.vocabularies.EmailCategories'] = 'Modify portal content'
    vocabulary.PERMISSIONS['castle.cms.vocabularies.Surveys'] = 'Modify portal content'
    vocabulary.PERMISSIONS['plone.app.vocabularies.Keywords'] = 'View'
    vocabulary.PERMISSIONS['castle.cms.vocabularies.ProvidesTitleSummaryLeadImage'] = 'View'
except KeyError:
    # Newer plone.app.content exposes `_permissions` instead of `PERMISSIONS`.
    vocabulary._permissions['plone.app.vocabularies.Groups'] = 'Modify portal content'
    vocabulary._permissions['castle.cms.vocabularies.EmailCategories'] = 'Modify portal content'
    vocabulary._permissions['castle.cms.vocabularies.Surveys'] = 'Modify portal content'
    vocabulary._permissions['plone.app.vocabularies.Keywords'] = 'View'
    vocabulary._permissions['castle.cms.vocabularies.ProvidesTitleSummaryLeadImage'] = 'View'
# Keep this metadata column out of what vocabulary queries may expose.
vocabulary._unsafe_metadata.append('last_modified_by')
# Fragment names never offered in the picker: site chrome that templates
# place explicitly.
_blacklist = (
    'editbar',
    'footer',
    'mainlinks',
    'sidenav',
    'sitetitle',
    'statusmessage',
    'toplinks',
    'topsharing',
    'mobilelinks',
    'mobilenav',
    'announcement',
    'search')
# Fragments that are only offered when editing a dashboard.
_dashboard_available = (
    'dashboard-statistics',
    'dashboard-welcome'
)
def AvailableFragments(context):
    """Vocabulary of selectable fragment names for the current request/context."""
    # need to move import here since vocab module is used in interfaces
    from castle.cms.interfaces import IDashboard
    if isinstance(context, AcquirableDictionary):
        context = aq_parent(context)
    is_dash = IDashboard.providedBy(context)
    request = getRequest()
    # Collect fragment names from every registered directory whose layer
    # (if any) applies to this request.
    names = []
    for util in getAllUtilitiesRegisteredFor(IFragmentsDirectory):
        if util.layer is not None and not util.layer.providedBy(request):
            continue
        names.extend(util.list())
    def _usable(name):
        # Chrome fragments and dashboard-only fragments (outside dashboards)
        # are hidden, as are private names (leading/trailing underscore).
        if name in _blacklist or (not is_dash and name in _dashboard_available):
            return False
        return name[0] != '_' and name[-1] != '_'
    terms = [SimpleVocabulary.createTerm('', '', 'Select fragment')]
    for name in sorted(frag for frag in set(names) if _usable(frag)):
        terms.append(
            SimpleVocabulary.createTerm(
                name, name, name.capitalize().replace('-', ' ')))
    return SimpleVocabulary(terms)
directlyProvides(AvailableFragments, IContextSourceBinder)
class RegistryValueSource(object):
    """Context source binder building a vocabulary from a registry record.

    Registry values may be plain strings (used as token and title) or
    'key|title' pairs.
    """
    implements(IContextSourceBinder)
    def __init__(self, key_name, default=None):
        # bugfix: avoid the shared mutable default argument `default=[]`;
        # callers passing nothing still get an empty list.
        self.key_name = key_name
        self.default = [] if default is None else default
    def __call__(self, context):
        registry = getUtility(IRegistry)
        terms = []
        for value in registry.get(self.key_name, self.default):
            key = value
            if '|' in value:
                # 'key|title' entries split into token and display title.
                key, _, value = value.partition('|')
            terms.append(
                SimpleVocabulary.createTerm(key, key.encode('utf-8'), value))
        return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class LocationsVocabularyFactory(object):
    """Vocabulary of site-allowed locations, read from the registry."""
    def __call__(self, context):
        return RegistryValueSource('castle.allowed_locations')(context)
LocationsVocabulary = LocationsVocabularyFactory()
@implementer(IVocabularyFactory)
class MimeTypeVocabularyFactory(object):
    """Vocabulary of content-type values indexed in the catalog, with
    friendlier display titles for HTML and media types."""
    def __call__(self, context):
        catalog = api.portal.get_tool('portal_catalog')
        # bugfix: the catalog used to be queried twice; the first
        # uniqueValuesFor result was discarded.
        terms = []
        for value in catalog.uniqueValuesFor('contentType'):
            human = value
            if 'html' in value:
                human = 'HTML'
            elif value.split('/')[0] in ('audio', 'video', 'image'):
                # e.g. 'image/png' -> 'PNG'
                human = value.split('/')[-1].upper()
            terms.append(
                SimpleVocabulary.createTerm(
                    value, value.encode('utf-8'), human))
        return SimpleVocabulary(terms)
MimeTypeVocabulary = MimeTypeVocabularyFactory()
@implementer(IVocabularyFactory)
class RobotBehaviorVocabularyFactory(object):
    """Static vocabulary of robots-meta directives editors can choose from."""
    def __call__(self, context):
        choices = (
            ('index', 'Index'),
            ('follow', 'Follow links'),
            ('noimageindex', 'Do not index images'),
            ('noarchive',
             'Search engines should not show a cached link to this page on a SERP.'),
            ('nosnippet',
             'Search engines should not show a snippet of this page (i.e. meta description) on a SERP.'),  # noqa:E501
        )
        return SimpleVocabulary([
            SimpleTerm(value=value, token=value, title=title)
            for value, title in choices
        ])
RobotBehaviorVocabulary = RobotBehaviorVocabularyFactory()
@implementer(IVocabularyFactory)
class EmailCategoryVocabularyFactory(object):
    """Vocabulary of subscriber e-mail categories from the registry."""
    def __call__(self, context):
        registry = getUtility(IRegistry)
        categories = registry.get('castle.subscriber_categories')
        terms = []
        for category in categories:
            # Category string doubles as value and title.
            terms.append(SimpleTerm(value=category, title=category))
        return SimpleVocabulary(terms)
EmailCategoryVocabulary = EmailCategoryVocabularyFactory()
@implementer(IVocabularyFactory)
class SurveyVocabularyFactory(object):
    """Vocabulary of surveys fetched from the remote survey API.

    Falls back to a single sentinel term when the API is unreachable or
    misconfigured, so editors still get a usable widget instead of a
    traceback.
    """

    def __call__(self, context):
        try:
            survey_settings = getUtility(IRegistry).forInterface(
                ICastleSurvey, check=False)
            list_url = '{}/survey-list'.format(survey_settings.survey_api_url)
            account_id = survey_settings.survey_account_id
            request_data = {
                'account_id': account_id
            }
            # A bounded timeout keeps a slow or unreachable API from
            # blocking the rendering thread indefinitely.
            response = requests.post(
                list_url, data=json.dumps(request_data), timeout=10)
            result = response.json()
            surveys = result['list']
            terms = []
            for survey in surveys:
                terms.append(
                    SimpleTerm(title=survey['survey_name'], value=survey['uid']))
            return SimpleVocabulary(terms)
        except Exception:
            # error accessing survey api: deliberate best-effort fallback
            return SimpleVocabulary([
                SimpleTerm(title='Survey API is not set up properly',
                           value="no_api")])


SurveyVocabulary = SurveyVocabularyFactory()
BUSINES_TYPES = [
'Restaurant',
'AnimalShelter',
'AutomotiveBusiness',
'ChildCare',
'DryCleaningOrLaundry',
'EmergencyService',
'EmploymentAgency',
'EntertainmentBusiness',
'FinancialService',
'FoodEstablishment',
'GovernmentOffice',
'HealthAndBeautyBusiness',
'HomeAndConstructionBusiness',
'InternetCafe',
'LegalService',
'Library',
'LodgingBusiness',
'MedicalOrganization',
'Organization',
'ProfessionalService',
'RadioStation',
'RealEstateAgent',
'RecyclingCenter',
'SelfStorage',
'ShoppingCenter',
'SportsActivit |
import os              # for OS program calls (xscreensaver-command)
import sys             # for clean sys.exit command
import time            # for sleep/pause
import RPi.GPIO as io  # read the GPIO pins

io.setmode(io.BCM)
pir_pin = 17           # BCM pin wired to the PIR motion sensor
screen_saver = False   # tracks whether the screensaver is currently active
io.setup(pir_pin, io.IN)

# Wake the display on motion; blank it again after a fixed idle delay.
while True:
    if screen_saver:
        if io.input(pir_pin):
            # Motion detected: wake the display.
            os.system("xscreensaver-command -deactivate")
            screen_saver = False
        else:
            # Poll at a modest rate instead of busy-waiting: the original
            # loop spun at 100% CPU while waiting for motion.
            time.sleep(0.5)
    else:
        # NOTE(review): this blanks the screen 5 minutes after waking,
        # regardless of motion during that window — confirm that is the
        # intended behavior.
        time.sleep(300)
        os.system("xscreensaver-command -activate")
        screen_saver = True
OCA/connector-cmis | cmis/models/cmis_backend.py | Python | agpl-3.0 | 4,701 | 0 | # © 2014-2015 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
# Copyright 2016 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
from odoo import api, fields, models
from odoo.exceptions import UserError
from odoo.tools.translate import _
from ..exceptions import CMISError
_logger = logging.getLogger(__name__)
try:
import cmislib.exceptions
from cmislib.model import CmisClient
from cmislib.browser.binding import BrowserBinding
from cmislib.exceptions import ObjectNotFoundException
except (ImportError, IOError) as err:
_logger.debug(err)
class CmisBackend(models.Model):
    """Configuration record for a CMIS repository connection."""

    _name = 'cmis.backend'
    _description = 'CMIS Backend'
    _order = 'name desc'

    name = fields.Char(required=True)
    location = fields.Char(
        required=True)
    username = fields.Char(
        required=True)
    password = fields.Char(
        required=True)
    initial_directory_write = fields.Char(
        'Initial directory for writing', required=True, default='/')

    _sql_constraints = [
        ('name_uniq',
         'unique(name)',
         _('CMIS Backend name must be unique!')),
    ]

    @api.multi
    def get_cmis_client(self):
        """
        Get an initialized CmisClient using the CMISBrowserBinding
        """
        self.ensure_one()
        return CmisClient(
            self.location,
            self.username,
            self.password,
            binding=BrowserBinding())

    @api.multi
    def get_cmis_repository(self):
        """ Return the default repository in the CMIS container """
        self.ensure_one()
        client = self.get_cmis_client()
        return client.defaultRepository

    @api.multi
    def check_directory_of_write(self):
        """Check access right to write from the configured path.

        Raises UserError with a success message when the test document
        can be created, CMISError otherwise (this is a UI check button).
        """
        datas_fname = 'testdoc'
        for this in self:
            # login with the cmis account
            folder_path_write = this.initial_directory_write
            # Fix: must be called on ``this`` (the single record of the
            # loop); calling it on ``self`` raises ensure_one() for
            # multi-record recordsets.
            path_write_objectid = this.get_folder_by_path(
                folder_path_write,
                create_if_not_found=False,
                cmis_parent_objectid=None)
            # Check if we can create a doc from OE to EDM
            if path_write_objectid:
                try:
                    path_write_objectid.createDocumentFromString(
                        datas_fname,
                        contentString='hello, world',
                        contentType='text/plain')
                except cmislib.exceptions.UpdateConflictException:
                    raise CMISError(
                        _("The test file already exists in the DMS. "
                          "Please remove it and try again."))
                except cmislib.exceptions.RuntimeException:
                    _logger.exception("Please check your access right.")
                    raise CMISError(
                        _("Please check your access right."))
            if path_write_objectid is not False:
                raise UserError(_("Path is correct for : %s") %
                                path_write_objectid)
            else:
                raise CMISError(_("Error path for : %s") %
                                path_write_objectid)

    @api.multi
    def get_folder_by_path(self, path, create_if_not_found=True,
                           cmis_parent_objectid=None):
        """Return the CMIS folder object at ``path``.

        When the path does not exist and ``create_if_not_found`` is True,
        every missing segment is created under its parent; otherwise
        False is returned.
        """
        self.ensure_one()
        repo = self.get_cmis_repository()
        if cmis_parent_objectid:
            path = repo.getObject(
                cmis_parent_objectid).getPaths()[0] + '/' + path
        if not path.startswith('/'):
            path = '/%s' % path
        try:
            return repo.getObjectByPath(path)
        except ObjectNotFoundException:
            if not create_if_not_found:
                return False
        # The path doesn't exist and must be created segment by segment.
        # Fix: the original referenced ``new_root`` before assignment on
        # the first missing segment and left ``root`` undefined for an
        # empty split; start the walk explicitly from the repository root.
        current = repo.rootFolder
        traversed = ''
        for part in path.split('/'):
            if not part:
                # skip the empty leading segment produced by the '/' prefix
                continue
            traversed += '/' + part
            try:
                current = repo.getObjectByPath(traversed)
            except ObjectNotFoundException:
                current = repo.createFolder(current, part)
        return current

    def sanitize_input(self, file_name):
        """Prevent injection by escaping: '%_"""
        file_name = file_name.replace("'", r"\'")
        file_name = file_name.replace("%", r"\%")
        file_name = file_name.replace("_", r"\_")
        return file_name

    def safe_query(self, query, file_name, repo):
        """Run ``query`` with ``file_name`` escaped and interpolated.

        Equivalent to the original per-character map+join, but escapes
        the string in one pass.
        """
        return repo.query(query % self.sanitize_input(file_name))
|
tnemis/staging-server | schoolnew/views.py | Python | mit | 219,616 | 0.044382 | from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView, View
from schoolnew.models import *
from schoolnew.forms import *
from django.db.models import *
from django.shortcuts import render
from django.core.paginator import Paginator, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.contrib import messages
from itertools import *
from datetime import datetime
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import redirect
import reportlab
import cStringIO as StringIO
from xhtml2pdf import pisa
from django.template.loader import get_template
from django.template import Context
from cgi import escape
from excel_response import ExcelResponse
import json
from django.utils import simplejson
import os
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.cache import cache
class myview1(View):
    """Render the full school profile for Basicinfo ``pk`` as a PDF download."""

    def get(self, request, **kwargs):
        if request.user.is_authenticated():
            pk = self.kwargs.get('pk')
            basic = Basicinfo.objects.get(id=pk)
            # Kept for its side effect: raises DoesNotExist when the user's
            # account is not associated with a school.
            school = School.objects.get(id=request.user.account.associated_with)
            admin = Academicinfo.objects.get(school_key=basic.id)
            academic = Academicinfo.objects.get(school_key=basic.id)
            infra = Infradet.objects.get(school_key=basic.id)
            class_det = Class_section.objects.filter(school_key=basic.id)
            schgroup_det = Sch_groups.objects.filter(school_key=basic.id)
            post_det = Staff.objects.filter(Q(school_key=basic.id))
            parttime_det = Parttimestaff.objects.filter(school_key=basic.id)
            land_det = Land.objects.filter(school_key=basic.id)
            build_det = Building.objects.filter(school_key=basic.id)
            buildabs_det = Building_abs.objects.filter(school_key=basic.id)
            sports_det = Sports.objects.filter(school_key=basic.id)
            ict_det = Ictentry.objects.filter(school_key=basic.id)
            passper_det = Passpercent.objects.filter(school_key=basic.id)
            filename = str(basic.udise_code)
            response = HttpResponse(content_type='application/pdf')
            # Fix: the original assigned the misspelled bare word
            # 'attachement' and left the filename format call as a
            # discarded no-op statement after a semicolon.
            response['Content-Disposition'] = \
                'attachment; filename={0}.pdf'.format(filename)
            pdf = render_to_pdf(
                'printpdfschool.html',
                {
                    'basic': basic,
                    'admin': admin,
                    'academic': academic,
                    'infra': infra,
                    'class_det': class_det,
                    'schgroup_det': schgroup_det,
                    'post_det': post_det,
                    'parttime_det': parttime_det,
                    'land_det': land_det,
                    'build_det': build_det,
                    'buildabs_det': buildabs_det,
                    'sports_det': sports_det,
                    'ict_det': ict_det,
                    'passper_det': passper_det,
                    'pagesize': 'A4'
                }
            )
            # NOTE(review): render_to_pdf returns an HttpResponse; writing
            # it into another response looks suspect — confirm intent.
            response.write(pdf)
            return response
        else:
            return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
def render_to_pdf(template_src, context_dict):
    """Render ``template_src`` with ``context_dict`` and return it as PDF.

    Returns an ``application/pdf`` HttpResponse on success, or an HTML
    error page when xhtml2pdf reports a generation error.
    """
    template = get_template(template_src)
    context = Context(context_dict)
    html = template.render(context)
    result = StringIO.StringIO()
    # Encode as UTF-8 so non-ASCII (e.g. Tamil) template text survives
    # PDF generation.
    pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")), result)
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
class home_page(View):
    """School home/dashboard page.

    Looks up the school whose UDISE code equals the logged-in username
    and gathers every related record for display.

    NOTE: the template context is built from ``locals()``, so every
    local variable name below is part of the template contract and must
    not be renamed.
    """

    def get(self, request, **kwargs):
        if request.user.is_authenticated():
            if (Basicinfo.objects.filter(udise_code=request.user.username).count())>0:
                basic_det=Basicinfo.objects.get(udise_code=request.user.username)
                basic_det = Basicinfo.objects.get(id=basic_det.id)
                sch_key = basic_det.id
                new_sch_id = basic_det.id
                govchk=basic_det.sch_management
                sch_dir=basic_det.sch_directorate
                sch_cat_chk=basic_det.sch_cate
                chk_user=Basicinfo.objects.get(udise_code=request.user.username)
                # Government/aided managements under departments 001 or 002
                # are allowed to enter aid details.
                if ((str(govchk)=='School Education Department School')|(str(govchk)=='Corporation School')|(str(govchk)=='Municipal School')|(str(govchk)=='Fully Aided School')|(str(govchk)=='Partly Aided School')|(str(govchk)=='Anglo Indian (Fully Aided) School')|(str(govchk)=='Anglo Indian (Partly Aided) School')|(str(govchk)=='Oriental (Fully Aided) Sanskrit School')|(str(govchk)=='Oriental (Partly Aided) Sanskrit School')|(str(govchk)=='Oriental (Fully Aided) Arabic School')|(str(govchk)=='Oriental (Partly Aided) Arabic School')):
                    if ((basic_det.sch_directorate.department_code=='001')|(basic_det.sch_directorate.department_code=='002')):
                        govaid_ent='Yes'
                    else:
                        govaid_ent=''
                else:
                    govaid_ent=''
                if (Academicinfo.objects.filter(school_key=basic_det.id).count())>0:
                    acade_det = Academicinfo.objects.get(school_key=basic_det.id)
                    acade_det = Academicinfo.objects.get(id=acade_det.id)
                if basic_det.sch_cate:
                    # Fix: the original tested membership against the single
                    # string '3,5,6,7,8,10,11' (substring match), so e.g.
                    # code '1' falsely matched via '10'/'11'. Use a real
                    # tuple of codes.
                    if (basic_det.sch_cate.category_code in ('3', '5', '6', '7', '8', '10', '11')):
                        pass_ent='Yes'
                    else:
                        pass_ent=''
                else:
                    pass_ent=''
                if (Infradet.objects.filter(school_key=basic_det.id).count())>0:
                    infra_det = Infradet.objects.get(school_key=basic_det.id)
                if (Class_section.objects.filter(school_key=basic_det.id).count())>0:
                    class_det = Class_section.objects.filter(school_key=basic_det.id)
                # Fix: the original compared the QuerySet itself to 0
                # (missing .count(), unlike every sibling check).
                if (Staff.objects.filter(Q(school_key=basic_det.id) & Q(staff_cat=1)).count())>0:
                    teach_det = Staff.objects.filter(Q(school_key=basic_det.id) & Q(staff_cat=1))
                if (Staff.objects.filter(Q(school_key=basic_det.id) & Q(staff_cat=2)).count())>0:
                    nonteach_det = Staff.objects.filter(Q(school_key=basic_det.id) & Q(staff_cat=2))
                if (Parttimestaff.objects.filter(school_key=basic_det.id).count())>0:
                    parttime_det = Parttimestaff.objects.filter(school_key=basic_det.id)
                if (Land.objects.filter(school_key=basic_det.id).count())>0:
                    land_det = Land.objects.filter(school_key=basic_det.id)
                if (Building.objects.filter(school_key=basic_det.id).count())>0:
                    building_det = Building.objects.filter(school_key=basic_det.id)
                if (Building_abs.objects.filter(school_key=basic_det.id).count())>0:
                    buildabs_det = Building_abs.objects.filter(school_key=basic_det.id)
                if (Sports.objects.filter(school_key=basic_det.id).count())>0:
                    sports_det = Sports.objects.filter(school_key=basic_det.id)
                if (Ictentry.objects.filter(school_key=basic_det.id).count())>0:
                    ict_det = Ictentry.objects.filter(school_key=basic_det.id)
                if (Sch_groups.objects.filter(school_key=basic_det.id).count())>0:
                    schgroup_det = Sch_groups.objects.filter(school_key=basic_det.id)
                basic_mdate=basic_det.modified_date.strftime('%d-%m-%Y -- %H:%M %p')
                grp=basic_det.sch_cate
                if ((str(grp)=='Hr.Sec School (I-XII)')|(str(grp)=='Hr.Sec School (VI-XII)')|(str(grp)=='Hr.Sec School (IX-XII)')|(str(grp)=='Hr.Sec School (XI-XII)')|(str(grp)=='Matriculation Hr.Sec School (I-XII)')):
                    grp_chk='Yes'
                else:
                    grp_chk=''
                if (Academicinfo.objects.filter(school_key=basic_det.id).count())>0:
                    acade_mdate=Academicinfo.objects.get(school_key=basic_det.id)
                if (Infradet.objects.filter(school_key=basic_det.id).count())>0:
                    infra_mdate=Infradet.objects.get(school_key=basic_det.id)
                # Fix: same missing .count() as above.
                if (Staff.objects.filter(Q(school_key=basic_det.id) & Q(staff_cat=1)).count())>0:
                    teach_mdate=Staff.objects.filter(Q(school_key=basic_det.id) & Q(staff_cat=1))
                return render (request,'home_edit1.html',locals())
            else:
                return render (request,'home_edit1.html',locals())
        else:
            return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
class basic_edit(UpdateView):
def get(self,request,**kwargs):
if request.user.is_authenticated():
chk_user1=self.kwargs.get('pk')
district_list = District.objects.all().order_by('district_name')
chk_u |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.