repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
monetate/sqlalchemy | test/sql/test_metadata.py | Python | mit | 176,328 | 0 | from contextlib import contextmanager
import pickle
import sqlalchemy as tsa
from sqlalchemy import ARRAY
from sqlalchemy import bindparam
from sqlalchemy import BLANK_SCHEMA
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import column
from sqlalchemy import ColumnDefault
from sqlalchemy import desc
from s | qlalchemy import Enum
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import table
from sqlalchemy i | mport testing
from sqlalchemy import text
from sqlalchemy import TypeDecorator
from sqlalchemy import types as sqltypes
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy.engine import default
from sqlalchemy.schema import AddConstraint
from sqlalchemy.schema import CreateIndex
from sqlalchemy.schema import DefaultClause
from sqlalchemy.schema import DropIndex
from sqlalchemy.sql import naming
from sqlalchemy.sql import operators
from sqlalchemy.sql.elements import _NONE_NAME
from sqlalchemy.sql.elements import literal_column
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import emits_warning
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
class MetaDataTest(fixtures.TestBase, ComparesTables):
def test_metadata_contains(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t2", MetaData(), Column("x", Integer))
t4 = Table("t1", MetaData(), Column("x", Integer), schema="foo")
assert "t1" in metadata
assert "foo.t2" in metadata
assert "t2" not in metadata
assert "foo.t1" not in metadata
assert t1 in metadata
assert t2 in metadata
assert t3 not in metadata
assert t4 not in metadata
def test_uninitialized_column_copy(self):
for col in [
Column("foo", String(), nullable=False),
Column("baz", String(), unique=True),
Column(Integer(), primary_key=True),
Column(
"bar",
Integer(),
Sequence("foo_seq"),
primary_key=True,
key="bar",
),
Column(Integer(), ForeignKey("bat.blah"), doc="this is a col"),
Column(
"bar",
Integer(),
ForeignKey("bat.blah"),
primary_key=True,
key="bar",
),
Column("bar", Integer(), info={"foo": "bar"}),
]:
c2 = col._copy()
for attr in (
"name",
"type",
"nullable",
"primary_key",
"key",
"unique",
"info",
"doc",
):
eq_(getattr(col, attr), getattr(c2, attr))
eq_(len(col.foreign_keys), len(c2.foreign_keys))
if col.default:
eq_(c2.default.name, "foo_seq")
for a1, a2 in zip(col.foreign_keys, c2.foreign_keys):
assert a1 is not a2
eq_(a2._colspec, "bat.blah")
def test_col_subclass_copy(self):
class MyColumn(schema.Column):
def __init__(self, *args, **kw):
self.widget = kw.pop("widget", None)
super(MyColumn, self).__init__(*args, **kw)
def _copy(self, *arg, **kw):
c = super(MyColumn, self)._copy(*arg, **kw)
c.widget = self.widget
return c
c1 = MyColumn("foo", Integer, widget="x")
c2 = c1._copy()
assert isinstance(c2, MyColumn)
eq_(c2.widget, "x")
def test_uninitialized_column_copy_events(self):
msgs = []
def write(c, t):
msgs.append("attach %s.%s" % (t.name, c.name))
c1 = Column("foo", String())
m = MetaData()
for i in range(3):
cx = c1._copy()
# as of 0.7, these events no longer copy. its expected
# that listeners will be re-established from the
# natural construction of things.
cx._on_table_attach(write)
Table("foo%d" % i, m, cx)
eq_(msgs, ["attach foo0.foo", "attach foo1.foo", "attach foo2.foo"])
def test_schema_collection_add(self):
metadata = MetaData()
Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
Table("t3", metadata, Column("x", Integer))
eq_(metadata._schemas, set(["foo", "bar"]))
eq_(len(metadata.tables), 3)
def test_schema_collection_remove(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
metadata.remove(t3)
eq_(metadata._schemas, set(["foo", "bar"]))
eq_(len(metadata.tables), 2)
metadata.remove(t1)
eq_(metadata._schemas, set(["bar"]))
eq_(len(metadata.tables), 1)
def test_schema_collection_remove_all(self):
metadata = MetaData()
Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
metadata.clear()
eq_(metadata._schemas, set())
eq_(len(metadata.tables), 0)
def test_metadata_tables_immutable(self):
# this use case was added due to #1917.
metadata = MetaData()
Table("t1", metadata, Column("x", Integer))
assert "t1" in metadata.tables
assert_raises(TypeError, lambda: metadata.tables.pop("t1"))
@testing.provide_metadata
def test_dupe_tables(self):
metadata = self.metadata
Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
)
metadata.create_all(testing.db)
Table("table1", metadata, autoload_with=testing.db)
def go():
Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
)
assert_raises_message(
tsa.exc.InvalidRequestError,
"Table 'table1' is already defined for this "
"MetaData instance. Specify 'extend_existing=True' "
"to redefine options and columns on an existing "
"Table object.",
go,
)
def test_fk_copy(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
kw = dict(
onupdate="X",
ondelete="Y",
use_alter=True,
name="f1",
deferrable="Z",
initially="Q",
link_to_name=True,
)
fk1 = ForeignKey(c1, **kw)
fk2 = ForeignKeyConstraint((c1,), (c2,), **kw)
t1.append_constraint(fk2)
fk1c = fk1._copy()
fk2c = fk2._copy()
for k in kw:
eq_(getattr(fk1c, k), kw[k])
eq_(getattr(fk2c, k), kw[k])
def test_check_constraint_copy(self):
def r(x):
|
pcmagic/stokes_flow | try_code/contourAnimation.py | Python | mit | 1,468 | 0.002725 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
# from sympy import *
from src import jeffery_model as jm
DoubleletStrength = np.array((1, 0, 0))
alpha = 1
B = np.array((0, 1, 0))
lbd = (alpha ** 2 - 1) / (alpha ** 2 + 1)
x, y = np.meshgrid(np.linspace(-1, 1, 100), np.linspace(-1, 1, 100))
problem = jm.SingleDoubleletJefferyProblem(B=B, DoubleletStrength=DoubleletStrength)
length = | 200
def fun(zi):
location = np.vstack((x.flatten(), y.flatten(), np.ones_like(y.flatten()) * zi))
Jij = problem.J_matrix(location)
JijT = Jij.transpose(1, 0, 2)
Sij = 1 / 2 * (Jij + JijT)
Oij = 1 / 2 * (Jij - JijT)
B | ij = Sij + lbd * Oij
TrB2 = (Bij[0, 0, :] ** 2 + Bij[1, 1, :] ** 2 + Bij[2, 2, :] ** 2).reshape(x.shape)
TrB3 = (Bij[0, 0, :] ** 3 + Bij[1, 1, :] ** 3 + Bij[2, 2, :] ** 3).reshape(x.shape)
DtLine = TrB2 ** 3 - 6 * TrB3 ** 2
return DtLine
fig, ax = plt.subplots()
p = [ax.contourf(x, y, np.log10(fun(1 / length)))]
def update(i):
zi = (1 + i) / length * 2
for tp in p[0].collections:
tp.remove()
p[0] = ax.contourf(x, y, np.log10(fun(zi)))
return p[0].collections
ani = animation.FuncAnimation(fig, update, frames=length,
interval=5, blit=True, repeat=True)
# plt.show()
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=600)
ani.save('t2.mp4', writer=writer)
print('111')
|
brianrodri/oppia | scripts/run_lighthouse_tests.py | Python | apache-2.0 | 7,645 | 0.000131 | # Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script performs lighthouse checks and creates lighthouse reports.
Any callers must pass in a flag, either --accessibility or --performance.
"""
from __future__ import annotations
import argparse
import contextlib
import os
import subprocess
import sys
|
from core.constants import constants
from scripts import build
from scripts import common
from scripts import servers
LIGHTHOUSE_MODE_PERFORMANCE = 'performance'
LIGHTHOUSE_MODE_ACCESSIBILITY = 'accessibility'
SERVER_MODE_PROD = 'dev'
SERVER_MODE_DEV = 'prod'
GOOGLE_APP_ENGINE_PORT = 8181
LIGHTHOUSE_CONFIG_FILENAMES = {
LIGHTHOUSE_MODE_PERFORMANCE: {
'1': '.lighthouserc-1.js',
'2': '.lighthouserc-2.js'
},
LIGHTHOUSE_MODE_ACCESSIBILITY: {
'1': '.lighthouserc-accessibility-1.js',
'2': '.lighthouserc-accessibility-2.js'
}
}
APP_YAML_FILENAMES = {
SERVER_MODE_PROD: 'app.yaml',
SERVER_MODE_DEV: 'app_dev.yaml'
}
_PARSER = argparse.ArgumentParser(
description="""
Run the script from the oppia root folder:
python -m scripts.run_lighthouse_tests
Note that the root folder MUST be named 'oppia'.
""")
_PARSER.add_argument(
'--mode', help='Sets the mode for the lighthouse tests',
required=True, choices=['accessibility', 'performance'])
_PARSER.add_argument(
'--shard', help='Sets the shard for the lighthouse tests',
required=True, choices=['1', '2'])
def run_lighthouse_puppeteer_script():
"""Runs puppeteer script to collect dynamic urls."""
puppeteer_path = (
os.path.join('core', 'tests', 'puppeteer', 'lighthouse_setup.js'))
bash_command = [common.NODE_BIN_PATH, puppeteer_path]
process = subprocess.Popen(
bash_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
print(stdout)
for line in stdout.split(b'\n'):
# Standard output is in bytes, we need to decode the line to
# print it.
export_url(line.decode('utf-8'))
print('Puppeteer script completed successfully.')
else:
print('Return code: %s' % process.returncode)
print('OUTPUT:')
# Standard output is in bytes, we need to decode the line to
# print it.
print(stdout.decode('utf-8'))
print('ERROR:')
# Error output is in bytes, we need to decode the line to
# print it.
print(stderr.decode('utf-8'))
print('Puppeteer script failed. More details can be found above.')
sys.exit(1)
def run_webpack_compilation():
"""Runs webpack compilation."""
max_tries = 5
webpack_bundles_dir_name = 'webpack_bundles'
for _ in range(max_tries):
try:
with servers.managed_webpack_compiler() as proc:
proc.wait()
except subprocess.CalledProcessError as error:
print(error.output)
sys.exit(error.returncode)
if os.path.isdir(webpack_bundles_dir_name):
break
if not os.path.isdir(webpack_bundles_dir_name):
print('Failed to complete webpack compilation, exiting...')
sys.exit(1)
def export_url(line):
"""Exports the entity ID in the given line to an environment variable, if
the line is a URL.
Args:
line: str. The line to parse and extract the entity ID from. If no
recognizable URL is present, nothing is exported to the
environment.
"""
url_parts = line.split('/')
print('Parsing and exporting entity ID in line: %s' % line)
if 'collection_editor' in line:
os.environ['collection_id'] = url_parts[5]
elif 'create' in line:
os.environ['exploration_id'] = url_parts[4]
elif 'topic_editor' in line:
os.environ['topic_id'] = url_parts[4]
elif 'story_editor' in line:
os.environ['story_id'] = url_parts[4]
elif 'skill_editor' in line:
os.environ['skill_id'] = url_parts[4]
def run_lighthouse_checks(lighthouse_mode, shard):
"""Runs the Lighthouse checks through the Lighthouse config.
Args:
lighthouse_mode: str. Represents whether the lighthouse checks are in
accessibility mode or performance mode.
shard: str. Specifies which shard of the tests should be run.
"""
lhci_path = os.path.join('node_modules', '@lhci', 'cli', 'src', 'cli.js')
# The max-old-space-size is a quick fix for node running out of heap memory
# when executing the performance tests: https://stackoverflow.com/a/59572966
bash_command = [
common.NODE_BIN_PATH, lhci_path, 'autorun',
'--config=%s' % LIGHTHOUSE_CONFIG_FILENAMES[lighthouse_mode][shard],
'--max-old-space-size=4096'
]
process = subprocess.Popen(
bash_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
print('Lighthouse checks completed successfully.')
else:
print('Return code: %s' % process.returncode)
print('OUTPUT:')
# Standard output is in bytes, we need to decode the line to
# print it.
print(stdout.decode('utf-8'))
print('ERROR:')
# Error output is in bytes, we need to decode the line to
# print it.
print(stderr.decode('utf-8'))
print('Lighthouse checks failed. More details can be found above.')
sys.exit(1)
def main(args=None):
"""Runs lighthouse checks and deletes reports."""
parsed_args = _PARSER.parse_args(args=args)
if parsed_args.mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
lighthouse_mode = LIGHTHOUSE_MODE_ACCESSIBILITY
server_mode = SERVER_MODE_DEV
elif parsed_args.mode == LIGHTHOUSE_MODE_PERFORMANCE:
lighthouse_mode = LIGHTHOUSE_MODE_PERFORMANCE
server_mode = SERVER_MODE_PROD
else:
raise Exception(
'Invalid parameter passed in: \'%s\', please choose'
'from \'accessibility\' or \'performance\'' % parsed_args.mode)
if lighthouse_mode == LIGHTHOUSE_MODE_PERFORMANCE:
print('Building files in production mode.')
build.main(args=['--prod_env'])
elif lighthouse_mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
build.main(args=[])
run_webpack_compilation()
with contextlib.ExitStack() as stack:
stack.enter_context(servers.managed_redis_server())
stack.enter_context(servers.managed_elasticsearch_dev_server())
if constants.EMULATOR_MODE:
stack.enter_context(servers.managed_firebase_auth_emulator())
stack.enter_context(servers.managed_cloud_datastore_emulator())
stack.enter_context(servers.managed_dev_appserver(
APP_YAML_FILENAMES[server_mode],
port=GOOGLE_APP_ENGINE_PORT,
log_level='critical',
skip_sdk_update_check=True))
run_lighthouse_puppeteer_script()
run_lighthouse_checks(lighthouse_mode, parsed_args.shard)
if __name__ == '__main__':
main()
|
pandeydivesh15/NER-using-Deep-Learning | Task 3: Hindi data/NER_model.py | Python | mit | 3,179 | 0.038377 | # Keras imports
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.wrappers import TimeDistributed
from keras.layers.wrappers import Bidirectional
from keras.layers.core import Dropout
from keras.regularizers import l2
from keras import metrics
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report
class NER():
def __init__(self, data_reader):
self.data_reader = data_reader
self.x, self.y = data_reader.get_data();
self.model = None
self.x_train = None
self.y_train = None
self.x_test = None
self.y_test = None
def make_and_compile(self, units = 150, dropout = 0.2, regul_alpha = 0.0):
self.model = Sequential()
# Bidirectional LSTM with 100 outputs/memory units
self.model.add(Bidirectional(LSTM(units,
return_sequences=True,
W_regularizer=l2(regul_alpha),
b_regularizer=l2(regul_alpha)),
input_shape = [self.data_reader.max_len,
self.data_reader.LEN_WORD_VECTORS]))
self.model.add(TimeDistributed(Dense(self.data_reader.LEN_NAMED_CLASSES,
activation='softmax',
W_regularizer=l2(regul_alpha),
b_regularizer=l2(regul_alpha))))
self.model.add(Dropout(dropout))
self.model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print self.model.summary()
def train(self, train_split = 0.8, epochs = 10, batch_size = 50):
split_mask = np.random.rand(len(self.x)) < (train_split)
self.x_train = self.x[split_mask] |
self.y_train = self.y[split_mask]
self.x_test = self.x[~split_mask]
self.y_test = self.y[~split_mask]
self.model.fit(self.x_train, self.y_train, nb_epoch=epochs, batch_size=batch_size)
def evaluate(self):
predicted_tags= []
test_data_tags = []
for x,y in zip | (self.x_test, self.y_test):
flag = 0
tags = self.model.predict(np.array([x]), batch_size=1)[0]
pred_tags = self.data_reader.decode_result(tags)
test_tags = self.data_reader.decode_result(y)
for i,j in zip(pred_tags, test_tags):
if j != self.data_reader.NULL_CLASS:
flag = 1
if flag == 1:
test_data_tags.append(j)
predicted_tags.append(i)
predicted_tags = np.array(predicted_tags)
test_data_tags = np.array(test_data_tags)
print classification_report(test_data_tags, predicted_tags)
simple_conf_matrix = confusion_matrix(test_data_tags,predicted_tags)
all_tags = sorted(list(set(test_data_tags)))
conf_matrix = pd.DataFrame(
columns = all_tags,
index = all_tags)
for x,y in zip(simple_conf_matrix, all_tags):
conf_matrix[y] = x
conf_matrix = conf_matrix.transpose()
return conf_matrix
def predict_tags(self, sentence):
sentence_list = sentence.strip().split()
sent_len = len(sentence_list)
# Get padded word vectors
x = self.data_reader.encode_sentence(sentence)
tags = self.model.predict(x, batch_size=1)[0]
tags = tags[-sent_len:]
pred_tags = self.data_reader.decode_result(tags)
for s,tag in zip(sentence_list,pred_tags):
print s + "/" + tag
|
jmcanterafonseca/fiware-cygnus | test/acceptance/tools/mysql_utils.py | Python | agpl-3.0 | 7,640 | 0.010344 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of fiware-cygnus (FI-WARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'
import time
import MySQLdb
import gc
# constants
EMPTY = u''
WITHOUT = u'without'
# mysql commands
SELECT_VERSION = u'SELECT version ()'
MYSQL_CREATE_DATABASE = u'CREATE DATABASE IF NOT EXISTS '
MYSQL_CREATE_TABLE = u'CREATE TABLE IF NOT EXISTS '
MYSQL_DROP_DATABASE = u'DROP SCHEMA IF EXISTS '
M | YSQL_SHOW_DATABASE = u'SHOW DATABASES'
MYSQL_SHOW_TABLES = u'SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \''
class Mysql:
"""
mysql | funcionabilities
"""
def __init__(self, **kwargs):
"""
constructor
:param host: mysql host (MANDATORY)
:param port: mysql port (MANDATORY)
:param user: mysql user (MANDATORY)
:param password: mysql pass (MANDATORY)
:param database: mysql database (OPTIONAL)
:param version: mysql version (OPTIONAL)
:param mysql_verify_version: determine whether the version is verified or not (True or False). (OPTIONAL)
:param capacity: capacity of the channel (OPTIONAL)
:param channel_transaction_capacity: amount of bytes that can be sent per transaction (OPTIONAL)
:param retries_number: number of retries when get values (OPTIONAL)
:param delay_to_retry: time to delay each retry (OPTIONAL)
"""
self.host = kwargs.get("host", EMPTY)
self.port = kwargs.get("port", EMPTY)
self.user = kwargs.get("user", EMPTY)
self.password = kwargs.get("password", EMPTY)
self.database = kwargs.get("database", EMPTY)
self.version = kwargs.get("version", "2,2")
self.mysql_verify_version = kwargs.get("mysql_verify_version", "false")
self.capacity = kwargs.get("capacity", "1000")
self.transaction_capacity = kwargs.get("transaction_capacity", "100")
self.retries_number=int(kwargs.get('retries_number',1))
self.retry_delay=int(kwargs.get('delay_to_retry',10))
self.conn = None
self.database = None
def __error_assertion(self, value, error=False):
"""
It Shows exception error or return for evaluation
:param value: exception error text
:param error: True or False (True - return per evaluation |False shows the exception error)
:return: exception error text
"""
if error:
return value
assert False, value
def __query(self, sql, error=False):
"""
new query
:param sql: query
:return: message as text
"""
try:
cur = self.conn.cursor()
cur.execute(sql)
return cur
except Exception, e:
return self.__error_assertion('DB exception: %s' % (e), error)
def __drop_database (self):
"""
delete a database
"""
self.__query("%s `%s`" % (MYSQL_DROP_DATABASE, self.database)) # drop database
# public methods ------------------------------------------
def connect(self):
"""
Open a new mysql connection
"""
try:
self.database = EMPTY
self.conn = MySQLdb.connect(self.host, self.user, self.password, self.database)
except Exception, e:
return self.__error_assertion ('DB exception: %s' % (e))
def set_database (self, database):
"""
set database name
"""
self.database = database
def disconnect (self):
"""
Close a mysql connection and drop the database before
"""
self.__drop_database()
self.conn.close() # close mysql connection
gc.collect() # invoking the Python garbage collector
def verify_version(self):
"""
Verify if the mysql version is the expected
"""
if self.mysql_verify_version.lower() == "true":
cur = self.__query(SELECT_VERSION)
row = cur.fetchone ()
assert row[0] == self.version, \
"Wrong version expected: %s. and version installed: %s" % (str(self.version), str(row[0]))
def create_database(self, name):
"""
create a new Database
:param name:
"""
self.database = name.lower() # converted to lowercase, because cygnus always convert to lowercase per ckan
self.__query("%s `%s`;" % (MYSQL_CREATE_DATABASE, self.database))
def generate_field_datastore_to_resource (self, attributes_number, attributes_name, attribute_type, metadata_type, recvtime="timestamp"):
"""
generate fields to datastore request
:return: fields list
"""
field = " (recvTime "+recvtime
for i in range(int(attributes_number)):
if attribute_type != WITHOUT: field = field + ", " + attributes_name+"_"+str(i)+" "+ attribute_type
if metadata_type != WITHOUT: field = field + ", " + attributes_name+"_"+str(i)+"_md "+ metadata_type
return field + ")"
def create_table (self, name, database_name, fields):
"""
create a new table per column type
:param name:
:param database_name:
:param fields:
"""
self.table = name
self.__query("%s `%s`.`%s` %s;" % (MYSQL_CREATE_TABLE, database_name, self.table, fields))
def table_exist (self, database_name, table_name):
"""
determine if table exist in database
:param database_name:
:param table_name:
"""
cur = self.__query('SELECT table_name FROM information_schema.tables WHERE table_schema = "%s" AND table_name = "%s" LIMIT 1;' % (database_name, table_name))
return cur.fetchone ()
def table_search_one_row (self, database_name, table_name):
"""
get last record from a table
:param database_name:
:param table_name:
"""
if self.table_exist(database_name, table_name) != None:
cur = self.__query('SELECT * FROM `%s`.`%s` ORDER BY 1 DESC LIMIT 1;' % (database_name, table_name))
return cur.fetchone () # return one row from the table
return False
def table_search_several_rows (self,rows, database_name, table_name):
"""
get last records from a table
:param rows:
:param database_name:
:param table_name:
"""
if self.table_exist(database_name, table_name) != None:
cur = self.__query('SELECT * FROM `%s`.`%s` ORDER BY 1 DESC LIMIT %s;' % (database_name, table_name, rows))
return cur.fetchall () # return several lines from the table
return False
|
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/cm/settings_ctx.py | Python | apache-2.0 | 802 | 0 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this | file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# | limitations under the License.
#
# @COPYRIGHT_end
from cm.settings import *
ROOT_URLCONF = 'cm.urls_ctx'
WSGI_APPLICATION = 'cm.wsgi_ec2ctx.application'
|
BassantMorsi/finderApp | users/api/views.py | Python | mit | 17,235 | 0.002379 | import json
from django.http import HttpResponse
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
UpdateAPIView,
DestroyAPIView,
CreateAPIView
)
from users.models import User, FindRequest, MissRequest
from .pagination import UserLimitOffsetPagination, UserPageNumberPaginaton
from .serializers import (
UserListSerializer,
UserDetailSerializer,
UserSerializer,
UserMissRequestSerializer,
UserFindRequestSerializer,
)
from datetime import datetime as dt
import os
import cv2
from PIL import Image
import numpy as np
import base64
FACE_DETECTOR_PATH = "{base_path}/cascades/haarcascade_frontalface_default.xml".format(
base_path=os.path.abspath(os.path.dirname(__file__)))
MISS_FACES_PATH = "{base_path}/faces/miss/".format(
base_path=os.path.abspath(os.path.dirname(__file__)))
FIND_FACES_PATH = "{base_path}/faces/find/".format(
base_path=os.path.abspath(os.path.dirname(__file__)))
# maximum distance between face and match
THRESHOLD = 75
# create the cascade classifiers
detector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
# path faces/testing or faces/training
# imageName == 1...10.userid.userrequest.jpg
# image extension = .userid.userrequest
def get_images_and_labels_helper(path, extension, stop):
images = []
labels = []
temp = []
maxreqid = 0
image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith(extension)]
for image_path in image_paths:
requestId = os.path.split(image_path)[1].split(".")[1]
print('request id:'+requestId)
print ('stop:'+str(stop))
if requestId not in temp and requestId > str(stop):
if requestId > maxreqid:
maxreqid = requestId
temp.append(requestId)
# Read the image and convert to grayscale
image_pil = Image.open(im | age_path).convert('L')
# Convert the image format into numpy array
image = np.array(image_pil, 'uint8')
# Detect the face in the image
faces = detector.detectMultiScale(image)
# If face is detected, append the face to images and the label to labels
for (x, y, w, h) in | faces:
images.append(image[y: y + h, x: x + w])
labels.append(int(os.path.split(image_path)[1].split(".")[1]))
return images, labels, maxreqid
def get_images_and_labels(path, extension):
# images will contains face images
images = []
# labels will contains the label that is assigned to the image
labels = []
# Append all the absolute image paths in a list image_paths
image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(extension)]
for image_path in image_paths:
# Read the image and convert to grayscale
image_pil = Image.open(image_path).convert('L')
# Convert the image format into numpy array
image = np.array(image_pil, 'uint8')
# Detect the face in the image
faces = detector.detectMultiScale(image)
# If face is detected, append the face to images and the label to labels
for (x, y, w, h) in faces:
images.append(image[y: y + h, x: x + w])
labels.append(int(os.path.split(image_path)[1].split(".")[0]))
return images, labels
@api_view(['POST'])
def recognize(request):
data = request.data
username =data.get('userName', None)
date = str(data.get('date', None))
requestType = data.get('type', None)
user = User.objects.get(userName=username)
userid = str(user.id)
msg = {
'error': 'Sorry there has no any matching yet'
}
if requestType == 'f':
r = FindRequest.objects.get(user=user, date=date)
fname = r.fName
gender = r.gender
requestid = str(r.id)
stop = r.stop
print requestid, userid
e = '.' + requestid + '.' + userid + '.jpg'
images, labels = get_images_and_labels(FIND_FACES_PATH, e)
recognizer = cv2.face.createLBPHFaceRecognizer()
recognizer.train(images, np.array(labels))
e1 = userid + '.jpg'
timages, tlabels, maxreqid = get_images_and_labels_helper(MISS_FACES_PATH, e1, stop)
FindRequest.objects.filter(user=user, date=date).update(stop=maxreqid)
collector = cv2.face.StandardCollector_create()
recognizer.setThreshold(THRESHOLD)
minConf = 1000
for timage, tlabel in zip(timages, tlabels):
recognizer.predict_collect(timage, collector)
conf = collector.getMinDist()
# dah bydeny elmin image mismatching ma3 el sora elly batest beha
pred = collector.getMinLabel()
if conf < minConf:
minConf = conf
labelRequest = tlabel
print tlabel
print (conf)
print (pred)
print ('hhhhhhhhhhhhhh '+str(len(timages)))
if len(timages) > 0:
requestconf = MissRequest.objects.get(id=labelRequest)
userconf =requestconf.user
# user data to be sent that match find request
userNameConf = userconf.userName
userMobileConf = userconf.mobile
userEmailConf = userconf.email
# end of user data that i want to send
useridconf = userconf.id
print (userconf, useridconf)
rdate = str(requestconf.date)
rgender = requestconf.gender
rstatus = requestconf.status
rfname = requestconf.fName
print (rdate, rfname, rgender)
rdatestr = dt.strptime(rdate, '%Y-%m-%d')
datestr = dt.strptime(date, '%Y-%m-%d')
print(gender, fname)
if rdatestr <= datestr and rgender == gender and rfname == fname and rstatus is False:
os.chdir(os.path.dirname(MISS_FACES_PATH))
filename = '1.'+str(labelRequest)+'.'+str(useridconf)+'.jpg'
image_file = open(filename, 'rb').read()
encoded_string = base64.b64encode(image_file)
msg = {
'image': encoded_string,
'user_name': userNameConf,
'user_mobile': userMobileConf,
'user_email': userEmailConf,
'date': rdate
}
elif requestType == 'm':
r = MissRequest.objects.get(user=user, date=date)
fname = r.fName
gender = r.gender
requestid = str(r.id)
stop = r.stop
# print requestid, userid
e = '.' + requestid + '.' + userid + '.jpg'
images, labels = get_images_and_labels(MISS_FACES_PATH, e)
recognizer = cv2.face.createLBPHFaceRecognizer()
recognizer.train(images, np.array(labels))
e1 = userid + '.jpg'
timages, tlabels, maxreqid = get_images_and_labels_helper(FIND_FACES_PATH, e1, stop)
MissRequest.objects.filter(user=user, date=date).update(stop=maxreqid)
collector = cv2.face.StandardCollector_create()
recognizer.setThreshold(THRESHOLD)
minConf = 1000
for timage, tlabel in zip(timages, tlabels):
recognizer.predict_collect(timage, collector)
conf = collector.getMinDist()
# dah bydeny elmin image mismatching ma3 el sora elly batest beha
pred = collector.getMinLabel()
if conf < minConf:
minConf = conf
labelRequest = tlabel
print tlabel
print (conf)
print (pred)
if len(timages) > 0:
requestconf = MissRequest.objects.get(id=labelRequest)
userconf = requestconf.user
# user data to be sent that match find request
userNameConf = userconf.userName
userMobileConf = userconf.mobile
userEmailConf = userconf.email
# end of user data that i want to send
useridconf = userconf.id
print (userconf, use |
dieterich-lab/dorina | dorina/report.py | Python | gpl-3.0 | 6,747 | 0.000148 | #!/usr/bin/env python
# -*- coding: utf-8
"""
Created on 13:48 16/04/2018 2018
This module contains a tools for ploting regulator .bed files distributed with
Dorina.
"""
import multiprocessing
from pathlib import Path
import numpy as np
import pandas as pd
import pybedtools
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, output_file, save
def filter_by_feature(feature, featuretype):
    """Return True if *feature*'s type field matches *featuretype*.

    Intended as a predicate for ``pybedtools.BedTool.filter``; field 3
    (index 2) of a GFF interval is the feature type (e.g. 'gene').

    BUG FIX: the original returned ``None`` instead of ``False`` for
    non-matches; pybedtools treats both as falsy, but an explicit bool
    is clearer and safe for any caller that tests identity.
    """
    return feature[2] == featuretype
def count_reads_in_features(bed, features_fn):
    """Count the intervals of *bed* that overlap the features in *features_fn*.

    :param bed: a ``pybedtools.BedTool`` of peaks/reads
    :param features_fn: filename of a feature annotation (GFF/BED)
    :return: number of overlapping intervals (int)
    """
    overlapping = bed.intersect(b=features_fn, stream=True)
    return overlapping.count()
def get_sequences(bed, fasta):
    """Extract the sequences of *bed*'s regions from *fasta* as a list.

    ``BedTool.sequence`` writes a FASTA file whose path it stores on
    ``bed.seqfn``; this reads that file back and returns only the
    sequence lines (headers starting with '>' are dropped), with
    trailing whitespace stripped.
    """
    bed.sequence(fi=fasta, name=True, s=True)
    with open(bed.seqfn) as handle:
        raw_lines = handle.readlines()
    return [line.rstrip() for line in raw_lines if not line.startswith('>')]
def plot_hist(values, logx=False, title=""):
    """Plot a bokeh histogram of *values*.

    :param values: sequence of numeric observations
    :param logx: use a logarithmic x axis when True
    :param title: label prepended to the figure title
    :return: a bokeh figure with density-normalised bins
             (Freedman-Diaconis rule via ``bins='fd'``)
    """
    # BUG FIX: the sigma placeholder previously reused argument 0 (the
    # mean), so the title showed the mean twice; it must be argument 1.
    t = "{2} distribution (μ={0:.2f}, σ={1:.2f})".format(
        np.mean(values), np.std(values), title)
    if logx:
        p1 = figure(title=t, x_axis_type="log")
        p1.xaxis.axis_label = 'Log(x)'
    else:
        p1 = figure(title=t)
        p1.xaxis.axis_label = 'x'
    hist, edges = np.histogram(values, density=True, bins='fd')
    p1.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
            fill_color="#036564", line_color="#036564")
    p1.legend.location = "center_right"
    p1.legend.background_fill_color = "darkgrey"
    p1.yaxis.axis_label = 'Pr(x)'
    p1.xaxis.major_label_text_font = 'helvetica'
    p1.yaxis.major_label_text_font = 'helvetica'
    p1.title.text_font = 'helvetica'
    p1.xaxis.axis_label_text_font = 'helvetica'
    return p1
def plot_vbar(values, title="counts", count=False, keys=None):
    """Plot a horizontal bar chart of *values* with value labels.

    :param values: a pandas Series; raw observations when ``count`` is
        True, otherwise pre-computed bar lengths
    :param title: figure title
    :param count: when True, bar lengths are ``value_counts`` of *values*
    :param keys: optional explicit index order; when omitted bars are
        sorted ascending by length
    :return: a bokeh figure
    """
    if count:
        counts = pd.value_counts(values).to_frame(name='x_max')
    else:
        counts = values.to_frame(name='x_max')
    if keys:
        counts = counts.loc[keys]
    else:
        counts = counts.sort_values(by='x_max', ascending=True)
    # x_max/x_min become the right/left edges of each hbar glyph.
    x_max = counts['x_max'].max()
    group = counts.index.tolist()
    counts['x_min'] = 0
    counts = ColumnDataSource(counts)
    # 10% headroom on the x range so the value labels are not clipped.
    p = figure(title=title, y_range=group, x_range=(0, x_max * 1.1),
               plot_width=500, plot_height=750)
    p.hbar(y="index", left='x_min', right='x_max', height=0.5,
           source=counts, fill_color="#036564", line_color="#036564")
    # Numeric label at the tip of each bar.
    labels = LabelSet(x='x_max', y="index", text='x_max',
                      level='glyph', x_offset=5, y_offset=-7.5,
                      source=counts, render_mode='canvas',
                      text_font='helvetica', text_font_size='9pt')
    p.add_layout(labels)
    p.toolbar.active_drag = None
    p.ygrid.grid_line_color = None
    p.xaxis.axis_label = "Counts"
    p.yaxis.axis_line_color = None
    p.yaxis.major_tick_line_color = None
    p.outline_line_color = None
    p.xaxis.major_label_text_font = 'helvetica'
    p.yaxis.major_label_text_font = 'helvetica'
    p.title.text_font = 'helvetica'
    p.xaxis.axis_label_text_font = 'helvetica'
    return p
def add_chr(entry):
    """Return *entry* with its chromosome name prefixed by 'chr' (UCSC style).

    Mutates the interval in place and returns it, so it can be used with
    ``BedTool.each``.
    """
    renamed = 'chr' + entry.chrom
    entry.chrom = renamed
    return entry
def plot_chr_counts(assembly, dataframe):
    """Plot peak counts per chromosome next to chromosome sizes.

    :param assembly: genome assembly name understood by
        ``pybedtools.chromsizes`` (e.g. 'hg38')
    :param dataframe: peaks as a DataFrame with a 'chrom' column
    :return: a bokeh gridplot with the two bar charts side by side
        (note: 'Chromossome' typo is kept in the displayed titles)
    """
    chr_size = pybedtools.chromsizes(assembly)
    # chromsizes values are (start, end) tuples; reduce to lengths.
    chromsizes = {k: chr_size[k][1] - chr_size[k][0] for k in chr_size}
    # Shared y-axis order: chromosomes sorted by ascending peak count.
    keys = dataframe['chrom'].value_counts(
    ).sort_values(ascending=True).index.tolist()
    return gridplot([[
        plot_vbar(pd.Series(dataframe['chrom']), count=True, keys=keys,
                  title='Counts per chromossome'),
        plot_vbar(pd.Series(chromsizes), keys=keys,
                  title=assembly + ' Chromossome size')]])
def plot_feat_counts(bt, datadir, n_proc=1):
    """Plot peak counts per genomic feature next to total feature lengths.

    :param bt: peaks as a ``pybedtools.BedTool``
    :param datadir: directory containing the feature annotation files
        (3_utr.gff, 5_utr.gff, cds.gff, exon.gff, intergenic.bed, intron.gff)
    :param n_proc: worker processes for the two parallel passes
    :return: a bokeh gridplot with the two bar charts
    """
    # NOTE(review): Pool.map pickles its callable; these locally-defined
    # closures are only picklable under the 'fork' start method -- confirm
    # this is never run on a 'spawn' platform (e.g. Windows/macOS default).
    def count_reads_in_features_this(feat):
        return count_reads_in_features(bt, features_fn=feat)
    def total_feature_length(bed_obj):
        # Sum of (end - start) over all intervals of the annotation.
        df = bed_obj.to_dataframe()
        return sum(df['end'] - df['start'])
    t_utr = pybedtools.BedTool(datadir + '/3_utr.gff')
    f_utr = pybedtools.BedTool(datadir + '/5_utr.gff')
    cds = pybedtools.BedTool(datadir + '/cds.gff')
    exon = pybedtools.BedTool(datadir + '/exon.gff')
    intergenic = pybedtools.BedTool(datadir + '/intergenic.bed')
    intron = pybedtools.BedTool(datadir + '/intron.gff')
    features = (t_utr, f_utr, cds, exon, intergenic, intron)
    feat_names = '3_utr 5_utr cds exon intergenic intron'.split()
    with multiprocessing.Pool(processes=n_proc) as pool:
        results = pool.map(count_reads_in_features_this, features)
    with multiprocessing.Pool(processes=n_proc) as pool:
        features_length = pool.map(total_feature_length, features)
    counts_per_feature = pd.Series(results, feat_names)
    features_length = pd.Series(features_length, feat_names)
    return gridplot([[
        plot_vbar(counts_per_feature, title='Peaks per feature'),
        plot_vbar(features_length, title='Feature length')]])
def plot_biotype_counts(bt, ensembl_gtf):
    """Plot gene-biotype counts for peak-overlapped genes vs. all genes.

    :param bt: peaks as a ``pybedtools.BedTool``
    :param ensembl_gtf: path to an Ensembl GTF; its 'gene' records are
        prefixed with 'chr' to match the peaks' UCSC-style names
    :return: a bokeh gridplot with the two bar charts
    """
    bt_gtf = pybedtools.BedTool(
        ensembl_gtf) \
        .filter(filter_by_feature, 'gene') \
        .each(add_chr) \
        .saveas()
    biotype_result = bt_gtf.intersect(bt, wa=True, wb=True)
    # Keyed by gene_id, so each overlapped gene is counted once even if
    # several peaks hit it.
    reg_type = {x['gene_id']: x.attrs['gene_biotype'] for x in biotype_result}
    gene_type = {x['gene_id']: x.attrs['gene_biotype'] for x in bt_gtf}
    reg_type = pd.Series(list(reg_type.values()))
    return gridplot([[
        plot_vbar(reg_type.value_counts(), keys=list(reg_type.unique()),
                  title='Counts per biotype'),
        plot_vbar(pd.Series(list(gene_type.values())), count=True,
                  keys=list(reg_type.unique()), title='Total')]])
def main(target, regulator=None, fasta=None, output_dir=None,
         assembly='hg38', datadir=None, n_proc=1, ensembl_gtf=None):
    """Generate the full set of HTML report plots for a regulator bed file.

    :param target: path to the peaks bed file
    :param regulator: optional substring; keep only peaks whose name
        contains it
    :param fasta: optional genome FASTA to attach sequences to the peaks
    :param output_dir: directory for the HTML output; defaults to CWD
        (NOTE(review): assumed to be a ``pathlib.Path`` when given --
        a plain str would break the ``/`` joins below; confirm callers)
    :param assembly: genome assembly name for chromosome sizes
    :param datadir: directory with the feature annotation files
    :param n_proc: worker processes for the feature-count pass
    :param ensembl_gtf: Ensembl GTF for the biotype plot
    :return: 0 on success
    """
    if output_dir is None:
        output_dir = Path.cwd()
    bt = pybedtools.BedTool(target)
    if regulator:
        bt = bt.filter(lambda x: regulator in x.name).saveas()
    df = bt.to_dataframe()
    if fasta:
        df['seq'] = get_sequences(bt, fasta=fasta)
    # Each output_file() call redirects the following save() to that path.
    output_file(str(output_dir / 'score_dist.html'))
    save(plot_hist(df['score'], logx=True, title='Score'))
    output_file(str(output_dir / 'peak_length.html'))
    save(plot_hist(df['end'] - df['start'], title='Peak length'))
    output_file(str(output_dir / 'count_per_chr.html'))
    save(plot_chr_counts(assembly, df))
    output_file(str(output_dir / 'count_per_feature.html'))
    save(plot_feat_counts(bt, datadir, n_proc=n_proc))
    output_file(str(output_dir / 'count_per_biotype.html'))
    save(plot_biotype_counts(bt, ensembl_gtf))
    return 0
|
wangybgit/Chameleon | hostapd-OpenWrt/tests/hwsim/test_suite_b.py | Python | apache-2.0 | 5,735 | 0.001918 | # Suite B tests
# Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import hostapd
from utils import HwsimSkip
def test_suite_b(dev, apdev):
    """WPA2-PSK/GCMP connection at Suite B 128-bit level"""
    # Skip unless the local build supports the Suite B 128-bit ciphers,
    # BIP-GMAC-128, and the WPA-EAP-SUITE-B AKM.
    if "GCMP" not in dev[0].get_capability("pairwise"):
        raise HwsimSkip("GCMP not supported")
    if "BIP-GMAC-128" not in dev[0].get_capability("group_mgmt"):
        raise HwsimSkip("BIP-GMAC-128 not supported")
    if "WPA-EAP-SUITE-B" not in dev[0].get_capability("key_mgmt"):
        raise HwsimSkip("WPA-EAP-SUITE-B not supported")
    # Suite B requires OpenSSL 1.0.2 at both build and run time here.
    tls = dev[0].request("GET tls_library")
    if not tls.startswith("OpenSSL"):
        raise HwsimSkip("TLS library not supported for Suite B: " + tls);
    if "build=OpenSSL 1.0.2" not in tls or "run=OpenSSL 1.0.2" not in tls:
        raise HwsimSkip("OpenSSL version not supported for Suite B: " + tls)
    dev[0].flush_scan_cache()
    # AP configuration: EAP-TLS with EC certificates, GCMP pairwise,
    # PMF required (ieee80211w=2), SUITEB128 OpenSSL cipher policy.
    params = { "ssid": "test-suite-b",
               "wpa": "2",
               "wpa_key_mgmt": "WPA-EAP-SUITE-B",
               "rsn_pairwise": "GCMP",
               "group_mgmt_cipher": "BIP-GMAC-128",
               "ieee80211w": "2",
               "ieee8021x": "1",
               "openssl_ciphers": "SUITEB128",
               #"dh_file": "auth_serv/dh.conf",
               "eap_server": "1",
               "eap_user_file": "auth_serv/eap_user.conf",
               "ca_cert": "auth_serv/ec-ca.pem",
               "server_cert": "auth_serv/ec-server.pem",
               "private_key": "auth_serv/ec-server.key" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B", ieee80211w="2",
                   openssl_ciphers="SUITEB128",
                   eap="TLS", identity="tls user",
                   ca_cert="auth_serv/ec-ca.pem",
                   client_cert="auth_serv/ec-user.pem",
                   private_key="auth_serv/ec-user.key",
                   pairwise="GCMP", group="GCMP", scan_freq="2412")
    # Verify the negotiated TLS cipher and the advertised BSS flags.
    tls_cipher = dev[0].get_status_field("EAP TLS cipher")
    if tls_cipher != "ECDHE-ECDSA-AES128-GCM-SHA256":
        raise Exception("Unexpected TLS cipher: " + tls_cipher)
    bss = dev[0].get_bss(apdev[0]['bssid'])
    if 'flags' not in bss:
        raise Exception("Could not get BSS flags from BSS table")
    if "[WPA2-EAP-SUITE-B-GCMP]" not in bss['flags']:
        raise Exception("Unexpected BSS flags: " + bss['flags'])
    # Reconnect must use PMKSA caching, i.e. no new EAP exchange.
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected(timeout=20)
    dev[0].dump_monitor()
    dev[0].request("RECONNECT")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
                            "CTRL-EVENT-CONNECTED"], timeout=20)
    if ev is None:
        raise Exception("Roaming with the AP timed out")
    if "CTRL-EVENT-EAP-STARTED" in ev:
        raise Exception("Unexpected EAP exchange")
def test_suite_b_192(dev, apdev):
    """WPA2-PSK/GCMP-256 connection at Suite B 192-bit level"""
    # Skip unless the local build supports the Suite B 192-bit ciphers,
    # BIP-GMAC-256, and the WPA-EAP-SUITE-B-192 AKM.
    if "GCMP-256" not in dev[0].get_capability("pairwise"):
        raise HwsimSkip("GCMP-256 not supported")
    if "BIP-GMAC-256" not in dev[0].get_capability("group_mgmt"):
        raise HwsimSkip("BIP-GMAC-256 not supported")
    if "WPA-EAP-SUITE-B-192" not in dev[0].get_capability("key_mgmt"):
        raise HwsimSkip("WPA-EAP-SUITE-B-192 not supported")
    # Suite B requires OpenSSL 1.0.2 at both build and run time here.
    tls = dev[0].request("GET tls_library")
    if not tls.startswith("OpenSSL"):
        raise HwsimSkip("TLS library not supported for Suite B: " + tls);
    if "build=OpenSSL 1.0.2" not in tls or "run=OpenSSL 1.0.2" not in tls:
        raise HwsimSkip("OpenSSL version not supported for Suite B: " + tls)
    dev[0].flush_scan_cache()
    # AP configuration: EAP-TLS with P-384 EC certificates, GCMP-256
    # pairwise, PMF required, SUITEB192 OpenSSL cipher policy.
    params = { "ssid": "test-suite-b",
               "wpa": "2",
               "wpa_key_mgmt": "WPA-EAP-SUITE-B-192",
               "rsn_pairwise": "GCMP-256",
               "group_mgmt_cipher": "BIP-GMAC-256",
               "ieee80211w": "2",
               "ieee8021x": "1",
               "openssl_ciphers": "SUITEB192",
               "eap_server": "1",
               "eap_user_file": "auth_serv/eap_user.conf",
               "ca_cert": "auth_serv/ec2-ca.pem",
               "server_cert": "auth_serv/ec2-server.pem",
               "private_key": "auth_serv/ec2-server.key" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
                   ieee80211w="2",
                   openssl_ciphers="SUITEB192",
                   eap="TLS", identity="tls user",
                   ca_cert="auth_serv/ec2-ca.pem",
                   client_cert="auth_serv/ec2-user.pem",
                   private_key="auth_serv/ec2-user.key",
                   pairwise="GCMP-256", group="GCMP-256", scan_freq="2412")
    # Verify the negotiated TLS cipher and the advertised BSS flags.
    tls_cipher = dev[0].get_status_field("EAP TLS cipher")
    if tls_cipher != "ECDHE-ECDSA-AES256-GCM-SHA384":
        raise Exception("Unexpected TLS cipher: " + tls_cipher)
    bss = dev[0].get_bss(apdev[0]['bssid'])
    if 'flags' not in bss:
        raise Exception("Could not get BSS flags from BSS table")
    if "[WPA2-EAP-SUITE-B-192-GCMP-256]" not in bss['flags']:
        raise Exception("Unexpected BSS flags: " + bss['flags'])
    # Reconnect must use PMKSA caching, i.e. no new EAP exchange.
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected(timeout=20)
    dev[0].dump_monitor()
    dev[0].request("RECONNECT")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
                            "CTRL-EVENT-CONNECTED"], timeout=20)
    if ev is None:
        raise Exception("Roaming with the AP timed out")
    if "CTRL-EVENT-EAP-STARTED" in ev:
        raise Exception("Unexpected EAP exchange")
|
vertexproject/synapse | synapse/tests/test_cryotank.py | Python | apache-2.0 | 4,379 | 0.000457 | import synapse.common as s_common
import synapse.cryotank as s_cr | yotank
import synapse.lib.const as s_const
import synapse.tests.utils as s_t_utils
from synapse.tests.utils import alist
logger = s_cryotank.logger
cryodata = (('foo', {'bar': 10}), ('baz', {'faz': 20}))
class CryoTest(s_t_utils.SynTest):
    """Exercises the CryoCell telepath APIs: tank init/list/put/slice,
    per-consumer offsets, direct tank shares, and persistence of tank
    configuration across cell restarts."""

    async def test_cryo_cell_async(self):
        # Minimal smoke test over a local telepath proxy.
        async with self.getTestCryo() as cryo:
            async with cryo.getLocalProxy() as prox:
                self.true(await prox.init('foo'))
                self.eq([], await alist(prox.rows('foo', 0, 1)))

    async def test_cryo_cell(self):
        with self.getTestDir() as dirn:
            async with self.getTestCryoAndProxy(dirn=dirn) as (cryo, prox):
                self.eq((), await prox.list())
                self.true(await prox.init('foo'))
                self.eq('foo', (await prox.list())[0][0])
                self.none(await prox.last('foo'))
                self.eq([], await alist(prox.rows('foo', 0, 1)))
                # Two puts of two items each -> four entries total.
                self.true(await prox.puts('foo', cryodata))
                info = await prox.list()
                self.eq('foo', info[0][0])
                self.eq(2, info[0][1].get('stat').get('entries'))
                self.true(await prox.puts('foo', cryodata))
                items = await alist(prox.slice('foo', 1, 3))
                self.eq(items[0][1][0], 'baz')
                metrics = await alist(prox.metrics('foo', 0, 9999))
                self.len(2, metrics)
                self.eq(2, metrics[0][1]['count'])
                self.eq(3, (await prox.last('foo'))[0])
                self.eq('baz', (await prox.last('foo'))[1][0])
                # slice() tracks a per-consumer offset keyed by iden.
                iden = s_common.guid()
                self.eq(0, await prox.offset('foo', iden))
                items = await alist(prox.slice('foo', 0, 1000, iden=iden))
                self.eq(0, await prox.offset('foo', iden))
                items = await alist(prox.slice('foo', 4, 1000, iden=iden))
                self.eq(4, await prox.offset('foo', iden))
                # test the direct tank share....
                async with cryo.getLocalProxy(share='cryotank/foo') as lprox:
                    items = await alist(lprox.slice(1, 3))
                    self.eq(items[0][1][0], 'baz')
                    self.len(4, await alist(lprox.slice(0, 9999)))
                    await lprox.puts(cryodata)
                    self.len(6, await alist(lprox.slice(0, 9999)))
                    # test offset storage and updating
                    iden = s_common.guid()
                    self.eq(0, await lprox.offset(iden))
                    self.eq(2, await lprox.puts(cryodata, seqn=(iden, 0)))
                    self.eq(2, await lprox.offset(iden))
                # test the new open share
                async with cryo.getLocalProxy(share='cryotank/lulz') as lprox:
                    self.len(0, await alist(lprox.slice(0, 9999)))
                    await lprox.puts(cryodata)
                    self.len(2, await alist(lprox.slice(0, 9999)))
                    self.len(1, await alist(lprox.metrics(0)))
                # Delete apis
                self.false(await prox.delete('newp'))
                self.true(await prox.delete('lulz'))
            # Re-open the tank and ensure that the deleted tank is not present.
            async with self.getTestCryoAndProxy(dirn=dirn) as (cryo, prox):
                tanks = await prox.list()
                self.len(1, tanks)
                self.eq('foo', tanks[0][0])

    async def test_cryo_init(self):
        with self.getTestDir() as dirn:
            async with self.getTestCryo(dirn) as cryo:
                # test passing conf data in through init directly
                tank = await cryo.init('conftest', conf={'map_size': s_const.mebibyte * 64})
                self.eq(tank.slab.mapsize, s_const.mebibyte * 64)
                _, conf = await cryo.hive.get(('cryo', 'names', 'conftest'))
                self.eq(conf, {'map_size': s_const.mebibyte * 64})
            # And the data was persisted
            async with self.getTestCryo(dirn) as cryo:
                tank = cryo.tanks.get('conftest')
                self.eq(tank.slab.mapsize, s_const.mebibyte * 64)
                _, conf = await cryo.hive.get(('cryo', 'names', 'conftest'))
                self.eq(conf, {'map_size': s_const.mebibyte * 64})
|
PrincetonUniversity/AdvNet-OF_Scripts | evaluation/switch/flowmod_test/pox/pox/samples/l2_bell_burst_mod.py | Python | apache-2.0 | 7,313 | 0.015315 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is | free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A | PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
An L2 learning switch.
It is derived from one written live for an SDN crash course.
It is somwhat similar to NOX's pyswitch in that it installs
exact-match rules for each flow.
"""
from __future__ import division
from random import randrange
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
import sys, os, commands, time
from pox.lib.util import dpidToStr
log = core.getLogger()
#-------------------------------define flow rate----------
# Target rate (flow-mods per second) for the burst experiment.
flow_rate = 50
# Seconds between consecutive flow-mods; this is true division because
# the module imports `from __future__ import division`.
interval = 1/flow_rate
print 'current flow modification rate is:', flow_rate
# NOTE(review): `global` at module scope is a no-op statement; it can be
# removed without changing behaviour.
global burst
# dst IP -> send timestamp for each OFPFC_MODIFY issued during the burst.
burst = {}
# We don't want to flood immediately when a switch connects.
# Can be overriden on commandline.
_flood_delay = 0
class LearningSwitch (object):
  """
  The learning switch "brain" associated with a single OpenFlow switch.

  NOTE(review): this copy of the POX l2_learning switch has been turned
  into a flow-mod burst measurement harness: __init__ pre-installs rules,
  launches an external sniffer and packet generator, sends a burst of
  OFPFC_MODIFY messages while recording timestamps, writes them to
  'poxout1' and then kills the controller.  The original learning-switch
  algorithm described below is no longer performed (_handle_PacketIn is
  effectively a stub).

  When we see a packet, we'd like to output it on a port which will
  eventually lead to the destination.  To accomplish this, we build a
  table that maps addresses to ports.

  We populate the table by observing traffic.  When we see a packet
  from some source coming from some port, we know that source is out
  that port.

  When we want to forward traffic, we look up the desintation in our
  table.  If we don't know the port, we simply send the message out
  all ports except the one it came in on.  (In the presence of loops,
  this is bad!).

  In short, our algorithm looks like this:

  For each packet from the switch:
  1) Use source address and switch port to update address/port table
  2) Is transparent = False and either Ethertype is LLDP or the packet's
     destination address is a Bridge Filtered address?
     Yes:
        2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x)
            DONE
  3) Is destination multicast?
     Yes:
        3a) Flood the packet
            DONE
  4) Port for destination address in our address/port table?
     No:
        4a) Flood the packet
            DONE
  5) Is output port the same as input port?
     Yes:
        5a) Drop packet and similar ones for a while
  6) Install flow table entry in the switch so that this
     flow goes out the appopriate port
     6a) Send the packet out appropriate port
  """
  def __init__ (self, connection, transparent):
    # Switch we'll be adding L2 learning switch capabilities to
    self.connection = connection
    self.transparent = transparent
    # Our table
    self.macToPort = {}
    # We want to hear PacketIn messages, so we listen
    # to the connection
    connection.addListeners(self)
    # We just use this to know when to log a helpful message
    self.hold_down_expired = _flood_delay == 0
    #-----------------------
    msg = of.ofp_flow_mod(command=of.OFPFC_DELETE)
    # iterate over all connected switches and delete all their flows
    connection.send(msg)
    print "INFO: Clearing all flows..."
    #for BCM switch only
    # Low-priority catch-all for traffic from 10.0.0.1 (no action: drop).
    msg = of.ofp_flow_mod()
    msg.priority = 10
    msg.match.dl_type = 0x800
    #msg.match.in_port = 5
    msg.match.nw_src = '10.0.0.1'
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    #msg.actions.append(of.ofp_action_output(port = 1))
    self.connection.send(msg)
    print 'INFO: add a default rule... I am slice 1(BCM only)'
    # Pre-install exact-match rules (even k only) for dsts 192.168.56.x,
    # output port 2; these are the rules the burst below will modify.
    for k in xrange(1,65):#the number of rules to install
      #insert first
      if k % 2 == 0:
        msg = of.ofp_flow_mod()
        #msg.match = of.ofp_match.from_packet(packet, event.port)
        #msg.priority = 20000 + randrange(1000)
        msg.priority = 2000
        msg.match.dl_type = 0x800
        i = int(k / 256) + 56
        j = k % 256
        dst = '192.168.' + str(i) + '.' + str(j)
        #msg.match.in_port = 1
        msg.match.nw_src = '10.0.0.1'
        msg.match.nw_dst = dst
        #print 'INFO',dst, time.time()
        msg.idle_timeout = 0
        msg.hard_timeout = 0
        msg.actions.append(of.ofp_action_output(port = 2))
        #msg.data = event.ofp # 6a
        self.connection.send(msg)
        time.sleep(0.02)
    #-------------------------
    # (note that flow_mods match all flows by default)
    # Start the external sniffer and traffic generator used to measure
    # when the modified rules take effect on the data plane.
    os.system('./simplesniffer eth2 64&')
    os.system('sudo bash ../pktgen/pktgen.conf.1-1-flow-dist.sh &')
    time.sleep(5)
    y = 0
    print 'INFO: starting sending flow mod...'
    # Burst of OFPFC_MODIFY redirecting the same dsts to port 5, with the
    # send timestamp of each recorded in the module-level `burst` dict.
    for k in xrange(1,65):#the number of rules to install
      #insert firsti
      msg = of.ofp_flow_mod()
      if k % 2 == 0:
        msg.command = of.OFPFC_MODIFY
        #msg.match = of.ofp_match.from_packet(packet, event.port)
        #msg.priority = 20000 + randrange(1000)
        msg.priority = 2000
        msg.match.dl_type = 0x800
        i = int(k / 256) + 56
        j = k % 256
        dst = '192.168.' + str(i) + '.' + str(j)
        #msg.match.in_port = 1
        msg.match.nw_src = '10.0.0.1'
        msg.match.nw_dst = dst
        #print 'INFO',dst, time.time()
        msg.idle_timeout = 0
        msg.hard_timeout = 0
        msg.actions.append(of.ofp_action_output(port = 5))
        #msg.data = event.ofp # 6a
        self.connection.send(msg)
        #print 'DATA: 10.0.0.1', dst, '%f' %time.time()
        #print 'DATA: 10.0.0.1', dst, '%f' %time.time()
        burst[dst] = time.time()
        #time.sleep(interval)
    print 'INFO: flow mod measure finished...'
    #write file
    w = open('poxout1','w')
    for d in burst:
      w.write('src: 10.0.0.1 dst: %s sec: %f usec: %f\n' %(d, int(burst[d]), (burst[d] - int(burst[d])) * 1000000 ))
    w.close()
    os.system('sudo bash cleanpox.sh') #self destrory
  def _handle_PacketIn (self, event):
    """
    Handle packet in messages from the switch to implement above algorithm.
    """
    # Learning-switch logic removed for the measurement harness; packets
    # are parsed but otherwise ignored.
    packet = event.parsed
    #print 'PACKET_IN:', event.port, packet.next.dstip,'%f' % time.time()
def _handle_flowstats_received (event):
  # Log OFPT_STATS_REPLY (flow stats) messages from a switch.
  # NOTE(review): flow_stats_to_list is not imported in this module (it
  # normally comes from pox.openflow.of_json), so invoking this handler
  # would raise NameError -- confirm the intended import.
  stats = flow_stats_to_list(event.stats)
  print "FlowStatsReceived from %s: %s" % (dpidToStr(event.connection.dpid), stats)
class l2_learning (object):
  """
  Waits for OpenFlow switches to connect and makes them learning switches.
  """
  def __init__ (self, transparent):
    # Subscribe to openflow events (ConnectionUp in particular).
    core.openflow.addListeners(self)
    self.transparent = transparent
  def _handle_ConnectionUp (self, event):
    # Attach a LearningSwitch "brain" to every switch that connects.
    log.debug("Connection %s" % (event.connection,))
    LearningSwitch(event.connection, self.transparent)
def launch (transparent=False, hold_down=_flood_delay):
  """
  Starts an L2 learning switch.

  transparent -- if truthy, link-local (LLDP / bridge-filtered) traffic
                 is forwarded instead of dropped.
  hold_down   -- seconds to wait after a switch connects before flooding;
                 must parse as a non-negative integer.
  """
  global _flood_delay
  try:
    _flood_delay = int(str(hold_down), 10)
    if _flood_delay < 0:
      # Reject negative values explicitly rather than via `assert`,
      # which would be stripped when running under python -O.
      raise ValueError("hold-down must be non-negative")
  except ValueError:
    # Previously a bare `except:`, which also masked unrelated failures
    # (KeyboardInterrupt, SystemExit, programming errors).
    raise RuntimeError("Expected hold-down to be a number")
  core.registerNew(l2_learning, str_to_bool(transparent))
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/funding_contributor_v30_rc1.py | Python | mit | 6,669 | 0.00015 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.contributor_email_v30_rc1 import ContributorEmailV30Rc1 # noq | a: F401,E501
from orcid_api_v3.models.contributor_orcid_v30_rc1 import ContributorOrcidV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.credit_name_v30_rc1 import CreditNameV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.funding_contributor_attributes_v30_rc1 import FundingContributorAttributesV30Rc1 # noqa: F401,E501
class FundingContributorV30Rc1(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> swagger model type (drives to_dict serialisation)
    swagger_types = {
        'contributor_orcid': 'ContributorOrcidV30Rc1',
        'credit_name': 'CreditNameV30Rc1',
        'contributor_email': 'ContributorEmailV30Rc1',
        'contributor_attributes': 'FundingContributorAttributesV30Rc1'
    }

    # attribute name -> JSON key used by the ORCID API payload
    attribute_map = {
        'contributor_orcid': 'contributor-orcid',
        'credit_name': 'credit-name',
        'contributor_email': 'contributor-email',
        'contributor_attributes': 'contributor-attributes'
    }

    def __init__(self, contributor_orcid=None, credit_name=None, contributor_email=None, contributor_attributes=None):  # noqa: E501
        """FundingContributorV30Rc1 - a model defined in Swagger"""  # noqa: E501
        self._contributor_orcid = None
        self._credit_name = None
        self._contributor_email = None
        self._contributor_attributes = None
        self.discriminator = None
        if contributor_orcid is not None:
            self.contributor_orcid = contributor_orcid
        if credit_name is not None:
            self.credit_name = credit_name
        if contributor_email is not None:
            self.contributor_email = contributor_email
        if contributor_attributes is not None:
            self.contributor_attributes = contributor_attributes

    @property
    def contributor_orcid(self):
        """Gets the contributor_orcid of this FundingContributorV30Rc1.  # noqa: E501

        :return: The contributor_orcid of this FundingContributorV30Rc1.  # noqa: E501
        :rtype: ContributorOrcidV30Rc1
        """
        return self._contributor_orcid

    @contributor_orcid.setter
    def contributor_orcid(self, contributor_orcid):
        """Sets the contributor_orcid of this FundingContributorV30Rc1.

        :param contributor_orcid: The contributor_orcid of this FundingContributorV30Rc1.  # noqa: E501
        :type: ContributorOrcidV30Rc1
        """
        self._contributor_orcid = contributor_orcid

    @property
    def credit_name(self):
        """Gets the credit_name of this FundingContributorV30Rc1.  # noqa: E501

        :return: The credit_name of this FundingContributorV30Rc1.  # noqa: E501
        :rtype: CreditNameV30Rc1
        """
        return self._credit_name

    @credit_name.setter
    def credit_name(self, credit_name):
        """Sets the credit_name of this FundingContributorV30Rc1.

        :param credit_name: The credit_name of this FundingContributorV30Rc1.  # noqa: E501
        :type: CreditNameV30Rc1
        """
        self._credit_name = credit_name

    @property
    def contributor_email(self):
        """Gets the contributor_email of this FundingContributorV30Rc1.  # noqa: E501

        :return: The contributor_email of this FundingContributorV30Rc1.  # noqa: E501
        :rtype: ContributorEmailV30Rc1
        """
        return self._contributor_email

    @contributor_email.setter
    def contributor_email(self, contributor_email):
        """Sets the contributor_email of this FundingContributorV30Rc1.

        :param contributor_email: The contributor_email of this FundingContributorV30Rc1.  # noqa: E501
        :type: ContributorEmailV30Rc1
        """
        self._contributor_email = contributor_email

    @property
    def contributor_attributes(self):
        """Gets the contributor_attributes of this FundingContributorV30Rc1.  # noqa: E501

        :return: The contributor_attributes of this FundingContributorV30Rc1.  # noqa: E501
        :rtype: FundingContributorAttributesV30Rc1
        """
        return self._contributor_attributes

    @contributor_attributes.setter
    def contributor_attributes(self, contributor_attributes):
        """Sets the contributor_attributes of this FundingContributorV30Rc1.

        :param contributor_attributes: The contributor_attributes of this FundingContributorV30Rc1.  # noqa: E501
        :type: FundingContributorAttributesV30Rc1
        """
        self._contributor_attributes = contributor_attributes

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested swagger models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(FundingContributorV30Rc1, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FundingContributorV30Rc1):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
NablaWebkom/django-wiki | wiki/plugins/macros/settings.py | Python | gpl-3.0 | 256 | 0 | from __future__ import absolute_import, unicode_literals
from django.conf import settings as django_settings
SLUG = 'macros'
APP_LABEL = 'wiki'

# Macro names exposed by the wiki "macros" plugin; site admins can
# override the tuple via the WIKI_PLUGINS_METHODS Django setting.
METHODS = getattr(
    django_settings,
    'WIKI_PLUGINS_METHODS',
    ('article_list',
     'toc',
     ))
|
lituan/tools | leaf/leaf.py | Python | cc0-1.0 | 6,618 | 0.008008 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
use leaf alogrithm to remove redundancy
this algorithm is published in this following paper
1. Bull, S. C., Muldoon, M. R. & Doig, A. J. Maximising the Size of Non-Redundant Protein Datasets Using Graph Theory. PLoS One 8, (2013).
Simon Bull gives an implementation for python 3, hosted at https://github.com/SimonCB765/Leaf
this implementation is for python 2.7
this script can read in a multiple alignment file in fasta format, compute pairwise similarities, and remove redundancy accordint to similarity cutoff
you can choose to use igraph to plot the network
usage python leaf.py test.fa
"""
import sys
import os
import igraph
def read_msa(msa_f):
    """Parse a FASTA multiple-alignment file into (name, sequence) tuples.

    The record name is the first whitespace-delimited token after '>';
    all sequence lines belonging to one record are concatenated.
    """
    with open(msa_f) as handle:
        lines = [line.rstrip('\r\n') for line in handle.readlines() if line]
    # Record boundaries: every header line, plus a sentinel at EOF.
    starts = [i for i, line in enumerate(lines) if '>' in line]
    bounds = starts + [len(lines)]
    records = []
    for idx, begin in enumerate(bounds[:-1]):
        chunk = lines[begin:bounds[idx + 1]]
        name = chunk[0].split()[0][1:]
        records.append((name, ''.join(chunk[1:])))
    return records
def get_pim(seqs):
    """Build a symmetric pairwise-identity matrix for aligned sequences.

    *seqs* is a list of (name, sequence) pairs from one alignment, so
    all sequences share the same length.  Entry [i][j] is the fraction
    of positions at which sequences i and j carry the same character;
    the diagonal is 1.0.  Values already computed for j < i are reused.
    """
    def identity_fraction(seq_a, seq_b):
        matches = sum(1 for a, b in zip(seq_a, seq_b) if a == b)
        return matches * 1.0 / len(seq_a)

    n = len(seqs)
    rows = []
    for i in range(n):
        row = []
        for j in range(n):
            if j < i:
                # Symmetric: mirror the already-computed upper triangle.
                row.append(rows[j][i])
            elif j > i:
                row.append(identity_fraction(seqs[i][1], seqs[j][1]))
            else:
                row.append(1.0)
        rows.append(row)
    return rows
def leaf(labels, similarities, cutoff, filename):
    """Remove redundant vertices with the Leaf graph algorithm.

    Builds an adjacency matrix connecting items whose pairwise similarity
    exceeds *cutoff*, then (1) within each detected clique keeps one
    vertex that has no edges outside the clique and removes the rest,
    and (2) repeatedly removes the vertex with the most remaining
    neighbours until no edges are left.  Plots the initial, post-clique
    and final networks to '<filename>.png', '<filename>_leaf.png' and
    '<filename>_nr.png' via igraph.

    NOTE: this module is Python 2 (print statements; `map` must return a
    list for igraph's Adjacency matrix).

    :param labels: item names, parallel to *similarities*
    :param similarities: symmetric matrix of pairwise similarities
    :param cutoff: similarity above which two items count as redundant
    :param filename: prefix for the three output plots
    :return: (kept labels, similarity sub-matrix of the kept items)
    """
    matrix = [map(lambda x: 1 if x > cutoff else 0, row)
              for row in similarities]
    # Clear the diagonal: a vertex is not its own neighbour.
    for i in range(len(matrix)):
        matrix[i][i] = 0
    # use igraph to plot the initial network
    graph = igraph.Graph.Adjacency(matrix, mode='undirected')
    igraph.plot(graph, filename + '.png', vertex_label=range(len(labels)))
    adjlist = [[i for i,n in enumerate(row ) if n] for row in matrix]
    neighbors = []
    remove = []
    for i,a in enumerate(adjlist):
        print '{0}:{1},'.format(i,a)
    # transform adjlist to set
    neighbors = [set(n) for i, n in enumerate(adjlist)]
    # detect possible max clique
    max_neighbors = max(len(l) for l in neighbors)
    # the possible clique size is 2 to max_neighbors+1, so the possible
    # neighborsize is 1 to max_neighbors
    for clique_num in range(1, max_neighbors + 1):
        nodes_index = set([i for i, l in enumerate(
            neighbors) if len(l) == clique_num])
        for i in nodes_index:
            if not i in remove: # do not compute removed vertex
                # a clique is set of vertex connecting to each other
                nodesofinterest = neighbors[i].union([i])
                print 'initial nodesofinterest: ',nodesofinterest
                if set.intersection(*[neighbors[i].union([i]) for i in nodesofinterest]) == nodesofinterest:
                    print 'clique nodesofinterest: ',nodesofinterest
                    # detect vertex without linking to outside vertex
                    in_clique = [i for i in nodesofinterest if not neighbors[
                        i].union([i]).difference(nodesofinterest)]
                    # keep one of the vertex without linking to outside vertex,
                    # remove rest
                    if in_clique:
                        print 'in_clique: ',in_clique
                        keep = [in_clique[0]]
                        print 'keep: ',keep
                        remove_iter = nodesofinterest.difference(set(keep))
                        print 'remove_iter: ',remove_iter
                        for r in remove_iter:
                            if not r in remove: # do not compute removed vertex
                                print 'remove: ',r
                                for i in range(len(neighbors)):
                                    if r in neighbors[i]:
                                        neighbors[i].remove(r)
                        remove += remove_iter
    print 'after leaf: ',neighbors
    nr_matrix = [matrix[i] for i in range(len(matrix)) if not i in remove]
    nr_matrix = [[row[i] for i in range(
        len(matrix)) if not i in remove] for row in nr_matrix]
    graph = igraph.Graph.Adjacency(nr_matrix, mode='undirected')
    nr_labels = [i for i in range(len(matrix)) if not i in remove]
    igraph.plot(graph, filename + '_leaf.png', vertex_label=nr_labels)
    # continue to remove the one with most neighbors until no vertex has
    # neighbors, removed vertex is not considered
    while max([len(r) for i, r in enumerate(neighbors) if not i in remove]) > 0:
        max_index = max([(len(r), i) for i, r in enumerate(neighbors) if not i in remove])[1]
        print 'remove: ',max_index
        remove.append(max_index)
        for i in set(range(len(neighbors))).difference(set(remove)): # do not compute remove vertex
            if max_index in neighbors[i]:
                neighbors[i].remove(max_index)
    print 'final remove: ',remove
    nr_matrix = [matrix[i] for i in range(len(matrix)) if not i in remove]
    nr_matrix = [[row[i] for i in range(
        len(matrix)) if not i in remove] for row in nr_matrix]
    nr_labels = [i for i in range(len(matrix)) if not i in remove]
    # plot non-redundant notwork
    graph = igraph.Graph.Adjacency(nr_matrix, mode='undirected')
    igraph.plot(graph, filename + '_nr.png', vertex_label=nr_labels)
    nr_similarities = [similarities[i] for i in range(len(similarities)) if not i in remove]
    nr_similarities = [[row[i] for i in range(
        len(similarities)) if not i in remove] for row in nr_similarities]
    nr_labels = [labels[i] for i in range(len(similarities)) if not i in remove]
    return nr_labels, nr_similarities
def main():
    """Write non-redundant sequence sets at several similarity cutoffs.

    The last command-line argument is the input alignment file (read via
    read_msa; presumably a multiple sequence alignment -- confirm with the
    definition earlier in this file).
    """
    seqs = read_msa(sys.argv[-1])
    # Base output name: input file name without directory or extension.
    filename = os.path.splitext(os.path.split(sys.argv[-1])[1])[0]
    seqnames = [seq[0] for seq in seqs]
    # Pairwise similarity matrix (get_pim: percent-identity matrix, presumably).
    similarities = get_pim(seqs)
    for cutoff in [0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95]:
    # for cutoff in [0.8]:
        # leaf() prunes the similarity graph at this cutoff; keep survivors.
        nr_names,nr_similarities = leaf(seqnames, similarities, cutoff, filename+'_'+str(cutoff))
        nr_seqs = [seq for seq in seqs if seq[0] in nr_names]
        # Write the retained (name, sequence) pairs in FASTA format.
        with open(filename+'_nr_seqs_'+str(cutoff)+'.fas','w') as w_f:
            for pro,seq in nr_seqs:
                print >> w_f,'>{0}'.format(pro)
                print >> w_f,'{0}'.format(seq)
if __name__ == "__main__":
    main()
|
t-wissmann/qutebrowser | qutebrowser/browser/webkit/tabhistory.py | Python | gpl-3.0 | 3,533 | 0 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities related to QWebHistory."""
import typing
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QUrl
from qutebrowser.utils import qtutils
def _serialize_items(items, current_idx, stream):
    """Write the given history items to a QDataStream.

    Args:
        items: The WebHistoryItems to serialize.
        current_idx: Index of the currently active item.
        stream: The QDataStream to write to.
    """
    # Example of the map written to the stream:
    # {'currentItemIndex': 0,
    #  'history': [{'children': [],
    #               'documentSequenceNumber': 1485030525573123,
    #               'documentState': [],
    #               'formContentType': '',
    #               'itemSequenceNumber': 1485030525573122,
    #               'originalURLString': 'about:blank',
    #               'pageScaleFactor': 0.0,
    #               'referrer': '',
    #               'scrollPosition': {'x': 0, 'y': 0},
    #               'target': '',
    #               'title': '',
    #               'urlString': 'about:blank'}]}
    data = {'currentItemIndex': current_idx, 'history': []}
    for item in items:
        data['history'].append(_serialize_item(item))

    stream.writeInt(3)  # history stream version
    stream.writeQVariantMap(data)
def _serialize_item(item):
    """Build the QVariantMap dict for a single history item."""
    scroll = {'x': 0, 'y': 0}
    try:
        pos = item.user_data['scroll-pos']
        scroll['x'] = pos.x()
        scroll['y'] = pos.y()
    except (KeyError, TypeError):
        # No user data / no recorded scroll position - keep (0, 0).
        pass
    return {
        'originalURLString': item.original_url.toString(QUrl.FullyEncoded),
        'scrollPosition': scroll,
        'title': item.title,
        'urlString': item.url.toString(QUrl.FullyEncoded),
    }
def serialize(items):
    """Serialize a list of WebHistoryItems to a data stream.

    Args:
        items: An iterable of WebHistoryItems.

    Return:
        A (stream, data, user_data) tuple.
            stream: The reset QDataStream.
            data: The QByteArray with the raw data.
            user_data: A list with each item's user data.

    Warning:
        If 'data' goes out of scope, reading from 'stream' will result in a
        segfault!
    """
    data = QByteArray()
    stream = QDataStream(data, QIODevice.ReadWrite)

    # Find the single active item; complain about duplicates right away.
    active_idx = None
    for idx, item in enumerate(items):
        if not item.active:
            continue
        if active_idx is not None:
            raise ValueError("Multiple active items ({} and {}) "
                             "found!".format(active_idx, idx))
        active_idx = idx

    if items and active_idx is None:
        raise ValueError("No active item found!")
    if not items:
        active_idx = 0

    _serialize_items(items, active_idx, stream)

    user_data = [
        item.user_data for item in items
    ]  # type: typing.List[typing.Mapping[str, typing.Any]]

    stream.device().reset()
    qtutils.check_qdatastream(stream)
    return stream, data, user_data
|
rwl/muntjac | muntjac/demo/sampler/features/commons/Tooltips.py | Python | apache-2.0 | 930 | 0.009677 |
from muntjac.ui.abstract_component import AbstractComponent
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version
class Tooltips(Feature):
    """Sampler feature page describing component tooltips (descriptions)."""

    def getSinceVersion(self):
        return Version.OLD

    def getName(self):
        return 'Tooltips'

    def getDescription(self):
        return ('Most components can have a <i>description</i>,'
                ' which is usually shown as a <i>\"tooltip\"</i>.'
                ' In the Form component, the description is shown at the'
                ' top of the form.'
                ' Descriptions can have HTML formatted (\'rich\') content.<br/>')

    def getRelatedAPI(self):
        return [APIResource(AbstractComponent)]

    def getRelatedFeatures(self):
        # TODO Auto-generated method stub
        return None

    def getRelatedResources(self):
        # TODO Auto-generated method stub
        return None
|
mitodl/bootcamp-ecommerce | applications/management/utils.py | Python | bsd-3-clause | 6,610 | 0.003177 | """Application management command utility functions"""
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.db import transaction
from applications.api import derive_application_state
from applications.constants import APPROVED_APP_STATES
from applications.models import (
BootcampApplication,
ApplicationStepSubmission,
ApplicationStep,
)
from klasses.models import BootcampRun
from main.utils import is_empty_file
def fetch_bootcamp_run(run_property):
    """
    Fetches a bootcamp run based on a given property, which could refer to a few different fields

    Args:
        run_property (str): A string indicating an id, title, etc.

    Returns:
        BootcampRun: The bootcamp run that matches the given property
    """
    # A purely-numeric value is treated as a primary key, anything else as a title.
    lookup = {"id": run_property} if run_property.isdigit() else {"title": run_property}
    return BootcampRun.objects.get(**lookup)
def has_same_application_steps(bootcamp_id1, bootcamp_id2, ignore_order=True):
    """
    Returns True if the application steps are the same for the bootcamps indicated by the given ids

    Args:
        bootcamp_id1 (int): A bootcamp id
        bootcamp_id2 (int): Another bootcamp id
        ignore_order (bool): If set to True, the function will still return True if the two bootcamps have the same
            steps in a different order.

    Returns:
        bool: True if the application steps are the same for the bootcamps indicated by the given ids
    """
    if bootcamp_id1 == bootcamp_id2:
        return True

    # Ordering by submission_type makes the comparison order-insensitive.
    order_field = "submission_type" if ignore_order else "step_order"

    def step_types(bootcamp_id):
        # Submission type of each application step, in the comparison order.
        return list(
            ApplicationStep.objects.filter(bootcamp_id=bootcamp_id)
            .order_by(order_field)
            .values_list("submission_type", flat=True)
        )

    return step_types(bootcamp_id1) == step_types(bootcamp_id2)
def migrate_application(from_run_application, to_run):
    """
    Given an existing application, creates a new application in a different bootcamp run and "migrates" over all of
    the data from the existing application. Assumes that the 'from' run and 'to' run have the same application steps.

    Args:
        from_run_application (BootcampApplication): The bootcamp application to copy
        to_run (BootcampRun): The bootcamp run for which a new application will be created

    Returns:
        BootcampApplication: The newly-created bootcamp application that was created based on the existing one.

    Raises:
        ValidationError: If an approved/completed application already exists for this user and run.
    """
    has_completed_app = BootcampApplication.objects.filter(
        bootcamp_run=to_run,
        user=from_run_application.user,
        state__in=APPROVED_APP_STATES,
    ).exists()
    if has_completed_app:
        raise ValidationError(
            "An approved/completed application already exists for this user and run ({}, {})".format(
                from_run_application.user.email, to_run.title
            )
        )
    with transaction.atomic():
        (
            to_run_application,
            _,
        ) = BootcampApplication.objects.select_for_update().get_or_create(
            bootcamp_run=to_run, user=from_run_application.user
        )
        # Copy work history data (only fill in values the target app is missing)
        if is_empty_file(to_run_application.resume_file) and not is_empty_file(
            from_run_application.resume_file
        ):
            to_run_application.resume_file.name = from_run_application.resume_file.name
        if (
            to_run_application.linkedin_url is None
            and from_run_application.linkedin_url is not None
        ):
            to_run_application.linkedin_url = from_run_application.linkedin_url
        to_run_application.resume_upload_date = from_run_application.resume_upload_date
        to_run_application.save()
        # Copy application submissions (video interview, etc.)
        from_app_step_submissions = ApplicationStepSubmission.objects.filter(
            bootcamp_application=from_run_application
        ).order_by("run_application_step__application_step__step_order")
        # Build a dict of each submission type mapped to a list of the bootcamp run application step ids that require
        # that submission type (e.g.: {"videointerviewsubmission": [1, 2], "quizsubmission": [3]}).
        to_run_step_qset = to_run.application_steps.order_by(
            "application_step__step_order"
        ).values("id", "application_step__submission_type")
        to_run_steps = defaultdict(list)
        for to_run_step in to_run_step_qset:
            submission_type = to_run_step["application_step__submission_type"]
            to_run_steps[submission_type].append(to_run_step["id"])
        # In order to make this work even if the 'from' and 'to' runs have possibly-repeated application steps in a
        # possibly-different order, keep track of the run step ids for which a submission has already been created.
        used_run_step_ids = set()
        for from_app_step_submission in from_app_step_submissions:
            submission_type = (
                from_app_step_submission.run_application_step.application_step.submission_type
            )
            # First not-yet-used target step of the same submission type.
            to_run_step_id = next(
                step_id
                for step_id in to_run_steps[submission_type]
                if step_id not in used_run_step_ids
            )
            ApplicationStepSubmission.objects.update_or_create(
                bootcamp_application=to_run_application,
                run_application_step_id=to_run_step_id,
                defaults=dict(
                    review_status=from_app_step_submission.review_status,
                    review_status_date=from_app_step_submission.review_status_date,
                    submitted_date=from_app_step_submission.submitted_date,
                    submission_status=from_app_step_submission.submission_status,
                    content_type=from_app_step_submission.content_type,
                    object_id=from_app_step_submission.object_id,
                ),
            )
            used_run_step_ids.add(to_run_step_id)
        # Set state
        to_run_application.state = derive_application_state(to_run_application)
        to_run_application.save()
    return to_run_application
|
ikalnytskyi/kostyor-openstack-ansible | kostyor_openstack_ansible/upgrades/ref.py | Python | gpl-3.0 | 6,099 | 0 | # This file is part of OpenStack Ansible driver for Kostyor.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# I feel incredibly wrong about these lines but it seems like the only
# working solution right now. Celery uses its own fork of native
# multiprocessing module, which is significantly diverged from the
# version of Python 2.7. So when it come to start 'multiprocessing.Process'
# instance from within Celery task, it simply fails due to inability to
# retrieve some properties (e.g. _authkey) from '_current_process' since
# they simply don't exist in 'billiard.Process.
#
# This is essential part of this driver, since Ansible internally use
# multiprocessing.Process to do parallel execution.
#
# https://github.com/celery/billiard/pull/202
import multiprocessing
import billiard
multiprocessing.Process = billiard.Process # noqa
import os
import glob
from ansible.cli.playbook import PlaybookCLI
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.utils.vars import combine_vars
from kostyor.rpc.app import app
from . import base
class _setcwd(object):
"""Context manager for temporally changing current working directory.
Some of OpenStack Ansible playbooks require to be called from some
directory. Since Ansible doesn't support passing custom working
directory, we need to change current working directory before calling
this sort of playbooks.
Usage example:
with _setcwd('/opt/openstack-ansible/playbooks'):
_run_playbook(...)
:param cwd: current working directory to be set
:type cwd: str
"""
def __init__(self, cwd):
self._newcwd = cwd
self._oldcwd = None
def __enter__(self):
self._oldcwd = os.getcwd()
if self._newcwd:
os.chdir(self._newcwd)
def __exit__(self, *args):
if self._newcwd:
os.chdir(self._oldcwd)
def _get_user_settings(loader):
    """Read user settings from /etc/openstack_deploy.

    OpenStack Ansible user settings are stored in /etc/openstack_deploy
    directory. We need to read, combine and pass them to variable
    manager before executing any playbook. This is what happens under
    the hood when one calls 'openstack-ansible' wrapper in command line.

    :param loader: an instance of ansible data loader to be used
    :type loader: :class:`ansible.parsing.dataloader.DataLoader`
    """
    # /etc/openstack_deploy is the hardcoded deployment settings location;
    # user overrides live in files named 'user_*.yml'.
    pattern = os.path.join('/etc', 'openstack_deploy', 'user_*.yml')

    merged = {}
    for path in glob.glob(pattern):
        # Ansible may use different strategies of combining variables, so
        # rely on its combine_vars() rather than dict.update().
        merged = combine_vars(merged, loader.load_from_file(path))
    return merged
def _run_playbook_impl(playbook, hosts_fn=None, cwd=None, ignore_errors=False):
    """Execute an Ansible playbook and return its exit code.

    :param playbook: path of the playbook to run
    :param hosts_fn: optional callable receiving the inventory and returning
        the hosts the run should be limited to
    :param cwd: optional working directory to switch to for the run
    :param ignore_errors: when True, a non-zero exit code does not raise
    :raises Exception: when the playbook finishes with errors and
        ``ignore_errors`` is not set
    """
    # Unfortunately, there's no good way to get the options instance
    # with proper defaults since it's generated by argparse inside
    # PlaybookCLI. Due to the fact that the options can't be empty
    # and must contain proper values we have not choice but extract
    # them from PlaybookCLI instance.
    playbook_cli = PlaybookCLI(['to-be-stripped', playbook])
    playbook_cli.parse()
    options = playbook_cli.options

    # Get others required options.
    loader = DataLoader()
    variable_manager = VariableManager()
    inventory = Inventory(loader, variable_manager)
    variable_manager.set_inventory(inventory)
    variable_manager.extra_vars = _get_user_settings(loader)

    # Limit playbook execution to hosts returned by 'hosts_fn'.
    if hosts_fn is not None:
        inventory.subset([
            host.get_vars()['inventory_hostname']
            for host in hosts_fn(inventory)
        ])

    # Finally, we can create a playbook executor and run the playbook.
    executor = PlaybookExecutor(
        playbooks=[playbook],
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        options=options,
        passwords={}
    )

    # Some playbooks may rely on current working directory, so better allow
    # to change it before execution.
    with _setcwd(cwd):
        exitcode = executor.run()

    # Celery treats exceptions from task as way to mark it failed. So let's
    # throw one to do so in case return code is not zero.
    if all([not ignore_errors, exitcode is not None, exitcode != 0]):
        raise Exception('Playbook "%s" has been finished with errors. '
                        'Exit code is "%d".' % (playbook, exitcode))
    return exitcode
@app.task
def _run_playbook(playbook, cwd=None, ignore_errors=False):
    """Celery task: run *playbook* against the full inventory."""
    return _run_playbook_impl(playbook, cwd=cwd, ignore_errors=ignore_errors)
@app.task
def _run_playbook_for(playbook, hosts, service, cwd=None, ignore_errors=False):
    """Celery task: run *playbook* only on nodes hosting *service*."""
    def _select_hosts(inventory):
        # Narrow the inventory to the nodes where the service component runs.
        return base.get_component_hosts_on_nodes(inventory, service, hosts)

    return _run_playbook_impl(
        playbook, _select_hosts, cwd=cwd, ignore_errors=ignore_errors)
class Driver(base.Driver):
    # Expose the playbook-running Celery tasks as this driver's entry points.
    _run_playbook = _run_playbook
    _run_playbook_for = _run_playbook_for
|
eProsima/Fast-DDS | test/profiling/memory_analysis.py | Python | apache-2.0 | 483 | 0.00207 | import sys
import msparser
import csv
# Summarize a Valgrind massif profile: read the file given as argv[1] and
# write the peak stack/heap usage as CSV to the file given as argv[2].
stack = []
heap = []
msparser_data = msparser.parse_file(sys.argv[1])
for snapshot in msparser_data['snapshots']:
    # Only detailed snapshots carry meaningful numbers; skip zero-heap ones.
    if snapshot['mem_heap'] != 0:
        stack.append(snapshot['mem_stack'])
        heap.append(snapshot['mem_heap'])
with open(sys.argv[2], 'w+') as csv_file:
    csv_writer = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_ALL)
    csv_writer.writerow(['stack', 'heap'])
    csv_writer.writerow([max(stack), max(heap)])
pierreberthet/local-scripts | reduce_dopa.py | Python | gpl-2.0 | 4,102 | 0.024866 | import nest
import nest.raster_plot
import numpy as np
import pylab as pl
nest.ResetKernel()
nest.SetKernelStatus({"overwrite_files": True})
sim_time = 0.
weight = []
if (not 'bcpnn_dopamine_synapse' in nest.Models()):
#nest.Install('ml_module')
nest.Install('/media/backup/temp_milner/save/17.10.14/modules/from.git/bcpnndopa_module/lib/nest/ml_module')
dopa = nest.Create('iaf_neuron', 200)
vt_dopa = nest.Create('volume_transmitter', 1)
nest.ConvergentConnect(dopa, vt_dopa, weight= 5., delay = 1.)
sample_size = 20
pre = nest.Create('iaf_cond_alpha_bias', sample_size)
post = nest.Create('iaf_cond_alpha_bias', sample_size)
poisson_pre = nest.Create('poisson_generator',1)
poisson_post = nest.Create('poisson_generator',1)
poisson_dopa = nest.Create('poisson_generator',1)
poisson_noise = nest.Create('poisson_generator',1)
nest.DivergentConnect(poisson_noise,pre , weight=1., delay=1.)
nest.DivergentConnect(poisson_noise,post , weight=1., delay=1.)
nest.DivergentConnect(poisson_noise,dopa , weight=1., delay=1.)
nest.SetStatus(poisson_noise, {'rate':1800.})
recorder = nest.Create('spike_detector',1)
voltmeter = nest.Create('multimeter', 1, params={'record_from': ['V_m'], 'interval' :0.1} )
nest.SetStatus(voltmeter, [{"to_file": True, "withtime": True, 'label' : 'volt'}])
time = 300.
key = 'C_m'
spread = .2
params = {
'b': 1.,
'delay':1.,
'dopamine_modulated':True,
'complementary':False,
'fmax': 20.,
'gain': 2.,
'gain_dopa': 1.,
'n': 0.07,
'p_i': .01,
'p_j': .01,
'p_ij': .00012,
'tau_i': 5.,
'tau_j': 6.,
'tau_e': 40.,
'tau_p': 200.,
'tau_n': 100.,
'value': 1.,
'k_pow':3.,
'reverse': 1.
}
nest.SetDefaults('bcpnn_dopamine_synapse', {'vt':vt_dopa[0]})
default = nest.GetStatus([post[0]], key)[0]
print 'Default value for ', key, 'is ', default
start = (1-spread)*default
end= (1+spread)*default
value = np.arange(start, end, (end-start)/sample_size)
for i in xrange(sample_size):
nest.SetStatus([post[i]], {key:value[i]})
nest.DivergentConnect(poisson_pre, pre, weight=4., delay=1.)
nest.DivergentConnect(poisson_post, post, weight=4., delay=1.)
nest.DivergentConnect(poisson_dopa, dopa, weight=4., delay=1.)
nest.ConvergentConnect(post, recorder)
nest.ConvergentConnect(voltmeter, post)
nest.SetStatus(poisson_pre, {'rate': 0.})
nest.CopyModel('bcpnn_dopamine_synapse', 'test', params)
nest.DivergentConnect(pre, post, model='test' )
conn = nest.GetConnections(pre, post)
def simul(pre_rate, post_rate, dopa_rate, duration):
    """Run the network for *duration* ms with the given Poisson drive rates.

    Updates the module-level sim_time accumulator and appends the mean BCPNN
    weight (log p_ij / (p_i * p_j)) of all pre->post connections to 'weight'.
    """
    nest.SetStatus(poisson_pre, {'rate': pre_rate})
    nest.SetStatus(poisson_post, {'rate': post_rate})
    nest.SetStatus(poisson_dopa, {'rate': dopa_rate})
    global sim_time
    global weight
    sim_time += duration
    nest.Simulate(duration)
    weight.append(np.mean([(np.log(a['p_ij']/(a['p_i']*a['p_j']))) for a in nest.GetStatus(conn)]))
# Drive the network through a sequence of rate conditions, then plot results.
step=250.
simul(1000.,1000.,1000.,step)
simul(2000.,1000.,1000.,step)
simul(2000.,1000.,1000.,step)
simul(3000.,0.,1500.,step)
simul(3000.,0.,1000.,step)
events = nest.GetStatus(voltmeter)[0]['events']
t = events['times']
# Top panel: membrane potential trace; bottom panel: mean weight evolution.
pl.subplot(211)
pl.plot(t, events['V_m'])
pl.ylabel('Membrane potential [mV]')
pl.subplot(212)
pl.plot(weight)
pl.show()
nest.raster_plot.from_device(recorder, hist=True)
nest.raster_plot.show()
# Reference dump of an 'iaf_cond_alpha_bias' neuron status dictionary
# (defaults and recordables). Appears unused by the script above -- kept
# for documentation purposes only.
param = [{'C_m': 250.0,
          'E_L': -70.0,
          'E_ex': 0.0,
          'E_in': -85.0,
          'I_e': 0.0,
          'V_m': -70.0,
          'V_reset': -60.0,
          'V_th': -55.0,
          'archiver_length': 0,
          'bias': 0.0,
          'epsilon': 0.001,
          'fmax': 20.0,
          'frozen': False,
          'g_L': 16.6667,
          'gain': 1.0,
          'global_id': 204,
          'kappa': 1.0,
          'local': True,
          'local_id': 204,
          'model': 'iaf_cond_alpha_bias',
          'parent': 0,
          'recordables': ['V_m',
                          't_ref_remaining',
                          'g_ex',
                          'g_in',
                          'z_j',
                          'e_j',
                          'p_j',
                          'bias',
                          'epsilon',
                          'kappa'],
          'state': 0,
          't_ref': 2.0,
          't_spike': -1.0,
          'tau_e': 100.0,
          'tau_j': 10.0,
          'tau_minus': 20.0,
          'tau_minus_triplet': 110.0,
          'tau_p': 1000.0,
          'tau_syn_ex': 0.2,
          'tau_syn_in': 2.0,
          'thread': 0,
          'type': 'neuron',
          'vp': 0}]
|
diegojromerolopez/djanban | src/djanban/apps/journal/migrations/0003_auto_20160926_1851.py | Python | mit | 563 | 0.001776 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-26 16:51
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: switches JournalEntry.content to a
    # CKEditor rich-text field with upload support.

    dependencies = [
        ('journal', '0002_auto_20160926_0155'),
    ]

    operations = [
        migrations.AlterField(
            model_name='journalentry',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(help_text='Content of this journal entry', verbose_name='Content'),
        ),
    ]
|
blaze/distributed | distributed/protocol/tests/test_serialize.py | Python | bsd-3-clause | 11,096 | 0.00027 | import copy
import pickle
import msgpack
import numpy as np
import pytest
from tlz import identity
from distributed import wait
from distributed.protocol import (
register_serialization,
serialize,
deserialize,
nested_deserialize,
Serialize,
Serialized,
to_serialize,
serialize_bytes,
deserialize_bytes,
serialize_bytelist,
register_serialization_family,
dask_serialize,
)
from distributed.protocol.serialize import check_dask_serializable
from distributed.utils import nbytes
from distributed.utils_test import inc, gen_test
from distributed.comm.utils import to_frames, from_frames
class MyObj:
    """Test fixture: carries a payload but refuses ordinary pickling."""

    def __init__(self, data):
        self.data = data

    def __getstate__(self):
        # Force callers to go through the registered custom serializer.
        raise Exception("Not picklable")
def serialize_myobj(x):
    """Custom dask serializer for MyObj: pickle only the payload."""
    frames = [pickle.dumps(x.data)]
    return {}, frames
def deserialize_myobj(header, frames):
    """Custom dask deserializer for MyObj: rebuild from the pickled payload."""
    payload = pickle.loads(frames[0])
    return MyObj(payload)
# Wire MyObj into dask's custom (de)serialization registry.
register_serialization(MyObj, serialize_myobj, deserialize_myobj)
def test_dumps_serialize():
for x in [123, [1, 2, 3, 4, 5, 6]]:
header, frames = serialize(x)
assert header["serializer"] == "pickle"
assert len(frames) == 1
result = deserialize(header, frames)
assert result == x
x = MyObj(123)
header, frames = serialize(x)
assert header["type"]
assert len(frames) == 1
result = deserialize(header, frames)
assert result.data == x.data
def test_serialize_bytestrings():
for b in (b"123", bytearray(b"4567")):
header, frames = serialize(b)
assert frames[0] is b
bb = deserialize(header, frames)
assert bb == b
def test_Serialize():
    """Serialize wrappers expose .data and compare/hash by wrapped value."""
    first = Serialize(123)
    same = Serialize(123)
    other = Serialize((1, 2))

    assert "123" in str(first)
    assert first.data == 123
    assert str(other)

    assert first == same
    assert not (first != same)
    assert first != other
    assert not (first == other)
    assert hash(first) == hash(same)
    assert hash(first) != hash(other)  # most probably
def test_Serialized():
s = Serialized(*serialize(123))
t = Serialized(*serialize((1, 2)))
u = Serialized(*serialize(123))
assert s == u
assert not (s != u)
assert s != t
assert not (s == t)
def test_nested_deserialize():
x = {
"op": "update",
"x": [to_serialize(123), to_serialize(456), 789],
"y": {"a": ["abc", Serialized(*serialize("def"))], "b": b"ghi"},
}
x_orig = copy.deepcopy(x)
assert nested_deserialize(x) == {
"op": "update",
"x": [123, 456, 789],
"y": {"a": ["abc", "def"], "b": b"ghi"},
}
assert x == x_orig # x wasn't mutated
from distributed.utils_test import gen_cluster
from dask import delayed
@gen_cluster(client=True)
async def test_object_in_graph(c, s, a, b):
o = MyObj(123)
v = delayed(o)
v2 = delayed(identity)(v)
future = c.compute(v2)
result = await future
assert isinstance(result, MyObj)
assert result.data == 123
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
o = MyObj(123)
[future] = await c._scatter([o])
await c._replicate(o)
o2 = await c._gather(future)
assert isinstance(o2, MyObj)
assert o2.data == 123
@gen_cluster(client=True)
async def test_inter_worker_comms(c, s, a, b):
o = MyObj(123)
[future] = await c._scatter([o], workers=a.address)
future2 = c.submit(identity, future, workers=b.address)
o2 = await c._gather(future2)
assert isinstance(o2, MyObj)
assert o2.data == 123
class Empty:
def __getstate__(self):
raise Exception("Not picklable")
def serialize_empty(x):
return {}, []
def deserialize_empty(header, frames):
return Empty()
register_serialization(Empty, serialize_empty, deserialize_empty)
def test_empty():
e = Empty()
e2 = deserialize(*serialize(e))
assert isinstance(e2, Empty)
def test_empty_loads():
from distributed.protocol import loads, dumps
e = Empty()
e2 = loads(dumps([to_serialize(e)]))
assert isinstance(e2[0], Empty)
def test_empty_loads_deep():
from distributed.protocol import loads, dumps
e = Empty()
e2 = loads(dumps([[[to_serialize(e)]]]))
assert isinstance(e2[0][0][0], Empty)
def test_serialize_bytes():
for x in [1, "abc", np.arange(5), b"ab" * int(40e6)]:
b = serialize_bytes(x)
assert isinstance(b, bytes)
y = deserialize_bytes(b)
assert str(x) == str(y)
def test_serialize_list_compress():
pytest.importorskip("lz4")
x = np.ones(1000000)
L = serialize_bytelist(x)
assert sum(map(nbytes, L)) < x.nbytes / 2
b = b"".join(L)
y = deserialize_bytes(b)
assert (x == y).all()
def test_malicious_exception():
class BadException(Exception):
def __setstate__(self):
return Exception("Sneaky deserialization code")
class MyClass:
def __getstate__(self):
raise BadException()
obj = MyClass()
header, frames = serialize(obj, serializers=[])
with pytest.raises(Exception) as info:
deserialize(header, frames)
assert "Sneaky" not in str(info.value)
assert "MyClass" in str(info.value)
header, frames = serialize(obj, serializers=["pickle"])
with pytest.raises(Exception) as info:
deserialize(header, frames)
assert "Sneaky" not in str(info.value)
assert "BadException" in str(info.value)
def test_errors():
msg = {"data": {"foo": to_serialize(inc)}, "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
header, frames = serialize(msg, serializers=["msgpack", "pickle"])
assert header["serializer"] == "pickle"
header, frames = serialize(msg, serializers=["msgpack"])
assert header["serializer"] == "error"
with pytest.raises(TypeError):
serialize(msg, serializers=["msgpack"], on_error="raise")
@gen_test()
async def test_err_on_bad_deserializer():
frames = await to_frames({"x": to_serialize(1234)}, serializers=["pickle"])
result = await from_frames(frames, deserializers=["pickle", "foo"])
assert result == {"x": 1234}
with pytest.raises(TypeError):
await from_frames(frames, deserializers=["msgpack"])
class MyObject:
    """Test fixture whose attributes mirror its keyword arguments."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
def my_dumps(obj, context=None):
    """Serialization-family dumper: msgpack MyObject's dict plus the context."""
    if type(obj).__name__ != "MyObject":
        raise NotImplementedError()
    header = {"serializer": "my-ser"}
    frames = [
        msgpack.dumps(obj.__dict__, use_bin_type=True),
        msgpack.dumps(context, use_bin_type=True),
    ]
    return header, frames
def my_loads(header, frames):
    """Serialization-family loader: rebuild MyObject and attach the context."""
    obj = MyObject(**msgpack.loads(frames[0], raw=False))

    # to provide something to test against, lets just attach the context to
    # the object itself
    obj.context = msgpack.loads(frames[1], raw=False)
    return obj
@gen_cluster(
    client=True,
    client_kwargs={"serializers": ["my-ser", "pickle"]},
    worker_kwargs={"serializers": ["my-ser", "pickle"]},
)
async def test_context_specific_serialization(c, s, a, b):
    """The 'my-ser' family receives sender/recipient context on transfer."""
    register_serialization_family("my-ser", my_dumps, my_loads)

    try:
        # Create the object on A, force communication to B
        x = c.submit(MyObject, x=1, y=2, workers=a.address)
        y = c.submit(lambda x: x, x, workers=b.address)
        await wait(y)

        key = y.key

        def check(dask_worker):
            # Get the context from the object stored on B
            my_obj = dask_worker.data[key]
            return my_obj.context

        result = await c.run(check, workers=[b.address])
        expected = {"sender": a.address, "recipient": b.address}

        assert result[b.address]["sender"] == a.address  # see origin worker

        z = await y  # bring object to local process

        assert z.x == 1 and z.y == 2
        assert z.context["sender"] == b.address
    finally:
        from distributed.protocol.serialize import families

        del families["my-ser"]
@gen_cluster(client=True)
async def test_context_specific_serialization_class(c, s, a, b):
register_serialization(MyObject, my_dumps, my_loads)
# Create the object on A, force communication to B
x = c.subm |
sgmap/openfisca-web-api | openfisca_web_api/controllers/calculate.py | Python | agpl-3.0 | 19,065 | 0.016313 | # -*- coding: utf-8 -*-
"""Calculate controller"""
from __future__ import division
import collections
import copy
import itertools
import os
import time
from openfisca_core.legislations import ParameterNotFound
from .. import conf, contexts, conv, environment, model, wsgihelpers
def N_(message):
    """Mark *message* for translation without translating it (gettext no-op)."""
    return message
def build_output_variables(simulations, use_label, variables):
    """Return, for each simulation, a dict mapping each requested variable
    name to its holder's JSON value."""
    results = []
    for simulation in simulations:
        values = {}
        for variable in variables:
            holder = simulation.get_holder(variable)
            values[variable] = holder.to_value_json(use_label = use_label)
        results.append(values)
    return results
def fill_test_cases_with_values(intermediate_variables, scenarios, simulations, use_label, variables):
    """Copy computed variable values back into each scenario's test case.

    For every (scenario, simulation) pair, write each holder's JSON value onto
    the matching entity members of the scenario's test case, and return the
    list of enriched test cases. When intermediate_variables is truthy, all
    holders reached during the simulation traceback are included; otherwise
    only the explicitly requested variables.

    Note: this module targets Python 2 (itertools.izip / dict.iteritems).
    """
    output_test_cases = []
    for scenario, simulation in itertools.izip(scenarios, simulations):
        if intermediate_variables:
            # Collect every distinct holder touched by the computation.
            holders = []
            for step in simulation.traceback.itervalues():
                holder = step['holder']
                if holder not in holders:
                    holders.append(holder)
        else:
            holders = [
                simulation.get_holder(variable)
                for variable in variables
                ]
        test_case = scenario.to_json()['test_case']
        for holder in holders:
            variable_value_json = holder.to_value_json(use_label = use_label)
            if variable_value_json is None:
                continue
            variable_name = holder.column.name
            entity_members = test_case[holder.entity.key_plural]
            if isinstance(variable_value_json, dict):
                # Period-keyed values: distribute each period's array (or
                # dict of arrays) element-wise over the entity members.
                for entity_member_index, entity_member in enumerate(entity_members):
                    entity_member[variable_name] = {}
                    for period, array_or_dict_json in variable_value_json.iteritems():
                        if type(array_or_dict_json) == dict:
                            if len(array_or_dict_json) == 1:
                                # Single-key dict: unwrap it directly.
                                entity_member[variable_name][period] = \
                                    array_or_dict_json[array_or_dict_json.keys()[0]][entity_member_index]
                            else:
                                entity_member[variable_name][period] = {}
                                for key, array in array_or_dict_json.iteritems():
                                    entity_member[variable_name][period][key] = array[entity_member_index]
                        else:
                            entity_member[variable_name][period] = array_or_dict_json[entity_member_index]
            else:
                # Plain array: one cell per entity member.
                for entity_member, cell_json in itertools.izip(entity_members, variable_value_json):
                    entity_member[variable_name] = cell_json
        output_test_cases.append(test_case)
    return output_test_cases
@wsgihelpers.wsgify
def api1_calculate(req):
def calculate_simulations(scenarios, variables, trace):
simulations = []
for scenario_index, scenario in enumerate(scenarios):
simulation = scenario.new_simulation(trace = trace)
for variable_name in variables:
try:
simulation.calculate_output(variable_name)
except ParameterNotFound as exc:
raise wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
context = inputs.get('context'),
error = collections.OrderedDict(sorted(dict(
code = 500,
errors = [{"scenarios": {scenario_index: exc.to_json()}}],
).iteritems())),
method = req.script_name,
params = inputs,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
simulations.append(simulation)
return simulations
total_start_time = time.time()
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
assert req.method == 'POST', req.method
if conf['load_alert']:
try:
load_average = os.getloadavg()
except (AttributeError, OSError):
# When load average is not available, always accept request.
pass
else:
if load_average[0] / environment.cpu_count > 1:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 503, # Service Unavailable
message = ctx._(u'Server is overloaded: {} {} {}').format(*load_average),
).iteritems())),
method = req.script_name,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
content_type = req.content_type
if content_type is not None:
content_type = content_type.split(';', 1)[0].strip()
if content_type != 'application/json':
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
message = ctx._(u'Bad content-type: {}').format(content_type),
).iteritems())),
method = req.script_name,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
inputs, error = conv.pipe(
conv.make_input_to_json(object_pairs_hook = collections.OrderedDict),
conv.test_isinstance(dict),
conv.not_none,
)(req.body, state = ctx)
if error is not None:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
errors = [conv.jsonify_value(error)],
message = ctx._(u'Invalid JSON in request POST body'),
).iteritems())),
method = req.script_name,
params = req.body,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
str_list_to_reforms = conv.make_str_list_to_reforms()
data, errors = conv.struct(
dict(
base_reforms = str_list_to_reforms,
context = conv.test_isinstance(basestring), # For asynchronous calls
intermediate_variables = conv.pipe(
conv.test_isinstance((bool, int)),
conv.anything_to_bool,
conv.default(False),
),
| labels = conv.pipe( # Return labels (of enumerations) instead of numeric values.
conv.test_isinstance((bool, int)),
conv. | anything_to_bool,
conv.default(False),
),
output_format = conv.pipe(
conv.test_isinstance(basestring),
conv.test_in(['test_case', 'variables']),
conv.default('test_case'),
),
reforms = str_list_to_reforms,
scenarios = conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.not_none, # Real conversion is done once tax-benefit system is known.
),
conv.test(lambda scenarios: len(scenarios) >= 1, error = N_(u'At least one scenario is required')),
conv.test(lambda scenarios: len(scenarios) <= 100,
error = N_(u"There can't be more than 100 scenarios")),
conv.not_ |
admetricks/bravado-core | tests/unmarshal/unmarshal_object_test.py | Python | bsd-3-clause | 4,494 | 0 | # -*- coding: utf-8 -*-
import pytest
from bravado_core.exception import | SwaggerMappingError
from bravado_core.unmarshal import unmarshal_object
from bravado_core. | spec import Spec
@pytest.fixture
def address_spec():
return {
'type': 'object',
'properties': {
'number': {
'type': 'number'
},
'street_name': {
'type': 'string'
},
'street_type': {
'type': 'string',
'enum': [
'Street',
'Avenue',
'Boulevard']
}
}
}
@pytest.fixture
def address():
return {
'number': 1600,
'street_name': u'Ümlaut',
'street_type': 'Avenue'
}
def test_properties(empty_swagger_spec, address_spec, address):
expected_address = {
'number': 1600,
'street_name': u'Ümlaut',
'street_type': 'Avenue'
}
result = unmarshal_object(empty_swagger_spec, address_spec, address)
assert expected_address == result
def test_array(empty_swagger_spec, address_spec):
tags_spec = {
'type': 'array',
'items': {
'type': 'string'
}
}
address_spec['properties']['tags'] = tags_spec
address = {
'number': 1600,
'street_name': 'Pennsylvania',
'street_type': 'Avenue',
'tags': [
'home',
'best place on earth',
'cul de sac'
],
}
result = unmarshal_object(empty_swagger_spec, address_spec, address)
assert result == address
def test_nested_object(empty_swagger_spec, address_spec):
location_spec = {
'type': 'object',
'properties': {
'longitude': {
'type': 'number'
},
'latitude': {
'type': 'number'
},
}
}
address_spec['properties']['location'] = location_spec
address = {
'number': 1600,
'street_name': 'Pennsylvania',
'street_type': 'Avenue',
'location': {
'longitude': 100.1,
'latitude': 99.9,
},
}
result = unmarshal_object(empty_swagger_spec, address_spec, address)
assert result == address
def test_model(minimal_swagger_dict, address_spec):
location_spec = {
'type': 'object',
'properties': {
'longitude': {
'type': 'number'
},
'latitude': {
'type': 'number'
},
}
}
minimal_swagger_dict['definitions']['Location'] = location_spec
swagger_spec = Spec.from_dict(minimal_swagger_dict)
address_spec['properties']['location'] = \
swagger_spec.spec_dict['definitions']['Location']
Location = swagger_spec.definitions['Location']
address_dict = {
'number': 1600,
'street_name': 'Pennsylvania',
'street_type': 'Avenue',
'location': {
'longitude': 100.1,
'latitude': 99.9,
},
}
expected_address = {
'number': 1600,
'street_name': 'Pennsylvania',
'street_type': 'Avenue',
'location': Location(longitude=100.1, latitude=99.9),
}
address = unmarshal_object(swagger_spec, address_spec, address_dict)
assert expected_address == address
def test_object_not_dict_like_raises_error(empty_swagger_spec, address_spec):
i_am_not_dict_like = 34
with pytest.raises(SwaggerMappingError) as excinfo:
unmarshal_object(empty_swagger_spec, address_spec, i_am_not_dict_like)
assert 'Expected dict' in str(excinfo.value)
def test_mising_properties_set_to_None(
empty_swagger_spec, address_spec, address):
del address['street_name']
expected_address = {
'number': 1600,
'street_name': None,
'street_type': 'Avenue'
}
result = unmarshal_object(empty_swagger_spec, address_spec, address)
assert expected_address == result
def test_pass_through_additionalProperties_with_no_spec(
empty_swagger_spec, address_spec, address):
address_spec['additionalProperties'] = True
address['city'] = 'Swaggerville'
expected_address = {
'number': 1600,
'street_name': u'Ümlaut',
'street_type': 'Avenue',
'city': 'Swaggerville',
}
result = unmarshal_object(empty_swagger_spec, address_spec, address)
assert expected_address == result
|
wiki-ai/revscoring | tests/languages/test_greek.py | Python | mit | 5,978 | 0.000469 | import pickle
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
from revscoring.languages import greek
from .util import compare_extraction
BAD = [
"αδερφάρα", "αδελφάρα",
"αλήτης",
"αλητήριος", "αλητήρια", "αλητήριο",
"αλητάμπουρας",
"άχρηστος", "άχρηστη", "αχρηστία",
"αρχίδι", "αρχίδια",
"αντινεοφιλελέ",
"αντινεοφιλελές",
"βλάκας", "βλακεία",
"βυζί", "βυζιά", "βυζόμπαλα",
"γαμώ",
"γαμημένος", "γαμημένη", "γαμημένο",
"γαμώτο", "γαμιέται", "γαμιέσαι",
"γαμιάς",
"γκαβλί",
"γκει",
"γύφτος", "γυφτιά", "γύφτισα",
"γυφταριό", "γύφτικο",
"διάολος", "διάολο",
"διαολεμένος", "διαολεμένη", "διαολεμένο",
"ελαφροχέρης", "ελαφροχέρα",
"ηλίθιος", "ηλίθια", "ηλίθιο",
"καβλί", "καβλιάρης", "καβλωμένος"
"κουράδα", "κουράδι",
"κομμουνιστοσυμμορίτης",
"κώλος", "κόλος", "κωλί",
"κωλοτρυπίδα", "κώλο",
"κωλόπαιδο", "κολόπαιδο",
"κουτός", "κουφάλα",
"κλανιά", "κλανιάρης",
"κλάνω", "κλάνεις", "κλάνει",
"κλέφτης", "κλεφτρόνι",
"καριόλης", "καυλί",
"λεσβία", "λούγκρα",
"μπάσταρδο", "μπάσταρδος", "μπάσταρδα",
"μπασταρδεμένο",
"μουνί", "μουνιά", "μουνάρα", "μουνάκι"
"μουνόσκυλος", "μουνότρυπα",
"μαλάκας", "μαλάκες", "μαλακία",
"μαλακίες",
"μαλακοκάβλι",
"μαλακισμένος", "μαλακισμένη", "μαλακισμένο"
"νταής", "νταηλίκι",
"νεοφιλελε", "νεοφιλελές",
"ντουγάνι",
"ξεδιάντροπος", "ξεδιάντροπη",
"ξετσίπωτος", "ξετσίπωτη",
"πεισματάρης", "πεισματάρα", "πεισματάρικο",
"πουτάνα", "πουτανάκι", "πουτανιά",
"πούστης", "πουστιά", "πούστικος",
"πούστικο",
"ρουφιάνος", "ρουφιάνα", "ρουφιανιά",
"φιλελε", "φιλελές"
]
INFORMAL = [
"άντε",
"άσε",
"άστη", "άστο", "άστα",
"γεια",
"εμάς", "εσάς", "μας", "σας",
"φίλε",
"μαν",
"μπαι",
"τρανς",
"τρανσεξουαλ",
"μπλα", "μπλαμπλα", "μπλαμπλαμπλα",
"χα", "χαχα", "χαχαχαχα",
"χε", "χεχε", "χεχεχεχε",
"χι", "χιχι", "χιχιχιχι",
"χο", "χοχο", "χοχοχοχο",
]
OTHER = [
"""
Η Μήδεια είναι όπερα κομίκ σε τρεις πράξεις του Λουίτζι Κερουμπίνι.
Το λιμπρέτο του Φρανσουά-Μπενουά Οφμάν είναι βασισμένο στην ομώνυμη
τραγωδία του Ευριπίδη, Μήδεια, καθώς και στο θεατρικό έργο Μήδεια του Πιέρ
Κορνέιγ. Παρουσιάστηκε πρώτη φορά στις 17 Μαρτίου 1797 στο θέατρο Φεντώ
στο Παρίσι με τη Γαλλίδα υψίφωνο Ζιλί-Ανζελίκ Σιό στο ρόλο της Μήδειας.
Είναι ένα από τα πιο γνωστά έργα του Κερουμπίνι και το μόνο που παίζεται
τακτικά έως σήμερα. Η όπερα, αν και στην πρωτότυπη εκδοχή ήταν στα γαλλικά
και συμπεριελάμβανε διαλόγους δίχως συνοδεία μουσικής, έγινε γνωστή τον
περασμένο αιώνα με την Ιταλική εκδοχή του λιμπρέτου του Οφμάν και των
ρετσιτατίβι του Φραντς Λάχνερ από τον Κάρλο Τσανγκαρίνι.
"""
]
r_text = revision_oriented.revision.text
def test_badwords():
print(greek.badwords.revision.datasources.matches("βλάκας"))
compare_extraction(greek.badwords.revision.datasources.matches,
BAD, OTHER)
print(greek.badwords.revision.datasources.matches("βλάκας"))
assert greek.badwords == pickle.loads(pickle.dumps(greek.badwords))
def test_informals():
compare_extraction(greek.informals.revision.datasources.matches,
INFORMAL, OTHER)
assert greek.informals == | pickle.loads(pickle.dumps(greek.informals))
def test_dictionary():
cache = {r_text: 'Αυτό είναι γραμμένο λθος. <td>'}
assert (solve(greek.dictionary.revision.datasources.dict_words, cache=cache) ==
["Αυτό", "είναι", "γραμμένο"])
assert (solve(greek.dictionary.revision.datasources.non_dict_words,
cache=cache) ==
["λθος"])
assert greek.dictionary == pi | ckle.loads(pickle.dumps(greek.dictionary))
def test_stopwords():
cache = {r_text: 'Αυτό είναι γραμμένο λθος. <td>'}
assert (solve(greek.stopwords.revision.datasources.stopwords, cache=cache) ==
["Αυτό", "είναι"])
assert (solve(greek.stopwords.revision.datasources.non_stopwords,
cache=cache) ==
["γραμμένο", "λθος"])
assert greek.stopwords == pickle.loads(pickle.dumps(greek.stopwords))
|
danielnyga/dnutils | tests/testp3.py | Python | mit | 8,270 | 0.002297 | # -*- coding: utf-8 -*-
import time
from multiprocessing.pool import Pool
import colored
import numpy as np
from dnutils import out, stop, trace, getlogger, ProgressBar, StatusMsg, bf, loggers, newlogger, logs, edict, ifnone, \
ifnot, allnone, allnot, first, sleep, __version__ as version, waitabout
import unittest
from dnutils.logs import expose, inspect, exposure
from dnutils.stats import Gaussian, stopwatch, print_stopwatches, get_stopwatch
from dnutils.tools import LinearScale
loggers({
'default': newlogger(logs.console),
'results': newlogger(logs.console, logs.FileHandler('dnutils-test.log'))
})
def wait():
time.sleep(1)
bfctnames = {
'out': colored.stylize('out()', colored.attr('bold')),
'stop': colored.stylize('stop()', colored.attr('bold')),
'trace': colored.stylize('trace()', colored.attr('bold'))
}
class EDictTest(unittest.TestCase):
def test_xpath(self):
d = edict({'a': [{'b': {'c': 'hello'}}, {'b': {'c': 'world'}}]}, recursive=1)
msg = 'xpath query with indexing'
self.assertEqual(d.xpath('a/[0]/b/c'), 'hello', msg)
self.assertEqual(d.xpath('a/[1]/b/c'), 'world', msg)
self.assertTrue(type(d.xpath('a')) is list, msg)
self.assertTrue(type(d.xpath('a/[0]')) is edict)
d = edict()
d.set_xpath('a/b/c', 'hello, world!', force=True)
assert d.xpath('a/b/d') is None
assert d.xpath('a/b/c') == 'hello, world!'
def test_default(self):
d = edict(default=list)
d['a'].append('first item')
self.assertEqual(d['a'][0], 'first item')
self.assertTrue(d.xpath('a/[1]') is None)
def test_getset(self):
d = edict()
d['a'] = 1
d['b'] = 2
with self.assertRaises(KeyError):
d['c']
self.assertIsNone(d.get('c'))
self.assertEqual(d.get('c', 3), 3)
self.assertDictEqual(d, {'a': 1, 'b': 2})
def test_projection(self):
d = edict({'one': 1, 'two': 2})
d_ = edict(d)
self.assertDictEqual(d.project('one'), {'one': 1})
self.assertDictEqual(d, d_)
class ConditionalTest(unittest.TestCase):
def test_ifnone(self):
self.assertEqual(ifnone(None, 'hello'), 'hello')
self.assertEqual(ifnone('hello', None), 'hello')
self.assertEqual(ifnone(None, 1, transform=str), 1)
self.assertEqual(ifnone(1, 1, transform=str), '1')
self.assertEqual(ifnone(0, 1, transform=str), '0')
def test_ifnot(self):
self.assertEqual(ifnot(None, 'hello'), 'hello')
self.assertEqual(ifnot('hello', None), 'hello')
self.assertEqual(ifnot('', None), None)
self.assertEqual(ifnot(None, 1, transform=str), 1)
self.assertEqual(ifnot(1, 1, transform=str), '1')
self.assertEqual(ifnot(0, 1, transform=str), 1)
def test_allnone(self):
self.assertTrue(allnone([None, None, None]))
self.assertFalse(allnone([0, 0, 0]))
self.assertFalse(allnone([None, None, 1]))
self.assertFalse(allnone([None, None, 0]))
def test_allnot(self):
self.assertTrue(allnot([None, None, None]))
self.assertTrue(allnot([0, 0, 0]))
self.assertFalse(allnot([None, None, 1]))
self.assertTrue(allnot([None, None, 0]))
class GaussianTest(unittest.TestCase):
def test_multivariate(self):
mean = [5., 4.]
cov = [[1., -0.3], [-0.3, 1.]]
data = np.random.multivariate_normal(np.array(mean), np.array(cov), size=50000)
gauss = Gaussian()
for d in data:
gauss.update(d)
for e1, e2 in zip(gauss.mean, mean):
self.assertAlmostEqual(e1, e2, 1, 'means differ too much:\n%s\n!=\n%s' % (mean, gauss.mean))
for e1, e2 in zip(np.nditer(np.array(gauss.cov)), np.nditer(np.array(cov))):
self.assertAlmostEqual(round(float(e1), 1), e2, 1, 'covariances differ too much: %s != %s' % (cov, gauss.cov))
def test_univariate(self):
mu, sigma = 0.5, 0.1
data = np.random.normal(mu, sigma, 1000)
g = Gaussian(data=data)
self.assertAlmostEqual(mu, float(g.mean), 1)
self.assertAlmostEqual(sigma, np.sqrt(float(g.cov)), 1)
class StopWatchTest(unittest.TestCase):
def test_watch(self):
mean = .2
std = .05
times = np.random.normal(mean, std, 100)
for t in times:
with stopwatch('/test'):
sleep(t)
print_stopwatches()
w = get_stopwatch('/test')
self.assertAlmostEqual(w.avg, mean, 1, 'means differ too much:\n%s\n!=\n%s' % (w.avg, mean))
self.assertAlmostEqual(w.std, std, 1, 'stddevs differ too much:\n%s\n!=\n%s' % (w.std, std))
class IteratorTest(unittest.TestCase):
def test_first(self):
self.assertEqual(first([0, 1, 2]), 0)
self.assertEqual(first(None), None)
self.assertEqual(first([]), None)
def gen():
for i in range(3):
yield i
self.assertEqual(first(gen()), 0)
self.assertEqual(first(gen(), str, 'no elements'), '0')
self.assertEqual(first([], str, 'no elements'), 'no elements')
class ScaleTest(unittest.TestCase):
def test_ | linearscale(self):
scale = LinearScale([0, 100], [0, 1])
self.assertEqual(scale(50), .5)
with self.assertRaises(ValueError):
scale(-50)
scale(150)
| scale.strict = False
self.assertEqual(scale(-50), -.5)
self.assertEqual(scale(150), 1.5)
def exposure_proc(*_):
for _ in range(10):
waitabout(1)
# use the exposure as a file lock
with exposure('/vars/myexposure'):
n = inspect(expose('/vars/myexposure'))
expose('/vars/myexposure', n + 1)
assert n + 1 == inspect(expose('/vars/myexposure'))
class ExposureTest(unittest.TestCase):
def test_expose_inspect(self):
expose('/vars/myexposure', 'a', 'b', 'c')
self.assertEqual(inspect('/vars/myexposure'), ['a', 'b', 'c'])
expose('/vars/myexposure2', 2)
self.assertEqual(inspect('/vars/myexposure2'), 2)
expose('/vars/myexposure', 0)
pool = Pool(4)
pool.map(exposure_proc, [[] for _ in range(5)])
pool.close()
pool.join()
if __name__ == '__main__':
print('Welcome to dnutils version %s.' % version)
logger = getlogger('results', logs.DEBUG)
logger.info('Initialized. Running all tests...')
wait()
logger.info('Testing log levels...')
logger.debug('this is the debug level')
logger.info('this is the info level')
logger.warning('this is the warning level')
logger.error('this is the error level')
logger.critical('this is the critical level')
logger.critical('wait a second...')
wait()
logger.debug('This debug message spreads over\nmultiple lines and should be\naligned with appropriate indentation.')
wait()
logger.level = logs.ERROR
logger.info('If you see this message, something went wrong with the log levels.')
logger.level = logs.DEBUG
logger.info('Testing the debug functions.')
wait()
out('the %s function always prints the code location where it is called so it can be found again later swiftly.' %
bfctnames['out'])
wait()
out('it', 'also', 'accepts', 'multiple', 'arguments', 'which', 'are', 'being', 'concatenated')
stop('the %s function is equivalent to %s except for it stops until you hit return:' % (bfctnames['stop'],
bfctnames['out']))
trace('the %s function gives you a stack trace of the current position' % bfctnames['trace'])
logger.info('testing the', bf('ProgressBar'), 'and', bf('StatusMsg'), '...')
bar = ProgressBar(steps=10, color='deep_sky_blue_4c')
for i in range(11):
bar.update(value=i/10., label='step %d' % (i+1))
time.sleep(.5)
bar.finish()
logger.info('testing the', bf(StatusMsg), '(you should see 5 "OK" and 5 "ERROR" messages)')
wait()
for i in range(20):
bar = StatusMsg('this is a Linux-style status bar |
RakshakTalwar/hatchpitchpull | hatchpitchpull/hatchpitchpull.py | Python | apache-2.0 | 12,244 | 0.015109 | """
Copyright 2016 (c) Rakshak Talwar
Released under the Apache 2 License
"""
import json, pdb, re, time
import HTMLParser
import sqlite3 as sql
import requests
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
### global variables ###
hparse = HTMLParser.HTMLParser()
### gather authentication information ###
auth_file_location = 'auth_info.txt'
auth_ls = []
with open(auth_file_location, 'r') as auth_file:
auth_ls = [item.strip() for item in auth_file.readlines()]
auth_file.close()
# define global variables for authentication
F6S_KEY = auth_ls[0]
class F6S():
"""Defines object to pull data from F6S API"""
def __init__(self):
self.api_key = F6S_KEY
self.request_url = 'https://api.f6s.com/hatchpitchsxsw2016/applications'
# define the field names for the json responses and sql table, field names correspond 1 to 1
self.sql_fields = [
"StartedOn",
"SubmittedOn",
"CompanyTeam",
"City",
"Country",
"IndustrySector",
"ContactFirstName",
"ContactLastName",
"ContactEmail",
"ContactPhone",
"Employees",
"FoundersandExecs",
"InvestorsEquity",
"ProductLaunch",
"Grapevine",
"Accelerators",
"Pitching",
"Availability",
"Agreement",
"AppStatus"
]
self.json_fields = [
"date_created",
"date_finalized",
"name",
["location", "city"],
["location", "country"],
["questions", 0, "field_response", '*'],
["questions", 4, "question_response"],
["questions", 5, "question_response"],
["questions", 6, "question_response"],
["questions", 7, "question_response"],
["questions", 3, "question_response"],
["members", '*', "name"],
["questions", 2, "question_response"],
["questions", 1, "field_response"],
["questions", 8, "question_response"],
["questions", 9, "question_response"],
["questions", 10, "question_response"],
["questions", 11, "field_response", '*'],
["questions", 12, "field_response", '*'],
"status"
]
def grab_data(self):
"""Pulls all relevant data from F6S REST API. Returns a dict
with fields: data and fields (see save method under DBHandler class)"""
self.all_data = [] # list stores JSON objects of all companies' data
page = 1
while page:
# pull JSON object
payload = {'page' : page, 'api_key' : self.api_key}
r = requests.get(self.request_url, params=payload)
j = r.json() # create JSON object from response
# extend all_data with data in this json response if the data exists
if 'data' in j: # check to see if data is present in most recent request
self.all_data.extend(j['data'])
page += 1 # increment page variable to pick up new data on next run
time.sleep(0.02) # wait for a bit before submitting next request
else: # if no data exists, exit this loop
page = False
return {'data' : self._piece_extractor(self.all_data), 'fields' : self.sql_fields}
def _piece_extractor(self, j_objects):
"""Extracts the SQL tables corresponding piece of information from a
dict representing a single company. Returns a list of dicts
where the field names correspond with the needed field names for the SQL table"""
self.cleaned_objs = [] # a list of cleaned JSON objects to be returned
# go through each object fro | m the F6S API calls and create semi-copies with relevant and corresponding fields
for j_object in j_objects:
# create a temporary object, will be appended to the cleaned_objs list
temp_obj = {}
# fill up the temp_obj with the relevant information
for index, sql_field in enumerate(self.sql_fields):
# handle the different types of nested data sequences
if isinstance(self.json_fiel | ds[index], str): # if the field is directly present, no nesting
temp_obj[sql_field] = j_object[self.json_fields[index]]
elif isinstance(self.json_fields[index], list): # handles nested cases
nest_list = self.json_fields[index] # for brevity's sake
if len(nest_list) == 2:
temp_obj[sql_field] = j_object[nest_list[0]][nest_list[1]]
elif len(nest_list) == 3:
# note there are two types of nest_list of length 3, we need to handle them seperately
if isinstance(nest_list[1], int): # the first type is where item at index 1 is an integer
temp_obj[sql_field] = hparse.unescape(j_object[nest_list[0]][nest_list[1]][nest_list[2]])
elif nest_list[1] == '*': # the second type is where item at index 1 is an asterisk (*)
# in this case we need to cycle through the list given after we pull it from j_object.
# then we join all of the values given in the field from nest_list[2]
str_to_return = ''
for item in j_object[nest_list[0]]:
str_to_return = str_to_return + ', ' + item[nest_list[2]]
temp_obj[sql_field] = str_to_return.encode('ascii', 'ignore')
elif len(nest_list) == 4:
str_to_return = ''
if isinstance(j_object[nest_list[0]][nest_list[1]][nest_list[2]], list):
for item in j_object[nest_list[0]][nest_list[1]][nest_list[2]]:
str_to_return = item + ', ' + str_to_return
elif isinstance(j_object[nest_list[0]][nest_list[1]][nest_list[2]], str):
str_to_return = j_object[nest_list[0]][nest_list[1]][nest_list[2]].encode('ascii', 'ignore')
temp_obj[sql_field] = str_to_return
# add the cleaned object
self.cleaned_objs.append(temp_obj)
return self.cleaned_objs
class GS():
"""Defines object to pull data from gspread API"""
def __init__(self):
# initialize the client which will communicate with the Google Spreadsheet
json_key = json.load(open('client_secret.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
self.client = gspread.authorize(credentials)
# define the field names for the sql table
self.sql_fields = [
"SubmittedOn",
"Company",
"ShortDescription",
"City",
"StateProvince",
"Country",
"IndustrySector",
"CompanyWebsite",
"CompanySocial",
"ContactFirstName",
"ContactLastName",
"ContactEmail",
"ContactPhone",
"Employees",
"FoundersandExecs",
"Revenue",
"CurrentInvestment",
"Investors",
"Funding",
"Launch",
"Grapevine",
"Accelerators",
"Pitching",
"Availability",
"Agreement"
]
def grab_data(self, spreadsheet_key='1TgYK4D209oPrmv18-XVodH40JjvU69Xhkfjau3DlQxg', worksheet_name='Sheet1'):
# open the respective worksheet within the Google Spreadsheet
self.spreadsheet = self.client.open_by_key(spreadsheet_key)
self.worksheet = self.spreadsheet.worksheet(worksheet_name)
# grab all data present within the worksheet, not including headers
all_data = self.worksheet.get_all_values()[1:]
return {'data' : all_data, 'fields' |
ovresko/erpnext | erpnext/controllers/buying_controller.py | Python | gpl-3.0 | 29,596 | 0.025476 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt,cint, cstr, getdate
from erpnext.accounts.party import get_party_details
from erpnext.stock.get_item_details import get_conversion_factor
from erpnext.buying.utils import validate_for_items, update_last_purchase_rate
from erpnext.stock.stock_ledger import get_valuation_rate
from erpnext.stock.doctype.stock_entry.stock_entry import get_used_alternative_items
from erpnext.stock.doctype.serial_no.serial_no import get_auto_serial_nos, auto_make_serial_nos, get_serial_nos
from frappe.contacts.doctype.address.address import get_address_display
from erpnext.accounts.doctype.budget.budget import validate_expense_against_budget
from erpnext.controllers.stock_controller import StockController
class BuyingController(StockController):
def __setup__(self):
if hasattr(self, "taxes"):
self.flags.print_taxes_with_zero_amount = cint(frappe.db.get_single_value("Print Settings",
"print_taxes_with_zero_amount"))
self.flags.show_inclusive_tax_in_print = self.is_inclusive_tax()
self.print_templates = {
"total": "templates/print_formats/includes/total.html",
"taxes": "templates/print_formats/includes/taxes.html"
}
def get_feed(self):
if self.get("supplier_name"):
return _("From {0} | {1} {2}").format(self.supplier_name, self.currency,
self.grand_total)
def validate(self):
super(BuyingController, self).validate()
if getattr(self, "supplier", None) and not self.supplier_name:
self.supplier_name = frappe.db.get_value("Supplier", self.supplier, "supplier_name")
self.validate_items()
self.set_qty_as_per_stock_uom()
self.validate_stock_or_nonstock_items()
self.validate_warehouse()
self.set_supplier_address()
if self.doctype=="Purchase Invoice":
self.validate_purchase_receipt_if_update_stock()
if self.doctype=="Purchase Receipt" or (self.doctype=="Purchase Invoice" and self.update_stock):
# self.validate_purchase_return()
self.validate_rejected_warehouse()
self.validate_accepted_rejected_qty()
validate_for_items(self)
#sub-contracting
self.validate_for_subcontracting()
self.create_raw_materials_supplied("supplied_items")
self.set_landed_cost_voucher_amount()
if self.doctype in ("Purchase Receipt", "Purchase Invoice"):
self.update_valuation_rate("items")
def set_missing_values(self, for_validate=False):
super(BuyingController, self).set_missing_values(for_validate)
self.set_supplier_from_item_default()
self.set_price_list_currency("Buying")
# set contact and address details for supplier, if they are not mentioned
if getattr(self, "supplier", None):
self.update_if_missing(get_party_details(self.supplier, party_type="Supplier", ignore_permissions=self.flags.ignore_permissions,
doctype=self.doctype, company=self.company, party_address=self.supplier_address, shipping_address=self.get('shipping_address')))
self.set_missing_item_details(for_validate)
def set_supplier_from_item_defa | ult(self):
if self.meta.get_field("supplier") and not self.supplier:
for d in self.get("items"):
supplier = frappe.db.get_value("Item Default",
{"parent": d.item_code, "company": self.company}, "default_supplier")
if supplier:
self.supplier = supplier
else:
item_group = frappe.db.get_value("Item", d.item_code, "item_group")
supplier = frappe.db.get_value("Item Default",
{"parent": item_group, "company": self.company}, "default_supplier")
if suppl | ier:
self.supplier = supplier
break
def validate_stock_or_nonstock_items(self):
if self.meta.get_field("taxes") and not self.get_stock_items() and not self.get_asset_items():
tax_for_valuation = [d for d in self.get("taxes")
if d.category in ["Valuation", "Valuation and Total"]]
if tax_for_valuation:
for d in tax_for_valuation:
d.category = 'Total'
msgprint(_('Tax Category has been changed to "Total" because all the Items are non-stock items'))
def get_asset_items(self):
if self.doctype not in ['Purchase Invoice', 'Purchase Receipt']:
return []
return [d.item_code for d in self.items if d.is_fixed_asset]
def set_landed_cost_voucher_amount(self):
for d in self.get("items"):
lc_voucher_data = frappe.db.sql("""select sum(applicable_charges), cost_center
from `tabLanded Cost Item`
where docstatus = 1 and purchase_receipt_item = %s""", d.name)
d.landed_cost_voucher_amount = lc_voucher_data[0][0] if lc_voucher_data else 0.0
if not d.cost_center and lc_voucher_data and lc_voucher_data[0][1]:
d.db_set('cost_center', lc_voucher_data[0][1])
def set_supplier_address(self):
address_dict = {
'supplier_address': 'address_display',
'shipping_address': 'shipping_address_display'
}
for address_field, address_display_field in address_dict.items():
if self.get(address_field):
self.set(address_display_field, get_address_display(self.get(address_field)))
def set_total_in_words(self):
from frappe.utils import money_in_words
if self.meta.get_field("base_in_words"):
if self.meta.get_field("base_rounded_total") and not self.is_rounded_total_disabled():
amount = self.base_rounded_total
else:
amount = self.base_grand_total
self.base_in_words = money_in_words(amount, self.company_currency)
if self.meta.get_field("in_words"):
if self.meta.get_field("rounded_total") and not self.is_rounded_total_disabled():
amount = self.rounded_total
else:
amount = self.grand_total
self.in_words = money_in_words(amount, self.currency)
# update valuation rate
def update_valuation_rate(self, parentfield):
"""
item_tax_amount is the total tax amount applied on that item
stored for valuation
TODO: rename item_tax_amount to valuation_tax_amount
"""
stock_items = self.get_stock_items() + self.get_asset_items()
stock_items_qty, stock_items_amount = 0, 0
last_stock_item_idx = 1
for d in self.get(parentfield):
if d.item_code and d.item_code in stock_items:
stock_items_qty += flt(d.qty)
stock_items_amount += flt(d.base_net_amount)
last_stock_item_idx = d.idx
total_valuation_amount = sum([flt(d.base_tax_amount_after_discount_amount) for d in self.get("taxes")
if d.category in ["Valuation", "Valuation and Total"]])
valuation_amount_adjustment = total_valuation_amount
for i, item in enumerate(self.get(parentfield)):
if item.item_code and item.qty and item.item_code in stock_items:
item_proportion = flt(item.base_net_amount) / stock_items_amount if stock_items_amount \
else flt(item.qty) / stock_items_qty
if i == (last_stock_item_idx - 1):
item.item_tax_amount = flt(valuation_amount_adjustment,
self.precision("item_tax_amount", item))
else:
item.item_tax_amount = flt(item_proportion * total_valuation_amount,
self.precision("item_tax_amount", item))
valuation_amount_adjustment -= item.item_tax_amount
self.round_floats_in(item)
if flt(item.conversion_factor)==0.0:
item.conversion_factor = get_conversion_factor(item.item_code, item.uom).get("conversion_factor") or 1.0
qty_in_stock_uom = flt(item.qty * item.conversion_factor)
rm_supp_cost = flt(item.rm_supp_cost) if self.doctype in ["Purchase Receipt", "Purchase Invoice"] else 0.0
landed_cost_voucher_amount = flt(item.landed_cost_voucher_amount) \
if self.doctype in ["Purchase Receipt", "Purchase Invoice"] else 0.0
item.valuation_rate = ((item.base_net_amount + item.item_tax_amount + rm_supp_cost
+ landed_cost_voucher_amount) / qty_in_stock_uom)
else:
item.valuation_rate = 0.0
def validate_for_subcontracting(self):
if not self.is_subcontracted and self.sub_contracted_items:
frappe.throw(_("Please enter 'Is Subcontracted' as Yes or No"))
if self.is_subcontracted == "Yes":
if self.doctype in ["Purchase Receipt", "Purchase Invoice"] and not self.supplier_warehouse:
frappe.throw(_("Supplier Warehouse mandatory for sub-contracted Purchase Receipt"))
|
stuti-rastogi/leetcodesolutions | 234_palindromeLinkedList.py | Python | apache-2.0 | 1,611 | 0.004966 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
# stack = []
# if (not head):
# return True
# curr = head
# while (curr):
| # stack.append(curr. | val)
# curr = curr.next
# curr = head
# while (curr):
# if (curr.val != stack.pop()):
# return False
# curr = curr.next
# return True
# O(1) space solution
if not head or not head.next:
return True
curr = head
count = 1
# length of list
while curr.next:
curr = curr.next
count = count + 1
# reversing first half of list
p = head
curr = head
half = count / 2
while half > 0:
tmp = p.next
if p != head:
p.next = curr
else:
p.next = None
curr = p
p = tmp
half -= 1
# pointer to beginning of second half
if count % 2 == 0:
secondHalf = p
else:
secondHalf = p.next
# curr was last element of first half
p = curr
while p:
if p.val != secondHalf.val:
return False
p = p.next
secondHalf = secondHalf.next
return True |
jrd/pylibsalt | tests/test_execute.py | Python | gpl-2.0 | 929 | 0.007535 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set et ai sta sts=2 sw=2 ts=2 tw=0:
from __future__ import print_function, unicode_literals, absolute_import
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from libsalt import execute as ex
import subprocess
class TestExecute(unittest.TestCase):
def test_call_ok(self):
self.assertEqual(ex.execCall("ls >/dev/nul | l"), 0)
self.assertEqual(ex.execCall("ls -lh | grep -q '[.]'"), 0)
self.assertEqual(ex.execCall(['echo', '-n'], shell=False), 0)
def test_call_ko(self):
self.assertEqual(ex.execCall("xyz 2>/dev/null"), 127)
self.assertRaises(subprocess.CalledProcessError, ex.execCheck, "xyz")
def test_exec_check(self):
self.assertEqual(ex.execCheck("ls >/dev/null"), 0)
def test_exec_get_output(self):
self.a | ssertEqual(ex.execGetOutput("pwd")[0].strip(), os.getcwd())
|
ToonTownInfiniteRepo/ToontownInfinite | toontown/minigame/DistributedTagGame.py | Python | mit | 10,955 | 0.002191 | from pandac.PandaModules import *
from toontown.to | onbase.ToonBaseGlobal import *
from DistributedMinigame import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.safezone import Walk
from toontown.toonbase import ToontownTimer
from direct.gui import OnscreenText
import MinigameAvatarScorePanel
from direct.distributed import DistributedSmoothNode
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase | import TTLocalizer
from otp.otpbase import OTPGlobals
import TagGameGlobals
import Trajectory
class DistributedTagGame(DistributedMinigame):
DURATION = TagGameGlobals.DURATION
IT_SPEED_INCREASE = 1.3
IT_ROT_INCREASE = 1.3
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTagGame', [State.State('off', self.enterOff, self.exitOff, ['play']), State.State('play', self.enterPlay, self.exitPlay, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.walkStateData = Walk.Walk('walkDone')
self.scorePanels = []
self.initialPositions = ((0, 10, 0, 180, 0, 0),
(10, 0, 0, 90, 0, 0),
(0, -10, 0, 0, 0, 0),
(-10, 0, 0, -90, 0, 0))
base.localAvatar.isIt = 0
self.modelCount = 4
def getTitle(self):
return TTLocalizer.TagGameTitle
def getInstructions(self):
return TTLocalizer.TagGameInstructions
def getMaxDuration(self):
return self.DURATION
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.itText = OnscreenText.OnscreenText('itText', fg=(0.95, 0.95, 0.65, 1), scale=0.14, font=ToontownGlobals.getSignFont(), pos=(0.0, -0.8), wordwrap=15, mayChange=1)
self.itText.hide()
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.ground = loader.loadModel('phase_4/models/minigames/tag_arena')
self.music = base.loadMusic('phase_4/audio/bgm/MG_toontag.ogg')
self.tagSfx = base.loadSfx('phase_4/audio/sfx/MG_Tag_C.ogg')
self.itPointer = loader.loadModel('phase_4/models/minigames/bboard-pointer')
self.tracks = []
self.IT = None
return
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
self.ignoreAll()
del self.tracks
del self.IT
self.sky.removeNode()
del self.sky
self.itPointer.removeNode()
del self.itPointer
self.ground.removeNode()
del self.ground
del self.music
del self.tagSfx
self.itText.cleanup()
del self.itText
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.onstage(self)
self.ground.reparentTo(render)
self.sky.reparentTo(render)
myPos = self.avIdList.index(self.localAvId)
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.reparentTo(render)
base.localAvatar.loop('neutral')
camera.reparentTo(render)
camera.setPosHpr(0, -24, 16, 0, -30, 0)
base.camLens.setFar(450.0)
base.transitions.irisIn(0.4)
NametagGlobals.setMasterArrowsOn(1)
DistributedSmoothNode.activateSmoothing(1, 1)
self.IT = None
return
def offstage(self):
self.notify.debug('offstage')
DistributedSmoothNode.activateSmoothing(1, 0)
NametagGlobals.setMasterArrowsOn(0)
DistributedMinigame.offstage(self)
self.sky.reparentTo(hidden)
self.ground.reparentTo(hidden)
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
self.itText.hide()
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for avId in self.avIdList:
self.acceptTagEvent(avId)
myPos = self.avIdList.index(self.localAvId)
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avatar = self.getAvatar(avId)
if avatar:
avatar.startSmooth()
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.d_clearSmoothing()
base.localAvatar.sendCurrentPosition()
base.localAvatar.b_setAnimState('neutral', 1)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.gameFSM.request('play')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterPlay(self):
self.notify.debug('enterPlay')
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avName = self.getAvatarName(avId)
scorePanel = MinigameAvatarScorePanel.MinigameAvatarScorePanel(avId, avName)
scorePanel.setPos(-0.213, 0.0, 0.28 * i + 0.66)
scorePanel.reparentTo(base.a2dBottomRight)
self.scorePanels.append(scorePanel)
base.setCellsAvailable(base.rightCells, 0)
self.walkStateData.enter()
self.walkStateData.fsm.request('walking')
if base.localAvatar.isIt:
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.setTime(self.DURATION)
self.timer.countdown(self.DURATION, self.timerExpired)
base.playMusic(self.music, looping=1, volume=0.9)
base.localAvatar.setIdealCameraPos(Point3(0, -24, 8))
def exitPlay(self):
for task in self.tracks:
task.finish()
self.tracks = []
for avId in self.avIdList:
toon = self.getAvatar(avId)
if toon:
toon.getGeomNode().clearMat()
toon.scale = 1.0
toon.rescaleToon()
self.walkStateData.exit()
self.music.stop()
self.timer.destroy()
del self.timer
for panel in self.scorePanels:
panel.cleanup()
self.scorePanels = []
base.setCellsAvailable(base.rightCells, 1)
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed)
self.itPointer.reparentTo(hidden)
base.localAvatar.cameraIndex = 0
base.localAvatar.setCameraPositionByIndex(0)
def timerExpired(self):
self.notify.debug('local timer expired')
self.gameOver()
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def setIt(self, avId):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() != 'play':
self.notify.debug('Ignoring setIt after done playing')
return
self.itText.show()
self.notify.debug(str(avId) + ' is now it')
if avId == self.localAvId:
self.itText.setText(TTLocalizer.TagGameYouAreIt)
base.localAvatar.isIt = 1
base.localAvatar.controlManager.setSpeeds(OTPGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonJumpForce, OTPGlobals.ToonReverseSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
else:
self.itText.setText(TTLocalizer.TagGameSomeoneElseIsIt % self.getAvatarName(avId))
base.localAvatar.isIt = 0
base.localAvatar.setWalkSpeedNorm |
tjcsl/wedge | achievements.py | Python | mit | 1,867 | 0.006427 | from db import conn
import json
#Format "Name":"Desc"
achievement_metadata = {}
def first_edit(wpusername):
first_edit.name = "Your First Edit"
achievement_metadata[first_edit.name] = "Awarded for your first edit after signing up for wedge"
query = "SELECT score FROM edits WHERE username=%s"
cur = conn.cursor()
cur.execute(query, [wpusername])
meetsreqs = (len(cur.fetchall()) == 1)
cur.close()
return meetsreqs
def gen_point_achievement(score, name, desc):
achievement_metadata[name] = desc
def ach(wpusername):
ach.name = name
cur = conn.cursor()
cur.execute("SELECT sum(score) FROM edits WHERE username=%s", (wpusername,))
row = cur.fetchone()
cur.execute("SELECT 1 FROM achievements WHERE \
uid=(SELECT uid FROM users WHERE wp_username=%s) AND name=%s", (wpuse | rname, ach.name))
return row[0] > score and cur.fetchone() is None
return ach
ACH_FUNCS = [
first_edit,
gen_point_achievement(10, "Ten Points", "Awarded for getting 10 points"),
gen_point_achievement(100, "One Hundred Points", "Awarded for acculating 100 points"),
gen_point_achievement(500, "Five Hundred Points", "Acquired when you have reached the lofty heights of 500 points"),
gen_point_achievement(1000, "One Thousand Points", "Awarded for stashing awa | y 1,000 points"),
gen_point_achievement(9001, "Over 9000", "Get OVER 9000 POINTS")]
def check_all_achievements(wpusername):
for i in ACH_FUNCS:
check(i, wpusername)
def check(f, wpusername):
result = f(wpusername)
if result:
name = f.name
query = "INSERT INTO achievements (uid, name) VALUES ((SELECT uid FROM users WHERE wp_username=%s), %s)"
cur = conn.cursor()
cur.execute(query, (wpusername, name))
conn.commit()
|
pandas-dev/pandas | pandas/tests/frame/methods/test_between_time.py | Python | bsd-3-clause | 10,811 | 0.000647 | from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
strings = [
("2:00", "2:30"),
("0200", "0230"),
("2:00am", "2:30am"),
("0200am", "0230am"),
("2:00:00", "2:30:00"),
("020000", "023000"),
("2:00:00am", "2:30:00am"),
("020000am", "023000am"),
]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_between_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
ts_local = ts.tz_localize(tzstr)
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1, t2).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_between_time_types(self, frame_or_series):
# GH11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
obj = DataFrame({"A": 0}, index=rng)
obj = tm.get_obj(obj, frame_or_series)
msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
with pytest.raises(ValueError, match=msg):
obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time(self, inclusive_endpoints_fixture, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
stime = time(0, 0)
etime = time(1, 0)
inclusive = inclusive_endpoints_fixture
filtered = ts.between_time(stime, etime, inclusive=inclusive)
exp_len = 13 * 4 + 1
if inclusive in ["right", "neither"]:
exp_len -= 5
if inclusive in ["left", "neither"]:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inclusive in ["left", "both"]:
assert t >= stime
else:
assert t > stime
if inclusive in ["right", "both"]:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inclusive=inclusive)
exp_len = (12 * 11 + 1) * 4 + 1
if inclusive in ["right", "neither"]:
exp_len -= 4
if inclusive in ["left", "neither"]:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inclusive in ["left", "both"]:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inclusive in ["right", "both"]:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
obj = tm.get_obj(obj, frame_or_series)
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, frame_or_series):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
stime, etime = ("08:00:00", "09:00:00")
expected_length = 7
assert len(ts.between_time(stime, etime)) == expected_length
assert len(ts.between_time(stime, etime, axis=0)) == expected_length
msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}"
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, axis=ts.ndim)
def test_between_time_axis_aliases(self, axis):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rn | g)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
t | s.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq="30min")
df = DataFrame(np.random.randn(len(index), 5), index=index)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.between_time(bkey.start, bkey.stop)
expected = df.loc[bkey]
expected2 = df.iloc[binds]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, expected2)
assert len(result) == 12
@pytest.mark.parametrize("include_start", [True, False])
@pytest.mark.parametrize("include_end", [True, False])
def test_between_time_warn(self, include_start, include_end, frame_or_series):
# GH40245
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
ts = tm.get_obj(ts, frame_or_series)
stime = time(0, 0)
etime = time(1, 0)
match = (
"`include_start` and `include_end` "
"are deprecated in favour of `inclusive`."
)
with tm.assert_produces_warning(FutureWarning, match=match):
_ = ts.between_time(stime, etime, include_start, include_end)
def test_between_time_incorr_arg_inclusive(self):
# GH40245
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
s |
alihanniba/pythonDemo | homePage/polls/models.py | Python | mit | 401 | 0.007481 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Question(models.Mod | el):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('data published')
class Chioce(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField( | max_length=200)
votes = models.IntegerField(default=0) |
killuazhu/vimrc | sources_non_forked/deoplete.nvim/rplugin/python3/deoplete/filter/converter_truncate_menu.py | Python | mit | 1,019 | 0 | # ============================================================================
# FILE: converter_truncate_menu.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================ | ================================
from .base import Base
from deoplete.util import truncate_skipping
class Filter(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'conve | rter_truncate_menu'
self.description = 'truncate menu converter'
def filter(self, context):
max_width = context['max_menu_width']
if not context['candidates'] or 'menu' not in context[
'candidates'][0] or max_width <= 0:
return context['candidates']
footer_width = max_width / 3
for candidate in context['candidates']:
candidate['menu'] = truncate_skipping(
candidate.get('menu', ''),
max_width, '..', footer_width)
return context['candidates']
|
Baseyoyoyo/Higher-or-Lower | tests/score_board_test.py | Python | mit | 1,622 | 0.008015 | import unittest
import datetime
from classes.score_board import ScoreBoard
class DateTimeStub(object):
def now(self):
return "test_date_time"
class ScoreBoardTest(uni | ttest.TestCase):
def __init__(self, *args, **kwargs):
super(ScoreBoardTest, self).__init__(*args, **kwargs)
self.score_board = ScoreBoard("testdatabase.db")
def seedScores(self):
self.score_board.clear()
dummy_scores = [
("player1", 5, datetime.datetime.now() - datetime.timedelta(days=3)),
("player2", 6, datetime.datetime.now()),
("player3", 3, datetime.datetime.now() - datetime.timedelta(days=2))
| ]
self.score_board.cursor.executemany(
"INSERT INTO scores(name, score, recorded_at) VALUES(?,?,?)",
dummy_scores
)
self.score_board.db.commit()
def testRecorderAt(self):
self.assertEqual(type(self.score_board.recordedAt()), datetime.datetime)
def testCount(self):
self.seedScores()
self.assertEqual(self.score_board.count(), 3)
def testClear(self):
self.seedScores()
self.score_board.clear()
self.assertEqual(self.score_board.count(), 0)
def testAdd(self):
self.seedScores()
self.score_board.add("player4", 15)
self.assertEqual(self.score_board.count(), 4)
def testHighest(self):
self.seedScores()
scores = self.score_board.highest()
self.assertEqual(scores[0][0], "player2")
self.assertEqual(scores[2][0], "player3")
def testRecent(self):
self.seedScores()
scores = self.score_board.recent()
self.assertEqual(scores[0][0], "player2")
self.assertEqual(scores[2][0], "player1") |
EricssonResearch/calvin-base | calvin/runtime/south/storage/twistedimpl/securedht/dht_server_commons.py | Python | apache-2.0 | 21,119 | 0.00554 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
import random
try:
import pydot
except:
pydot = None
import hashlib
from calvin.utilities import calvinlogger
from calvin.runtime.south.storage.twistedimpl.securedht import append_server
from calvin.runtime.south.storage.twistedimpl.securedht import dht_server
from calvin.runtime.south.storage.twistedimpl.securedht import service_discovery_ssdp
from calvin.utilities import certificate
from calvin.utilities import runtime_credentials
from kademlia.node import Node
from kademlia.utils import deferredDict, digest
from kademlia.crawling import NodeSpiderCrawl
from calvin.utilities import calvinconfig
_conf = calvinconfig.get()
_log = calvinlogger.get_logger(__name__)
def generate_challenge():
""" Generate a random challenge of 8 bytes, hex string formated"""
return os.urandom(8).encode("hex")
class evilAutoDHTServer(dht_server.AutoDHTServer):
def __init__(self, node_id, control_uri, runtime_credentials):
self._name = None
self._node_id = node_id
self._control_uri = control_uri
self._runtime_credentials = runtime_credentials
super(evilAutoDHTServer, self).__init__(node_id, control_uri, runtime_credentials)
def start(self, iface='', network=None, bootstrap=None, cb=None, type=None, name=None, nodeid=None):
self._name = name
if bootstrap is None:
bootstrap = []
certpath, cert, certstr = self._runtime_credentials.get_own_cert()
key = cert.digest("sha256")
newkey = key.replace(":", "")
bytekey = newkey.decode("hex")
if network is None:
network = _conf.get_in_order("dht_network_filter", "ALL")
certpath, cert, certstr = self._runtime_credentials.get_own_cert()
if not certstr:
raise("No runtime certificate available, please restart runtime")
self.dht_server = dht_server.ServerApp(evilAppendServer, bytekey[-20:], node_name=name, runtime_credentials=self._runtime_credentials)
ip, port = self.dht_server.start(iface=iface)
dlist = []
dlist.append(self.dht_server.bootstrap(bootstrap))
self._ssdps = service_discovery_ssdp.SSDPServiceDiscovery(self._node_id, self._control_uri, iface)
dlist += self._ssdps.start()
_log.debug("Register service %s %s:%s" % (network, ip, port))
self._ssdps.register_service(network, ip, port)
_log.debug("Set client filter %s" % (network))
self._ssdps.set_client_filter(network)
def bootstrap_proxy(addrs):
def started(args):
_log.debug("DHT Started %s" % (args))
if not self._started and cb:
service_discovery_ssdp.reactor.callLater(.2,
cb,
True)
self._started = True
def failed(args):
_log.debug("DHT failed | to bootstrap %s" % (args))
#reactor.callLater(.5, bootstrap_proxy, addrs)
_log.debug("Trying to bootstrap with %s" % (repr(addrs)))
d = self.dht_server.bootstrap(addrs)
d.addCallback(started)
d.addErrback(failed)
def start_msearch(args):
_lo | g.debug("** msearch %s args: %s" % (self, repr(args)))
def _later_start():
self._ssdps.start_search(service_discovery_ssdp.SERVICE_UUID, callback=bootstrap_proxy, stop=False)
self._ssdps.update_server_params(service_discovery_ssdp.SERVICE_UUID, cert=certstr)
service_discovery_ssdp.reactor.callLater(0, _later_start)
# Wait until servers all listen
dl = service_discovery_ssdp.defer.DeferredList(dlist)
dl.addBoth(start_msearch)
self.dht_server.kserver.protocol.evilType = type
self.dht_server.kserver.protocol.sourceNode.port = port
self.dht_server.kserver.protocol.sourceNode.ip = "0.0.0.0"
self.dht_server.kserver.name = name
self.dht_server.kserver.protocol.name = name
self.dht_server.kserver.protocol.storeOwnCert(certstr)
class evilKademliaProtocolAppend(append_server.KademliaProtocolAppend):
def _timeout(self, msgID):
self._outstanding[msgID][0].callback((False, None))
del self._outstanding[msgID]
def callPing(self, nodeToAsk, id=None):
address = (nodeToAsk.ip, nodeToAsk.port)
challenge = generate_challenge()
try:
signature = self.runtime_credentials.sign_data(challenge)
except:
"Signing ping failed"
if id:
decider = random.random()
if decider < 0.5:
self.ping(address, id, challenge, signature, self.getOwnCert())
else:
self.ping(address, id, challenge, signature)
else:
d = self.ping(address, self.sourceNode.id, challenge, signature, self.getOwnCert())
return True
def turn_evil(self, evilPort):
old_ping = self.rpc_ping
old_find_node = self.rpc_find_node
old_find_value = self.rpc_find_value
self.router.node.port = evilPort;
if self.evilType == "poison":
self.rpc_find_node = self.poison_rpc_find_node
self.rpc_find_value = self.poison_rpc_find_value
self.false_neighbour_list = []
for i in range(0, 30):
fakeid = hashlib.sha1(str(random.getrandbits(255))).digest()
fake_neighbour = [fakeid,
'10.0.0.9',
self.router.node.port]
self.false_neighbour_list.append(fake_neighbour)
_log.debug("Node with port {} prepared to execute "
"poisoning attack".format(self.router.node.port))
elif self.evilType == "insert":
self.rpc_find_node = self.sybil_rpc_find_node
self.rpc_find_value = self.poison_rpc_find_value
ends = bytearray([0x01, 0x02, 0x03])
self.false_neighbour_list = []
for i in range(0, 9):
if i < 3:
key = digest("APA")
elif i > 5:
key = digest("KANIN")
else:
key = digest("KOALA")
key = key[:-1] + bytes(ends[i % 3])
self.false_neighbour_list.append((key,
'10.0.0.9',
self.router.node.port))
_log.debug("Node with port {} prepared to execute node "
"insertion attack".format(self.router.node.port))
elif self.evilType == "eclipse":
self.rpc_find_node = self.eclipse_rpc_find_node
self.rpc_find_value = self.eclipse_rpc_find_value
self.closest_neighbour = map(list,
self.router.findNeighbors((self.router.node)))
self.false_neighbour_list = []
for i in range(0, 10):
fakeid = hashlib.sha1(str(random.getrandbits(255))).digest()
self.false_neighbour_list.append((fakeid,
'10.0.0.9',
self.router.node.port))
_log.debug("Node with port {} prepared to execute eclipse "
"attack on {}".format(self.router.node.port,
s |
FePhyFoFum/PyPHLAWD | src/choose_one_species_cluster.py | Python | gpl-2.0 | 1,407 | 0.002132 | import sys
import seq
import os
from logger import Logger
"""
right now this just chooses the longest
BEWARE, this writes over the file
"""
if __name__ == "__main__":
if len(sys.argv) != 4 and len(sys.argv) != 5:
print("python "+sys.argv[0]+" table clusterdir fending [logfile]")
sys.exit(0)
fend = sys.argv[3]
LOGFILE = "pyphlawd.log"
if len(sys.argv) == 5:
LOGFILE = sys.argv[4]
log = Logger(LOGFILE)
log.a()
tab = open(sys.argv[1],"r")
idn = {}
for i in tab:
spls = i.strip().split("\t")
idn[spls[3]] = spls[4]
ta | b.close()
dirr = sys.argv[2]
for o in os.listdir(dirr):
if fend != None:
if fend not in o:
continue
seqs = {}
for i in seq.read_fasta_file_iter(dirr+"/"+o):
if idn[i.name] not in seqs:
seqs[idn[i.name]] = []
seqs[idn[i.name]].append(i)
for i in seqs:
if len(seqs[i]) > 1:
longest = None
longestn = 0
for j in seqs[i]:
| if len(j.seq) > longestn:
longest = j
longestn = len(j.seq)
seqs[i] = [longest]
fn = open(dirr+"/"+o,"w")
for i in seqs:
for j in seqs[i]:
fn.write(j.get_fasta())
fn.close()
log.c()
|
fduraffourg/servo | ports/geckolib/string_cache/regen_atom_macro.py | Python | mpl-2.0 | 1,335 | 0.004494 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import sys
if len(sys.argv) != 2:
print "usage: ./%s PATH/TO/OBJDIR" % sys.argv[0]
objdir_path = sys.argv[1]
def line_to_atom(line):
| result = re.match('^GK_ATOM\((.+),\s*"(.*)"\)', line)
return (result.group(1), result.group(2))
def symbolify(ident):
return "_ZN9nsGkAtoms" + str(len(ident | )) + ident + "E"
with open(objdir_path + "/dist/include/nsGkAtomList.h") as f:
lines = [line for line in f.readlines() if line.startswith("GK_ATOM")]
atoms = [line_to_atom(line) for line in lines]
with open("atom_macro.rs", "w") as f:
f.write("use gecko_bindings::structs::nsIAtom;\n\n")
f.write("use Atom;\n\n")
f.write("pub fn unsafe_atom_from_static(ptr: *mut nsIAtom) -> Atom { unsafe { Atom::from_static(ptr) } }\n\n")
for atom in atoms:
f.write('extern { pub static %s: *mut nsIAtom; }\n' % symbolify(atom[0]))
f.write("#[macro_export]\n")
f.write("macro_rules! atom {\n")
f.writelines(['("%s") => { $crate::atom_macro::unsafe_atom_from_static($crate::atom_macro::%s) };\n'
% (atom[1], symbolify(atom[0])) for atom in atoms])
f.write("}\n")
|
dstaple/z3test | old-regressions/python/z3py.9.py | Python | mit | 170 | 0 |
# Copyright | (c) 2015 Microsoft Corporation
from z3 import *
set_option(auto_config=True)
p = Bool('p')
x = Real('x')
solve(Or(x < 5 | , x > 10), Or(p, x**2 == 2), Not(p))
|
xfxf/veyepar | dj/scripts/tsdv.py | Python | mit | 4,150 | 0.005783 | #!/usr/bin/python
"""
tsdv.py - timestamp dv
sets start/end times of dv files
Gets start from one of:
the file name (assumes yy_mm_dd/hh_mm_ss.dv format)
the file system time stamp,
the first frame of the dv
duration (in seconds) based on file size / BBF*FPS
last frame
"""
import os
import datetime
from fnmatch import fnmatch
from process import process
from main.models import Client, Show, Location, Episode, Raw_File, Cut_List
import tsraw
class ts_rf(process):

    """Set start time and duration on Raw_File rows for a show.

    For each Raw_File the start is obtained from the configured time
    source (file name, file system stamp, first DV frame, ...) via
    tsraw.get_start(), and the duration is derived from the media file
    itself via tsraw.get_duration().
    """

    def one_rf(self, rf):
        """Timestamp one Raw_File row (start + HH:MM:SS duration) and save it."""
        pathname = os.path.join(self.show_dir, 'dv',
                rf.location.slug, rf.filename)
        if self.options.verbose:
            print(pathname)
        # print( "tsraw.get_start", (pathname, self.options.time_source ) )
        start = tsraw.get_start(pathname, self.options.time_source)
        # Historical variants kept for reference:
        # if offset is not None:
        #     start += datetime.timedelta(seconds=offset)
        # when did this start working?
        # if os.path.splitext(rf.filename)[1] in ['.ts']:
        #     seconds = 1800
        # else:
        #     seconds = tsraw.get_duration(pathname)
        # NOTE(review): tsraw.get_start() runs before this existence check;
        # if it raises on a missing file the delete_unknown branch below is
        # never reached -- confirm tsraw tolerates missing files.
        if not os.path.exists(pathname):
            print('missing file: {}'.format(pathname))
            if self.options.delete_unknown:
                rf.delete()
            return
        seconds = tsraw.get_duration(pathname)
        print((pathname, start, seconds))
        rf.start = start
        # Render seconds as HH:MM:SS for the duration column.
        hms = seconds // 3600, (seconds % 3600) // 60, seconds % 60
        duration = "%02d:%02d:%02d" % hms
        rf.duration = duration
        rf.save()

    def one_loc(self, show, location):
        """Process every Raw_File of one show/location pair, honouring
        the command-line file masks and --ext/--subs/--force filters."""
        print(show, location)
        for rf in Raw_File.objects.filter(show=show, location=location):
            if self.options.verbose:
                # leftover numbered debug prints
                print(1, rf.filename)
                print(1, self.args)
            if self.args:
                print(2, rf.filename)
                print(2, self.args)
                if not any(
                        fnmatch(rf.filename, mask) for mask in self.args):
                    # only process files listed on the command line
                    continue
            if self.options.ext:
                if not rf.filename.endswith(self.options.ext):
                    # skip
                    continue
            if self.options.subs:
                # subs holds a bit of the dirs we want,
                # like graphics,video,Camera,GFX
                if not self.options.subs in rf.filename:
                    continue
            # offset = self.options.offset_seconds
            if not rf.start or self.options.force:
                # self.one_rf(rf, offset )
                self.one_rf(rf)

    def work(self):
        """Find the show (optionally scoped to a client) and process it."""
        # FIM-split artifacts ("sel | f", "obj | ects") repaired below.
        if self.options.client:
            client = Client.objects.get(slug=self.options.client)
            show = Show.objects.get(
                    client=client, slug=self.options.show)
        else:
            show = Show.objects.get(slug=self.options.show)
        # one_show() is provided by the process base class -- confirm.
        self.one_show(show)
        return

    def add_more_options(self, parser):
        """Register tsdv-specific command line options."""
        # parser.add_option('--offset_hours', type="int",
        #     help="adjust time to deal with clock in wrong time zone.")
        # parser.add_option('--offset_seconds', type="float",
        #     help="adjust time to deal with clock wrong.")
        parser.add_option('--time_source',
            help="one of fn, fs, frame, gst, un, auto\n" \
                "(file name, file system, dv frame, gst lib, UN files, auto)")
        parser.add_option('--ext',
            help="only hit this ext")
        parser.add_option('--subs',
            help="string to use for subs stuff that makes me cry.")
        parser.add_option('--delete-unknown', action='store_true',
            help="Delete any file records from the database, if we can't "
                "find them on disk.")

    def add_more_option_defaults(self, parser):
        """Set defaults for the options added above."""
        # parser.set_defaults(offset_hours=0)
        # parser.set_defaults(offset_seconds=0)
        parser.set_defaults(time_source="auto")
if __name__=='__main__':
p=ts_rf()
p.main()
|
vulogov/zq | setup.py | Python | gpl-3.0 | 3,597 | 0.0139 | from setuptools import setup#, find_packages, Extension
import distutils.command.build as _build
import setuptools.command.install as _install
import sys
import os
import os.path as op
import distutils.spawn as ds
import distutils.dir_util as dd
import posixpath
def run_cmake(arg=""):
    """
    Forcing to run cmake

    Runs ``cmake ..`` followed by ``make clean`` and ``make`` inside a
    ``build`` subdirectory next to this setup.py, exiting the process on
    failure.  *arg* is a whitespace-separated string of extra cmake
    arguments.  (Python 2 code: uses print statements.)
    """
    # cmake must be on PATH; bail out early with instructions otherwise.
    if ds.find_executable('cmake') is None:
        print "CMake is required to build zql"
        print "Please install cmake version >= 2.8 and re-run setup"
        sys.exit(-1)
    print "Configuring zql build with CMake.... "
    cmake_args = arg
    try:
        # out-of-source build in ./build
        build_dir = op.join(op.split(__file__)[0], 'build')
        dd.mkpath(build_dir)
        os.chdir("build")
        ds.spawn(['cmake', '..'] + cmake_args.split())
        ds.spawn(['make', 'clean'])
        ds.spawn(['make'])
        os.chdir("..")
    except ds.DistutilsExecError:
        # any failed sub-command aborts the whole setup run
        print "Error while running cmake"
        print "run 'setup.py build --help' for build options"
        print "You may also try editing the settings in CMakeLists.txt file and re-running setup"
        sys.exit(-1)
class build(_build.build):
    """distutils ``build`` command that first builds the native zq module via CMake."""
    def run(self):
        run_cmake()
        # Now populate the extension module attribute.
        #self.distribution.ext_modules = get_ext_modules()
        _build.build.run(self)
class install(_install.install):
    """setuptools ``install`` command that builds the native module on demand."""
    def run(self):
        # Only run the CMake build when the shared object is missing.
        if not posixpath.exists("src/zq.so"):
            run_cmake()
        ds.spawn(['make', 'install'])
        #self.distribution.ext_modules = get_ext_modules()
        self.do_egg_install()
# Long description comes verbatim from the README shipped with the package.
with open('README.txt') as file:
    clips6_long_desc = file.read()

setup(
    name = "zq",
    version = '0.6',
    description = 'ZQL - Zabbix Query Language',
    # NOTE(review): "json" is the standard-library module, not an
    # installable distribution -- it probably should not be listed here.
    install_requires = ["cython", "msgpack-python", "simplejson", "hy", "pyfiglet",
                        "gevent", "json", "termcolor", "humanfriendly", "ipaddr", "pyfscache",
                        "Cheetah", "dateparser", "pygithub",
                        ],
    requires = [],
    include_package_data = True,
    url = 'https://github.com/vulogov/zq/',
    author='Vladimir Ulogov',
    author_email = 'vladimir.ulogov@me.com',
    maintainer_email = 'vladimir.ulogov@me.com',
    license = "GNU GPL Version 3",
    long_description = clips6_long_desc,
    keywords = "zql, monitoring, zabbix",
    platforms = ['GNU/Linux','Unix','Mac OS-X'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking :: Monitoring',
        'Topic :: System :: Systems Administration',
        'Environment :: Console',
        'Environment :: Console :: Curses'
        ],
    # ext_modules is not present here. This will be generated through CMake via the
    # build or install commands
    cmdclass={'install':install,'build': build},
    zip_safe=False,
    packages = ['zq'],
    package_data = {
        'zq': ['zq.so', '*.pyx', '*.pyi']
    }
)
|
LokiNetworks/empower-runtime | empower/apps/zephyr/zephyr.py | Python | apache-2.0 | 17,023 | 0.006814 | #!/usr/bin/env python3
#
# Copyright (c) 2015, Roberto Riggio
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CREATE-NET nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CREATE-NET ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CREATE-NET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Basic Zephyr manager."""
from empower.core.app import EmpowerApp
from empower.core.app import DEFAULT_PERIOD
from empower.main import RUNTIME
from empower.datatypes.etheraddress import EtherAddress
from empower.core.resourcepool import ResourcePool
from empower.lvapp.lvappconnection import LVAPPConnection
import time, datetime, threading
import empower.apps.zephyr.zephyrLinker as linker
starttime = datetime.datetime.now()
class Zephyr(EmpowerApp):
"""Basic mobility manager.
Command Line Parameters:
tenant_id: tenant id
limit: handover limit in dBm (optional, default -80)
every: loop period in ms (optional, default 5000ms)
Example:
./empower-runtime.py apps.mobilitymanager.mobilitymanager \
--tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
    def __init__(self, **kwargs):
        """Initialise the app and register WTP/LVAP lifecycle callbacks."""
        # Set before EmpowerApp.__init__ so any access during base init
        # already sees a value -- TODO confirm base-class behaviour.
        self.__limit = linker.DEFAULT_RSSI_LIMIT
        EmpowerApp.__init__(self, **kwargs)
        # Register an wtp up event
        self.wtpup(callback=self.wtp_up_callback)
        # Register an lvap join event
        self.lvapjoin(callback=self.lvap_join_callback)
        # Register an lvap leave event
        self.lvapleave(callback=self.lvap_leave_callback)
def lvap_leave_callback(self, lvap):
"""Called when an LVAP disassociates from a tennant."""
self.log.info("LVAP %s left %s" % (lvap.addr, lvap.ssid))
    def wtp_up_callback(self, wtp):
        """Called when a new WTP connects to the controller."""
        # Start an uplink channel quality map poller on every resource
        # block the WTP supports, at this app's loop period.
        for block in wtp.supports:
            self.ucqm(block=block, every=self.every)
    def lvap_join_callback(self, lvap):
        """Called when an LVAP joins the network: arm a low-RSSI trigger."""
        # Fire low_rssi() when the LVAP's RSSI drops below (LT) the limit.
        self.rssi(lvap=lvap.addr, value=self.limit, relation='LT',
                  callback=self.low_rssi)
    def handover(self, lvap):
        """ Handover the LVAP to a WTP with
        an RSSI higher that -65dB. """
        # NOTE(review): the loop "for lvap in self.lvaps()" below rebinds the
        # name ``lvap``, shadowing the method argument; all later uses refer
        # to the *last* iterated LVAP, not the one passed in -- confirm this
        # is intended.
        self.log.info("Running handover...")
        self.log.info("LVAP: %s - Limit RSSI : %u dB" % (lvap.addr, self.limit))
        self.log.info("Initialize the Resource Pool")
        pool = ResourcePool()
        for wtp in self.wtps():
            #for wtpd, lvaps in wtpdict.items():
                #self.log.info("WTP in wtps : %s WTP in dict : %s are equal : %u\n" % (str(wtp.addr), wtpd, (wtp.addr == wtpd)))
            # templist/length are computed but unused -- debug leftovers?
            templist = linker.wtpdict[str(wtp.addr)]
            length = len(templist)
            self.log.info("Pooling WTP: %s" % str(wtp.addr))
            self.log.info(wtp.supports)
            # Union of every supported resource block across all WTPs.
            pool = pool | wtp.supports
        self.log.info("Select matching Resource Blocks")
        matches = pool #& lvap.scheduled_on
        self.log.info(matches)
        self.log.info("LVAP1 LOOP 107")
        counter=0
        for lvap in self.lvaps():
            self.log.info("!!!!!!!!!!!!!!!%d : %s" % (counter, lvap.addr))
            counter=counter+1
        for block in matches:
            # Dump the channel-quality-map entry for this (block, lvap) pair.
            self.log.info("Time : %f \n LVAP : %s \n addr : %s \n last_rssi_avg : %.2f \n last_rssi_std : %.2f \n last_packets : %u \n mov_rrsi : %.2f\n" % (time.time(),
                          lvap.addr,
                          block.ucqm[lvap.addr]['addr'],
                          block.ucqm[lvap.addr]['last_rssi_avg'],
                          block.ucqm[lvap.addr]['last_rssi_std'],
                          block.ucqm[lvap.addr]['last_packets'],
                          block.ucqm[lvap.addr]['mov_rssi']))
            # NOTE(review): hard-coded client MAC address -- looks like a
            # test/debug leftover; confirm before shipping.
            if (lvap.addr=="78:44:76:BF:DA:D4"):
                self.log.info("LVAP: %s is leaving" % lvap.addr)
                #del lvap.downlink[block] #deletes lvap
                # Initialize the Resource Pool
                pool = ResourcePool()
                # Update the Resource Pool with all
                # the available Resourse Blocks
                for wtp in self.wtps():
                    # Only WTPs under their configured client limit join the pool.
                    if (str(wtp.addr) in linker.wtpdict):
                        if (len(linker.wtpdict[str(wtp.addr)]) < linker.wtpdict_limit[str(wtp.addr)]):
                            pool = pool | wtp.supports
                # Select matching Resource Blocks
                matches = pool & lvap.scheduled_on
                # Filter Resource Blocks by RSSI
                valid = [block for block in matches
                         if block.ucqm[lvap.addr]['mov_rssi'] >= self.limit]
                #valid = self.blocks(lvap, self.limit)
                if not valid:
                    self.log.info("not valid")
                    return
                for block in valid:
                    self.log.info("valid LVAP: %s - Current RSSI : %u dB" % (lvap.addr, float(block.ucqm[lvap.addr]['mov_rssi'])))
                # Hand over to the block with the strongest moving-average RSSI.
                new_block = max(valid, key=lambda x: x.ucqm[lvap.addr]['mov_rssi'])
                self.log.info("LVAP %s setting new block %s" % (lvap.addr, new_block))
                lvap.scheduled_on = new_block
    @property
    def limit(self):
        """Return the handover RSSI threshold in dB (not the loop period)."""
        return self.__limit
    @limit.setter
    def limit(self, value):
        """Set the RSSI threshold; must lie in [-100, 0] dB."""
        limit = int(value)
        # RSSI thresholds are negative dBm values; reject anything outside
        # the plausible [-100, 0] range.
        if limit > 0 or limit < -100:
            raise ValueError("Invalid value for limit")
        self.log.info("Setting limit %u dB" % value)
        self.__limit = limit
def set_limit(self, value):
"""Set limit."""
limit = int(value)
if limit > 0 or limit < -100:
raise ValueError("Invalid value for limit")
self.log.info("Setting limit %u dB" % value)
self.__limit = limit
    def low_rssi(self, trigger):
        """ Perform handover if an LVAP's rssi is
        going below the threshold. """
        self.log.info("Received trigger from %s rssi %u dB",
                      trigger.event['block'],
                      trigger.event['current'])
        # The LVAP may have left between trigger and callback delivery.
        lvap = self.lvap(trigger.lvap)
        if not lvap:
            return
        self.handover(lvap)
def wtp_clientlimit(self):
self.log.info("Running Client Limit...")
wtp_c=0
for wtp in self.wtps():
#Create lvaplist for the specific wtp
lvaplist = []
for lvap in self.lvaps():
if lvap.wtp.addr == wtp.addr:
#self.log.info("LVAP before list : %s" % lvap.addr)
lvaplist.append(str(lvap.addr))
#self.log.info("LVAP after list : %s" % lvaplist[-1])
#Check if limit is not given and provide the default
#if str(wtp.addr) not in linker.wtpdict_limit:
#linker.wtpdict_limit[str(wtp.addr)]=linker.DEFAULT_LVAP_NUMBER_LIMIT
#Che |
nhannv/hasjob | hasjob/twitter.py | Python | agpl-3.0 | 2,286 | 0.002189 | # -*- coding: utf-8 -*-
from flask.ext.rq import job
from tweepy import OAuthHandler, API
import bitlyapi
import urllib2
import json
import re
from hasjob import app
@job('hasjob')
def tweet(title, url, location=None, parsed_location=None, username=None):
    """Tweet a job post: trimmed title + URL + location hashtag(s) + @username.

    The budget starts from Twitter's 140 chars minus the fixed t.co URL
    length; the title is truncated with an ellipsis if necessary.
    (FIM-split artifacts "par | sed_location" / "titl | e" repaired.)
    """
    auth = OAuthHandler(app.config['TWITTER_CONSUMER_KEY'], app.config['TWITTER_CONSUMER_SECRET'])
    auth.set_access_token(app.config['TWITTER_ACCESS_KEY'], app.config['TWITTER_ACCESS_SECRET'])
    api = API(auth)
    urllength = 23  # Current Twitter standard for HTTPS (as of Oct 2014)
    maxlength = 140 - urllength - 1  # == 116
    if username:
        # Reserve room for " @username"
        maxlength -= len(username) + 2
    locationtag = u''
    if parsed_location:
        # Build "#CamelCase" tags from every geocoded token.
        locationtags = []
        for token in parsed_location.get('tokens', []):
            if 'geoname' in token and 'token' in token:
                locname = token['token'].strip()
                if locname:
                    locationtags.append(u'#' + locname.title().replace(u' ', ''))
        locationtag = u' '.join(locationtags)
        if locationtag:
            maxlength -= len(locationtag) + 1
    if not locationtag and location:
        # Make a hashtag from the first word in the location. This catches
        # locations like 'Anywhere' which have no geonameid but are still valid
        locationtag = u'#' + re.split(r'\W+', location)[0]
        maxlength -= len(locationtag) + 1
    if len(title) > maxlength:
        text = title[:maxlength - 1] + u'…'
    else:
        text = title[:maxlength]
    text = text + ' ' + url  # Don't shorten URLs, now that there's t.co
    if locationtag:
        text = text + ' ' + locationtag
    if username:
        text = text + ' @' + username
    api.update_status(text)
def shorten(url):
    """Shorten *url* via bit.ly when configured, else Google's URL shortener.

    Returns the shortened URL string.  (Python 2 code: uses urllib2.)
    """
    if app.config['BITLY_KEY']:
        b = bitlyapi.BitLy(app.config['BITLY_USER'], app.config['BITLY_KEY'])
        res = b.shorten(longUrl=url)
        return res['url']
    else:
        # Fall back to the (now retired) goo.gl REST API.
        req = urllib2.Request("https://www.googleapis.com/urlshortener/v1/url",
            headers={"Content-Type": "application/json"},
            data=json.dumps({'longUrl': url}))
        request_result = urllib2.urlopen(req)
        result = request_result.read()
        result_json = json.loads(result)
        return result_json['id']
|
SamuelLongchamps/grammalecte | compile_rules.py | Python | gpl-3.0 | 24,371 | 0.006309 |
import re
import sys
import traceback
import copy
import json
from distutils import file_util
from grammalecte.echo import echo
DEF = {}
FUNCTIONS = []
JSREGEXES = {}
WORDLIMITLEFT = r"(?<![\w.,–-])" # r"(?<![-.,—])\b" seems slower
WORDLIMITRIGHT = r"(?![\w–-])" # r"\b(?!-—)" seems slower
def prepare_for_eval (s):
    """Rewrite rule-condition shorthand into executable Python code.

    Each shorthand token (\\N group refs, word()/before()/after()/...,
    spell(), morph()...) is expanded to the corresponding runtime call.
    The substitutions are applied strictly in order, since later rules
    operate on the output of earlier ones.
    """
    lSubstitutions = (
        (r"(select|exclude)[(][\\](\d+)", '\\1(dDA, m.start(\\2), m.group(\\2)'),
        (r"define[(][\\](\d+)", 'define(dDA, m.start(\\1)'),
        (r"(morph|morphex|displayInfo)[(][\\](\d+)", '\\1((m.start(\\2), m.group(\\2))'),
        (r"(morph|morphex|displayInfo)[(]", '\\1(dDA, '),
        (r"(sugg\w+|switch\w+)\(@", '\\1(m.group(i[4])'),
        (r"word\(\s*1\b", 'nextword1(s, m.end()'),        # word(1)
        (r"word\(\s*-1\b", 'prevword1(s, m.start()'),     # word(-1)
        (r"word\(\s*(\d)", 'nextword(s, m.end(), \\1'),   # word(n)
        (r"word\(\s*-(\d)", 'prevword(s, m.start(), \\1'),  # word(-n)
        (r"before\(\s*", 'look(s[:m.start()], '),         # before(s)
        (r"after\(\s*", 'look(s[m.end():], '),            # after(s)
        (r"textarea\(\s*", 'look(s, '),                   # textarea(s)
        (r"before_chk1\(\s*", 'look_chk1(dDA, s[:m.start()], 0, '),
        (r"after_chk1\(\s*", 'look_chk1(dDA, s[m.end():], m.end(), '),
        (r"textarea_chk1\(\s*", 'look_chk1(dDA, s, 0, '),
        (r"before0\(\s*", 'look(sx[:m.start()], '),       # before0(s)
        (r"after0\(\s*", 'look(sx[m.end():], '),          # after0(s)
        (r"textarea0\(\s*", 'look(sx, '),                 # textarea0(s)
        (r"before0_chk1\(\s*", 'look_chk1(dDA, sx[:m.start()], 0, '),
        (r"after0_chk1\(\s*", 'look_chk1(dDA, sx[m.end():], m.end(), '),
        (r"textarea0_chk1\(\s*", 'look_chk1(dDA, sx, 0, '),
        (r"isEndOfNG\(\s*\)", 'isEndOfNG(dDA, s[m.end():], m.end())'),
        (r"\bspell *[(]", '_oDict.isValid('),
        (r"[\\](\d+)", 'm.group(\\1)'),
    )
    for sPattern, sRepl in lSubstitutions:
        s = re.sub(sPattern, sRepl, s)
    return s
def py2js (sCode):
    "convert Python code to JavaScript code"
    # NOTE: these substitutions are blind textual rewrites applied in a
    # fixed order; later rules operate on the output of earlier ones.
    # Python 2.x unicode strings
    sCode = re.sub('\\b[ur]"', '"', sCode)
    sCode = re.sub("\\b[ur]'", "'", sCode)
    # operators
    sCode = sCode.replace(" and ", " && ")
    sCode = sCode.replace(" or ", " || ")
    sCode = re.sub("\\bnot\\b", "!", sCode)
    sCode = re.sub("(.+) if (.+) else (.+)", "\\2 ? \\1 : \\3", sCode)
    # boolean
    sCode = sCode.replace("False", "false")
    sCode = sCode.replace("True", "true")
    sCode = sCode.replace("bool", "Boolean")
    # methods (str methods -> JS String / helper methods)
    sCode = sCode.replace(".endswith", ".endsWith")
    sCode = sCode.replace(".find", ".indexOf")
    sCode = sCode.replace(".startswith", ".startsWith")
    sCode = sCode.replace(".lower", ".toLowerCase")
    sCode = sCode.replace(".upper", ".toUpperCase")
    sCode = sCode.replace(".isdigit", "._isDigit")
    sCode = sCode.replace(".isupper", "._isUpperCase")
    sCode = sCode.replace(".islower", "._isLowerCase")
    sCode = sCode.replace(".istitle", "._isTitle")
    sCode = sCode.replace(".capitalize", "._toCapitalize")
    sCode = sCode.replace(".strip", "._trim")
    sCode = sCode.replace(".lstrip", "._trimLeft")
    sCode = sCode.replace(".rstrip", "._trimRight")
    sCode = sCode.replace('.replace("."', ".replace(/\./g")
    sCode = sCode.replace('.replace("..."', ".replace(/\.\.\./g")
    sCode = re.sub('.replace\("([^"]+)" ?,', ".replace(/\\1/g,", sCode)
    # regex: string patterns -> JS regex literals (with (?i) -> /i flag)
    sCode = re.sub('re.search\("([^"]+)", *(m.group\(\\d\))\)', "(\\2.search(/\\1/) >= 0)", sCode)
    sCode = re.sub(".search\\(/\\(\\?i\\)([^/]+)/\\) >= 0\\)", ".search(/\\1/i) >= 0)", sCode)
    sCode = re.sub('(look\\(sx?[][.a-z:()]*), "\\(\\?i\\)([^"]+)"', "\\1, /\\2/i", sCode)
    sCode = re.sub('(look\\(sx?[][.a-z:()]*), "([^"]+)"', "\\1, /\\2/", sCode)
    sCode = re.sub('(look_chk1\\(dDA, sx?[][.a-z:()]*, [0-9a-z.()]+), "\\(\\?i\\)([^"]+)"', "\\1, /\\2/i", sCode)
    sCode = re.sub('(look_chk1\\(dDA, sx?[][.a-z:()]*, [0-9a-z.()]+), "([^"]+)"', "\\1, /\\2/i", sCode)
    sCode = sCode.replace("(?<!-)", "") # todo  (JS regex has no lookbehind here)
    # slices -> .slice()
    sCode = sCode.replace("[:m.start()]", ".slice(0,m.index)")
    sCode = sCode.replace("[m.end():]", ".slice(m.end[0])")
    sCode = re.sub("\\[(-?\\d+):(-?\\d+)\\]", ".slice(\\1,\\2)", sCode)
    sCode = re.sub("\\[(-?\\d+):\\]", ".slice(\\1)", sCode)
    sCode = re.sub("\\[:(-?\\d+)\\]", ".slice(0,\\1)", sCode)
    # regex matches: Python Match API -> JS match-array conventions
    sCode = sCode.replace(".end()", ".end[0]")
    sCode = sCode.replace(".start()", ".index")
    sCode = sCode.replace("m.group()", "m[0]")
    sCode = re.sub("\\.start\\((\\d+)\\)", ".start[\\1]", sCode)
    sCode = re.sub("m\\.group\\((\\d+)\\)", "m[\\1]", sCode)
    # tuples -> lists
    sCode = re.sub("\((m\.start\[\\d+\], m\[\\d+\])\)", "[\\1]", sCode)
    # regex: \w has no Unicode support in JS -> expand to explicit ranges
    sCode = sCode.replace("\w[\w-]+", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ-]+")
    sCode = sCode.replace(r"/\w/", "/[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ]/")
    sCode = sCode.replace(r"[\w-]", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ-]")
    sCode = sCode.replace(r"[\w,]", "[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯ,]")
    return sCode
def uppercase (s, sLang):
    "convert regex to uppercase regex: 'foo' becomes '[Ff][Oo][Oo]', but 'Bar' becomes 'B[Aa][Rr]'."
    # Small state machine over the pattern:
    #   0 = plain text, 1 = inside [...], 2 = inside (?P<...>),
    #   4 = char right after a backslash, 5 = inside a (?...) construct.
    lOut = []
    nState = 0
    for i, c in enumerate(s):
        if c == "[":
            nState = 1
        if nState == 1 and c == "]":
            nState = 0
        if c == "<" and i > 3 and s[i-3:i] == "(?P":
            nState = 2
        if nState == 2 and c == ">":
            nState = 0
        if c == "?" and i > 0 and s[i-1:i] == "(" and s[i+1:i+2] != ":":
            nState = 5
        if nState == 5 and c == ")":
            nState = 0
        if c.isalpha() and c.islower() and nState == 0:
            # plain lowercase letter -> both-case character class
            if c == "i" and sLang in ("tr", "az"):
                # Turkish/Azeri dotted capital İ
                lOut.append("[İ" + c + "]")
            else:
                lOut.append("[" + c.upper() + c + "]")
        elif c.isalpha() and c.islower() and nState == 1 and s[i+1:i+2] != "-":
            if s[i-1:i] == "-" and s[i-2:i-1].islower():
                # range end inside a class: [a-z] -> [a-zA-Z]
                lOut.append(c + s[i-2:i-1].upper() + "-" + c.upper())
            elif c == "i" and sLang in ("tr", "az"):
                lOut.append("İ" + c)
            else:
                lOut.append(c.upper() + c)
        else:
            lOut.append(c)
        # backslash escapes the next character; that character is copied
        # verbatim and the state resets afterwards
        if c == "\\":
            nState = 4
        elif nState == 4:
            nState = 0
    return "".join(lOut)
def countGroupInRegex (sRegex):
    """Return the number of capturing groups in sRegex, or 0 if it cannot be compiled."""
    try:
        return re.compile(sRegex).groups
    except Exception:
        # narrowed from a bare "except:": a bare clause would also swallow
        # SystemExit/KeyboardInterrupt
        traceback.print_exc()
        echo(sRegex)
    return 0
def createRule (s, nIdLine, sLang, bParagraph):
"returns rule as list [option name, regex, bCaseInsensitive, identifier, list of actions]"
global JSREGEXES
#### OPTIONS
sRuleId = str(nIdLine) + ("p" if bParagraph else "s")
sOption = False # False or [a-z0-9]+ name
tGroups = None # code for groups positioning (only useful for JavaScript)
cCaseMode = 'i' # i: case insensitive, s: case sensitive, u: uppercasing allowed
cWordLimitLeft = '[' # [: word limit, <: no specific limit
cWordLimitRight = ']' # ]: word limit, >: no specific limit
m = re.match("^__([[<]\\w[]>])(/[a-zA-Z0-9]+|)__ *", s)
if m:
if m.group(1):
cWordLimitLeft = m.group(1)[0]
cCaseMode = m.group(1)[1]
cWordLimitRight = m.group(1)[2]
sOption = m.group(2)[1:] if m.group(2) else False
s = s[m.end(0):]
#### REGEX TRIGGER
i = s.find(" <<-")
if i == -1:
print("# Error: no condition at line " + sRuleId)
return None
sRegex = |
ivankoster/SublimeYouCompleteMe | plugin/settings.py | Python | gpl-3.0 | 1,137 | 0.004398 | # Copyright (C) 2014 Ivan Koster
#
# This file is part of SublimeYouCompleteMe.
#
# SublimeYouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SublimeYouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SublimeYouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
""" Wrap | per to handle settings for SublimeYouCompleteMe and the YCMD server
in the sublime text settings files
"""
import sublime

# Plugin settings file name; the stray " | " artifact in the literal was repaired.
SETTINGS = sublime.load_settings('SublimeYouCompleteMe.sublime-settings')
# TODO load ycmd defaults into sublime text settings file (not the user settings file!)
# TODO when starting the ycmd server, use the sublime text usersettings
|
jaesivsm/pyAggr3g470r | jarr/controllers/feed_builder.py | Python | agpl-3.0 | 9,247 | 0 | import html
import logging
import re
import urllib
from feedparser import FeedParserDict
from feedparser import parse as fp_parse
from requests.exceptions import ReadTimeout
from jarr.lib.const import FEED_MIMETYPES, GOOGLE_BOT_UA, REQUIRED_JSON_FEED
from jarr.lib.enums import FeedType
from jarr.lib.html_parsing import (extract_feed_links, extract_icon_url,
extract_opg_prop, extract_title)
from jarr.lib.utils import jarr_get
SCHEME = r'(?:https?:)?\/\/'
logger = logging.getLogger(__name__)
REDDIT_FEED = re.compile(SCHEME + r'(www.)?reddit.com/r/([\w\-_]+)/?(.*)$')
INSTAGRAM_RE = re.compile(SCHEME + r'(www.)?instagram.com/([^ \t\n\r\f\v/]+)')
TWITTER_RE = re.compile(SCHEME + r'(www.)?twitter.com/([^ \t\n\r\f\v/]+)')
TUMBLR_RE = re.compile(SCHEME + r'([^ \t\n\r\f\v/]+).tumblr.com/.*$')
YOUTUBE_CHANNEL_RE = re.compile(r'((http|https):\/\/)?(www\.)?youtube\.com\/'
r'channel\/([a-zA-Z0-9\-]+)')
SOUNDCLOUD_RE = re.compile(
r'^https?://(www.)?soundcloud.com/([^ \t\n\r\f\v/]+)')
KOREUS_RE = re.compile(r'^https?://feeds.feedburner.com/Koreus.*$')
REDDIT_FEED_PATTERN = "https://www.reddit.com/r/%s/.rss"
YOUTUBE_FEED_PATTERN = 'https://www.youtube.com/feeds/videos.xml?channel_id=%s'
class FeedBuilderController:
    def __init__(self, url, parsed_feed=None):
        """Store the (scheme-normalised) URL; responses are fetched lazily."""
        self.url = self._fix_url(url)
        self.page_response = None  # HTML page response, set during construct()
        self.feed_response = None  # feed (XML/JSON) response, set during construct()
        self.parsed_feed = parsed_feed
    @property
    def feed_response_content_type(self):
        """Content-Type of the fetched feed, or '' when unavailable."""
        try:
            return self.feed_response.headers['Content-Type']
        except Exception:
            # deliberate best-effort: no response / no header -> empty string
            return ''
@staticmethod
def _fix_url(url):
split = urllib.parse.urlsplit(url)
if not split.scheme and not split.netloc:
return 'http://' + url
if not split.scheme:
return 'http:' + url
return url
def any_url(self):
if self.url:
yield self.url
for page in self.feed_response, self.page_response:
if page and page.url:
yield page.url
    @property
    def _is_json_feed(self):
        """True when the feed response advertises a JSON(-feed) content type."""
        return ('application/feed+json' in self.feed_response_content_type
                or 'application/json' in self.feed_response_content_type)
    def is_parsed_feed(self):
        """Parse the fetched response if needed; return truthy when it is a feed.

        Side effect: fills ``self.parsed_feed`` from the JSON body or from
        feedparser depending on the response content type.
        """
        if not self.feed_response and not self.parsed_feed:
            return False
        if not self.parsed_feed:
            if self._is_json_feed:
                self.parsed_feed = self.feed_response.json()
                # a JSON feed must carry both required top-level keys
                if len(REQUIRED_JSON_FEED.intersection(self.parsed_feed)) != 2:
                    return False
            elif any(mimetype in self.feed_response_content_type
                     for mimetype in FEED_MIMETYPES):
                self.parsed_feed = fp_parse(self.feed_response.content)
            else:
                return False
        if not isinstance(self.parsed_feed, (FeedParserDict, dict)):
            return False
        # a feed counts when it has entries/items, or at least parsed cleanly
        return self.parsed_feed.get('entries') \
                or self.parsed_feed.get('items') \
                or not self.parsed_feed.get('bozo')
    def construct_from_xml_feed_content(self):
        """Build the feed-attributes dict (link, site_link, title, description)
        from a feedparser result; falsy values are stripped from the output."""
        if not self.is_parsed_feed():
            return {}
        fp_feed = self.parsed_feed.get('feed') or {}
        result = {'link': self.feed_response.url,
                  'site_link': fp_feed.get('link'),
                  'title': fp_feed.get('title')}
        # prefer the feed's self-declared href over the response URL
        if self.parsed_feed.get('href'):
            result['link'] = self.parsed_feed.get('href')
        if fp_feed.get('subtitle'):
            result['description'] = fp_feed.get('subtitle')
        # extracting extra links
        rel_to_link = {'self': 'link', 'alternate': 'site_link'}
        for link in fp_feed.get('links') or []:
            if link['rel'] not in rel_to_link:
                logger.info('unknown link type %r', link)
                continue
            if result.get(rel_to_link[link['rel']]):
                # field already set above; first value wins
                logger.debug('%r already field', rel_to_link[link['rel']])
                continue
            result[rel_to_link[link['rel']]] = link['href']
        # extracting description
        if not result.get('description') \
                and (fp_feed.get('subtitle_detail') or {}).get('value'):
            result['description'] = fp_feed['subtitle_detail']['value']
        return {key: value for key, value in result.items() if value}
    def construct_from_json_feed_content(self):
        """Build the feed-attributes dict from a parsed JSON feed;
        falsy values are stripped from the output."""
        if not self.is_parsed_feed():
            return {}
        result = {'feed_type': FeedType.json,
                  'site_link': self.parsed_feed.get('home_page_url'),
                  'link': self.feed_response.url,
                  'icon_url': (self.parsed_feed.get('favicon')
                               or self.parsed_feed.get('icon')),
                  'description': self.parsed_feed.get('description'),
                  'title': self.parsed_feed.get('title')}
        try:
            # hubs is optional and loosely typed; swallow malformed entries
            result['links'] = [hub.get('url')
                               for hub in self.parsed_feed.get('hubs', [])]
        except Exception:
            pass
        return {key: value for key, value in result.items() if value}
def construct_from_feed_content(self):
if self._is_json_feed:
return self.construct_from_json_feed_content()
return self.construct_from_xml_feed_content()
    def correct_rss_bridge_feed(self, regex, feed_type):
        """Return the id captured by splitting any known URL with *regex*.

        NOTE(review): ``feed_type`` is accepted but never used here --
        confirm whether it was meant to tag the result.
        """
        def extract_id(url):
            # regex.split(url, 1) yields [prefix, group..., suffix];
            # index 2 is the first split remainder
            try:
                return regex.split(url, 1)[2]
            except Exception:
                return False
        # first URL that yields an id wins (extract_id is called twice;
        # cheap enough for the handful of candidate URLs)
        for url in self.any_url():
            if extract_id(url):
                return extract_id(url)
    def parse_webpage(self):
        """Extract site_link/icon/feed-links/title from the fetched HTML page;
        falsy values are stripped from the output."""
        result = {'site_link': self.page_response.url}
        icon_url = extract_icon_url(self.page_response)
        if icon_url:
            result['icon_url'] = icon_url
        links = list(extract_feed_links(self.page_response))
        if links:
            # first advertised feed link becomes the feed URL
            result['link'] = links[0]
            if len(links) > 1:
                result['links'] = links
        # prefer the OpenGraph site name, fall back to the <title> tag
        result['title'] = extract_opg_prop(self.page_response,
                                           og_prop='og:site_name')
        if not result['title']:
            result['title'] = extract_title(self.page_response)
        return {key: value for key, value in result.items() if value}
| @staticmethod
def _handle_known_malfunctionning_link(feed):
# reddit's subs don't automatically provide rss feed
reddit_match = REDDIT_FEED.match(feed['link'])
if reddit_match and not reddit_match.group(3):
feed['link'] = REDDIT_FEED_PATTERN % reddit_ma | tch.group(2)
feed['feed_type'] = FeedType.reddit
return feed
youtube_match = YOUTUBE_CHANNEL_RE.match(feed['link'])
if youtube_match:
feed['site_link'] = feed['link']
feed['link'] = YOUTUBE_FEED_PATTERN % youtube_match.group(4)
return feed
    @staticmethod
    def http_get(url):
        """GET *url*; on timeout, retry once pretending to be Googlebot."""
        try:
            return jarr_get(url)
        except (ReadTimeout, TimeoutError):
            # some sites answer slowly (or not at all) to unknown agents
            return jarr_get(url, user_agent=GOOGLE_BOT_UA)
def construct(self):
feed = {'feed_type': FeedType.classic, 'link': self.url}
feed = self._handle_known_malfunctionning_link(feed)
self.feed_response = self.http_get(feed['link'])
# is an XML feed
if self.is_parsed_feed():
feed.update(self.construct_from_feed_content())
if feed.get('site_link'):
self.page_response = self.http_get(feed['site_link'])
feed = dict(self.parse_webpage(), **feed)
else: # is a regular webpage
del feed['link']
self.page_response = self.feed_response
self.feed_response = None
feed = dict(self.parse_webpage(), **feed)
if feed.get('link'):
self.feed_response = self.http_get(feed['link'])
feed.update(self.construct_from_feed_content())
# marking integration feed
for regex, feed_type in ((REDDIT_FEED, FeedType.reddit),
|
UCHIC/h2outility | src/GAMUTRawData/odmdata/variable.py | Python | bsd-3-clause | 1,443 | 0.025641 | from sqlalchemy import String, Column, Float, Integer, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from base import Base
from unit import Unit
class Variable(Base):
    """ORM mapping for the ODM ``Variables`` table.

    FIM-split artifacts ("nulla | ble", "no_data_value | =") repaired.
    """
    __tablename__ = 'Variables'

    id = Column('VariableID', Integer, primary_key=True)
    code = Column('VariableCode', String, nullable=False)
    name = Column('VariableName', String, nullable=False)
    speciation = Column('Speciation', String, nullable=False)
    variable_unit_id = Column('VariableUnitsID', Integer, ForeignKey('Units.UnitsID'), nullable=False)
    sample_medium = Column('SampleMedium', String, nullable=False)
    value_type = Column('ValueType', String, nullable=False)
    is_regular = Column('IsRegular', Boolean, nullable=False)
    time_support = Column('TimeSupport', Float, nullable=False)
    time_unit_id = Column('TimeUnitsID', Integer, ForeignKey('Units.UnitsID'), nullable=False)
    data_type = Column('DataType', String, nullable=False)
    general_category = Column('GeneralCategory', String, nullable=False)
    no_data_value = Column('NoDataValue', Float, nullable=False)

    # relationships
    variable_unit = relationship(Unit, primaryjoin=("Unit.id==Variable.variable_unit_id"))  # <-- Uses class attribute names, not table column names
    time_unit = relationship(Unit, primaryjoin=("Unit.id==Variable.time_unit_id"))

    def __repr__(self):
        return "<Variable('%s', '%s', '%s')>" % (self.id, self.code, self.name)
|
andrewpx1/apiai-weather-webhook-sample-master | lovepx.py | Python | apache-2.0 | 1,716 | 0.011682 | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from os.path import splitext
|
import json
import os
import requests
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
def processRequest(req):
    """Handle an API.AI webhook request for the love-calculator intent.

    Reads the two names from the request parameters, queries the Mashape
    Love Calculator API and returns the API.AI response payload.
    (FIM-split artifact "'s | name'" repaired to 'sname'.)
    """
    result = req.get("result")
    parameters = result.get("parameters")
    prt = parameters.get("any")
    yurt = parameters.get("any1")
    baseurl = "https://love-calculator.p.mashape.com/getPercentage?"
    mydict1 = {'fname': prt}
    mydict2 = {'sname': yurt}
    put1 = urlencode(mydict1)
    put2 = urlencode(mydict2)
    url = baseurl + put1 + '&' + put2
    # NOTE(review): hard-coded API key -- move it into configuration or an
    # environment variable before deploying.
    headers = {
        "X-Mashape-Key": "axWE0J6Hj5mshIyeWhO19vjpSYyxp1k53ohjsnr3rxp4xsIj8I",
        'Accept': 'application/json'
    }
    r = requests.get(url, headers=headers)
    gh = r.content
    poke = gh.decode()
    toke = json.loads(poke)
    perc = toke.get('percentage')
    resl = toke.get("result")
    joke = prt + ' ❤️ ' + yurt + ' = ' + perc + ' % \nSo, ' + resl
    speech = joke
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        # "data": data,
        # "contextOut": [],
        "source": "apiai-weather-webhook-sample"
    }
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
oVirt/vdsm | lib/vdsm/storage/sdm/api/reduce_domain.py | Python | gpl-2.0 | 2,333 | 0 | #
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING f | iles for full details of the license
#
from __future__ import absolute_import
from vdsm.common import properties
from vdsm.storage import exception as se
from vdsm.storage.constants import STORAGE
from vdsm.storage import resourceManager as rm
from vdsm.storage.sdc import sdCache
from . import base
class Job(base.Job):
    """
    Reduces the device from the given domain devices.
    """
    def __init__(self, job_id, host_id, reduce_params):
        super(Job, self).__init__(job_id, 'reduce_domain', host_id)
        # validates sd_id / guid on construction (see StorageDomainReduceParams)
        self.params = StorageDomainReduceParams(reduce_params)

    def _run(self):
        """Reduce the device out of the domain's VG under exclusive locks."""
        sd_manifest = sdCache.produce_manifest(self.params.sd_id)
        if not sd_manifest.supports_device_reduce():
            raise se.UnsupportedOperation(
                "Storage domain does not support reduce operation",
                sdUUID=sd_manifest.sdUUID(),
                sdType=sd_manifest.getStorageType())
        # TODO: we assume at this point that the domain isn't active and can't
        # be activated - we need to ensure that.
        # Lock ordering: host-level exclusive resource first, then the
        # cluster-level domain id/lock for this host.
        with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
            with sd_manifest.domain_id(self.host_id), \
                    sd_manifest.domain_lock(self.host_id):
                sd_manifest.reduceVG(self.params.guid)
class StorageDomainReduceParams(properties.Owner):
    """Validated parameter bag for the reduce-domain job."""
    sd_id = properties.UUID(required=True)
    guid = properties.String(required=True)

    def __init__(self, params):
        # Assignment runs the property descriptors' validation.
        for key in ("sd_id", "guid"):
            setattr(self, key, params.get(key))
|
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/lib/eventwatcher.py | Python | mit | 16,311 | 0.007602 | #----------------------------------------------------------------------------
# Name: wx.lib.eventwatcher
# Purpose: A widget that allows some or all events for a par | ticular widget
# to be captured and displayed.
#
# Author: Robin Dunn
#
# | Created: 21-Jan-2009
# RCS-ID: $Id: $
# Copyright: (c) 2009 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------------
"""
A widget and supporting classes for watching the events sent to some other widget.
"""
import wx
from wx.lib.mixins.listctrl import CheckListCtrlMixin
#----------------------------------------------------------------------------
# Helpers for building the data structures used for tracking the
# various event binders that are available
# Lazily-built registry of wx.PyEventBinder instances, and a companion map of
# event type id -> binder name (e.g. 'EVT_BUTTON'); see buildWxEventMap().
_eventBinders = None
_eventIdMap = None
def _buildModuleEventMap(module):
    """Scan *module* for EVT_* binders and register the new ones.

    Returns the number of binders added to the global registry.
    """
    added = 0
    for attr in dir(module):
        if not attr.startswith('EVT_'):
            continue
        binder = getattr(module, attr)
        # Only simple (single event type) binders we haven't seen yet.
        if (isinstance(binder, wx.PyEventBinder)
                and len(binder.evtType) == 1
                and binder not in _eventBinders):
            _eventBinders.append(binder)
            _eventIdMap[binder.typeId] = attr
            added += 1
    return added
def buildWxEventMap():
    """
    Add the event binders from the main wx namespace. This is called
    automatically from the EventWatcher.
    """
    global _eventBinders, _eventIdMap
    if _eventBinders is not None:
        # Already initialized; nothing to do.
        return
    _eventBinders = []
    _eventIdMap = {}
    _buildModuleEventMap(wx)
def addModuleEvents(module):
    """
    Adds all the items in module that start with ``EVT_`` to the event
    data structures used by the EventWatcher.

    Returns the number of new binders that were added.
    """
    # Make sure the base wx maps exist before extending them.
    if _eventBinders is None:
        buildWxEventMap()
    return _buildModuleEventMap(module)
# Events that should not be watched by default
_noWatchList = [
    wx.EVT_PAINT,
    wx.EVT_NC_PAINT,
    wx.EVT_ERASE_BACKGROUND,
    wx.EVT_IDLE,
    wx.EVT_UPDATE_UI,
    wx.EVT_UPDATE_UI_RANGE,
    ]
# Pixel width shared by the tip popups and the chooser's "Binder" column.
OTHER_WIDTH = 250
def _makeSourceString(wdgt):
if wdgt is None:
return "None"
else:
name = ''
id = 0
if hasattr(wdgt, 'GetName'):
name = wdgt.GetName()
if hasattr(wdgt, 'GetId'):
id = wdgt.GetId()
return '%s "%s" (%d)' % (wdgt.__class__.__name__, name, id)
def _makeAttribString(evt):
"Find all the getters"
attribs = ""
for name in dir(evt):
if (name.startswith('Get') or name.startswith('Is')) and \
name not in [ 'GetEventObject',
'GetEventType',
'GetId',
'GetSkipped',
'GetTimestamp',
'GetClientData',
'GetClientObject',
]:
try:
value = getattr(evt, name)()
attribs += "%s : %s\n" % (name, value)
except:
pass
return attribs.rstrip()
#----------------------------------------------------------------------------
class EventLog(wx.ListCtrl):
    """
    A virtual listctrl that displays information about the watched events.

    Rows are stored in self.data as (eventName, source, attribs) tuples and
    supplied on demand through the OnGetItem* virtual-mode callbacks.
    """
    def __init__(self, *args, **kw):
        # Force virtual single-selection report mode regardless of caller style.
        kw['style'] = wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES
        wx.ListCtrl.__init__(self, *args, **kw)
        self.clear()
        if 'wxMac' in wx.PlatformInfo:
            self.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        self.InsertColumn(0, "#", format=wx.LIST_FORMAT_RIGHT, width=50)
        self.InsertColumn(1, "Event", width=200)
        self.InsertColumn(2, "Source", width=200)
        self.SetMinSize((450+wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X), 450))
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onItemSelected)
        # Activation (double-click/Enter) and right-click both show details.
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.onItemActivated)
        self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.onItemActivated)
    def append(self, evt):
        # Add one row describing *evt*.
        evtName = _eventIdMap.get(evt.GetEventType(), None)
        if evtName is None:
            evtName = 'Unknown: %d' % evt.GetEventType()
        source = _makeSourceString(evt.GetEventObject())
        attribs = _makeAttribString(evt)
        # If the last row was selected, keep following the tail of the log.
        lastIsSelected = self.currItem == len(self.data)-1
        self.data.append( (evtName, source, attribs) )
        count = len(self.data)
        self.SetItemCount(count)
        self.RefreshItem(count-1)
        if lastIsSelected:
            self.Select(count-1)
            self.EnsureVisible(count-1)
    def clear(self):
        # Reset the backing store and the virtual row count.
        self.data = []
        self.SetItemCount(0)
        self.currItem = -1
        self.Refresh()
    def OnGetItemText(self, item, col):
        # Virtual-mode callback: column 0 is the 1-based row number.
        if col == 0:
            val = str(item+1)
        else:
            val = self.data[item][col-1]
        return val
    def OnGetItemAttr(self, item): return None
    def OnGetItemImage(self, item): return -1
    def onItemSelected(self, evt):
        self.currItem = evt.GetIndex()
    def onItemActivated(self, evt):
        # Pop up the stored attribute details for the activated row.
        idx = evt.GetIndex()
        text = self.data[idx][2]
        wx.CallAfter(wx.TipWindow, self, text, OTHER_WIDTH)
#----------------------------------------------------------------------------
class EventChooser(wx.Panel):
"""
Panel with CheckListCtrl for selecting which events will be watched.
"""
class EventChooserLC(wx.ListCtrl, CheckListCtrlMixin):
def __init__(self, parent):
wx.ListCtrl.__init__(self, parent,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_HRULES|wx.LC_VRULES)
CheckListCtrlMixin.__init__(self)
if 'wxMac' in wx.PlatformInfo:
self.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
# this is called by the base class when an item is checked/unchecked
def OnCheckItem(self, index, flag):
self.Parent.OnCheckItem(index, flag)
def __init__(self, *args, **kw):
wx.Panel.__init__(self, *args, **kw)
self.updateCallback = lambda: None
self.doUpdate = True
self._event_name_filter = wx.SearchCtrl(self)
self._event_name_filter.ShowCancelButton(True)
self._event_name_filter.Bind(wx.EVT_TEXT, lambda evt: self.setWatchList(self.watchList))
self._event_name_filter.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self._ClearEventFilter)
self.lc = EventChooser.EventChooserLC(self)
btn1 = wx.Button(self, -1, "All")
btn2 = wx.Button(self, -1, "None")
btn1.SetToolTipString("Check all events")
btn2.SetToolTipString("Uncheck all events")
self.Bind(wx.EVT_BUTTON, self.onCheckAll, btn1)
self.Bind(wx.EVT_BUTTON, self.onUncheckAll, btn2)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.onItemActivated, self.lc)
self.lc.InsertColumn(0, "Binder", width=OTHER_WIDTH)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add(btn1, 0, wx.ALL, 5)
btnSizer.Add(btn2, 0, wx.ALL, 5)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._event_name_filter, 0, wx.EXPAND|wx.ALL, 5)
sizer.Add(self.lc, 1, wx.EXPAND)
sizer.Add(btnSizer)
self.SetSizer(sizer)
def setUpdateCallback(self, func):
self.updateCallback = func
def setWatchList(self, watchList):
self.doUpdate = False
searched = self._event_name_filter.GetValue().lower()
self.watchList = watchList
self.lc.DeleteAllItems()
count = 0
for index, (item, flag) in enumerate(watchList):
typeId = item.typeId
text = _even |
sserrot/champion_relationships | venv/Lib/site-packages/win32/test/test_security.py | Python | mit | 5,205 | 0.013449 | # Tests for the win32security module.
import sys, os
import unittest
import winerror
from pywin32_testutil import testmain, TestSkipped, ob2memory
import win32api, win32con, win32security, ntsecuritycon
class SecurityTests(unittest.TestCase):
    """Sanity tests for PySID comparison semantics and security descriptors."""

    def setUp(self):
        # Two well-known local accounts used as fixtures throughout.
        self.pwr_sid = win32security.LookupAccountName('', 'Power Users')[0]
        self.admin_sid = win32security.LookupAccountName('', 'Administrator')[0]

    def tearDown(self):
        pass

    def testEqual(self):
        # Two independent lookups of the same account must compare equal.
        # (Deprecated failUnless*/failIf* aliases replaced with modern names.)
        self.assertEqual(win32security.LookupAccountName('', 'Administrator')[0],
                         win32security.LookupAccountName('', 'Administrator')[0])

    def testNESID(self):
        self.assertTrue(self.pwr_sid == self.pwr_sid)
        self.assertTrue(self.pwr_sid != self.admin_sid)

    def testNEOther(self):
        # Explicit ==/!= against None from both sides: this exercises the
        # SID rich-comparison operators, so do NOT rewrite as "is not None".
        self.assertTrue(self.pwr_sid != None)
        self.assertTrue(None != self.pwr_sid)
        self.assertFalse(self.pwr_sid == None)
        self.assertFalse(None == self.pwr_sid)
        self.assertNotEqual(None, self.pwr_sid)

    def testSIDInDict(self):
        # SIDs must be hashable and usable as dict values.
        d = dict(foo=self.pwr_sid)
        self.assertEqual(d['foo'], self.pwr_sid)

    def testBuffer(self):
        self.assertEqual(ob2memory(win32security.LookupAccountName('', 'Administrator')[0]),
                         ob2memory(win32security.LookupAccountName('', 'Administrator')[0]))

    def testMemory(self):
        # Stress loop: repeatedly set descriptor fields to surface refcount
        # or memory bugs in the C implementation.
        pwr_sid = self.pwr_sid
        admin_sid = self.admin_sid
        sd1 = win32security.SECURITY_DESCRIPTOR()
        sd2 = win32security.SECURITY_DESCRIPTOR()
        sd3 = win32security.SECURITY_DESCRIPTOR()
        dacl = win32security.ACL()
        dacl.AddAccessAllowedAce(win32security.ACL_REVISION, win32con.GENERIC_READ, pwr_sid)
        dacl.AddAccessAllowedAce(win32security.ACL_REVISION, win32con.GENERIC_ALL, admin_sid)
        sd4 = win32security.SECURITY_DESCRIPTOR()
        sacl = win32security.ACL()
        sacl.AddAuditAccessAce(win32security.ACL_REVISION, win32con.DELETE, admin_sid, 1, 1)
        sacl.AddAuditAccessAce(win32security.ACL_REVISION, win32con.GENERIC_ALL, pwr_sid, 1, 1)
        for x in range(0, 200000):
            sd1.SetSecurityDescriptorOwner(admin_sid, 0)
            sd2.SetSecurityDescriptorGroup(pwr_sid, 0)
            sd3.SetSecurityDescriptorDacl(1, dacl, 0)
            sd4.SetSecurityDescriptorSacl(1, sacl, 0)
class DomainTests(unittest.TestCase):
    """Base class for tests that need a directory-service (DS) binding.

    setUp binds once and caches the handle; hosts that are not joined to a
    domain cause the test to be skipped rather than fail.
    """
    def setUp(self):
        self.ds_handle = None
        try:
            # saving the handle means the other test itself should bind faster.
            self.ds_handle = win32security.DsBind()
        except win32security.error as exc:
            # Only "no such domain" means skip; anything else is a real error.
            if exc.winerror != winerror.ERROR_NO_SUCH_DOMAIN:
                raise
            raise TestSkipped(exc)
    def tearDown(self):
        if self.ds_handle is not None:
            self.ds_handle.close()
class TestDS(DomainTests):
    """Exercises the DsBind/DsGetDcName/DsCrackNames directory APIs."""

    def testDsGetDcName(self):
        # Not sure what we can actually test here! At least calling it
        # does something :)
        win32security.DsGetDcName()

    def testDsListServerInfo(self):
        # again, not checking much, just exercising the code.
        h = win32security.DsBind()
        for (status, ignore, site) in win32security.DsListSites(h):
            for (status, ignore, server) in win32security.DsListServersInSite(h, site):
                info = win32security.DsListInfoForServer(h, server)
            for (status, ignore, domain) in win32security.DsListDomainsInSite(h, site):
                pass

    def testDsCrackNames(self):
        # Cracking a name into the same format must round-trip exactly.
        # (Deprecated failUnlessEqual replaced with assertEqual.)
        h = win32security.DsBind()
        fmt_offered = ntsecuritycon.DS_FQDN_1779_NAME
        name = win32api.GetUserNameEx(fmt_offered)
        result = win32security.DsCrackNames(h, 0, fmt_offered, fmt_offered, (name,))
        self.assertEqual(name, result[0][2])

    def testDsCrackNamesSyntax(self):
        # Do a syntax check only - that allows us to avoid binding.
        # But must use DS_CANONICAL_NAME (or _EX)
        expected = win32api.GetUserNameEx(win32api.NameCanonical)
        fmt_offered = ntsecuritycon.DS_FQDN_1779_NAME
        name = win32api.GetUserNameEx(fmt_offered)
        result = win32security.DsCrackNames(None, ntsecuritycon.DS_NAME_FLAG_SYNTACTICAL_ONLY,
                                            fmt_offered, ntsecuritycon.DS_CANONICAL_NAME,
                                            (name,))
        self.assertEqual(expected, result[0][2])
class TestTranslate(DomainTests):
    """Round-trips the current user's account name between formats."""

    def _testTranslate(self, fmt_from, fmt_to):
        # TranslateName's output must match what GetUserNameEx reports
        # directly for the target format.
        # (Deprecated failUnlessEqual replaced with assertEqual.)
        name = win32api.GetUserNameEx(fmt_from)
        expected = win32api.GetUserNameEx(fmt_to)
        got = win32security.TranslateName(name, fmt_from, fmt_to)
        self.assertEqual(got, expected)

    def testTranslate1(self):
        self._testTranslate(win32api.NameFullyQualifiedDN, win32api.NameSamCompatible)

    def testTranslate2(self):
        self._testTranslate(win32api.NameSamCompatible, win32api.NameFullyQualifiedDN)

    def testTranslate3(self):
        self._testTranslate(win32api.NameFullyQualifiedDN, win32api.NameUniqueId)

    def testTranslate4(self):
        self._testTranslate(win32api.NameUniqueId, win32api.NameFullyQualifiedDN)
# Run via the shared pywin32 test harness when executed directly.
if __name__ == '__main__':
    testmain()
|
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/test_panel4d.py | Python | apache-2.0 | 35,347 | 0.000057 | # -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import nose
import numpy as np
from pandas.types.common import is_float_dtype
from pandas import Series, Index, isnull, notnull
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.core.series import remove_na
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
    """Seed every panel in *panel4d* with NaNs, in place, via tm.add_nans."""
    # The enumerate index and the intermediate variable were unused;
    # iterate the labels directly.
    for label in panel4d.labels:
        tm.add_nans(panel4d[label])
class SafeForLongAndSparse(object):
    """Mixin of reduction-op tests; host class must provide self.panel4d."""
    # Allow the test runner to split these tests across processes.
    _multiprocess_can_split_ = True
    def test_repr(self):
        repr(self.panel4d)
    def test_iter(self):
        tm.equalContents(list(self.panel4d), self.panel4d.labels)
    def test_count(self):
        # count has no skipna argument, hence has_skipna=False.
        f = lambda s: notnull(s).sum()
        self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
    def test_sum(self):
        self._check_stat_op('sum', np.sum)
    def test_mean(self):
        self._check_stat_op('mean', np.mean)
    def test_prod(self):
        self._check_stat_op('prod', np.prod)
    def test_median(self):
        # np.median has no NaN handling; reference impl returns NaN instead.
        def wrapper(x):
            if isnull(x).any():
                return np.nan
            return np.median(x)
        self._check_stat_op('median', wrapper)
    def test_min(self):
        self._check_stat_op('min', np.min)
    def test_max(self):
        self._check_stat_op('max', np.max)
    def test_skew(self):
        try:
            from scipy.stats import skew
        except ImportError:
            raise nose.SkipTest("no scipy.stats.skew")
        # skew is undefined for fewer than 3 observations.
        def this_skew(x):
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)
        self._check_stat_op('skew', this_skew)
    # def test_mad(self):
    #     f = lambda x: np.abs(x - x.mean()).mean()
    #     self._check_stat_op('mad', f)
    def test_var(self):
        # Sample variance (ddof=1) needs at least two observations.
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.var(x, ddof=1)
        self._check_stat_op('var', alt)
    def test_std(self):
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.std(x, ddof=1)
        self._check_stat_op('std', alt)
    def test_sem(self):
        # Standard error of the mean: sample std scaled by sqrt(n).
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.std(x, ddof=1) / np.sqrt(len(x))
        self._check_stat_op('sem', alt)
    # def test_skew(self):
    #     from scipy.stats import skew
    #     def alt(x):
    #         if len(x) < 3:
    #             return np.nan
    #         return skew(x, bias=False)
    #     self._check_stat_op('skew', alt)
    def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
        """Compare obj.<name>(axis=i) against applying *alternative* per axis."""
        if obj is None:
            obj = self.panel4d
        # # set some NAs
        # obj.ix[5:10] = np.nan
        # obj.ix[15:20, -2:] = np.nan
        f = getattr(obj, name)
        if has_skipna:
            # Reference implementations: with and without NaN removal.
            def skipna_wrapper(x):
                nona = remove_na(x)
                if len(nona) == 0:
                    return np.nan
                return alternative(nona)
            def wrapper(x):
                return alternative(np.asarray(x))
            for i in range(obj.ndim):
                result = f(axis=i, skipna=False)
                assert_panel_equal(result, obj.apply(wrapper, axis=i))
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        for i in range(obj.ndim):
            result = f(axis=i)
            # Some reductions misbehave on old bottleneck versions; skip those.
            if not tm._incompat_bottleneck_version(name):
                assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
        # An out-of-range axis must raise.
        self.assertRaises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
@classmethod
def assert_panel4d_equal(cls, x, y):
assert_panel4d_equal(x, y)
def test_get_axis(self):
assert(self.panel4d._get_axis(0) is self.panel4d.labels)
assert(self.panel4d._get_axis(1) is self.panel4d.items)
assert(self.panel4d._get_axis(2) is self.panel4d.major_axis)
assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)
def test_set_axis(self):
new_labels = Index(np.arange(len(self.panel4d.labels)))
# TODO: unused?
# new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure propagate to potentially prior-cached items too
# TODO: unused?
# label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
self.assertNotIn('l1', self.panel4d._item_cache)
self.assertIs(self.panel4d.labels, new_labels)
self.panel4d.major_axis = new_major
self.assertIs(self.panel4d[0].major_axis, new_major)
self.assertIs(self.panel4d.major_axis, new_major)
self.panel4d.minor_axis = new_minor
self.assertIs(self.panel4d[0].minor_axis, new_minor)
self.assertIs(self.panel4d.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel4d._get_axis_number('labels'), 0)
self.assertEqual(self.panel4d._get_axis_number('items'), 1)
self.assertEqual(self.panel4d._get_axis_number('major'), 2)
self.assertEqual(self.panel4d._get_axis_number('minor'), 3)
def test_get_axis_name(self):
self.assertEqual(self.panel4d._get_axis_name(0), 'labels')
self.assertEqual(self.panel4d._get_axis_name(1), 'items')
self.assertEqual(self.panel4d._get_axis_name(2), 'major_axis')
self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')
def test_arith(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel4d.__add__,
self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
self.assertEqual(len(list(self.panel4d.iteritems())),
len(self.panel4d.labels))
def test_combinePanel4d(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = self.panel4d.add(self.panel4d)
self.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
|
nijel/weblate | weblate/accounts/migrations/0016_alter_auditlog_activity.py | Python | gpl-3.0 | 1,470 | 0 | # Generated by Django 3.2.4 on 2021-06-03 07:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens the choice list of the indexed
    # 20-character AuditLog.activity field.
    dependencies = [
        ("accounts", "0015_auto_20210512_1955"),
    ]
    operations = [
        migrations.AlterField(
            model_name="auditlog",
            name="activity",
            field=models.CharField(
                choices=[
                    ("auth-connect", "auth-connect"),
                    ("auth-disconnect", "auth-disconnect"),
                    ("autocreated", "autocreated"),
                    ("blocked", "blocked"),
                    ("connect", "connect"),
                    ("email", "email"),
                    ("failed-auth", "failed-auth"),
                    ("full_name", "full_name"),
                    ("invited", "invited"),
                    ("locked", "locked"),
                    ("login", "login"),
                    ("login-new", "login-new"),
                    ("password", "password"),
                    ("register", "register"),
                    ("removed", "removed"),
                    ("reset", "reset"),
                    ("reset-request", "reset-request"),
                    ("sent-email", "sent-email"),
                    ("tos", "tos"),
                    ("trial", "trial"),
                    ("username", "username"),
                ],
                db_index=True,
                max_length=20,
            ),
        ),
    ]
|
adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_cmd_line_script.py | Python | epl-1.0 | 9,548 | 0.00199 | # Tests command line execution of scripts
import unittest
import os
import os.path
import test.test_support
from test.script_helper import (run_python,
temp_dir, make_script, compile_script,
make_pkg, make_zip_script, make_zip_pkg)
from test.test_support import is_jython
verbose = test.test_support.verbose
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIdentical(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check population of magic variables
assertEqual(__name__, '__main__')
print '__file__==%r' % __file__
print '__package__==%r' % __package__
# Check the sys module
import sys
assertIdentical(globals(), sys.modules[__name__].__dict__)
print 'sys.argv[0]==%r' % sys.argv[0]
"""
def _make_test_script(script_dir, script_basename, source=test_source):
    # Thin wrapper: create a script, defaulting to the shared test template.
    return make_script(script_dir, script_basename, source)
def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                       source=test_source, depth=1):
    # Thin wrapper: build a zipped package, defaulting to the shared template.
    return make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                        source, depth)
# There's no easy way to pass the script directory in to get
# -m to work (avoiding that is the whole point of making
# directories and zipfiles executable!)
# So we fake it for testing purposes with a custom launch script
launch_source = """\
import sys, os.path, runpy
sys.path.insert(0, %s)
runpy._run_module_as_main(%r)
"""
def _make_launch_script(script_dir, script_basename, module_name, path=None):
    """Create a stub script that runs *module_name* via runpy.

    *path* is substituted verbatim into the launch template: by default an
    expression evaluated at run time, otherwise the repr of the given path.
    """
    path_expr = "os.path.dirname(__file__)" if path is None else repr(path)
    source = launch_source % (path_expr, module_name)
    return make_script(script_dir, script_basename, source)
class CmdLineTest(unittest.TestCase):
def _check_script(self, script_name, expected_file,
expected_argv0, expected_package,
*cmd_line_switches):
run_args = cmd_line_switches + (script_name | ,)
exit_code, data = run_python(*run_args)
if verbose:
print 'Output from test script %r:' % script_name
pr | int data
self.assertEqual(exit_code, 0)
printed_file = '__file__==%r' % expected_file
printed_argv0 = 'sys.argv[0]==%r' % expected_argv0
printed_package = '__package__==%r' % expected_package
if verbose:
print 'Expected output:'
print printed_file
print printed_package
print printed_argv0
self.assertIn(printed_file, data)
self.assertIn(printed_package, data)
self.assertIn(printed_argv0, data)
def _check_import_error(self, script_name, expected_msg,
*cmd_line_switches):
run_args = cmd_line_switches + (script_name,)
exit_code, data = run_python(*run_args)
if verbose:
print 'Output from test script %r:' % script_name
print data
print 'Expected output: %r' % expected_msg
self.assertIn(expected_msg, data)
def test_basic_script(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
self._check_script(script_name, script_name, script_name, None)
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_script_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(compiled_name, compiled_name, compiled_name, None)
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_directory(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
self._check_script(script_dir, script_name, script_dir, '')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_directory_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(script_dir, compiled_name, script_dir, '')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_directory_error(self):
with temp_dir() as script_dir:
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_zipfile(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, run_name, zip_name, '')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
compiled_name = compile_script(script_name)
zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, run_name, zip_name, '')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_zipfile_error(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'not_main')
zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_module_in_package(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, 'script')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script')
self._check_script(launch_name, script_name, script_name, 'test_pkg')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_module_in_package_in_zipfile(self):
with temp_dir() as script_dir:
zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name)
self._check_script(launch_name, run_name, run_name, 'test_pkg')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_module_in_subpackage_in_zipfile(self):
with temp_dir() as script_dir:
zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2)
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name)
self._check_script(launch_name, run_name, run_name, 'test_pkg.test_pkg')
def test_package(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, '__main__')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_script(launch_name, script_name,
script_name, 'test_pkg')
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def test_package_compiled(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, '__main__')
compil |
qvazzler/Flexget | flexget/plugins/urlrewrite/iptorrents.py | Python | mit | 6,258 | 0.003356 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote_plus
import re
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode
log = logging.getLogger('iptorrents')
CATEGORIES = {
# Movies
'Movie-all': 72,
'Movie-3D': 87,
'Movie-480p': 77,
'Movie-BD-R': 89,
'Movie-BD-Rip': 90,
'Movie-DVD-R': 6,
'Movie-HD-Bluray': 48,
'Movie-Kids': 54,
'Movie-MP4': 62,
'Movie-Non-English': 38,
'Movie-Packs': 68,
'Movie-XviD': 17,
# TV
'TV-all': 73,
'TV-Sports': 55,
'TV-480p': 78,
'TV-MP4': 66,
'TV-Non-English': 82,
'TV-Packs': 65,
'TV-Packs-Non-English': 83,
'TV-SD-x264': 79,
'TV-x264': 5,
'TV-XVID': 4,
'TV-Web-DL': 22
}
BASE_URL = 'http://iptorrents.com'
class UrlRewriteIPTorrents(object):
    """
    IpTorrents urlrewriter and search plugin.

    iptorrents:
      rss_key: xxxxxxxxx  (required)
      uid: xxxxxxxx       (required)
      password: xxxxxxxx  (required)
      category: HD

    Category is any combination of: all, Movie-3D, Movie-480p, Movie-3D,
    Movie-480p, Movie-BD-R, Movie-BD-Rip, Movie-DVD-R,
    Movie-HD-Bluray, Movie-Kids, Movie-MP4,
    Movie-Non-English, Movie-Packs, Movie-XviD,

    TV-all, TV-Sports, TV-480p, TV-MP4, TV-Non-English, TV-Packs,
    TV-Packs-Non-English, TV-SD-x264, TV-x264, TV-XVID, TV-Web-DL
    """

    schema = {
        'type': 'object',
        'properties': {
            'rss_key': {'type': 'string'},
            'uid': {'type': 'integer'},
            'password': {'type': 'string'},
            'category': one_or_more({
                'oneOf': [
                    {'type': 'integer'},
                    {'type': 'string', 'enum': list(CATEGORIES)},
                ]}),
        },
        'required': ['rss_key', 'uid', 'password'],
        'additionalProperties': False
    }

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True for site URLs that still need rewriting."""
        url = entry['url']
        # Direct .torrent download links are already final.
        if url.startswith(BASE_URL + '/download.php/'):
            return False
        if url.startswith(BASE_URL + '/'):
            return True
        return False

    # urlrewriter API
    def url_rewrite(self, task, entry):
        """Rewrite a search-page URL into the best matching torrent URL."""
        if 'url' not in entry:
            log.error("Didn't actually get a URL...")
        else:
            log.debug("Got the URL: %s" % entry['url'])
        if entry['url'].startswith(BASE_URL + '/t?'):
            # use search
            results = self.search(task, entry)
            if not results:
                raise UrlRewritingError("No search results found")
            # TODO: Search doesn't enforce close match to title, be more picky
            entry['url'] = results[0]['url']

    @plugin.internet(log)
    def search(self, task, entry, config=None):
        """
        Search for name from iptorrents.

        Returns a set of Entry objects with torrent metadata
        (seeds/leeches/size) filled in.
        """
        categories = config.get('category', 'all')
        # Make sure categories is a list
        if not isinstance(categories, list):
            categories = [categories]
        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c]
                      for c in categories]
        filter_url = '&'.join(('l' + str(c) + '=') for c in categories)
        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            query = quote_plus(query.encode('utf8'))
            url = "{base_url}/t?{filter}&q={query}&qf=".format(
                base_url=BASE_URL, filter=filter_url, query=query)
            log.debug('searching with url: %s' % url)
            req = requests.get(
                url, cookies={'uid': str(config['uid']), 'pass': config['password']})
            # BUG FIX: req.content is bytes on Python 3, so `str not in bytes`
            # raised TypeError; req.text is the decoded body and works on both
            # Python 2 and 3.
            if '/u/' + str(config.get('uid')) not in req.text:
                raise plugin.PluginError("Invalid cookies (user not logged in)...")
            soup = get_soup(req.content, parser="html.parser")
            torrents = soup.find('table', {'class': 'torrents'})
            for torrent in torrents.findAll('a', href=re.compile('\.torrent$')):
                # Renamed from `entry` to avoid shadowing the parameter.
                new_entry = Entry()
                new_entry['url'] = "{base}{link}?torrent_pass={key}".format(
                    base=BASE_URL, link=torrent['href'], key=config.get('rss_key'))
                new_entry['title'] = torrent.findPrevious("a", attrs={'class': 't_title'}).text
                seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text
                leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text
                new_entry['torrent_seeds'] = int(seeders)
                new_entry['torrent_leeches'] = int(leechers)
                new_entry['search_sort'] = torrent_availability(
                    new_entry['torrent_seeds'], new_entry['torrent_leeches'])
                size_text = torrent.findNext(text=re.compile('^([\.\d]+) ([GMK]?)B$'))
                # BUG FIX: previously a missing size cell crashed re.search
                # with TypeError(None); now the size is simply omitted.
                size = re.search('^([\.\d]+) ([GMK]?)B$', size_text) if size_text else None
                if size:
                    # Site reports decimal (SI) units; flexget stores MiB.
                    multiplier = {'G': 1000 ** 3, 'M': 1000 ** 2,
                                  'K': 1000, '': 1}[size.group(2)]
                    new_entry['content_size'] = int(
                        float(size.group(1)) * multiplier / 1024 ** 2)
                entries.add(new_entry)
        return entries
@event('plugin.register')
def register_plugin():
    # Registered under both the urlrewriter and search plugin groups.
    plugin.register(UrlRewriteIPTorrents, 'iptorrents', groups=['urlrewriter', 'search'], api_ver=2)
|
sunj1/my_pyforms | tutorials/1.SimpleExamples/SimpleExample5/SimpleExample5.py | Python | mit | 1,700 | 0.048235 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ | = "Development"
from __init__ import *
class SimpleExample5(BaseWidget):
    """Pyforms window demonstrating a tabbed formset layout and a main menu."""
    def __init__(self):
        super(SimpleExample5,self).__init__('Simple example 5')
        #Definition of the forms fields
        self._firstname = ControlText('First name', 'Default value')
        self._middlename = ControlText('Middle name')
        self._lastname = ControlText('Lastname name')
        self._fullname = ControlText('Full name')
        self._button = ControlButton('Press this button')
        # Layout: two tabs of name fields, then the button centred below
        # ('||' separates columns, '=' a horizontal divider).
        self._formset = [ {
            'Tab1':['_firstname','||','_middlename','||','_lastname'],
            'Tab2': ['_fullname']
            },
            '=',(' ','_button', ' ') ]
        #Define the window main menu using the property main menu
        self.mainmenu = [
            { 'File': [
                    {'Open': self.__dummyEvent},
                    '-',
                    {'Save': self.__dummyEvent},
                    {'Save as': self.__dummyEvent}
                ]
            },
            { 'Edit': [
                    {'Copy': self.__dummyEvent},
                    {'Past': self.__dummyEvent}
                ]
            }
        ]
    def __dummyEvent(self):
        # Placeholder handler shared by every menu entry (Python 2 print).
        print "Menu option selected"
##################################################################################################################
##################################################################################################################
##################################################################################################################
#Execute the application
if __name__ == "__main__": pyforms.startApp( SimpleExample5 )
|
Akuukis/MMO_sim | ships.py | Python | gpl-3.0 | 599 | 0.006678 | def main(tick, config, q):
return # Drafted, TODO
| pc = {
'id': 1,
'coords': [0, 0, 0]
}
ship = {
'coords': [0, 0, 0],
'type': 'freight' or 'settler' or 'corvette' or 'frigate', # Corvette small tank, Frigate big tank, guns similar
'storage': {
'goods': {
},
'solids': 0, # Upkeep for industry
'metals': 0, # For structure of skips
'isotopes': 0, # For guns of ships
'ammo': 1000 # For guns to shoo | t
}
}
pass
if __name__ == "__main__":
main() |
ButterFlyDevs/StudentsManagementSystem | SMS-Back-End/tdbms/appengine_config.py | Python | gpl-3.0 | 166 | 0.012121 | # -*- coding: utf-8 -*-
from google.appengine.ext import vendor
#Para que entienda que las librerías de terceros debe buscarlas | en la carpeta lib
vendor.a | dd('lib')
|
dit/dit | dit/shannon/__init__.py | Python | bsd-3-clause | 211 | 0 | """
The basic | forms of Shannon's information measures.
"""
from .shannon import (entropy,
| conditional_entropy,
mutual_information,
entropy_pmf)
|
jobsafran/mediadrop | mediadrop/model/settings.py | Python | gpl-3.0 | 4,208 | 0.002376 | # This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Settings Model
A very rudimentary settings implementation which is intended to store our
non-mission-critical options which can be edited via the admin UI.
.. todo:
Rather than fetch one option at a time, load all settings into an object
with attribute-style access.
"""
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.types import Unicode, UnicodeText, Integer, Boolean, Float
from sqlalchemy.orm import mapper, relation, backref, synonym, interfaces, validates
from urlparse import urlparse
from mediadrop.model.meta import DBSession, metadata
from mediadrop.plugin import events
settings = Table('settings', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False, unique=True),
Column('value', UnicodeText),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
multisettings = Table('settings_multi', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False),
Column('value', UnicodeText, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
class Setting(object):
    """A single key/value settings row, editable through the admin UI."""

    # Class-level query property bound to the shared session.
    query = DBSession.query_property()

    def __init__(self, key=None, value=None):
        # Falsy inputs (empty string, None) are normalized to None.
        self.key = key if key else None
        self.value = value if value else None

    def __repr__(self):
        return '<Setting: %s = %r>' % (self.key, self.value)

    def __unicode__(self):
        return self.value
class MultiSetting(object):
    """A settings row whose key may map to several values (one row each)."""

    # Class-level query property bound to the shared session.
    query = DBSession.query_property()

    def __init__(self, key=None, value=None):
        # Falsy inputs (empty string, None) are normalized to None.
        self.key = key if key else None
        self.value = value if value else None

    def __repr__(self):
        return '<MultiSetting: %s = %r>' % (self.key, self.value)

    def __unicode__(self):
        return self.value
mapper(Setting, settings, extension=events.MapperObserver(events.Setting))
mapper(MultiSetting, multisettings, extension=events.MapperObserver(events.MultiSetting))
def insert_settings(defaults):
    """Insert the given setting if they don't exist yet.

    XXX: Does not include any support for MultiSetting. This approach
    won't work for that. We'll need to use a migration script.

    :type defaults: list
    :param defaults: Key and value pairs
    :rtype: list
    :returns: Any settings that have just been created.
    """
    inserted = []
    try:
        # One query for all candidate keys, so we only insert the missing ones.
        settings_query = DBSession.query(Setting.key)\
            .filter(Setting.key.in_([key for key, value in defaults]))
        existing_settings = set(x[0] for x in settings_query)
    except ProgrammingError:
        # If we are running paster setup-app on a fresh database with a
        # plugin which tries to use this function every time the
        # Environment.loaded event fires, the settings table will not
        # exist and this exception will be thrown, but its safe to ignore.
        # The settings will be created the next time the event fires,
        # which will likely be the first time the app server starts up.
        return inserted
    for key, value in defaults:
        if key in existing_settings:
            continue
        # Nested transaction per row: a concurrent insert of the same key
        # only rolls back that one row, not the whole batch.
        transaction = DBSession.begin_nested()
        try:
            s = Setting(key, value)
            DBSession.add(s)
            transaction.commit()
            inserted.append(s)
        except IntegrityError:
            transaction.rollback()
    if inserted:
        DBSession.commit()
    return inserted
def fetch_and_create_multi_setting(key, value):
    """Return the MultiSetting row for (key, value), creating it if absent.

    A newly created row is added to the session but not committed here.
    """
    existing = MultiSetting.query\
        .filter(MultiSetting.key==key)\
        .all()
    for setting in existing:
        if setting.value == value:
            return setting
    new_setting = MultiSetting(key, value)
    DBSession.add(new_setting)
    return new_setting
|
RNAer/micronota | micronota/util.py | Python | bsd-3-clause | 9,506 | 0.000316 | r'''
Utility functionality
=====================
.. currentmodule:: micronota.util
This module (:mod:`micronota.util`) provides various utility functionality,
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# --------------------------------------------------------- | -------------------
from unittest import TestCase
from sqlite3 import connect
from logging import getLogger
from skbio import read, write, Sequence, DNA
logger = getLogger(__name__)
def convert(in_f, in_fmt, out_f, out_fmt):
    '''Convert a sequence file from one format to another, record by record.

    Parameters
    ----------
    in_f : str
        input file path
    in_fmt : str
        input file format
    out_f : str
        output file path
    out_fmt : str
        output file format
    '''
    for record in read(in_f, format=in_fmt):
        write(record, format=out_fmt, into=out_f)
def _filter_sequence_ids(in_fp, out_fp, ids, negate=False):
    '''Filter away the seqs with the specified IDs.

    Parameters
    ----------
    in_fp : str
        input fasta file path
    out_fp : str
        output fasta file path
    ids : collection of str
        sequence IDs to filter on
    negate : bool
        If True, keep ONLY the sequences whose IDs are in `ids` instead of
        removing them.  (Fix: this flag was previously accepted but ignored,
        so negate=True behaved identically to the default.)
    '''
    with open(out_fp, 'w') as out:
        for seq in read(in_fp, format='fasta', constructor=Sequence):
            listed = seq.metadata['id'] in ids
            # write when (not listed and not negate) or (listed and negate)
            if listed == negate:
                write(seq, format='fasta', into=out)
def _add_cds_metadata(seq_id, imd, cds_metadata):
'''Add metadata to all the CDS interval features.'''
for intvl in imd._intervals:
md = intvl.metadata
# this md is parsed from prodigal output, which
# has ID like "seq1_1", "seq1_2" for genes
idx = md['ID'].rsplit('_', 1)[1]
md['ID'] = '%s_%s' % (seq_id, idx)
if idx in cds_metadata:
md.update(cds_metadata[idx])
def filter_ident_overlap(df, pident=90, overlap=80):
    '''Filter away the hits using the same UniRef clustering standards.

    Parameters
    ----------
    df : ``pandas.DataFrame``
        it must have columns of 'pident', 'length', 'gaps', and 'slen'
        (fix: 'length' was missing from this list although the code uses it)
    pident : ``Numeric``
        minimal percentage of identity
    overlap : ``Numeric``
        minimal percentage of overlap for subject sequences.

    Returns
    -------
    ``pandas.DataFrame``
        The data frame only containing hits that pass the thresholds.
    '''
    select_id = df.pident >= pident
    # aligned residues = alignment length minus gap columns
    overlap_length = df.length - df.gaps
    select_overlap = overlap_length * 100 / df.slen >= overlap
    return df[select_id & select_overlap]
def check_seq(in_seq, in_fmt=None, discard=lambda s: len(s) < 500):
    '''Validate and filter input sequences.

    1. degap every sequence;
    2. drop sequences matching `discard` (default: shorter than 500);
    3. ensure every sequence has a unique ID.

    Parameters
    ----------
    in_seq : str or Iterable of ``Sequence`` objects
        input seq file path if it is a str
    in_fmt : str
        the format of seq file
    discard : callable
        a callable that applies on a ``Sequence`` and return a boolean

    Yields
    ------
    ``Sequence`` object

    Raises
    ------
    KeyError
        if any sequence has no ID
    ValueError
        if two sequences share an ID
    '''
    logger.info('Filter and validate input sequences')
    seen_ids = set()
    if isinstance(in_seq, str):
        # allow lowercase in DNA seq
        in_seq = read(in_seq, format=in_fmt, constructor=DNA, lowercase=True)
    for record in in_seq:
        record = record.degap()
        if discard(record):
            continue
        if in_fmt == 'genbank':
            # genbank records carry their name in the LOCUS line
            record.metadata['id'] = record.metadata['LOCUS']['locus_name']
        try:
            seq_id = record.metadata['id']
        except KeyError:
            raise KeyError('Ill input file format: at least one sequences do not have IDs.')
        if seq_id in seen_ids:
            raise ValueError(
                'Duplicate seq IDs in your input file: {}'.format(seq_id))
        seen_ids.add(seq_id)
        yield record
def filter_partial_genes(in_fp, out_fp, out_fmt='gff3'):
    '''Filter out partial genes from Prodigal predicted CDS.

    Drops every interval whose Prodigal "partial" tag is '01' or '10'
    (i.e. truncated at either end) and writes the rest back out as gff3.

    Parameters
    ----------
    in_fp : str
        input gff3 file
    out_fp : str
        output gff3 file
    out_fmt : str
        output format name; currently unused (output is always gff3)
    '''
    logger.info('filter out partial genes for genome %s' % in_fp)
    with open(out_fp, 'w') as out:
        for seq_id, imd in read(in_fp, format='gff3'):
            for partial_tag in ('01', '10'):
                for feature in imd.query(metadata={'partial': partial_tag}):
                    feature.drop()
            imd.write(out, seq_id=seq_id, format='gff3')
class _DBTest(TestCase):
def _test_eq_db(self, db1, db2):
'''Test if two database files have the same contents.'''
with connect(db1) as o, connect(db2) as e:
co = o.cursor()
ce = e.cursor()
# compare each table
for table, in e.execute(
"SELECT name FROM sqlite_master WHERE type='table'"):
co.execute('SELECT * FROM %s' % table)
ce.execute('SELECT * FROM %s' % table)
self.assertEqual(co.fetchall(), ce.fetchall())
def split(is_another, construct=None, ignore=None, **kwargs):
    '''Build a parser that splits a line stream into records.

    Parameters
    ----------
    is_another : callable
        Applied to each raw line; returns True when the line starts a
        new record.  Extra ``kwargs`` are forwarded to it.
    construct : callable (optional)
        Transforms each raw line before it is stored; lines are kept
        unchanged when it is ``None``.
    ignore : callable (optional)
        Lines for which it returns True are skipped entirely.  Disabled
        when ``None``.
    kwargs : dict
        optional keyword arguments forwarded to ``is_another``

    Returns
    -------
    function
        A generator function that takes a file-like object and yields one
        record (a list of lines) at a time.
    '''
    def parse(stream):
        record = []
        for raw_line in stream:
            if ignore is not None and ignore(raw_line):
                continue
            # a record-start line flushes whatever was accumulated so far
            if is_another(raw_line, **kwargs) and record:
                yield record
                record = []
            record.append(raw_line if construct is None else construct(raw_line))
        if record:
            yield record
    return parse
def split_head(line, is_head=lambda line: line.startswith('>')):
    '''Return True when `line` begins a new FASTA-style record.

    Intended as the record predicate for :func:`split`.

    Examples
    --------
    >>> split_head('>seq1')
    True
    >>> split_head('ATGC')
    False
    '''
    return bool(is_head(line))
class SplitterTail:
    r'''Record predicate that splits a line stream on a trailing delimiter.

    Designed for :func:`split`: records end with a delimiter line (for
    example "//" in GenBank files), so the line *after* a delimiter is
    the start of a new record.

    Call contract (matching :func:`split`'s expectations):
    returns False on a delimiter line, True on the first line after a
    delimiter, and None (falsy) on every other line.

    Parameters
    ----------
    is_tail : callable
        Returns True when a line concludes the current record.

    Examples
    --------
    >>> import io
    >>> f = io.StringIO("seq1\nAT\n//\nseq2\nATGC\n//\n")
    >>> gen = split(SplitterTail(lambda x: x == '//\n'),
    ...             construct=lambda x: x.strip())
    >>> list(gen(f))
    [['seq1', 'AT', '//'], ['seq2', 'ATGC', '//']]
    '''

    def __init__(self, is_tail):
        self.is_tail = is_tail
        self._flag = False

    def __call__(self, line):
        if self.is_tail(line):
            # remember that the record just closed; the delimiter itself
            # still belongs to the current record
            self._flag = True
            return False
        if self._flag:
            self._flag = False
            return True
        # ordinary interior line: implicit None (falsy)
|
plotly/python-api | packages/python/plotly/plotly/validators/layout/scene/zaxis/title/_text.py | Python | mit | 463 | 0.00216 | import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.S | tringValidator):
def __init__(
self, plotly_name="text", parent_name="layout.scene.zaxis.title", **kwargs
):
super(Tex | tValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
nburn42/tensorflow | tensorflow/python/kernel_tests/spacetodepth_op_test.py | Python | apache-2.0 | 13,957 | 0.007881 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class SpaceToDepthTest(test.TestCase):
  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
    """Run space_to_depth on `inputs` and assert the result equals `outputs`.

    Checks the NHWC layout on CPU and, when a GPU is available, both the
    NHWC and NCHW layouts on GPU (NCHW is exercised by transposing the
    input and transposing the output back before comparing).
    """
    input_nhwc = math_ops.cast(inputs, dtype)
    with self.test_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.space_to_depth(input_nhwc, block_size)
      self.assertAllEqual(x_tf.eval(), outputs)
    if test.is_gpu_available():
      with self.test_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.space_to_depth(input_nhwc, block_size)
        self.assertAllEqual(x_tf.eval(), outputs)
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.space_to_depth(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(output_nhwc.eval(), outputs)
def testBasic(self):
| x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4]]]]
self._testOne(x_np, block_size, x_out)
def testBasicFloat16(self):
x_np | = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4]]]]
self._testOne(x_np, block_size, x_out, dtype=dtypes.float16)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testLargerInput2x2(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered in depth. Here, larger block size.
def testLargerInput4x4(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 4
x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions AND for larger input depths.
# To make sure elements are properly interleaved in depth and ordered
# spatially.
def testDepthInterleavedLarge(self):
x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
def batch_output_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
def testBatchSize0(self):
block_size = 2
batch_size = 0
input_nhwc = array_ops.ones([batch_size, 4, 6, 3])
x_out = array_ops.ones([batch_size, 2, 3, 12])
with self.test_session(use_gpu=False):
# test NHWC (default) on CPU
x_tf = array_ops.space_to_depth(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
x_tf.eval()
if test.is_gpu_available():
with self.test_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.space_to_depth(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
x_tf.eval()
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
[[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
def testInputWrongDimMissingDepth(self):
# The input is missing the last dimension ("depth")
x_np = [[[1, 2], [3, 4]]]
block_size = 2
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
block_size = 2
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
block_size = 3
with self.assertRaises(Value |
ygol/odoo | addons/stock/models/product_strategy.py | Python | agpl-3.0 | 4,217 | 0.003083 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class RemovalStrategy(models.Model):
_name = 'product.removal'
_description = 'Removal Strategy'
name = fields.Char('Name', required=True)
method = fields.Char("Method", required=True, help="FIFO, LIFO...")
class StockPutawayRule(models.Model):
_name = 'stock.putaway.rule'
_order = 'sequence,product_id'
_description = 'Putaway Rule'
_check_company_auto = True
def _default_category_id(self):
if self.env.context.get('active_model') == 'product.category':
return self.env.context.get('active_id')
def _default_location_id(self):
if self.env.context.get('active_model') == 'stock.location':
return self.env.context.get('active_id')
def _default_product_id(self):
if self.env.context.get('active_model') == 'product.template' and self.env.context.get('active_id'):
product_template = self.env['product.template'].browse(self.env.context.get('active_id'))
product_template = product_template.exists()
if product_template.product_variant_count == 1:
return product_template.product_variant_id
elif self.env.context.get('active_model') == 'product.product':
return self.env.context.get('active_id')
def _domain_category_id(self):
active_model = self.env.context.get('active_model')
if active_model in ('product.template', 'product.product') and self.env.context.get('active_id'):
product = self.env[active_model].browse(self.env.context.get('active_id'))
product = product.exists()
if product:
return [('id', '=', product.categ_id.id)]
return []
def _domain_product_id(self):
domain = "[('type', '!=', 'service'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]"
if self.env.context.get('active_model') == 'product.template':
return [('product_tmpl_id', '=', self.env.context.get('active_id'))]
return domain
product_id = fields.Many2one(
'product.product', 'Product', check_company=True,
default=_default_product_id, domain=_domain_product_id, ondelete='cascade')
category_id = fields.Many2one('product.category', 'Product Category',
default=_default_category_id, domain=_domain_category_id, ondelete='cascade')
location_in_id = fields.Many2one(
'stock.location', 'When product arrives in', check_company=True,
domain="[('child_ids', '!=', False), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
default=_default_location_id, required=True, ondelete='cascade')
location_out_id = fields.Many2one(
'stock.location', 'Store to', check_company=True,
domain="[('id', 'child_of', location_in_id), ('id', '!=', location_in_id), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
required=True, ondelete='cascade')
sequence = fields.Integer('Priority', help="Give to the more specialized category, a higher priority to have them in top of the list.")
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda s: s.env.company.id, index=True)
@api.onchange('location_in_id')
def _onchange_location_in(self):
if self.location_out_id:
child_location_count = self.env['stock.location'].search_count([
('id', '=', self.location_out_id.id),
('id', 'child_of', self.location_in_id.id),
('id', '!=', self.location_in_id.id),
])
if not child_location_count:
self.location_out_id = None
def write(self, vals):
if 'company_id' in vals:
for rule in self:
if rule.company_id.id != vals['company_id']:
r | aise UserError(_("Changing the company of this record is forbidden at | this point, you should rather archive it and create a new one."))
return super(StockPutawayRule, self).write(vals)
|
timm/timmnix | pypy3-v5.5.0-linux64/lib-python/3/email/encoders.py | Python | mit | 2,185 | 0.001831 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
from base64 import encodebytes as _bencode
from quopri import encodestring as _encodestring
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
return enc.replace(b' ', b'=20')
def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also sets the Content-Transfer-Encoding header accordingly.
    """
    raw = msg.get_payload(decode=True)
    msg.set_payload(str(_bencode(raw), 'ascii'))
    msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also sets the Content-Transfer-Encoding header accordingly.
    """
    raw = msg.get_payload(decode=True)
    msg.set_payload(_qencode(raw))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    raw = msg.get_payload(decode=True)
    if raw is None:
        # There's no payload.  For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # Fast path: if encoding/decoding to ASCII succeeds the data is 7bit,
    # otherwise treat it as 8bit.
    try:
        raw.encode('ascii') if isinstance(raw, str) else raw.decode('ascii')
    except UnicodeError:
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # iso-2022-* is non-ASCII but encodes to a 7-bit representation
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            cte = '7bit'
        else:
            cte = '8bit'
    else:
        cte = '7bit'
    msg['Content-Transfer-Encoding'] = cte
def encode_noop(msg):
    """Do nothing.

    Placeholder encoder that leaves the payload and headers untouched.
    """
|
skymyyang/YouDaoWord | MysqlHelper.py | Python | gpl-2.0 | 913 | 0.040794 | #encoding=utf-8
import pymysql
import json
class MysqlHelper:
    """Helper for inserting scraped dictionary-word rows into MySQL."""

    @staticmethod
    def insert(word, asymbol, esymbol, explain, cizu, liju, xiangguancihui, aspoken, espoken):
        """Insert one word record into mfg_t_wordtest.

        The structured fields (explain, cizu, liju, xiangguancihui) are
        serialized to JSON before insertion; f_biaoji and f_type are
        hard-coded to 0.  Fix: the connection is now closed in a
        ``finally`` block, so it is no longer leaked when execute/commit
        raises.
        """
        # SECURITY NOTE(review): database credentials are hard-coded here;
        # they should be moved to configuration or environment variables.
        db = pymysql.connect(host="192.168.180.187", user="root", password="123456", db="lytest", charset="utf8")
        try:
            cursor = db.cursor()
            print(word.encode("utf8"))
            print("--------------------------------insert into mysql db")
            # Parameterized query: values are bound server-side, not interpolated.
            cursor.execute(
                "insert into mfg_t_wordtest (f_word,f_asymbol,f_esymbol,f_explain,f_cizu,f_liju,f_xiangguancihui,f_aspoken,f_espoken,f_biaoji,f_type) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,0,0)",
                (word, asymbol, esymbol,
                 "{" + json.dumps(explain, ensure_ascii=False, indent=2) + "}",
                 json.dumps(cizu, ensure_ascii=False, indent=2),
                 json.dumps(liju, ensure_ascii=False, indent=2),
                 json.dumps(xiangguancihui, ensure_ascii=False, indent=2),
                 aspoken, espoken))
            db.commit()
        finally:
            db.close()
|
Akson/RemoteConsolePlus3 | RemoteConsolePlus3/RCP3/DefaultParser.py | Python | lgpl-3.0 | 2,359 | 0.009326 | #Created by Dmytro Konobrytskyi, 2013 (github.com/Akson)
import logging
import json
import struct
import numpy as np
def ParseBinaryData(binaryData, binaryDataFormat, dimensions):
    """Decode packed binary data.

    Returns a scalar when the buffer holds exactly one element of
    `binaryDataFormat`; otherwise a NumPy array, reshaped to `dimensions`
    when given.
    """
    element_size = struct.calcsize(binaryDataFormat)
    element_count = len(binaryData) / element_size
    # Single element: unpack directly to a Python scalar
    if element_count == 1:
        return struct.unpack(binaryDataFormat, binaryData)[0]
    # Flat array
    if dimensions is None:
        return np.frombuffer(binaryData, binaryDataFormat)
    # Multi-dimensional array over the same buffer (zero-copy)
    return np.ndarray(shape=dimensions, dtype=binaryDataFormat, buffer=binaryData)
def ParseDimensionsString(dimensionsString):
    """Parse a dimensions spec like '(2x3)', '[4; 5]' or '2,3' into [2, 3]."""
    cleaned = dimensionsString.lower()
    # strip decoration: brackets, parentheses and whitespace
    for ch in '()[] ':
        cleaned = cleaned.replace(ch, '')
    # normalize every accepted separator ('x', ';') to a comma
    cleaned = cleaned.replace('x', ',').replace(';', ',')
    return [int(part) for part in cleaned.split(',')]
def ParseMessage(message):
    """Decode a raw message dict into {'Stream', 'Info', 'Data'}.

    The 'Data' field is interpreted according to Info['DataType']
    ('String' is the default, 'JSON' and 'Binary' are supported).
    Returns None when binary data arrives without a format description.
    """
    info = message["Info"]
    result = {"Stream": message["Stream"], "Info": info}
    data_type = info.get("DataType", "String")
    if data_type == "String":
        result["Data"] = message["Data"]
    if data_type == "JSON":
        parsed = json.loads(message["Data"])
        # a bare value is wrapped under "_Value"; unwrap it when present
        result["Data"] = parsed.get("_Value", parsed)
    if data_type == "Binary":
        if "BinaryDataFormat" not in info:
            logging.warning("Cannot parse binary data, no format data available")
            return None
        fmt = info["BinaryDataFormat"]
        dims = None
        if "Dimensions" in info:
            dims = ParseDimensionsString(info["Dimensions"])
        result["Data"] = ParseBinaryData(message["Data"], fmt, dims)
    return result
|
jgillis/casadi | test/python/mx.py | Python | lgpl-3.0 | 61,968 | 0.049832 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import casadi as c
from numpy import *
import unittest
from types import *
from helpers import *
from copy import deepcopy
scipy_available = True
try:
from scipy.sparse import csr_matrix
except:
scipy_available = False
def checkarray(self, zr, zt, name):
    """Assert that reference array `zr` equals computed 2-D array `zt`.

    A 1-D reference is first reshaped when `zt` is a single row or column
    of matching total size.  `name` labels the assertion messages.
    """
    if len(zr.shape) == 1 and (zt.shape[0] == 1 or zt.shape[1] == 1) and zr.shape[0] == zt.shape[0] * zt.shape[1]:
        zr = reshape(zr, (zt.shape))
    dim_msg = "%s dimension error. Got %s, expected %s" % (name, str(zt.shape), str(zr.shape))
    self.assertEqual(zt.shape[0], zr.shape[0], dim_msg)
    self.assertEqual(len(zt.shape), len(zr.shape), dim_msg)
    self.assertEqual(zt.shape[1], zr.shape[1], dim_msg)
    for r in range(zr.shape[0]):
        for c in range(zr.shape[1]):
            self.assertAlmostEqual(zt[r, c], zr[r, c], 10, "%s evaluation error. %s <-> %s" % (name, str(zt), str(zr)))
def checkMXoperations(self, ztf, zrf, name):
    """Apply `ztf` to a vertically stacked MX row vector and compare the
    evaluated result against `zrf` applied to the equivalent numpy array."""
    x = MX("x", 1, 3)
    stacked = vertcat([x * (row + 1) for row in range(8)])
    fcn = MXFunction([x], [ztf(stacked)])
    fcn.init()
    vals = [1, 2, 3]
    fcn.setInput(vals, 0)
    fcn.evaluate()
    zt = fcn.output(0).toArray()
    zr = array([[vals[0] * (row + 1), vals[1] * (row + 1), vals[2] * (row + 1)]
                for row in range(8)])
    checkarray(self, zrf(zr), zt, name)
    return (zt, zrf(zr))
def checkMXoperations2(self,ztf,zrf,name):
x = MX("x",3,1)
z = horzcat([x*i for i in range(8)])
f = MXFunction([x],[ztf(z)])
f.init()
L=[1,2,3]
f.setInput(L,0)
f.evaluate()
zt = f.output(0).toArray()
zr = array([[L[0]*i,L[1]*i,L[2]*i] for i in range(8)]).T
checkarray(self,zrf(zr),zt,name)
return zt
def checkMXoperations3(self,ztf,zrf,name):
  """Check a symbolic MX matrix operation against a numpy reference.

  Variant of checkMXoperations: the 1x3 row p is explicitly reassembled
  from the elements of a 3x1 symbolic column vector before being stacked
  into an 8x3 matrix, exercising element indexing in the expression graph.

  Args:
    ztf:  callable applied to the symbolic MX matrix.
    zrf:  callable applied to the numpy reference matrix.
    name: label used in assertion messages.
  Returns:
    Tuple (evaluated array, reference array).
  """
  # Reconstructed: this declaration line was corrupted in the source.
  x = MX("x",3,1)
  p = horzcat([x[0,0],x[1,0],x[2,0]])
  z = vertcat([p*i for i in range(8)])
  f = MXFunction([x],[ztf(z)])
  f.init()
  L=[1,2,3]
  f.setInput(L,0)
  f.evaluate()
  zt = f.output(0).toArray()
  # Row i of the reference is the input scaled by i, mirroring vertcat above.
  zr = array([[L[0]*i,L[1]*i,L[2]*i] for i in range(8)])
  checkarray(self,zrf(zr),zt,name)
  return (zt,zrf(zr))
class MXtests( | casadiTestCase):
  def setUp(self):
    """Populate the function pools used by the elementwise-operation tests.

    Each pool entry pairs an MX expression builder with a plain numpy
    reference implementation plus a label; 'nozero' flags mark functions
    that must not be sampled at zero.
    """
    # Unary elementwise operations: (MX builder, numpy reference, label[, flags]).
    self.pool=FunctionPool()
    self.pool.append(lambda x: sqrt(x[0]),sqrt,"sqrt")
    self.pool.append(lambda x: sin(x[0]),sin,"sin")
    self.pool.append(lambda x: cos(x[0]),cos,"cos")
    self.pool.append(lambda x: tan(x[0]),tan,"tan")
    self.pool.append(lambda x: arctan(x[0]),arctan,"arctan")
    self.pool.append(lambda x: arcsin(x[0]),arcsin,"arcsin")
    self.pool.append(lambda x: arccos(x[0]),arccos,"arccos")
    self.pool.append(lambda x: exp(x[0]),exp,"exp")
    self.pool.append(lambda x: log(x[0]),log,"log")
    self.pool.append(lambda x: x[0]**0,lambda x : x**0,"x^0",flags={'nozero'})
    self.pool.append(lambda x: x[0]**1,lambda x : x**1,"^1")
    self.pool.append(lambda x: x[0]**(-2),lambda x : x**(-2),"^-2",flags={'nozero'})
    self.pool.append(lambda x: x[0]**(0.3),lambda x : x**(0.3),"^0.3")
    self.pool.append(lambda x: floor(x[0]),floor,"floor")
    self.pool.append(lambda x: ceil(x[0]),ceil,"ceil")
    # Analytic Jacobians (as diagonal matrices) of the unary operations above.
    self.Jpool=FunctionPool()
    self.Jpool.append(lambda x: sqrt(x[0]),lambda x:diag(1/(2.0*sqrt(x))),"sqrt")
    self.Jpool.append(lambda x: sin(x[0]),lambda x:diag(cos(x)),"sin")
    self.Jpool.append(lambda x: cos(x[0]),lambda x:diag(-sin(x)),"cos")
    self.Jpool.append(lambda x: tan(x[0]),lambda x:diag(1.0/cos(x)**2),"tan")
    self.Jpool.append(lambda x: arctan(x[0]),lambda x:diag( 1.0/(x**2+1)),"arctan")
    self.Jpool.append(lambda x: arcsin(x[0]),lambda x:diag( 1.0/sqrt(1-x**2)),"arcsin")
    self.Jpool.append(lambda x: arccos(x[0]),lambda x: diag(-1.0/sqrt(1-x**2)),"arccos")
    self.Jpool.append(lambda x: exp(x[0]),lambda x: diag(exp(x)),"exp")
    self.Jpool.append(lambda x: log(x[0]),lambda x: diag(1.0/x),"log")
    self.Jpool.append(lambda x: x[0]**0,lambda x :diag(zeros(x.shape)),"x^0")
    self.Jpool.append(lambda x: x[0]**1,lambda x : diag(ones(x.shape)),"^1")
    self.Jpool.append(lambda x: x[0]**(-2),lambda x : diag(-2.0/x**3),"^-2")
    self.Jpool.append(lambda x: x[0]**(0.3),lambda x :diag( 0.3/x**0.7),"^0.3")
    # Matrix-level operations (norms currently disabled).
    self.matrixpool=FunctionPool()
    #self.matrixpool.append(lambda x: norm_2(x[0]),linalg.norm,"norm_2")
    #self.matrixpool.append(lambda x: norm_1(x[0]),lambda x: sum(sum(abs(x))),"norm_1")
    #self.matrixpool.append(lambda x: norm_inf(x[0]),lambda x: abs(matrix(x)).max(),"norm_inf")
    # Binary matrix operations: each entry takes a 2-element argument list.
    self.matrixbinarypool=FunctionPool()
    self.matrixbinarypool.append(lambda a: a[0]+a[1],lambda a: a[0]+a[1],"Matrix+Matrix")
    self.matrixbinarypool.append(lambda a: a[0]-a[1],lambda a: a[0]-a[1],"Matrix-Matrix")
    self.matrixbinarypool.append(lambda a: a[0]*a[1],lambda a: a[0]*a[1],"Matrix*Matrix")
    #self.matrixbinarypool.append(lambda a: inner_mul(a[0],trans(a[1])),lambda a: dot(a[0].T,a[1]),name="inner_mul(Matrix,Matrix)")
    self.matrixbinarypool.append(lambda a: mul(a[0],trans(a[1])),lambda a: dot(a[0],a[1].T),"mul(Matrix,Matrix.T)")
def test_indirection(self):
self.message("MXFunction indirection")
x=MX("x",2,1)
y=MX("y",2,1)
z=MX("z",2,1)
xn = array([2.3,1.3])
yn = array([7.3,4.6])
zn = array([12,7.4])
f=MXFunction([x,y,z],[x+2*y+3*z])
f.init()
self.message(":simple indirection")
g=MXFunction([x,y,z],f.call([x,y,z]))
g.init()
g.input(0).set(xn)
g.input(1).set(yn)
g.input(2).set(zn)
g.fwdSeed(0).set(xn) # okay, I'm just lazy comming up with more numbers
g.fwdSeed(1).set(yn)
g.fwdSeed(2).set(zn)
g.evaluate(1,0)
self.checkarray(g.output(),xn+2*yn+3*zn,"MXFunction indirection");
self.checkarray(g.fwdSens(),array([52.9,32.7]),"MXFunction indirection");
g=MXFunction([x,y,z],f.call([vertcat([x[0],x[1]]),y,z]))
g.init()
g.input(0).set(xn)
g.input(1).set(yn)
g.input(2).set(zn)
g.fwdSeed(0).set(xn)
g.fwdSeed(1).set(yn)
g.fwdSeed(2).set(zn)
g.evaluate(1,0)
self.checkarray(g.output(),xn+2*yn+3*zn,"MXFunction indirection");
self.checkarray(g.fwdSens(),array([52.9,32.7]),"MXFunction indirection");
self.message(":double output flipover")
h=MXFunction([x,y,z],f.call([vertcat([y[0],x[1]]),vertcat([x[0],y[1]]),z]))
h.init()
h=MXFunction([x,y,z],h.call([vertcat([y[0],x[1]]),vertcat([x[0],y[1]]),z]))
# h should be identical to g now
h.init()
h.input(0).set(xn)
h.input(1).set(yn)
h.input(2).set(zn)
h.fwdSeed(0).set(xn)
h.fwdSeed(1).set(yn)
h.fwdSeed(2).set(zn)
h.evaluate(1,0)
self.checkarray(h.output(),xn+2*yn+3*zn,"MXFunction indirection");
self.checkarray(h.fwdSens(),array([52.9,32.7]),"MXFunction indirection");
self.message(":double input flipover")
h=MXFunction([x,y,z],f.call([y,x,z]))
h.init()
h=MXFunction([x,y,z],h.call([y,x,z]))
h.init()
h.input(0).set(xn)
h.input(1).set(yn)
h.input(2).set(zn)
h.fwdSeed(0).set(xn)
h.fwdSeed(1).set(yn)
h.fwdSeed(2).set(zn)
h.evaluate(1,0)
self.checkarray(h.output(),xn+2*yn+3*zn,"MXFunction indirection");
self.checkarray(h.fwdSens(),array([52.9,32.7]),"MXFunction indirection");
return # uncomplete calls are not supported
self.message(":uncomple |
3DP-Unlimited/3DP-Printrun | printrun/gl/trackball.py | Python | gpl-3.0 | 2,746 | 0.004006 | #!/usr/bin/env python
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import math
from pyglet.gl import *
def cross(v1, v2):
    """Cross product of two 3-vectors, returned as a list [x, y, z]."""
    return [v1[1] * v2[2] - v1[2] * v2[1],
            v1[2] * v2[0] - v1[0] * v2[2],
            v1[0] * v2[1] - v1[1] * v2[0]]
def trackball(p1x, p1y, p2x, p2y, r):
    """Simulate a virtual trackball of radius r: return the rotation, as a
    quaternion [x, y, z, w], induced by dragging from (p1x, p1y) to
    (p2x, p2y) in normalized window coordinates."""
    TRACKBALLSIZE = r
    # No movement: identity rotation.
    if p1x == p2x and p1y == p2y:
        return [0.0, 0.0, 0.0, 1.0]
    # Lift both window points onto the trackball surface.
    p1 = [p1x, p1y, project_to_sphere(TRACKBALLSIZE, p1x, p1y)]
    p2 = [p2x, p2y, project_to_sphere(TRACKBALLSIZE, p2x, p2y)]
    # Rotation axis is perpendicular to both lifted points; argument order
    # (p2, p1) fixes the sense of rotation.
    a = cross(p2, p1)
    # Rotation angle from the chord length between the two surface points.
    d = map(lambda x, y: x - y, p1, p2)
    t = math.sqrt(sum(map(lambda x: x * x, d))) / (2.0 * TRACKBALLSIZE)
    # Clamp to asin's domain to guard against numerical overshoot.
    if t > 1.0:
        t = 1.0
    if t < -1.0:
        t = -1.0
    phi = 2.0 * math.asin(t)
    return axis_to_quat(a, phi)
def axis_to_quat(a, phi):
    """Build a unit quaternion [x, y, z, w] representing a rotation of
    phi radians about axis a (a 3-vector, not necessarily normalized)."""
    lena = math.sqrt(sum(x * x for x in a))
    # List comprehension instead of map(): on Python 3 map() returns an
    # iterator, which has no append() and would break the line below.
    q = [x * (1 / lena) * math.sin(phi / 2.0) for x in a]
    q.append(math.cos(phi / 2.0))
    return q
def build_rotmatrix(q):
    """Convert quaternion q = [x, y, z, w] into a 4x4 OpenGL rotation matrix
    (column-major GLdouble[16], suitable for glMultMatrixd).

    Two entries (m[2] and m[10]) were corrupted in the source and have been
    reconstructed from the standard quaternion-to-matrix formula, matching
    the symmetry of the surrounding entries."""
    m = (GLdouble * 16)()
    m[0] = 1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2])
    m[1] = 2.0 * (q[0] * q[1] - q[2] * q[3])
    m[2] = 2.0 * (q[2] * q[0] + q[1] * q[3])
    m[3] = 0.0

    m[4] = 2.0 * (q[0] * q[1] + q[2] * q[3])
    m[5] = 1.0 - 2.0 * (q[2] * q[2] + q[0] * q[0])
    m[6] = 2.0 * (q[1] * q[2] - q[0] * q[3])
    m[7] = 0.0

    m[8] = 2.0 * (q[2] * q[0] - q[1] * q[3])
    m[9] = 2.0 * (q[1] * q[2] + q[0] * q[3])
    m[10] = 1.0 - 2.0 * (q[1] * q[1] + q[0] * q[0])
    m[11] = 0.0

    m[12] = 0.0
    m[13] = 0.0
    m[14] = 0.0
    m[15] = 1.0
    return m
def project_to_sphere(r, x, y):
    """Lift window point (x, y) onto a sphere of radius r, blending into a
    hyperbolic sheet away from the center (the classic trackball mapping),
    and return the resulting z coordinate."""
    dist = math.sqrt(x * x + y * y)
    if dist < r * 0.70710678118654752440:
        # Inside r/sqrt(2): point lies on the sphere itself.
        return math.sqrt(r * r - dist * dist)
    # Outside: use the hyperbola z = (r/sqrt(2))^2 / d for a smooth falloff.
    t = r / 1.41421356237309504880
    return t * t / dist
def mulquat(q1, rq):
    """Hamilton product q1 * rq of two quaternions stored as [x, y, z, w]."""
    x1, y1, z1, w1 = q1
    x2, y2, z2, w2 = rq
    return [w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
            w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2]
|
jstraub/bnp | python/testLibhdp.py | Python | mit | 163 | 0.03681 | #! /usr/bin/env python
import numpy as np
import libhdp as bnp

# 4x4 matrix of unsigned 32-bit counts (currently unused below; kept for
# parity with the original script).
A = np.zeros((4, 4), dtype=np.uint32)
# 34-entry parameter vector passed to the Dir constructor — presumably the
# base-measure parameters of a Dirichlet; confirm against libhdp docs.
Aa = np.zeros(34)
Dir = bnp.Dir(Aa)
# Build an HDP over the Dir base with both concentration arguments set to 10.0.
bnp.HDP_Dir(Dir, 10.0, 10.0)
|
acostasg/upload | upload/tests/shared/test_open_file.py | Python | gpl-3.0 | 642 | 0.003115 | import unittest
import upload.injectionContainer as injectionContainer
from upload.strategy.dummys.injectedContainerDummy import ContainerMock
class TestRequestParams(unittest.TestCase):
    """
    Tests for the request-params / secured-upload behaviour.
    """

    def test_open_file_error(self):
        """
        open_file.execute must raise FileNotFoundError for a missing path
        when the dummy injection container is active.
        """
        # Install the test-double container before open_file is imported.
        injectionContainer.Container.update(
            ContainerMock().container()
        )

        # Imported only after the container is patched, so open_file resolves
        # its dependencies from the mock container.
        from upload.shared \
            import open_file

        with self.assertRaises(FileNotFoundError):
            open_file.execute('FailedTest', 'r')
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
|
scholer/cadnano2.5 | cadnano/views/gridview/prexovermanager.py | Python | mit | 9,797 | 0.001633 | """Summary
"""
from PyQt5.QtWidgets import QGraphicsRectItem
from . import gridstyles as styles
from .gridextras import PreXoverItemGroup, WEDGE_RECT
from . import (
GridNucleicAcidPartItemT,
GridVirtualHelixItemT
)
# Helix circle radius shared by all pre-crossover item groups in this view.
_RADIUS = styles.GRID_HELIX_RADIUS
class PreXoverManager(QGraphicsRectItem):
"""Summary
Attributes:
active_group (TYPE): Description
active_neighbor_group (TYPE): Description
groups (dict): Description
neighbor_pairs (tuple): Description
neighbor_prexover_items (dict): Description
part_item (TYPE): Description
prexover_item_map (dict): Description
virtual_helix_item (cadnano.views.gridview.virtualhelixitem.VirtualHelixItem): Description
"""
    def __init__(self, part_item: GridNucleicAcidPartItemT):
        """Initialize bookkeeping for pre-crossover item groups.

        Args:
            part_item: the grid part item this manager draws into (also used
                as the QGraphicsItem parent).
        """
        super(PreXoverManager, self).__init__(part_item)
        self.part_item = part_item
        self.virtual_helix_item = None
        self.active_group = None
        self.active_neighbor_group = None
        self.groups = {}
        # Maps (id_num, is_fwd, idx) to a tuple of
        # (PreXoverItem, PreXoverItemGroup, List[PreXoverItem]);
        # tracks connections between prexovers.
        self.prexover_item_map = {}
        self.neighbor_prexover_items = {}  # just a dictionary of neighbors
        self.neighbor_pairs = ()  # accounting for neighbor pairing
        self._active_items = []
    # end def
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
    def partItem(self) -> GridNucleicAcidPartItemT:
        """Accessor for the owning grid part item.

        Returns:
            The part item passed to the constructor (None after destroyItem()).
        """
        return self.part_item
    # end def
    def destroyItem(self):
        """Tear down all child pre-crossover items, drop references to the
        part/helix items, and remove this item from the scene."""
        # NOTE(review): debug print left in; consider switching to logging.
        print("destroying Grid PreXoverManager")
        self.deactivateNeighbors()
        self.clearPreXoverItemGroups()
        self.neighbor_pairs = None
        self.part_item = None
        self.virtual_helix_item = None
        self.scene().removeItem(self)
    # end def
    def clearPreXoverItemGroups(self):
        """Destroy every neighbor group and the active group, reset all
        connection bookkeeping, and restore the active helix's z-value."""
        groups = self.groups
        # popitem-and-destroy empties self.groups in place.
        while groups:
            k, item = groups.popitem()
            item.destroyItem()
        if self.active_group is not None:
            self.active_group.destroyItem()
            self.active_group = None
        self._active_items = []
        self.prexover_item_map = {}
        self.neighbor_prexover_items = {}
        if self.virtual_helix_item is not None:
            self.virtual_helix_item.setZValue(styles.ZGRIDHELIX)
    # end def
    def hideGroups(self):
        """Destroy all pre-crossover groups and detach from the current
        virtual helix item."""
        self.clearPreXoverItemGroups()
        # NOTE(review): clearPreXoverItemGroups() empties self.groups and sets
        # self.active_group to None, so the hide() calls below appear to be
        # unreachable dead code — confirm intent before relying on them.
        if self.active_group is not None:
            self.active_group.hide()
        for group in self.groups.values():
            group.hide()
        self.virtual_helix_item = None
    # end def
    def activateVirtualHelix(self, virtual_helix_item: GridVirtualHelixItemT,
                             idx: int,
                             per_neighbor_hits,
                             pairs):
        """Create PreXoverItemGroups for the active virtual_helix_item and its
        neighbors and connect the neighboring bases.

        Args:
            virtual_helix_item: the helix item being activated.
            idx: the base index within the virtual helix.
            per_neighbor_hits: mapping of neighbor id_num to a pair of
                (forward, reverse) axis hit lists; each hit is
                (idx, fwd_idxs, rev_idxs).
            pairs: (forward, reverse) neighbor pairing accounting, stored on
                self.neighbor_pairs.
        """
        # Start from a clean slate; any previously active groups are destroyed.
        self.clearPreXoverItemGroups()
        pxis = self.prexover_item_map
        neighbor_pxis_dict = self.neighbor_prexover_items  # for avoiding duplicates)
        self.neighbor_pairs = pairs
        self.virtual_helix_item = virtual_helix_item
        part_item = self.part_item
        groups = self.groups
        self.active_group = agroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT,
                                                       virtual_helix_item, True)
        id_num = virtual_helix_item.idNum()
        # Raise the active helix above its neighbors while activated.
        virtual_helix_item.setZValue(styles.ZGRIDHELIX + 10)
        fwd_st_type, rev_st_type = True, False  # for clarity in the call to constructors
        for neighbor_id, hits in per_neighbor_hits.items():
            nvhi = part_item.idToVirtualHelixItem(neighbor_id)
            ngroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT, nvhi, False)
            groups[neighbor_id] = ngroup
            fwd_axis_hits, rev_axis_hits = hits
            # n_step_size = nvhi.getProperty('bases_per_repeat')
            # Forward-strand hits on the active helix: record the active item
            # plus the neighbor items it may cross to.
            for idx, fwd_idxs, rev_idxs in fwd_axis_hits:
                neighbor_pxis = []
                # print((id_num, fwd_st_type, idx))
                pxis[(id_num, fwd_st_type, idx)] = (agroup.getItemIdx(fwd_st_type, idx),
                                                    ngroup,
                                                    neighbor_pxis
                                                    )
                for j in fwd_idxs:
                    nkey = (neighbor_id, fwd_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(fwd_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
                for j in rev_idxs:
                    nkey = (neighbor_id, rev_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(rev_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
            # Reverse-strand hits: same bookkeeping with strand types swapped.
            for idx, fwd_idxs, rev_idxs in rev_axis_hits:
                neighbor_pxis = []
                # print((id_num, rev_st_type, idx))
                pxis[(id_num, rev_st_type, idx)] = (agroup.getItemIdx(rev_st_type, idx),
                                                    ngroup,
                                                    neighbor_pxis
                                                    )
                for j in fwd_idxs:
                    nkey = (neighbor_id, fwd_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(fwd_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
                for j in rev_idxs:
                    nkey = (neighbor_id, rev_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(rev_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
        # end for per_neighbor_hits
    # end def
def activateNeighbors(self, id_num: int, is_fwd: bool, idx: int):
"""Summary
Args:
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
is_fwd: ``True`` if ``fwd`` (top) strand, ``False`` if ``rev`` (bottom) strand
idx: the base index within the virtual helix
Returns:
TYPE: Description
Raises:
ValueError: Description
"""
# print("ACTIVATING neighbors", id_num, idx)
if self.active_group is None:
return
agroup = self.active_group
if id_num != agroup.id_num:
raise ValueError("not active id_num {} != {}".format(id_num,
agroup.id_num))
active_items = self._active_items
item = self.prexover_item_map.get((id_num, is_fwd, idx))
if item is None:
apxi = agroup.getItemIdx(is_fwd, idx)
apxi.setActive5p(True) if is_fwd else apxi.setActive3p(True)
agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
active_items.append(apxi)
else:
apxi, npxig, neighbor_list = item
pairs = self.neighbor_pairs[0] if is_fwd else self.neighbor_pairs[1] |
OrDuan/cool_commits | cool_commits/api.py | Python | apache-2.0 | 368 | 0 | from cool_commits.parsers import all_parsers
from cool_commits.utils import git_all_commits
def find(path):
    """Lazily yield the string form of each registered parser's result for
    the git repository at *path* (one item per parser in all_parsers)."""
    commits_list = git_all_commits(path)
    for parser in all_parsers:
        yield str(parser(commits_list, path))
def info(path):
    """Lazily yield each registered parser's .info() result for the git
    repository at *path* (one item per parser in all_parsers)."""
    commits_list = git_all_commits(path)
    for parser in all_parsers:
        yield parser(commits_list, path).info()
|
CtheSky/pycparser | tests/test_c_generator.py | Python | bsd-3-clause | 7,444 | 0.000672 | import sys
import unittest
# Run from the root dir
sys.path.insert(0, '.')
from pycparser import c_parser, c_generator, c_ast
# Shared C parser instance for all tests; table optimization is disabled so
# the parser tables are rebuilt from the grammar on every run.
_c_parser = c_parser.CParser(
    lex_optimize=False,
    yacc_debug=True,
    yacc_optimize=False,
    yacctab='yacctab')
def compare_asts(ast1, ast2):
    """Recursively compare two pycparser-style ASTs for structural equality.

    Nodes are equal when they have the same concrete type, equal attributes
    (per attr_names), and pairwise-equal children. children() entries are
    (name, node) tuples, handled by the tuple branch below.

    Returns:
        True if the trees are equal, False otherwise.
    """
    if type(ast1) != type(ast2):
        return False
    if isinstance(ast1, tuple):
        # (name, node) child entry: names must match, then compare the nodes.
        if ast1[0] != ast2[0]:
            return False
        return compare_asts(ast1[1], ast2[1])
    for attr in ast1.attr_names:
        if getattr(ast1, attr) != getattr(ast2, attr):
            return False
    children1 = ast1.children()
    children2 = ast2.children()
    # The original zipped implicitly over ast1's children only, so extra
    # children in ast2 were silently ignored (and extra children in ast1
    # raised IndexError); compare the counts explicitly instead.
    if len(children1) != len(children2):
        return False
    return all(compare_asts(c1, c2) for c1, c2 in zip(children1, children2))
def parse_to_ast(src):
    """Parse C source text with the shared module-level parser and return
    the resulting pycparser AST."""
    return _c_parser.parse(src)
class TestFunctionDeclGeneration(unittest.TestCase):
    """Tests that CGenerator can emit code for individual FuncDecl subtrees."""

    class _FuncDeclVisitor(c_ast.NodeVisitor):
        # Collects the generated C text of every FuncDecl node visited.
        def __init__(self):
            self.stubs = []

        def visit_FuncDecl(self, node):
            gen = c_generator.CGenerator()
            self.stubs.append(gen.visit(node))

    def test_partial_funcdecl_generation(self):
        """Each declaration in the source should yield one generated stub."""
        src = r'''
            void noop(void);
            void *something(void *thing);
            int add(int x, int y);'''
        ast = parse_to_ast(src)
        v = TestFunctionDeclGeneration._FuncDeclVisitor()
        v.visit(ast)
        self.assertEqual(len(v.stubs), 3)
        self.assertTrue(r'void noop(void)' in v.stubs)
        self.assertTrue(r'void *something(void *thing)' in v.stubs)
        self.assertTrue(r'int add(int x, int y)' in v.stubs)
class TestCtoC(unittest.TestCase):
    """Round-trip tests: parse C source to an AST, regenerate C from the AST,
    reparse, and require the two ASTs to be structurally equal."""

    def _run_c_to_c(self, src):
        """Parse *src* and return the C code regenerated from its AST."""
        ast = parse_to_ast(src)
        generator = c_generator.CGenerator()
        return generator.visit(ast)

    def _assert_ctoc_correct(self, src):
        """ Checks that the c2c translation was correct by parsing the code
            generated by c2c for src and comparing the AST with the original
            AST.
        """
        src2 = self._run_c_to_c(src)
        self.assertTrue(compare_asts(parse_to_ast(src), parse_to_ast(src2)),
                        src2)

    def test_trivial_decls(self):
        self._assert_ctoc_correct('int a;')
        self._assert_ctoc_correct('int b, a;')
        self._assert_ctoc_correct('int c, b, a;')

    def test_complex_decls(self):
        self._assert_ctoc_correct('int** (*a)(void);')
        self._assert_ctoc_correct('int** (*a)(void*, int);')
        self._assert_ctoc_correct('int (*b)(char * restrict k, float);')
        self._assert_ctoc_correct('int test(const char* const* arg);')
        self._assert_ctoc_correct('int test(const char** const arg);')
        #s = 'int test(const char* const* arg);'
        #parse_to_ast(s).show()

    def test_ternary(self):
        self._assert_ctoc_correct('''
            int main(void)
            {
                int a, b;
                (a == 0) ? (b = 1) : (b = 2);
            }''')

    def test_casts(self):
        self._assert_ctoc_correct(r'''
            int main() {
                int b = (int) f;
                int c = (int*) f;
            }''')

    def test_initlist(self):
        self._assert_ctoc_correct('int arr[] = {1, 2, 3};')

    def test_exprs(self):
        self._assert_ctoc_correct('''
            int main(void)
            {
                int a;
                int b = a++;
                int c = ++a;
                int d = a--;
                int e = --a;
            }''')

    def test_statements(self):
        # note two minuses here
        self._assert_ctoc_correct(r'''
            int main() {
                int a;
                a = 5;
                ;
                b = - - a;
                return a;
            }''')

    # Renamed from a second 'test_casts' definition: the duplicate name
    # silently shadowed the first method, so only one of the two ever ran.
    def test_casts_in_expressions(self):
        self._assert_ctoc_correct(r'''
            int main() {
                int a = (int) b + 8;
                int t = (int) c;
            }
            ''')

    def test_struct_decl(self):
        self._assert_ctoc_correct(r'''
            typedef struct node_t {
                struct node_t* next;
                int data;
            } node;
            ''')

    def test_krstyle(self):
        self._assert_ctoc_correct(r'''
            int main(argc, argv)
            int argc;
            char** argv;
            {
                return 0;
            }
            ''')

    def test_switchcase(self):
        self._assert_ctoc_correct(r'''
        int main() {
            switch (myvar) {
                case 10:
                {
                    k = 10;
                    p = k + 1;
                    break;
                }
                case 20:
                case 30:
                    return 20;
                default:
                    break;
            }
        }
        ''')

    def test_nest_initializer_list(self):
        self._assert_ctoc_correct(r'''
        int main()
        {
            int i[1][1] = { { 1 } };
        }''')

    def test_nest_named_initializer(self):
        self._assert_ctoc_correct(r'''struct test
            {
                int i;
                struct test_i_t
                {
                    int k;
                } test_i;
                int j;
            };
            struct test test_var = {.i = 0, .test_i = {.k = 1}, .j = 2};
        ''')

    def test_expr_list_in_initializer_list(self):
        self._assert_ctoc_correct(r'''
        int main()
        {
            int i[1] = { (1, 2) };
        }''')

    def test_issue36(self):
        self._assert_ctoc_correct(r'''
            int main() {
            }''')

    def test_issue37(self):
        self._assert_ctoc_correct(r'''
            int main(void)
            {
              unsigned size;
              size = sizeof(size);
              return 0;
            }''')

    def test_issue83(self):
        self._assert_ctoc_correct(r'''
            void x(void) {
                int i = (9, k);
            }
            ''')

    def test_issue84(self):
        self._assert_ctoc_correct(r'''
            void x(void) {
                for (int i = 0;;)
                    i;
            }
            ''')

    def test_exprlist_with_semi(self):
        self._assert_ctoc_correct(r'''
            void x() {
                if (i < j)
                    tmp = C[i], C[i] = C[j], C[j] = tmp;
                if (i <= j)
                    i++, j--;
            }
            ''')

    def test_exprlist_with_subexprlist(self):
        self._assert_ctoc_correct(r'''
            void x() {
                (a = b, (b = c, c = a));
            }
            ''')

    def test_comma_operator_funcarg(self):
        self._assert_ctoc_correct(r'''
            void f(int x) { return x; }
            int main(void) { f((1, 2)); return 0; }
            ''')

    def test_comma_op_in_ternary(self):
        self._assert_ctoc_correct(r'''
            void f() {
                (0, 0) ? (0, 0) : (0, 0);
            }
            ''')

    def test_comma_op_assignment(self):
        self._assert_ctoc_correct(r'''
            void f() {
                i = (a, b, c);
            }
            ''')

    def test_pragma(self):
        self._assert_ctoc_correct(r'''
            #pragma foo
            void f() {
                #pragma bar
                i = (a, b, c);
            }
            ''')
if __name__ == "__main__":
unittest.main()
|
espdev/readthedocs.org | readthedocs/builds/views.py | Python | mit | 2,216 | 0.002256 | import logging
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from django.http import HttpResponsePermanentRedirect
from django.conf import settings
from django.core.urlresolvers import reverse
from readthedocs.builds.models import Build, Version
from readthedocs.builds.filters import BuildFilter
from readthedocs.projects.models import Project
from redis import Redis, ConnectionError
log = logging.getLogger(__name__)
|
class BuildBase(object):
    """Mixin supplying the Build queryset scoped to the project in the URL."""
    model = Build

    def get_queryset(self):
        """Resolve the project from the URL kwargs (404 if not accessible to
        the requesting user) and return its publicly visible builds."""
        self.project_slug = self.kwargs.get('project_slug', None)
        self.project = get_object_or_404(
            Project.objects.protected(self.request.user),
            slug=self.project_slug
        )
        queryset = Build.objects.public(user=self.request.user, project=self.project)
        return queryset
class BuildList(BuildBase, ListView):
    """List view of a project's builds with filter, version, and queue data."""

    def get_context_data(self, **kwargs):
        """Extend the template context with the project, a build filter, the
        currently active (unfinished) builds, visible versions, and a
        best-effort Celery queue length."""
        context = super(BuildList, self).get_context_data(**kwargs)

        # Renamed from `filter` to avoid shadowing the builtin; the template
        # context key stays 'filter' for backward compatibility.
        build_filter = BuildFilter(self.request.GET, queryset=self.get_queryset())
        active_builds = self.get_queryset().exclude(state="finished").values('id')

        context['project'] = self.project
        context['filter'] = build_filter
        context['active_builds'] = active_builds
        context['versions'] = Version.objects.public(user=self.request.user, project=self.project)

        try:
            redis = Redis.from_url(settings.BROKER_URL)
            context['queue_length'] = redis.llen('celery')
        except ConnectionError:
            # Queue length is informational only; an unreachable broker must
            # not break the page.
            context['queue_length'] = None

        return context
class BuildDetail(BuildBase, DetailView):
    """Detail view of a single build, looked up by the 'build_pk' URL kwarg."""
    pk_url_kwarg = 'build_pk'

    def get_context_data(self, **kwargs):
        """Add the resolved project to the template context."""
        context = super(BuildDetail, self).get_context_data(**kwargs)
        context['project'] = self.project
        return context
# Old build view redirects
def builds_redirect_list(request, project_slug):
    """Permanent (301) redirect from the old build-list URL to
    'builds_project_list'."""
    return HttpResponsePermanentRedirect(reverse('builds_project_list', args=[project_slug]))
def builds_redirect_detail(request, project_slug, pk):
    """Permanent (301) redirect from the old build-detail URL to
    'builds_detail'."""
    return HttpResponsePermanentRedirect(reverse('builds_detail', args=[project_slug, pk]))
|
mupif/mupif | examples/Example02-distrib/application2.py | Python | lgpl-3.0 | 3,766 | 0.002655 | import sys
import Pyro5
import logging
sys.path.extend(['..', '../..'])
import mupif as mp
log = logging.getLogger()
@Pyro5.api.expose
class Application2(mp.Model):
    """
    Simple application that accumulates the time steps mapped into it and
    exposes the running total as a PID_Time property.
    """
    def __init__(self, metadata=None):
        # Mutable default arguments are shared between calls; use a None
        # sentinel instead of `metadata={}`.
        if metadata is None:
            metadata = {}
        MD = {
            'Name': 'Simple application cummulating time steps',
            'ID': 'N/A',
            'Description': 'Cummulates time steps',
            'Version_date': '02/2019',
            'Physics': {
                'Type': 'Other',
                'Entity': 'Other'
            },
            'Solver': {
                'Software': 'Python script',
                'Language': 'Python3',
                'License': 'LGPL',
                'Creator': 'Borek',
                'Version_date': '02/2019',
                'Type': 'Summator',
                'Documentation': 'Nowhere',
                'Estim_time_step_s': 1,
                'Estim_comp_time_s': 0.01,
                'Estim_execution_cost_EUR': 0.01,
                'Estim_personnel_cost_EUR': 0.01,
                'Required_expertise': 'None',
                'Accuracy': 'High',
                'Sensitivity': 'High',
                'Complexity': 'Low',
                'Robustness': 'High'
            },
            'Inputs': [
                {'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time_step', 'Name': 'Time step',
                 'Description': 'Time step', 'Units': 's',
                 'Origin': 'Simulated', 'Required': True, "Set_at": "timestep", "ValueType": "Scalar"}],
            'Outputs': [
                {'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time', 'Name': 'Cummulative time',
                 'Description': 'Cummulative time', 'Units': 's', 'Origin': 'Simulated', "ValueType": "Scalar"}]
        }
        super().__init__(metadata=MD)
        self.updateMetadata(metadata)
        self.value = 0.0   # running sum of mapped time steps, in seconds
        self.count = 0.0   # number of steps accumulated so far
        # Most recently mapped time-step property; zero until set() is called.
        self.contrib = mp.ConstantProperty(
            value=0., propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=0*mp.U.s)

    def initialize(self, workdir='', metadata=None, validateMetaData=True, **kwargs):
        """Initialize the model; metadata defaults to an empty dict."""
        if metadata is None:
            metadata = {}
        super().initialize(workdir=workdir, metadata=metadata, validateMetaData=validateMetaData, **kwargs)

    def get(self, objectTypeID, time=None, objectID=""):
        """Return the accumulated time as a PID_Time constant property;
        raises mp.APIError for any other property ID."""
        md = {
            'Execution': {
                'ID': self.getMetadata('Execution.ID'),
                'Use_case_ID': self.getMetadata('Execution.Use_case_ID'),
                'Task_ID': self.getMetadata('Execution.Task_ID')
            }
        }
        if objectTypeID == mp.DataID.PID_Time:
            return mp.ConstantProperty(
                value=self.value, propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=time, metadata=md)
        else:
            raise mp.APIError('Unknown property ID')

    def set(self, obj, objectID=""):
        """Accept a PID_Time_step property as the next contribution; raises
        mp.APIError for any other property ID. Non-Property objects are
        silently ignored (as in the original implementation)."""
        if obj.isInstance(mp.Property):
            if obj.getPropertyID() == mp.DataID.PID_Time_step:
                # remember the mapped value
                self.contrib = obj
            else:
                raise mp.APIError('Unknown DataID')

    def solveStep(self, tstep, stageID=0, runInBackground=False):
        """Accumulate the currently mapped time-step value (converted to
        seconds) into the running total."""
        self.value = self.value+self.contrib.inUnitsOf(mp.U.s).getValue(tstep.getTime())
        self.count = self.count+1

    def getCriticalTimeStep(self):
        """The largest admissible time step: a fixed 1 second."""
        return 1.*mp.U.s

    def getAssemblyTime(self, tstep):
        """Inputs are requested at the time of the current step."""
        return tstep.getTime()

    def getApplicationSignature(self):
        """Human-readable identifier of this application."""
        return "Application2"
|
aronysidoro/django-livesettings | live/keyedcache/models.py | Python | bsd-3-clause | 2,736 | 0.003289 | import keyedcache
import logging
log = logging.getLogger(__name__)
class CachedObjectMixin(object):
    """Provides basic object keyedcache for any objects using this as a mixin.
    The class name of the object should be unambiguous.
    """
    def cache_delete(self, *args, **kwargs):
        """Delete this object's cache entry (and its child keys)."""
        key = self.cache_key(*args, **kwargs)
        log.debug("clearing cache for %s", key)
        keyedcache.cache_delete(key, children=True)

    def cache_get(self, *args, **kwargs):
        """Fetch this object's cached value; raises NotCachedError if absent."""
        key = self.cache_key(*args, **kwargs)
        return keyedcache.cache_get(key)

    def cache_key(self, *args, **kwargs):
        """Build the cache key from the class name, the instance, and any
        extra positional/keyword parts."""
        keys = [self.__class__.__name__, self]
        keys.extend(args)
        return keyedcache.cache_key(keys, **kwargs)

    def cache_reset(self):
        """Invalidate and immediately re-store this object in the cache."""
        self.cache_delete()
        self.cache_set()

    def cache_set(self, *args, **kwargs):
        """Store a value (defaulting to the object itself) under the key."""
        val = kwargs.pop('value', self)
        key = self.cache_key(*args, **kwargs)
        keyedcache.cache_set(key, value=val)

    def is_cached(self, *args, **kwargs):
        """Return True if a cache entry exists for this object's key."""
        return keyedcache.is_cached(self.cache_key(*args, **kwargs))
# Unused functions find_by_id, find_by_key, find_by_slug are coming from
# Satchmo but are currently unused also there.
def find_by_id(cls, groupkey, objectid, raises=False):
    """Look up a model instance by primary key, going through the cache.

    On a cache miss the object is loaded from the database and cached.
    Returns None when the object does not exist, unless raises is True,
    in which case cls.DoesNotExist is raised.
    """
    ob = None
    try:
        ob = keyedcache.cache_get(groupkey, objectid)
    except keyedcache.NotCachedError as e:
        try:
            ob = cls.objects.get(pk=objectid)
            # e.key is the key that missed; store under it for next time.
            keyedcache.cache_set(e.key, value=ob)

        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, objectid)
            if raises:
                raise cls.DoesNotExist

    return ob
def find_by_key(cls, groupkey, key, raises=False):
    """Look up a model instance by its 'key' field, going through the cache.

    On a cache miss the object is loaded from the database and cached.
    Returns None when the object does not exist, unless raises is True,
    in which case the DoesNotExist exception propagates.

    Two lines of this function were corrupted in the source and have been
    reconstructed from the parallel find_by_slug implementation below.
    """
    ob = None
    try:
        ob = keyedcache.cache_get(groupkey, key)
    except keyedcache.NotCachedError as e:
        try:
            ob = cls.objects.get(key__exact=key)
            # e.key is the key that missed; store under it for next time.
            keyedcache.cache_set(e.key, value=ob)

        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, key)
            if raises:
                raise

    return ob
def find_by_slug(cls, groupkey, slug, raises=False):
    """Look up a model instance by its 'slug' field, going through the cache.

    On a cache miss the object is loaded from the database and cached.
    Returns None when the object does not exist, unless raises is True,
    in which case the DoesNotExist exception propagates.
    """
    ob = None
    try:
        ob = keyedcache.cache_get(groupkey, slug)
    except keyedcache.NotCachedError as e:
        try:
            ob = cls.objects.get(slug__exact=slug)
            # e.key is the key that missed; store under it for next time.
            keyedcache.cache_set(e.key, value=ob)

        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, slug)
            if raises:
                raise

    return ob
|
beaker-project/beaker | Client/src/bkr/client/commands/cmd_job_cancel.py | Python | gpl-2.0 | 2,402 | 0.003747 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
bkr job-cancel: Cancel running Beaker jobs
==========================================
.. program:: bkr job-cancel
Synopsis
--------
:program:`bkr job-cancel` [--msg <message>] [*options*] <taskspec>...
Description
-----------
Specify one or more <taskspec> arguments to be cancelled.
The <taskspec> arguments follow the same format as in other :program:`bkr`
subcommands (for example, ``J:1234``). See :ref:`Specifying tasks <taskspec>`
in :manpage:`bkr(1)`.
Only jobs and recipe sets may be cancelled. It does not make sense to cancel
individual recipes within a recipe set, or tasks within a recipe, so Beaker
does not permit this.
Options
-------
.. option:: --msg <message>
Optionally you can provide a message describing the reason for the
cancellation. This message will be recorded against all outstanding tasks in
the cancelled recipe set, and will be visible in the Beaker web UI.
Common :program:`bkr` options are described in the :ref:`Options
<common-options>` section of :manpage:`bkr(1)`.
Exit status
-----------
Non-zero on error, otherwise zero.
Examples
--------
Cancel job 1234 with a helpful message::
bkr job-cancel --msg "Selected wrong distro, resubmitting job" J:1234
See also
--------
:manpage:`bkr(1)`
"""
from __future__ import print_function
from bkr.client import BeakerCommand
class Job_Cancel(BeakerCommand):
    """
    Cancel Jobs/Recipes
    """
    enabled = True

    def options(self):
        """Register the --msg option and the usage string.

        Two lines here and in run() were corrupted in the source and have
        been reconstructed (the option name and the taskspec check call).
        """
        self.parser.add_option(
            "--msg",
            default=None,
            help="Optional message to record as to why you cancelled",
        )
        self.parser.usage = "%%prog %s [options] [J:<id> | RS:<id> ...]" % self.normalized_name

    def run(self, *args, **kwargs):
        """Cancel each given taskspec, recording the optional message."""
        if len(args) < 1:
            self.parser.error('Please specify a taskspec to cancel')

        # Only jobs, recipe sets, and tasks may be cancelled.
        self.check_taskspec_args(args, permitted_types=['J', 'RS', 'T'])

        msg = kwargs.pop("msg", None)
        self.set_hub(**kwargs)
        for task in args:
            self.hub.taskactions.stop(task, 'cancel', msg)
            print('Cancelled %s' % task)
|
GeorgiaTechDHLab/TOME | topics/migrations/0012_auto_20170605_1314.py | Python | bsd-3-clause | 707 | 0.001414 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-05 13:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move the 'corpus' foreign key from Topic to YearTopicRank.

    The AddField operation's field line was corrupted in the source and has
    been reconstructed; the target app label 'news' is grounded in the
    migration's dependency on ('news', '0014_auto_20170605_1314').
    """

    dependencies = [
        ('news', '0014_auto_20170605_1314'),
        ('topics', '0011_auto_20170605_0507'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='topic',
            name='corpus',
        ),
        migrations.AddField(
            model_name='yeartopicrank',
            name='corpus',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='news.Corpus'),
        ),
    ]
|
demis001/bio_pieces | bio_pieces_old/sequence_concat.py | Python | gpl-2.0 | 10,368 | 0.016686 | #!/usr/bin/env python
##########################################################################
## Sequence Concat
## Author: Tyghe Vallard
## Release Date: 5/30/2012
## Version: 1.1
## Description:
## This script will merge all sequences with identical names in a
## file.
##
## Example:
## SampleFile1.fasta contents
## >identifier1
## AAAAAAAAAAAAAAAAAAAAAAAAA
## >identifier1
## TTTTTTTTTTTTTTTTTTTTTTTTT
## >identifier1
## GGGGGGGGGGGGGGGGGGGGGGGGG
## >identifier2
## CCCCCCCCCCCCCCCCCCCCCCCCC
## >identifier2
## AAAAAAAAAAAAAAAAAAAAAAAAA
##
## OUTPUT
## >identifier1
## AAAAAAAAAAAAAAAAAAAAAAAAATTTTTTTTTTTTTTTTTTTTTTTTTGGGGGGGGGGGGGGGGGGGGGGGGG
## >identifier2
## CCCCCCCCCCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAAAAAAAAAA
##
## VERSION HISTORY
## -----------------
## v1.1 - 6/08/2012
## - Added options for doing gisaid formatted files
## - Added support for different formatted required_segments
##########################################################################
import os
from optparse import OptionParser,OptionGroup
import sys
import cStringIO
from bio_pieces import fasta
from bio_pieces.fasta import UnknownIdentifierLineException
class SequenceConcat:
_fasta_file_path = None
_strip_chars = ""
_parsed_fasta = None
def __init__( self, fasta_file, file_type = 'genbank', strip_chars = "-" ):
self._fasta_file_path = fasta_file
self._strip_chars = strip_chars
if file_type == 'genbank':
try:
self._read_genbank()
except UnknownIdentifierLineException, e:
print "An unknown identifier line was encountered in the fasta file. Is this a genbank file? If so use --type genbank"
print e
sys.exit( 1 )
elif file_type == 'gisaid':
try:
self._read_gisaid()
except UnknownIdentifierLineException, e:
print "An unknown identifier line was encountered in the fasta file. Is this a gisaid file? If so use --type gisaid"
print e
sys.exit( 1 )
def _read_genbank( self ):
""" Reads the genbank file into a easy to work with dictionary """
self._parsed_fasta = fasta.read_genbank_fasta( self._fasta_file_path, self._strip_chars )
def _read_gisaid( self ):
""" Reads the gisaid file into a easy to work with dictionary """
self._parsed_fasta = fasta.read_gisaid_fasta( self._fasta_file_path, self._strip_chars )
def prune_missing( self, fasta_dict, segments_expected = None ):
"""
Prunes the dictionary of sequences so that only the sequences that have segments_expected
amount of segments are included.
Parameters:
fasta_dict - Dictionary form of a fasta file from pyWRAIRLib.parser.fasta functions
segments_expected - List of segments expected in what order(I.E [1,2,3,4,5,6,7,8] or ['PB2', 'PB1', 'PA', 'HA', 'NP', 'NA', 'MP', 'NS'])
Note:
This is an inplace operation
+++ Unit Test +++
# Try an inhouse easy test
>>> path = os.path.dirname( __file__ )
>>> s = SequenceConcat( os.path.join( path, '../example_files/example1.txt' ), 'genbank' )
>>> fasta = s.get_sequences()
>>> pruned_fasta = s.prune_missing( fasta, range( 1, 4 ) )
>>> print pruned_fasta
{'ident4': {'1': 'AAAAAAAAAAAAAAAAA', '3': 'TTTTTTTTTTTTTTTTT'}, 'ident5': {'1': 'AAAAAAAAAAAAAAAAA'}, 'ident3': {'1': 'AAAAAAAAAAAAAAAAA', '2': 'CCCCCCCCCCCCCCCCC'}}
>>> fasta != pruned_fasta
True
>>> len( fasta ) == 2
True
>>> s = SequenceConcat( os.path.join( path, '../example_files/example2.txt' ), 'gisaid' )
>>> fasta = s.get_sequences()
>>> pruned_fasta = s.prune_missing( fasta, ['A','B','C'] )
>>> print pruned_fasta
{'ident4': {'A': 'AAAAAAAAAAAAAAAAA', 'B': 'TTTTTTTTTTTTTTTTT'}, 'ident5': {'A': 'AAAAAAAAAAAAAAAAA'}, 'ident3': {'C': 'CCCCCCCCCCCCCCCCC', 'B': 'AAAAAAAAAAAAAAAAA'}}
>>> fasta != pruned_fasta
True
>>> len( fasta ) == 2
True
"""
# We will return a dictionary that contains the sequences that have missing segments
segments_missing = {}
# Delete any of the sequences names from the dictionary that
# do not have the required amount of segments
for seq_name in fasta_dict.keys():
if len( fasta_dict[seq_name] ) != len( segments_expected ):
# Copy the sequence
segments_missing[seq_name] = fasta_dict[seq_name]
# Delete it
del fasta_dict[seq_name]
return segments_missing
def get_sequences( self ):
"""
Return unmodified fasta dictionary
+++ Unit Tests +++
>>> s = SequenceConcat( '../example_files/example1.txt', 'genbank' )
>>> len( s.get_sequences() ) > 1
True
"""
| return self._parsed_fasta
def get_merged_sequences( self, prune = True, segments_required = [1,2,3,4,5,6,7,8] ):
"""
Returns a merged fasta formatted file
Set prune to false if you don't want to prune out the sequences that don't have
the correct amount of segments
+++ Unit Tests +++
>>> path = os.path.dirname( __f | ile__ )
>>> s = SequenceConcat( os.path.join( path, '../example_files/example1.txt' ), 'genbank' )
>>> print s.get_merged_sequences( True, range( 1, 4 ) )
>ident1
AAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCTTTTTTTTTTTTTTTTT
>ident2
AAAAAAAAAAAAAAAAATTTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCC
<BLANKLINE>
>>> s = SequenceConcat( '../example_files/WR2848N.fasta', 'genbank' )
>>> fh = open( '../example_files/WR2848N_merged.fasta' )
>>> WR2848N_manually_merged = fh.read()
>>> fh.close()
>>> s.get_merged_sequences( ) == WR2848N_manually_merged[:-1]
True
>>> path = os.path.dirname( __file__ )
>>> s = SequenceConcat( os.path.join( path, '../example_files/example2.txt' ), 'gisaid' )
>>> print s.get_merged_sequences( True, ['A','B','C'] )
>ident1
AAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCTTTTTTTTTTTTTTTTT
>ident2
AAAAAAAAAAAAAAAAATTTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCC
<BLANKLINE>
"""
# The fasta sequences
fasta_dict = self.get_sequences()
# Fast String writer
cs = cStringIO.StringIO()
# Return variable
output = None
# Will hold missing sequences
missing = None
# If the prune option is set then prune the sequences
if prune:
# Make sure that the segments_required is in the right format
# If string then split on ,
if type( segments_required ) == str:
segments_required = segments.required.split( ',' )
# If already a list then it is ok
elif type( segments_required ) == list:
pass
# Bail out if it gets here
else:
print( "Invalid value for required segments" )
sys.exit( -1 )
# Prune the dictionary
missing = self.prune_missing( fasta_dict, segments_required )
# Write a header to stderr
if len( missing ):
sys.stderr.write( "==================== Sequences Missing Segments ====================\n" )
#segments_required
# Loop through the missing segment sequences
for name, segments in missing.items():
|
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/strings/type_Params.py | Python | unlicense | 3,175 | 0.00315 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
# Encoding selectors for the string scan.
PARAMS_ENCODING_ASCII = 0
PARAMS_ENCODING_UNICODE = 1
# NOTE(review): presumably the smallest accepted 'threshold' value; nothing
# in this module enforces it -- confirm against the callers.
PARAMS_MIN_THRESHOLD = 2
class Params:
    """Parameter block for the file 'strings' command.

    Instances carry exactly the six fields listed in _FIELDS; reading or
    writing any other attribute raises AttributeError.
    """

    # The only attributes an instance may carry.
    _FIELDS = ('threshold', 'maximum', 'encoding', 'start', 'end', 'file')

    def __init__(self):
        # Write defaults straight into __dict__ so __setattr__ is bypassed.
        for field, default in (('threshold', 4),
                               ('maximum', 0),
                               ('encoding', PARAMS_ENCODING_ASCII),
                               ('start', 0),
                               ('end', 0),
                               ('file', '')):
            self.__dict__[field] = default

    def __getattr__(self, name):
        # Only reached when normal lookup fails (i.e. name not in __dict__).
        if name in Params._FIELDS:
            return self.__dict__[name]
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        if name in Params._FIELDS:
            self.__dict__[name] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)

    def Marshal(self, mmsg):
        """Pack every field into a sub-message stored under MSG_KEY_PARAMS.

        NOTE(review): the MSG_KEY_* constants are not defined in this
        decompiled module; presumably injected elsewhere -- confirm.
        """
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddU32(MSG_KEY_PARAMS_THRESHOLD, self.threshold)
        submsg.AddU32(MSG_KEY_PARAMS_MAXIMUM, self.maximum)
        submsg.AddU8(MSG_KEY_PARAMS_ENCODING, self.encoding)
        submsg.AddU64(MSG_KEY_PARAMS_START, self.start)
        submsg.AddU64(MSG_KEY_PARAMS_END, self.end)
        submsg.AddStringUtf8(MSG_KEY_PARAMS_FILE, self.file)
        mmsg.AddMessage(MSG_KEY_PARAMS, submsg)

    def Demarshal(self, dmsg, instance=-1):
        """Populate fields from the MSG_KEY_PARAMS sub-message of dmsg."""
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        # Every field except 'file' is optional: a failed lookup simply
        # leaves the current value in place (bare except kept on purpose).
        optional = (('threshold', submsg.FindU32, MSG_KEY_PARAMS_THRESHOLD),
                    ('maximum', submsg.FindU32, MSG_KEY_PARAMS_MAXIMUM),
                    ('encoding', submsg.FindU8, MSG_KEY_PARAMS_ENCODING),
                    ('start', submsg.FindU64, MSG_KEY_PARAMS_START),
                    ('end', submsg.FindU64, MSG_KEY_PARAMS_END))
        for field, finder, key in optional:
            try:
                self.__dict__[field] = finder(key)
            except:
                pass
        self.__dict__['file'] = submsg.FindString(MSG_KEY_PARAMS_FILE)
dsriram/golang | src/pkg/runtime/runtime-gdb.py | Python | bsd-3-clause | 11,121 | 0.029404 | # Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
import sys, re
# Announce the load on stderr so it is visible in gdb's console (Python 2
# print-chevron syntax; this script targets gdb's embedded Python 2).
print >>sys.stderr, "Loading Go Runtime support."
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
# Start from a clean pretty-printer list; printers are registered below.
goobjfile.pretty_printers = []
#
# Pretty Printers
#
class StringTypePrinter:
    """Pretty print Go strings ('struct string' values)."""

    pattern = re.compile(r'^struct string$')

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'string'

    def to_string(self):
        # 'str' points at the bytes; 'len' is the byte count.  Undecodable
        # bytes are silently dropped ("ignore").
        data = self.val['str']
        return data.string("utf-8", "ignore", int(self.val['len']))
class SliceTypePrinter:
    """Pretty print Go slices ('struct []T' values)."""

    pattern = re.compile(r'^struct \[\]')

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'array'

    def to_string(self):
        # Drop the 'struct' prefix from the type name (same slice as upstream).
        return str(self.val.type)[6:]

    def children(self):
        length = self.val["len"]
        if length > self.val["cap"]:
            # Inconsistent slice header -- show no elements at all.
            return
        base = self.val["array"]
        for idx in range(length):
            yield ('[%d]' % idx, (base + idx).dereference())
class MapTypePrinter:
    """Pretty print map[K]V types.

    Map-typed go variables are really pointers. dereference them in gdb
    to inspect their contents with this pretty printer.
    """
    pattern = re.compile(r'^struct hash<.*>$')
    def __init__(self, val):
        self.val = val
    def display_hint(self):
        return 'map'
    def to_string(self):
        return str(self.val.type)
    def children(self):
        # NOTE(review): the field meanings below mirror the runtime hmap
        # layout of this era (hashmap.c); confirm against the matching
        # runtime sources before relying on them.
        B = self.val['b']                     # presumably log2 of the bucket count
        buckets = self.val['buckets']
        oldbuckets = self.val['oldbuckets']   # non-nil while a grow is in progress
        flags = self.val['flags']
        inttype = self.val['hash0'].type
        cnt = 0
        # Walk every bucket; yield alternating key/value children, which is
        # the protocol gdb expects for display_hint() == 'map'.
        for bucket in xrange(2 ** B):
            bp = buckets + bucket
            if oldbuckets:
                # During growth, data may still live in the old (half-size)
                # bucket array; mask down to the old bucket index.
                oldbucket = bucket & (2 ** (B - 1) - 1)
                oldbp = oldbuckets + oldbucket
                oldb = oldbp.dereference()
                if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
                    if bucket >= 2 ** (B - 1): continue # already did old bucket
                    bp = oldbp
            # Follow the overflow chain of the chosen bucket.
            while bp:
                b = bp.dereference()
                for i in xrange(8):
                    if b['tophash'][i] != 0:   # slot occupied
                        k = b['keys'][i]
                        v = b['values'][i]
                        # flags bits 1/2: keys/values stored indirectly
                        # (as pointers) -- presumably; confirm in runtime.
                        if flags & 1:
                            k = k.dereference()
                        if flags & 2:
                            v = v.dereference()
                        yield '%d' % cnt, k
                        yield '%d' % (cnt + 1), v
                        cnt += 2
                bp = b['overflow']
class ChanTypePrinter:
    """Pretty print chan[T] types.

    Chan-typed go variables are really pointers. dereference them in gdb
    to inspect their contents with this pretty printer.
    """
    pattern = re.compile(r'^struct hchan<.*>$')
    def __init__(self, val):
        self.val = val
    def display_hint(self):
        return 'array'
    def to_string(self):
        return str(self.val.type)
    def children(self):
        # see chan.c chanbuf(). et is the type stolen from hchan<T>::recvq->first->elem
        et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
        # self.val.address is an hchan*; +1 advances past one whole hchan
        # header, landing on the element buffer, which is then viewed as et*.
        ptr = (self.val.address + 1).cast(et.pointer())
        for i in range(self.val["qcount"]):
            # The buffer is a ring: start at recvx and wrap at dataqsiz.
            j = (self.val["recvx"] + i) % self.val["dataqsiz"]
            yield ('[%d]' % i, (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
    """Build a matcher callable for *klass*.

    The returned function instantiates klass(val) when the string form of
    val.type matches klass.pattern, and returns None otherwise (including
    when val has no usable type at all).
    """
    def matcher(val):
        try:
            if klass.pattern.match(str(val.type)):
                return klass(val)
        except:
            # No .type attribute, un-stringable type, etc.: no match.
            pass
        return None
    return matcher
goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if hasattr(k, 'pattern')])
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.rtype'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.rtype'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
    """Return True/False when *val* has the field shape of a runtime iface;
    return None (implicitly) when the fields cannot be inspected at all."""
    try:
        tab_ok = str(val['tab'].type) == "struct runtime.itab *"
        data_ok = str(val['data'].type) == "void *"
        return tab_ok and data_ok
    except:
        pass
def is_eface(val):
    """Return True/False when *val* has the field shape of a runtime eface;
    return None (implicitly) when the fields cannot be inspected at all."""
    try:
        type_ok = str(val['_type'].type) == "struct runtime._type *"
        data_ok = str(val['data'].type) == "void *"
        return type_ok and data_ok
    except:
        pass
def lookup_type(name):
    """Best-effort resolution of *name* to a gdb type.

    Tries the bare name, then 'struct name', then -- assuming a leading
    '*' -- a pointer to 'struct name[1:]'.  Returns None when every
    attempt fails.
    """
    for candidate in (name, 'struct ' + name):
        try:
            return gdb.lookup_type(candidate)
        except:
            pass
    try:
        return gdb.lookup_type('struct ' + name[1:]).pointer()
    except:
        pass
_rctp_type = gdb.lookup_type("struct runtime.rtype").pointer()
def iface_commontype(obj):
    """Return the dereferenced runtime.rtype of an iface/eface value, or
    None when *obj* is neither kind of interface."""
    if is_iface(obj):
        type_ptr = obj['tab']['_type']
    elif is_eface(obj):
        type_ptr = obj['_type']
    else:
        return None
    return type_ptr.cast(_rctp_type).dereference()
def iface_dtype(obj):
    """Decode the gdb type of the data field of an eface or iface struct.

    Returns None when the dynamic type cannot be resolved.  Known issue
    (upstream): the name decoded from runtime.rtype is "nested.Foo" while
    the dwarf table lists it as "full/path/to/nested.Foo".
    """
    rtype = iface_commontype(obj)
    if rtype is None:
        return None
    dtype_name = rtype['string'].dereference()['str'].string()
    gdb_type = lookup_type(dtype_name)
    if gdb_type is None:
        return None
    # Values wider than a pointer are stored indirectly, so the data field
    # actually holds a pointer to the value.
    type_size = int(rtype['size'])
    uintptr_size = int(rtype['size'].type.sizeof)  # size is itself an uintptr
    if type_size > uintptr_size:
        gdb_type = gdb_type.pointer()
    return gdb_type
def iface_dtype_name(obj):
    """Decode the dynamic type *name* of an eface or iface struct, or
    return None when *obj* is not an interface value."""
    rtype = iface_commontype(obj)
    if rtype is None:
        return None
    return rtype['string'].dereference()['str'].string()
class IfacePrinter:
    """Pretty print interface values.

    Casts the data field to the appropriate dynamic type.
    """

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'string'

    def to_string(self):
        data = self.val['data']
        if data == 0:
            # Nil interface; upstream deliberately returns the integer 0 here.
            return 0x0
        try:
            target_type = iface_dtype(self.val)
        except:
            return "<bad dynamic type>"
        if target_type is None:
            # Lookup trouble -- fall back to "(type name)address".
            return "(%s)%s" % (iface_dtype_name(self.val), data)
        try:
            return data.cast(target_type).dereference()
        except:
            # Non-dereferenceable target: show the cast value itself.
            pass
        return data.cast(target_type)
def ifacematcher(val):
    # Only interface-shaped values (iface or eface) get the printer.
    if not (is_iface(val) or is_eface(val)):
        return None
    return IfacePrinter(val)
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
    """Length of strings, slices, maps or channels, as convenience
    function $len(obj) inside gdb."""

    # (printer class, struct field holding the length) pairs.
    how = ((StringTypePrinter, 'len'),
           (SliceTypePrinter, 'len'),
           (MapTypePrinter, 'count'),
           (ChanTypePrinter, 'qcount'))

    def __init__(self):
        super(GoLenFunc, self).__init__("len")

    def invoke(self, obj):
        typename = str(obj.type)
        field = next((fld for klass, fld in self.how
                      if klass.pattern.match(typename)), None)
        if field is not None:
            return obj[field]
class GoCapFunc(gdb.Function):
    """Capacity of slices or channels, as convenience function $cap(obj)
    inside gdb."""

    # (printer class, struct field holding the capacity) pairs.
    how = ((SliceTypePrinter, 'cap'),
           (ChanTypePrinter, 'dataqsiz'))

    def __init__(self):
        super(GoCapFunc, self).__init__("cap")

    def invoke(self, obj):
        typename = str(obj.type)
        field = next((fld for klass, fld in self.how
                      if klass.pattern.match(typename)), None)
        if field is not None:
            return obj[field]
class DTypeFunc(gdb.Function):
    """Cast Interface values to their dynamic type.

    For non-interface types this behaves as the identity operation.
    """

    def __init__(self):
        super(DTypeFunc, self).__init__("dtype")

    def invoke(self, obj):
        try:
            dtype = iface_dtype(obj)
            return obj['data'].cast(dtype)
        except:
            # Not an interface value: hand it back unchanged.
            return obj
#
# Commands
#
sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
def linked_list(ptr, linkfield):
    """Yield every node of an intrusive linked list, starting at *ptr* and
    following node[linkfield] until a falsy (nil) link is reached."""
    node = ptr
    while node:
        yield node
        node = node[linkfield]
class GoroutinesCmd(gdb.Command):
"List all goroutines."
def __init__(self):
super( |
thnuclub/kandota | spiderserver/crawl/spiderserver/spiders/dotamaxspider.py | Python | apache-2.0 | 1,374 | 0.007278 | # -*- coding: utf-8 -*-
import scrapy
from spiderserver.siteparse.parser import parse_dotamax_html
from scrapy.http import Request
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from spiderserver.items import DotamaxItem
class DotamaxSpider(scrapy.Spider):
    """Crawl dotamax.com replay-user listing pages for dota/dota2 videos."""

    name = "dotamaxspider"
    allowed_domains = (
        "dotamax.com",
    )
    start_urls = (
        'http://dotamax.com/video/users/',
    )
    # NOTE(review): `rules` only takes effect on CrawlSpider subclasses;
    # this spider inherits scrapy.Spider, so the rule below is inert and
    # pagination is driven manually in parse() -- confirm before removing.
    rules = (
        Rule(SgmlLinkExtractor(allow = r'http://dotamax\.com/video/users\d+'), callback = 'parse'),
    )
    # Current pagination index, shared across both game tags.
    page = 1
    download_delay = 2

    def parse(self, response):
        """Parse one listing page: yield one item per video entry and queue
        the next page for both game types until page 900 or an empty page."""
        lists = parse_dotamax_html(response.body)
        # Tag entries by game, inferred from the request URL.
        tag = 'dota'
        if response._url.find('dota2') >= 0:
            tag = 'dota2'
        for l in lists:
            l['tag'] = tag
            l['channel'] = 'dotamax'
            # Fix: the old code created an empty DotamaxItem() and then
            # immediately rebound the name to `l`, so the parsed dict was
            # what actually got yielded; yield it directly.
            yield l
        if len(lists) > 0 and self.page < 900:
            self.page = self.page + 1
            base = 'http://dotamax.com/video/users/'
            for o in ['dota', 'dota2']:
                href = "%s%s?type=&dm_uid=&p=%s" % (base, o, self.page)
                yield Request(url=href, cookies={}, callback=self.parse, errback=self.parse_fail)

    def parse_fail(self, response):
        # Failed page fetches are deliberately ignored (best-effort crawl).
        pass
jasvir99/LibreHatti | src/librehatti/catalog/lookups.py | Python | gpl-2.0 | 1,701 | 0.017637 | from django.db.models import Q
from django.utils.html import escape
from | django.contrib.auth.models import User
from ajax_select import LookupChannel
class BuyerLookup(LookupChannel):
    """
    This class suggests user names (AJAX Effect) while filling client name for a purchase order
    """
    model = User

    def get_query(self, q, request):
        """Return up to 15 users where every whitespace-separated term of
        *q* matches at least one of the name/title/company/address fields."""
        lookups = (
            'username__icontains',
            'first_name__icontains',
            'last_name__icontains',
            'customer__address__street_address__icontains',
            'customer__address__district__icontains',
            'customer__address__province__icontains',
            'customer__title__icontains',
            'customer__company__icontains',
        )
        user = User.objects.all()
        for term in q.split():
            # OR one icontains test per field; terms are ANDed by filtering
            # the queryset repeatedly.
            condition = None
            for lookup in lookups:
                term_q = Q(**{lookup: term})
                condition = term_q if condition is None else condition | term_q
            user = user.filter(condition)
        return user[0:15]

    def get_result(self, obj):
        # unicode() preserved: this module targets Python 2.
        return unicode(obj.username)

    def format_match(self, obj):
        return self.format_item_display(obj)

    def format_item_display(self, obj):
        """Render one suggestion row as HTML for the drop-down."""
        row = User.objects.values(
            'first_name', 'last_name', 'customer__title',
            'customer__address__street_address',
            'customer__address__district',
            'customer__company').filter(id = obj.id)[0]
        display_name = row['first_name'] + ' ' + row['last_name'] + ' ' + row['customer__title']
        return "<b>Name or Title:</b> %s <br> <b>Company:</b> %s <br> <b>Address:</b> %s <br> %s \
        <hr>" %(display_name,
            (row['customer__company']),
            (row['customer__address__street_address']),
            (row['customer__address__district']))
|
nicolaszein/chat | chat/migrations/0003_userprofile.py | Python | bsd-3-clause | 1,161 | 0.004307 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-06-08 11:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.3: creates the chat UserProfile model.
    # Fix: two lines below were corrupted by text-extraction artifacts
    # (stray ' | ' separators) that made the file a syntax error; the
    # field definitions have been reconstructed.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('chat', '0002_auto_20160605_2337'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(blank=True, max_length=255, null=True, upload_to='user_profile', verbose_name='Foto')),
                ('data_nascimento', models.DateField(verbose_name='Data Nascimento')),
                ('sexo', models.IntegerField(choices=[(1, 'Masculino'), (2, 'Feminino')], verbose_name='Sexo')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL, verbose_name='Usu\xe1rio')),
            ],
        ),
    ]
|
lento/cortex | test/IECore/ops/classVectorParameterTest/classVectorParameterTest-2.py | Python | bsd-3-clause | 2,288 | 0.020979 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
class classVectorParameterTest( IECore.Op ) :
	"""Trivial Op whose only purpose is to host a ClassVectorParameter so
	the test suite can exercise that parameter type."""

	def __init__( self ) :

		IECore.Op.__init__( self,
			".",
			IECore.IntParameter(
				name = "result",
				description = "",
				defaultValue = 0,
			)
		)

		# The parameter under test; member classes are looked up on the
		# IECORE_OP_PATHS search path.
		self.parameters().addParameter(
			IECore.ClassVectorParameter(
				name = "cv",
				description = "",
				searchPathEnvVar = "IECORE_OP_PATHS",
			)
		)

	def doOperation( self, operands ) :

		# The operation result is irrelevant to the parameter test.
		return IECore.IntData( 1 )

IECore.registerRunTimeTyped( classVectorParameterTest )
|
gnperumal/exscript | src/Exscript/util/template.py | Python | gpl-2.0 | 6,513 | 0.004299 | # Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Executing Exscript templates on a connection.
"""
from Exscript import stdlib
from Exscript.interpreter import Parser
def _compile(conn, filename, template, parser_kwargs, **kwargs):
    """Compile *template* with a Parser built from parser_kwargs, defining
    kwargs plus the built-in variables/functions, and return the result."""
    # Without a connection the built-in host/user variables are placeholders.
    hostname = 'undefined'
    username = None
    if conn:
        hostname = conn.get_host()
        account = conn.last_account
        if account is not None:
            # An empty account name is normalized to None, as before.
            username = account.get_name() or None

    parser = Parser(**parser_kwargs)
    parser.define(**kwargs)

    # Define the built-in variables and functions.
    script_name = filename or 'undefined'
    builtin = {'__filename__':   [script_name],
               '__username__':   [username],
               '__hostname__':   [hostname],
               '__connection__': conn}
    parser.define_object(**builtin)
    parser.define_object(**stdlib.functions)

    return parser.parse(template, script_name)
def _run(conn, filename, template, parser_kwargs, **kwargs):
    """Compile *template* (see _compile) and immediately execute it."""
    return _compile(conn, filename, template, parser_kwargs, **kwargs).execute()
def test(string, **kwargs):
    """
    Compile the given template, raising an exception on failure and doing
    nothing otherwise.

    @type  string: string
    @param string: The template to compile.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    """
    parser_args = {}
    _compile(None, None, string, parser_args, **kwargs)
def test_secure(string, **kwargs):
    """
    Like test(), but additionally requires every function used in the
    template to carry the Exscript.stdlib.util.safe_function decorator.
    Raises Exscript.interpreter.Exception.PermissionError if any function
    lacks the decorator.

    @type  string: string
    @param string: The template to compile.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    """
    parser_args = {'secure': True}
    _compile(None, None, string, parser_args, **kwargs)
def test_file(filename, **kwargs):
    """
    Convenience wrapper around test() that reads the template from a file
    instead.

    @type  filename: string
    @param filename: The name of the template file.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    """
    # Fix: close the file deterministically instead of leaking the handle
    # until garbage collection.
    with open(filename) as template_file:
        template = template_file.read()
    _compile(None, filename, template, {}, **kwargs)
def eval(conn, string, strip_command = True, **kwargs):
    """
    Compile the given template and execute it on the given connection.
    Raises an exception if the compilation fails.

    When strip_command is True, the first line of each response received
    after any command sent by the template is removed, so that the echoed
    command itself does not end up in extracted data.  For example, with::

        ls -1{extract /(\\S+)/ as filenames}

    the 'filenames' variable would otherwise begin with the echoed
    'ls -1' line.

    @type  conn: Exscript.protocols.Protocol
    @param conn: The connection on which to run the template.
    @type  string: string
    @param string: The template to compile.
    @type  strip_command: bool
    @param strip_command: Whether to strip the command echo from the response.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    @rtype:  dict
    @return: The variables that are defined after execution of the script.
    """
    return _run(conn, None, string, {'strip_command': strip_command}, **kwargs)
def eval_file(conn, filename, strip_command = True, **kwargs):
    """
    Convenience wrapper around eval() that reads the template from a file
    instead.

    @type  conn: Exscript.protocols.Protocol
    @param conn: The connection on which to run the template.
    @type  filename: string
    @param filename: The name of the template file.
    @type  strip_command: bool
    @param strip_command: Whether to strip the command echo from the response.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    """
    # Fix: close the file deterministically instead of leaking the handle
    # until garbage collection.
    with open(filename, 'r') as template_file:
        template = template_file.read()
    parser_args = {'strip_command': strip_command}
    return _run(conn, filename, template, parser_args, **kwargs)
def paste(conn, string, **kwargs):
    """
    Compile the given template and execute it on the given connection,
    without waiting for a prompt after each command.  Because no response
    is ever read, commands that need one (such as 'extract' or
    'set_prompt') cannot be used.

    Raises an exception if the compilation fails, or if the template
    contains a command that requires a response from the host.

    @type  conn: Exscript.protocols.Protocol
    @param conn: The connection on which to run the template.
    @type  string: string
    @param string: The template to compile.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    @rtype:  dict
    @return: The variables that are defined after execution of the script.
    """
    return _run(conn, None, string, {'no_prompt': True}, **kwargs)
def paste_file(conn, filename, **kwargs):
    """
    Convenience wrapper around paste() that reads the template from a file
    instead.

    @type  conn: Exscript.protocols.Protocol
    @param conn: The connection on which to run the template.
    @type  filename: string
    @param filename: The name of the template file.
    @type  kwargs: dict
    @param kwargs: Variables to define in the template.
    """
    # Fix: close the file deterministically instead of leaking the handle
    # until garbage collection.
    with open(filename, 'r') as template_file:
        template = template_file.read()
    return _run(conn, None, template, {'no_prompt': True}, **kwargs)
|
Alwnikrotikz/epubia | naver_autospacing.py | Python | mit | 1,159 | 0.012942 | maxline = 10000 # from web-page
# Endpoint of Naver Lab's automatic word-spacing service.
qurl = 'http://s.lab.naver.com/autospacing/?'
# POST payload template; 'query' is filled in per request.
qvalues = { "query": "",
            "result_type": "paragraph"
          }
# Spoof a browser Referer/User-Agent so the service accepts the request.
qheaders = {"Referer": qurl,
            "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)"
           }
import urllib, urllib2
import re
def _naver_autospacing(txt):
    """POST *txt* to the Naver autospacing service and return the respaced
    text (unicode), stripped of surrounding whitespace."""
    # The service expects UTF-8 text with CRLF line endings; the shared
    # payload template is updated in place, as before.
    qvalues['query'] = txt.encode('utf-8').replace('\n', '\r\n')
    request = urllib2.Request(qurl, urllib.urlencode(qvalues), qheaders)
    page = urllib2.urlopen(request).read().decode('utf-8')
    # Pull the corrected paragraph out of the result markup, turn <br>
    # back into newlines and drop every remaining tag.
    markup = re.compile('<div class="wrap_spacing2" style="clear:both;">(.*?)</div>', re.S).search(page).group(1)
    markup = re.compile('<br>').sub('\n', markup)
    text = re.compile('<[^<>]*>').sub('', markup)
    return text.strip()
def naver_autospacing(txt):
    """Public entry point for the autospacing service.

    TODO: the original author intended to warn when the input exceeds
    `maxline` lines (the service's per-request limit); that check was
    never implemented.
    """
    return _naver_autospacing(txt)
#################################################3
# main
if __name__ == "__main__":
import sys
txt = open(sys.argv[1], 'r').read()[3:].decode('utf-8')
txt2 = naver_autospacing(txt)
open(sys.argv[2], 'w').write(txt2.encode('utf-8'))
# vim:sts=4:et
|
hdzierz/Kaka | web/http_data_download_response.py | Python | gpl-2.0 | 1,477 | 0.004062 | from django.http import QueryDict, HttpResponse, HttpResponseRedirect
import gzip
class HttpDataDownloadResponse(HttpResponse):
    """HttpResponse that serves *data* as a downloadable report file,
    optionally gzip-compressing it first."""

    # Class-level defaults (fmt/gzipped appear unused by the methods below;
    # kept for backward compatibility).
    fmt = 'csv'
    gzipped = True
    data = None

    def __init__(self, data, report, fmt='csv', zipit=True):
        """
        data   -- the raw report payload; falsy data yields a 'No Data' body
        report -- base name for the download ('<report>.<fmt>[.gz]')
        fmt    -- file extension / format selector (see get_mime_type)
        zipit  -- when True, gzip the payload and serve the archive
        """
        if data:
            c_type, download = self.get_mime_type(fmt)
            fn = report + "." + fmt
            if zipit:
                # Write the compressed archive to /tmp, then read the
                # compressed bytes back to use as the response body.
                gz_path = '/tmp/' + fn + '.gz'
                with gzip.open(gz_path, 'wb') as f:
                    f.write(data)
                fn = fn + '.gz'
                c_type, download = self.get_mime_type('gzip')
                # Fix: the old code re-opened the archive with
                # gzip.open(path + '.gz', 'wb') -- wrong path, truncating
                # write mode -- and called f.read(fn) with a string size.
                # Read the archive back raw so the client gets the gzip
                # container itself.
                with open(gz_path, 'rb') as f:
                    data = f.read()
            super(HttpDataDownloadResponse, self).__init__(data, content_type=c_type)
            if download:
                self['Content-Disposition'] = 'attachment; filename="' + fn + '"'
        else:
            super(HttpDataDownloadResponse, self).__init__('No Data')

    def get_mime_type(self, ext):
        """Return (content type string, force-download flag) for *ext*.

        NOTE(review): the returned strings include a 'Content-type: '
        prefix, yet they are passed as the content_type kwarg above, which
        normally expects a bare MIME type -- confirm intended behaviour.
        """
        if ext == 'json':
            return 'Content-type: application/json', False
        elif ext == 'xml':
            return 'Content-type: application/xml', False
        elif ext == 'yaml':
            return 'Content-type: text/x-yaml', False
        elif ext == 'csv':
            return 'Content-type: text/csv', False
        elif ext == 'gzip':
            return 'Content-type: application/x-gzip', True
        return 'Content-type: application/octet-stream', True
|
factorlibre/carrier-delivery | delivery_carrier_tnt/model/__init__.py | Python | agpl-3.0 | 95 | 0 | from . import tnt_config
fro | m . im | port delivery
from . import stock
from . import carrier_file
|
moyogo/vanilla | Lib/vanilla/__init__.py | Python | mit | 2,699 | 0.001853 | from vanilla.vanillaBase import VanillaBaseObject, VanillaBaseControl, VanillaError
from vanilla.vanillaBox import Box, HorizontalLine, VerticalLine
from vanilla.vanillaBrowser import ObjectBrowser
from vanilla.vanillaButton import Button, SquareButton, ImageButton, HelpButton
from vanilla.vanillaCheckBox import CheckBox
from vanilla.vanillaColorWell import ColorWell
from vanilla.vanillaComboBox import ComboBox
from vanilla.vanillaDatePicker import DatePicker
from vanilla.vanillaDrawer import Drawer
from vanilla.vanillaEditText import EditText, SecureEditText
from vanilla.vanillaGradientButton import GradientButton
from vanilla.vanillaGroup import Group
from vanilla.vanillaImageView import ImageView
from vanilla.vanillaLevelIndicator import LevelIndicator, LevelIndicatorListCell
from vanilla.vanillaList import List, CheckBoxListCell, SliderListCell, PopUpButtonListCell, ImageListCell, SegmentedButtonListCell
from vanilla.vanillaPathControl import PathControl
from vanilla.vanillaPopUpButton import PopUpButton, ActionButton
from vanilla.vanillaProgressBar import ProgressBar
from vanilla.vanillaProgressSpinner import ProgressSpinner
from vanilla.vanillaRadioGroup import RadioGroup
from vanilla.vanill | aScrollView import ScrollView
from vanilla.vanillaSearchBox import SearchBox
from vanilla.vanillaSegmentedButton import Segme | ntedButton
from vanilla.vanillaSlider import Slider
from vanilla.vanillaSplitView import SplitView, SplitView2
from vanilla.vanillaTabs import Tabs
from vanilla.vanillaTextBox import TextBox
from vanilla.vanillaTextEditor import TextEditor
from vanilla.vanillaWindows import Window, FloatingWindow, HUDFloatingWindow, Sheet
# Explicit public API: the names re-exported by "from vanilla import *",
# mirroring the imports above.
__all__ = [
    "VanillaBaseObject", "VanillaBaseControl", "VanillaError",
    "Box", "HorizontalLine", "VerticalLine",
    "Button", "SquareButton", "ImageButton", "HelpButton",
    "CheckBox",
    "ColorWell",
    "ComboBox",
    "DatePicker",
    "Drawer",
    "EditText",
    "GradientButton",
    "Group",
    "ImageView",
    "LevelIndicator", "LevelIndicatorListCell",
    "List", "CheckBoxListCell", "SliderListCell", "PopUpButtonListCell", "ImageListCell", "SegmentedButtonListCell",
    "ObjectBrowser",
    "PathControl",
    "PopUpButton", "ActionButton",
    "ProgressBar",
    "ProgressSpinner",
    "RadioGroup",
    "ScrollView",
    "SearchBox",
    "SecureEditText",
    "SegmentedButton",
    "Slider",
    "SplitView",
    "SplitView2",
    "Tabs",
    "TextBox",
    "TextEditor",
    "Window", "FloatingWindow", "HUDFloatingWindow", "Sheet"
]
__version__ = "0.1"
# OS 10.7 objects
try:
from vanilla.vanillaPopover import Popover
__all__.append("Popover")
except (ImportError, NameError):
pass
|
owtf/owtf | owtf/plugins/web/grep/Credentials_transport_over_an_encrypted_channel@OWTF-AT-001.py | Python | bsd-3-clause | 1,830 | 0.005464 | """
GREP Plugin for Credentials transport over an encrypted channel (OWASP-AT-001)
https://www.owasp.org/index.php/Testing_for_credentials_transport_%28OWASP-AT-001%29
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
import logging
DESCRIPTION = "Searches transaction DB for credentials protections"
def run(PluginInfo):
# TODO: Needs fix | ing
# Content = "This plugin looks for password fields and then checks the URL (i.e. http vs. https)<br />"
# Conten | t += "Uniqueness in this case is performed via URL + password field"
# # This retrieves all hidden password fields found in the DB response bodies:
# Command, RegexpName, Matches = ServiceLocator.get_component("transaction").GrepMultiLineResponseRegexp(ServiceLocator.get_component("config").Get('RESPONSE_REGEXP_FOR_PASSWORDS'))
# # Now we need to check if the URL is https or not and count the insecure ones (i.e. not https)
# IDs = []
# InsecureMatches = []
# for ID, FileMatch in Matches:
# if ID not in IDs: # Retrieve Transaction from DB only once
# IDs.append(ID) # Process each transaction only once
# Transaction = ServiceLocator.get_component("transaction").GetByID(ID)
# if 'https' != Transaction.URL.split(":")[0]:
# OWTFLogger.log("Transaction: "+ID+" contains passwords fields with a URL different than https")
# InsecureMatches.append([ID, Transaction.URL+": "+FileMatch]) # Need to make the unique work by URL + password
# Message = "<br /><u>Total insecure matches: "+str(len(InsecureMatches))+'</u>'
# OWTFLogger.log(Message)
# Content += Message+"<br />"
# Content += ServiceLocator.get_component("plugin_helper").DrawResponseMatchesTables([Command, RegexpName, InsecureMatches], PluginInfo)
# return Content
return []
|
jhurt/dribbble-palettes | dribbble_palettes/__init__.py | Python | bsd-2-clause | 21 | 0 | _ | _author__ = 'jh | urt'
|
phlax/translate | translate/lang/data.py | Python | gpl-2.0 | 21,385 | 0.000936 | # -*- coding: utf-8 -*-
#
# Copyright 2007-2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module stores information and functionality that relates to plurals."""
from __future__ import unicode_literals
import gettext
import locale
import os
import re
import six
try:
import pycountry
except ImportError:
pycountry = None
languages = {
'ach': ('Acholi', 2, 'n > 1'),
'af': ('Afrikaans', 2, '(n != 1)'),
'ak': ('Akan', 2, 'n > 1'),
'am': ('Amharic', 2, 'n > 1'),
'an': ('Aragonese', 2, '(n != 1)'),
'anp': ('Angika', 2, '(n != 1)'),
'ar': ('Arabic', 6,
'n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=11 ? 4 : 5'),
'arn': ('Mapudungun; Mapuche', 2, 'n > 1'),
'as': ('Assamese', 2, '(n != 1)'),
'ast': ('Asturian; Bable; Leonese; Asturleonese', 2, '(n != 1)'),
'ay': ('Aymará', 1, '0'),
'az': ('Azerbaijani', 2, '(n != 1)'),
'be': ('Belarusian', 3,
'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'bg': ('Bulgarian', 2, '(n != 1)'),
'bn': ('Bengali', 2, '(n != 1)'),
'bn_BD': ('Bengali (Bangladesh)', 2, '(n != 1)'),
'bn_IN': ('Bengali (India)', 2, '(n != 1)'),
'bo': ('Tibetan', 1, '0'),
'br': ('Breton', 2, 'n > 1'),
'brx': ('Bodo', 2, '(n != 1)'),
'bs': ('Bosnian', 3,
'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'ca': ('Catalan; Valencian', 2, '(n != 1)'),
'ca@valencia': ('Catalan; Valencian (Valencia)', 2, '(n != 1)'),
'cgg': ('Chiga', 1, '0'),
'cs': ('Czech', 3, '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'),
'csb': ('Kashubian', 3,
'n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'cy': ('Welsh', 2, '(n==2) ? 1 : 0'),
'da': ('Danish', 2, '(n != 1)'),
'de': ('German', 2, '(n != 1)'),
'doi': ('Dogri', 2, '(n != 1)'),
'dz': ('Dzongkha', 1, '0'),
'el': ('Greek, Modern (1453-)', 2, '(n != 1)'),
'en': ('English', 2, '(n != 1)'),
'en_GB': ('English (United Kingdom)', 2, '(n != 1)'),
'en_ZA': ('English (South Africa)', 2, '(n != 1)'),
'eo': ('Esperanto', 2, '(n != 1)'),
'es': ('Spanish; Castilian', 2, '(n != 1)'),
'es_AR': ('Argentinean Spanish', 2, '(n != 1)'),
'et': ('Estonian', 2, '(n != 1)'),
'eu': ('Basque', 2, '(n != 1)'),
'fa': ('Persian', 2, 'n > 1'),
'ff': ('Fulah', 2, '(n != 1)'),
'fi': ('Finnish', 2, '(n != 1)'),
'fil': ('Filipino; Pilipino', 2, '(n > 1)'),
'fo': ('Faroese', 2, '(n != 1)'),
'fr': ('French', 2, '(n > 1)'),
'fur': ('Friulian', 2, '(n != 1)'),
'fy': ('Frisian', 2, '(n != 1)'),
'ga': ('Irish', 5, 'n==1 ? 0 : n==2 ? 1 : (n>2 && n<7) ? 2 :(n>6 && n<11) ? 3 : 4'),
'gd': ('Gaelic; Scottish Gaelic', 4, '(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3'),
'gl': ('Galician', 2, '(n != 1)'),
'gu': ('Gujarati', 2, '(n != 1)'),
'gun': ('Gun', 2, '(n > 1)'),
'ha': ('Hausa', 2, '(n != 1)'),
'he': ('Hebrew', 2, '(n != 1)'),
'hi': ('Hindi', 2, '(n != 1)'),
'hne': ('Chhattisgarhi', 2, '(n != 1)'),
'hr': ('Croatian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'ht': ('Haitian; Haitian Creole', 2, '(n != 1)'),
'hu': ('Hungarian', 2, '(n != 1)'),
'hy': ('Armenian', 1, '0'),
'ia': | ("Interlingua (International Auxiliary Language Association)", 2, '(n != 1)'),
'id': ('Indonesian', 1, '0'),
'is': ('Icelandic', 2, '(n != 1)'),
'it': ('Italian', 2, ' | (n != 1)'),
'ja': ('Japanese', 1, '0'),
'jbo': ('Lojban', 1, '0'),
'jv': ('Javanese', 2, '(n != 1)'),
'ka': ('Georgian', 1, '0'),
'kab': ('Kabyle', 2, '(n != 1)'),
'kk': ('Kazakh', 2, 'n != 1'),
'kl': ('Greenlandic', 2, '(n != 1)'),
'km': ('Central Khmer', 1, '0'),
'kn': ('Kannada', 2, '(n != 1)'),
'ko': ('Korean', 1, '0'),
'kok': ('Konkani', 2, '(n != 1)'),
'ks': ('Kashmiri', 2, '(n != 1)'),
'ku': ('Kurdish', 2, '(n != 1)'),
'kw': ('Cornish', 4, '(n==1) ? 0 : (n==2) ? 1 : (n == 3) ? 2 : 3'),
'ky': ('Kirghiz; Kyrgyz', 2, 'n != 1'),
'lb': ('Luxembourgish; Letzeburgesch', 2, '(n != 1)'),
'ln': ('Lingala', 2, '(n > 1)'),
'lo': ('Lao', 1, '0'),
'lt': ('Lithuanian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'lv': ('Latvian', 3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
'mai': ('Maithili', 2, '(n != 1)'),
'me': ('Montenegrin', 3, 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'mfe': ('Morisyen', 2, '(n > 1)'),
'mg': ('Malagasy', 2, '(n > 1)'),
'mi': ('Maori', 2, '(n > 1)'),
'mk': ('Macedonian', 2, '(n==1 || n%10==1 ? 0 : 1)'),
'ml': ('Malayalam', 2, '(n != 1)'),
'mn': ('Mongolian', 2, '(n != 1)'),
'mni': ('Meithei (Manipuri)', 2, '(n != 1)'),
'mnk': ('Mandinka', 3, '(n==0 ? 0 : n==1 ? 1 : 2)'),
'mr': ('Marathi', 2, '(n != 1)'),
'ms': ('Malay', 1, '0'),
'mt': ('Maltese', 4,
'(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
'my': ('Burmese', 1, '0'),
'nah': ('Nahuatl languages', 2, '(n != 1)'),
'nap': ('Neapolitan', 2, '(n != 1)'),
'nb': ('Bokmål, Norwegian; Norwegian Bokmål', 2, '(n != 1)'),
'ne': ('Nepali', 2, '(n != 1)'),
'nl': ('Dutch; Flemish', 2, '(n != 1)'),
'nn': ('Norwegian Nynorsk; Nynorsk, Norwegian', 2, '(n != 1)'),
'nqo': ("N'Ko", 2, '(n > 1)'),
'nso': ('Pedi; Sepedi; Northern Sotho', 2, '(n != 1)'),
'oc': ('Occitan (post 1500)', 2, '(n > 1)'),
'or': ('Odia', 2, '(n != 1)'),
'pa': ('Panjabi; Punjabi', 2, '(n != 1)'),
'pap': ('Papiamento', 2, '(n != 1)'),
'pl': ('Polish', 3,
'(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'pms': ('Piemontese', 2, '(n != 1)'),
'ps': ('Pushto; Pashto', 2, '(n != 1)'),
'pt': ('Portuguese', 2, '(n != 1)'),
'pt_BR': ('Portuguese (Brazil)', 2, '(n > 1)'),
'rm': ('Romansh', 2, '(n != 1)'),
'ro': ('Romanian', 3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2);'),
'ru': ('Russian', 3,
'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'rw': ('Kinyarwanda', 2, '(n != 1)'),
'sa': ('Sanskrit', 3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
'sah': ('Yakut', 1, '0'),
'sat': ('Santali', 2, '(n != 1)'),
'sco': ('Scots', 2, '(n != 1)'),
'sd': ('Sindhi', 2, '(n != 1)'),
'se': ('Northern Sami', 2, '(n != 1)'),
'si': ('Sinhala; Sinhalese', 2, '(n != 1)'),
'sk': ('Slovak', 3, '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'),
'sl': ('Slovenian', 4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
'so': ('Somali', 2, '(n != 1)'),
'son': ('Songhai languages', 1, '0'),
'sq': ('Albanian', 2, '(n != 1)'),
'sr': ('Serbian', 3,
'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'st': ('Sotho, Southern', 2, '(n != 1)'),
'su': ('Sundanese', 1, '0'),
'sv': ('Swedish', 2, '(n != 1)'),
'sw': ('Swahili', 2, '(n != 1)'),
'szl': ('Silesian', 3,
'(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'ta': ('Tamil', 2, '(n != 1)'),
'te': ('Telugu', 2, '(n != 1)'),
'tg': ('Tajik', 1, '0'),
'th': ('Thai', 1, '0'),
'ti': ('Tigrinya', 2, '(n > 1)'),
'tk': ('Turkmen', 2, '(n != 1)'),
' |
mpurg/qtools | qscripts-cli/q_setprot.py | Python | mit | 3,879 | 0.004383 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <mi | ha.p | urg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
# Takes as arguments:
# - pdb structure
# - file containing residue indexes that should be charged
# (each in new line or space separated)
#
# Renames the ARG,GLU,ASP,LYS that should be neutral to ARN,GLH,ASH and LYN
# and vice versa
#
from __future__ import absolute_import, print_function
from __future__ import division, unicode_literals
from qscripts_config import __version__, QScriptsConfig as QScfg
import sys
import os
import argparse
from Qpyl.common import backup_file, get_version_full
parser = argparse.ArgumentParser(description="""
Simple script for ionizing residues. It basically changes all the specified
residues to ARG,GLU,ASP,LYS, while the rest are renamed to ARN,GLH,ASH,LYN.
""", add_help=False)
reqarg = parser.add_argument_group("Required")
reqarg.add_argument("pdb", help="PDB structure file")
reqarg.add_argument("resids", help="Text file with space or newline separated "
"indexes of residues that should be "
"IONIZED. All others will be set to "
"their neutral form.")
reqarg.add_argument("outfn", help="Output filename")
optarg = parser.add_argument_group("Optional")
optarg.add_argument("-v", "--version", action="version",
version=get_version_full())
optarg.add_argument("-h", "--help", action="help", help="show this "
"help message and exit")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if not os.path.lexists(args.pdb):
print("FATAL! File '{}' doesn't exist.".format(args.pdb))
sys.exit(1)
if not os.path.lexists(args.resids):
print("FATAL! File '{}' doesn't exist.".format(args.resids))
sys.exit(1)
chres = ("ARG", "GLU", "ASP", "LYS") # charged names
nres = ("ARN", "GLH", "ASH", "LYN") # neutral names
pdb_lines = open(args.pdb, 'r').readlines()
charged_residues = open(args.resids, 'r').read().split()
new_pdb = ""
for line in pdb_lines:
ri = line[22:26].strip()
rn = line[17:20]
new_line=""
if ri in charged_residues:
if rn in nres:
new_rn = chres[ nres.index(rn) ]
print("Changing: %s%s to %s%s" % (rn,ri, new_rn, ri))
new_line=line.replace(rn, new_rn)
else:
new_line=line
else:
if rn in chres:
new_rn = nres[ chres.index(rn) ]
new_line=line.replace(rn, new_rn)
else:
new_line = line
new_pdb += new_line
backup = backup_file(args.outfn)
if backup:
print("Backed up '{}' to '{}'".format(args.outfn, backup))
open(args.outfn, 'w').write(new_pdb)
print("Wrote " + args.outfn)
|
tarthy6/dozer-thesis | scripts/watch-mem-usage.py | Python | gpl-2.0 | 965 | 0.041451 | # collect data about max memory usage of processes matching some patterns
import psutil, re, operator, time, sys
sampleTime=.5 # seconds
# matching lines will be taken in account
#cmdPattern='.*cc1plus.*src/([a-zA-Z0-9_-]\.cpp).*'
cmdPattern=r'.*cc1plus.* (\S+/)?([^/ ]+\.cpp).*'
# group in the pattern which identifies the process (e.g. source code file)
cmdKeyGroup=2
m | axMem={}
while True:
try:
for p in psutil.process_iter():
m=re.match(cmdPattern,' '.join(p.cmdline))
if not m: continue
key=m.group(cmdKeyGroup)
mem=p.get_memory_info()[1] # tuple of RSS (resident set size) and VMS (virtual memory size)
if key not in maxMem:
print 'New process with key',key
maxMem[key]=mem
elif maxMem[key]<mem: maxMem[key]=mem
time.sleep(sampleTime)
except (KeyboardInterrupt,SystemExit):
# print summary, exit
f | or k,v in sorted(maxMem.iteritems(),key=operator.itemgetter(1)):
print '{:>10.1f} {}'.format(1e-6*v,k)
sys.exit(0)
|
jrmendozat/mtvm | Vehiculo/models.py | Python | gpl-2.0 | 2,597 | 0.00154 | from django.db import models
# Create your models here.
class Tipo_Vehiculo(models.Model):
"""docstring for Tipo_Vehiculo"""
def __init__(self, *args, **kwargs):
super(Tipo_Vehiculo, self).__init__(*args, **kwargs)
tipo_vehiculo = models.CharField(max_length=100, unique=True)
adicional1 = models.CharField(max_length=250, blank=True)
adicional2 = models.CharField(max_length=250, blank=True)
adicional3 = models.CharField(max_length=250, blank=True)
adicional4 = models.CharField(max_length=250, blank=True)
activo = models.BooleanField(default=True)
def __unicode__(self):
return self.tipo_vehiculo
class Meta:
verbose_name_plural = "Tipos de Vehiculos"
class Modelo_Vehiculo(models.Model):
"""docstring for Modelo_Vehiculo"""
def __init__(self, *args, **kwargs):
super(Modelo_Vehiculo, self).__init__(*args, **kwargs)
modelo_vehiculo = models.CharField(max_length=100)
capacidad_peso = models.IntegerField()
capacidad_x = models.DecimalField(max_digits=6, decimal_places=2)
capacidad_y = models.DecimalField(max_digits=6, decimal_places=2)
capacidad_z = models.DecimalField(max_digits=6, decimal_places=2)
capacidad_m3 = models.DecimalField(max_digits=6, decimal_places=2)
adicional1 = models.CharField(max_length=250, blank=True)
adicional2 = models.CharField(max_length=250, blank=True)
adicional3 = models.CharField(max_length=250, blank=True)
adicional4 = models.CharField(max_length=250, blank=True)
activo = models.BooleanField(default=True)
def __unicode__(self):
return self.modelo_vehiculo
class Meta:
verbose_name_plural = "Modelos de Vehiculos"
class Vehiculo(models.Model):
"""docstring for Vehiculo"""
def __init__(self, *args, **kwargs):
super(Vehiculo, self).__init__(*args, **kwargs)
numero_vehiculo = models | .CharField(max_length=10)
#mantenimiento_vehiculo = models.ForeignKey()
vehiculo = models.CharField(max_length=100)
patente = models.CharField(max_length=100)
tipo_vehiculo = models.ForeignKey(Tipo_Vehiculo)
| modelo_vehiculo = models.ForeignKey(Modelo_Vehiculo)
adicional1 = models.CharField(max_length=250, blank=True)
adicional2 = models.CharField(max_length=250, blank=True)
adicional3 = models.CharField(max_length=250, blank=True)
adicional4 = models.CharField(max_length=250, blank=True)
activo = models.BooleanField(default=True)
def __unicode__(self):
return self.vehiculo
class Meta:
verbose_name_plural = "Vehiculos"
|
DedMemez/ODS-August-2017 | dna/DNAFlatDoor.py | Python | apache-2.0 | 561 | 0.005348 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.dna.DNAFlatDoor
from panda3d.core import DecalEffect
import DNADoor
class DNAFlatDoor(DNADoor.DNADoor):
COMPONENT_CODE = 17
def traverse(self, nodePath, dnaStorage):
node = assetStorage.findNode(self.code, self.getName())
node.reparentTo(nodePath, 0)
node.setScale(hidden, 1 | , 1, 1)
node.setPosHpr((0.5, 0 | , 0), (0, 0, 0))
node.setColor(self.color)
node.getNode(0).setEffect(DecalEffect.make())
node.flattenStrong() |
moehuster/python | SimpleGUICS2Pygame/script/SimpleGUICS2Pygame_check.py | Python | gpl-2.0 | 3,377 | 0 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
"""
script/SimpleGUICS2Pygame_check.py
(December 15, 2013)
Piece of SimpleGUICS2Pygame.
https://bitbucket.org/OPiMedia/simpleguics2pygame
GPLv3 --- Copyright (C) 2013 Olivier Pirson
http://www.opimedia.be/
"""
from __future__ import print_function
from sys import version
########
# Main #
########
if __name__ == '__main__':
print("""script/SimpleGUICS2Pygame_check.py (December 13, 2013)
======================================================
python - version""", version)
print('\n')
try:
cmd = 'import matplotlib'
import matplotlib
print(cmd, 'ok - Version', matplotlib.__version__)
except Exception as e:
print(cmd, 'FAILED!', e)
print('\n')
try:
cmd = 'import pygame'
import pygame
print(cmd, 'ok - Version', pygame.version.ver)
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'pygame.init()'
pygame.init()
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
print('\n')
try:
cmd = 'import SimpleGUICS2Pygame'
import SimpleGUICS2Pygame
print(cmd, 'ok - Version', SimpleGUICS2Pygame._VERSION)
except Exception as e:
print(cmd, 'FAILED!', e)
print()
try:
cmd = 'import SimpleGUICS2Pygame.codeskulptor'
import SimpleGUICS2Pygame.codeskulptor
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.codeskulptor_lib'
import SimpleGUICS2Pygame.codeskulptor_lib
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.numeric'
import SimpleGUICS2Pygame.numeric
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simplegui_lib'
import SimpleGUICS2Pygame.simplegui_lib
print(c | md, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simplegui_lib_draw'
import SimpleGUICS2Pygame.simplegui_lib_draw
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simplegui_lib_fps'
import SimpleGUICS2Pygame.simplegui_lib_fps
print(cmd, 'ok')
except | Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simplegui_lib_keys'
import SimpleGUICS2Pygame.simplegui_lib_keys
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simplegui_lib_loader'
import SimpleGUICS2Pygame.simplegui_lib_loader
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simpleguics2pygame'
import SimpleGUICS2Pygame.simpleguics2pygame
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
try:
cmd = 'import SimpleGUICS2Pygame.simpleplot'
import SimpleGUICS2Pygame.simpleplot
print(cmd, 'ok')
except Exception as e:
print(cmd, 'FAILED!', e)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.